31 changes: 25 additions & 6 deletions src/linalg/Gesvd_truncate.cpp
@@ -19,6 +19,27 @@

namespace cytnx {
namespace linalg {
+    namespace {
+      UniTensor BuildBlockDiscardedSingularValues(const Tensor &Sall, const cytnx_uint64 smidx,
+                                                  const unsigned int return_err) {
+        Tensor terr({1}, Sall.dtype());
+        terr.storage().at(0) = 0;
+        if (smidx == 0) {
+          return UniTensor(terr);
+        }
+        if (return_err == 1) {
+          terr.storage().at(0) = Sall.storage()(smidx - 1);
+          return UniTensor(terr);
+        }
+
+        terr = Tensor({smidx}, Sall.dtype());
+        for (cytnx_uint64 i = 0; i < smidx; i++) {
+          terr.storage().at(i) = Sall.storage()(smidx - 1 - i);
+        }
+        return UniTensor(terr);
+      }
+    }  // namespace
+
std::vector<Tensor> Gesvd_truncate(const Tensor &Tin, const cytnx_uint64 &keepdim,
const double &err, const bool &is_U, const bool &is_vT,
const unsigned int &return_err, const cytnx_uint64 &mindim) {
@@ -344,10 +365,9 @@ namespace cytnx {

// handle return_err!
if (return_err == 1) {
-        outCyT.push_back(UniTensor(Tensor({1}, Smin.dtype())));
-        outCyT.back().get_block_().storage().at(0) = Smin;
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
      } else if (return_err) {
-        outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)})));
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
}
} // _gesvd_truncate_Block_UT

@@ -638,10 +658,9 @@ namespace cytnx {
}
// handle return_err!
if (return_err == 1) {
-        outCyT.push_back(UniTensor(Tensor({1}, Smin.dtype())));
-        outCyT.back().get_block_().storage().at(0) = Smin;
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
      } else if (return_err) {
-        outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)})));
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
}
} else {
if (return_err >= 1) {
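The `BuildBlockDiscardedSingularValues` helper added above centralizes the `return_err` contract: `return_err == 1` yields a one-element tensor holding the largest discarded singular value, `return_err > 1` yields every discarded value in descending order, and an empty discard set yields a single zero. A minimal standalone sketch of that indexing, with `std::vector` standing in for the cytnx `Tensor`/`Storage` API and assuming (as the tests below check) that `Sall` is sorted ascending with its first `smidx` entries being the discarded ones:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative stand-in for BuildBlockDiscardedSingularValues: `sall` holds
// all singular values in ascending order; the first `smidx` entries are the
// ones dropped by the truncation.
std::vector<double> discarded_singular_values(const std::vector<double> &sall,
                                              std::size_t smidx,
                                              unsigned int return_err) {
  if (smidx == 0) return {0.0};                   // nothing was discarded
  if (return_err == 1) return {sall[smidx - 1]};  // largest discarded value only
  std::vector<double> err(smidx);
  for (std::size_t i = 0; i < smidx; ++i)
    err[i] = sall[smidx - 1 - i];                 // reversed copy: descending
  return err;
}

int main() {
  const std::vector<double> sall = {0.1, 0.2, 0.3, 0.9, 1.5};  // ascending
  assert(discarded_singular_values(sall, 3, 1) == (std::vector<double>{0.3}));
  assert(discarded_singular_values(sall, 3, 2) ==
         (std::vector<double>{0.3, 0.2, 0.1}));
  assert(discarded_singular_values(sall, 0, 2) == (std::vector<double>{0.0}));
}
```

Folding both branches into one helper also removes the duplicated `Smin` / `Accessor::tilend(smidx)` logic at each call site.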
31 changes: 25 additions & 6 deletions src/linalg/Svd_truncate.cpp
@@ -12,6 +12,27 @@
#include "backend/linalg_internal_interface.hpp"
namespace cytnx {
namespace linalg {
+    namespace {
+      UniTensor BuildBlockDiscardedSingularValues(const Tensor &Sall, const cytnx_uint64 smidx,
+                                                  const unsigned int return_err) {
+        Tensor terr({1}, Sall.dtype());
+        terr.storage().at(0) = 0;
+        if (smidx == 0) {
+          return UniTensor(terr);
+        }
+        if (return_err == 1) {
+          terr.storage().at(0) = Sall.storage()(smidx - 1);
+          return UniTensor(terr);
+        }
+
+        terr = Tensor({smidx}, Sall.dtype());
+        for (cytnx_uint64 i = 0; i < smidx; i++) {
+          terr.storage().at(i) = Sall.storage()(smidx - 1 - i);
+        }
+        return UniTensor(terr);
+      }
+    }  // namespace
+
std::vector<Tensor> Svd_truncate(const Tensor &Tin, const cytnx_uint64 &keepdim,
const double &err, const bool &is_UvT,
const unsigned int &return_err, const cytnx_uint64 &mindim) {
@@ -331,10 +352,9 @@ namespace cytnx {

// handle return_err!
if (return_err == 1) {
-        outCyT.push_back(UniTensor(Tensor({1}, Smin.dtype())));
-        outCyT.back().get_block_().storage().at(0) = Smin;
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
      } else if (return_err) {
-        outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)})));
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
}
} // _svd_truncate_Block_UTs

@@ -464,10 +484,9 @@ namespace cytnx {
}
// handle return_err!
if (return_err == 1) {
-        outCyT.push_back(UniTensor(Tensor({1}, Smin.dtype())));
-        outCyT.back().get_block_().storage().at(0) = Smin;
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
      } else if (return_err) {
-        outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)})));
+        outCyT.push_back(BuildBlockDiscardedSingularValues(Sall, smidx, return_err));
}
} else {
if (return_err >= 1) {
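With `return_err > 1`, callers of `Svd_truncate`/`Gesvd_truncate` now receive every discarded singular value rather than only the largest, so they can form whatever error measure they need. A hedged sketch of one common post-processing step, the 2-norm truncation error sqrt(sum_i s_i^2) over the discarded values, again with `std::vector` standing in for the returned tensor:

```cpp
#include <cmath>
#include <iostream>
#include <vector>

// 2-norm truncation error over the discarded singular values.  `discarded`
// mirrors the tensor appended by the return_err > 1 branch (descending order,
// though the order does not matter for this sum).
double two_norm_truncation_error(const std::vector<double> &discarded) {
  double sum_sq = 0.0;
  for (double s : discarded) sum_sq += s * s;
  return std::sqrt(sum_sq);
}

int main() {
  const std::vector<double> discarded = {0.3, 0.2, 0.1};
  std::cout << two_norm_truncation_error(discarded) << "\n";  // ~0.374166
}
```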
64 changes: 64 additions & 0 deletions tests/linalg_test/linalg_test.cpp
@@ -1,5 +1,15 @@
#include "linalg_test.h"

+namespace {
+Tensor SortedBlockSingularValues(const UniTensor &S) {
+  Tensor all_svals = S.get_block_(0);
+  for (cytnx_int64 i = 1; i < S.Nblocks(); i++) {
+    all_svals = algo::Concatenate(all_svals, S.get_block_(i));
+  }
+  return algo::Sort(all_svals);
+}
+}  // namespace
+
TEST_F(linalg_Test, BkUt_Svd_truncate1) {
std::vector<UniTensor> res = linalg::Svd_truncate(svd_T, 200, 0, true);
std::vector<double> vnm_S;
@@ -23,6 +33,60 @@ TEST_F(linalg_Test, BkUt_Svd_truncate2) {
auto con_T2 = Contract(Contract(res[1], res[0]), res[2]);
}

+TEST_F(linalg_Test, BkUt_Svd_truncate_return_err_returns_discarded_values) {
+  std::vector<UniTensor> full = linalg::Svd_truncate(svd_T, 999, 0, true, 0);
+  std::vector<UniTensor> trunc = linalg::Svd_truncate(svd_T, 5, 0, true, 999);
+  Tensor all_svals = SortedBlockSingularValues(full[0]);
+
+  ASSERT_EQ(full.size(), 3);
+  ASSERT_EQ(trunc.size(), 4);
+  ASSERT_EQ(trunc[0].shape()[0], 5);
+  ASSERT_EQ(all_svals.shape()[0], 400);
+  ASSERT_EQ(trunc[3].shape()[0], all_svals.shape()[0] - trunc[0].shape()[0]);
+
+  for (cytnx_uint64 i = 0; i < trunc[3].shape()[0]; i++) {
+    EXPECT_EQ(all_svals.at({trunc[3].shape()[0] - 1 - i}), trunc[3].at({i}));
+  }
+}
+
+TEST_F(linalg_Test, BkUt_Gesvd_truncate_return_err_returns_discarded_values) {
+  std::vector<UniTensor> full = linalg::Gesvd_truncate(svd_T, 999, 0, true, true, 0);
+  std::vector<UniTensor> trunc = linalg::Gesvd_truncate(svd_T, 5, 0, true, true, 999);
+  Tensor all_svals = SortedBlockSingularValues(full[0]);
+
+  ASSERT_EQ(full.size(), 3);
+  ASSERT_EQ(trunc.size(), 4);
+  ASSERT_EQ(trunc[0].shape()[0], 5);
+  ASSERT_EQ(all_svals.shape()[0], 400);
+  ASSERT_EQ(trunc[3].shape()[0], all_svals.shape()[0] - trunc[0].shape()[0]);
+
+  for (cytnx_uint64 i = 0; i < trunc[3].shape()[0]; i++) {
+    EXPECT_EQ(all_svals.at({trunc[3].shape()[0] - 1 - i}), trunc[3].at({i}));
+  }
+}
+
+TEST_F(linalg_Test, BkUt_Svd_truncate_return_err_one_returns_first_discarded_value) {
+  std::vector<UniTensor> full = linalg::Svd_truncate(svd_T, 999, 0, true, 0);
+  std::vector<UniTensor> trunc = linalg::Svd_truncate(svd_T, 5, 0, true, 1);
+  Tensor all_svals = SortedBlockSingularValues(full[0]);
+
+  ASSERT_EQ(full.size(), 3);
+  ASSERT_EQ(trunc.size(), 4);
+  ASSERT_EQ(trunc[3].shape()[0], 1);
+  EXPECT_EQ(all_svals.at({all_svals.shape()[0] - trunc[0].shape()[0] - 1}), trunc[3].at({0}));
+}
+
+TEST_F(linalg_Test, BkUt_Gesvd_truncate_return_err_one_returns_first_discarded_value) {
+  std::vector<UniTensor> full = linalg::Gesvd_truncate(svd_T, 999, 0, true, true, 0);
+  std::vector<UniTensor> trunc = linalg::Gesvd_truncate(svd_T, 5, 0, true, true, 1);
+  Tensor all_svals = SortedBlockSingularValues(full[0]);
+
+  ASSERT_EQ(full.size(), 3);
+  ASSERT_EQ(trunc.size(), 4);
+  ASSERT_EQ(trunc[3].shape()[0], 1);
+  EXPECT_EQ(all_svals.at({all_svals.shape()[0] - trunc[0].shape()[0] - 1}), trunc[3].at({0}));
+}
+
// TEST_F(linalg_Test, BkUt_Svd_truncate3) {
// Bond I = Bond(BD_IN, {Qs(-5), Qs(-3), Qs(-1), Qs(1), Qs(3), Qs(5)}, {1, 4, 10, 9, 5, 1});
// Bond J = Bond(BD_OUT, {Qs(1), Qs(-1)}, {1, 1});
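The `SortedBlockSingularValues` test helper concatenates the per-block singular values of the block-sparse `S` and sorts them, so the block-diagonal result can be checked against a single global spectrum; the back-indexed comparisons in the tests suggest `algo::Sort` returns ascending order. The same pattern with `std::vector` in place of `algo::Concatenate`/`algo::Sort`:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Gather every block's singular values into one array and sort ascending,
// mirroring what SortedBlockSingularValues does with cytnx tensors.
std::vector<double> sorted_block_singular_values(
    const std::vector<std::vector<double>> &blocks) {
  std::vector<double> all;
  for (const auto &b : blocks) all.insert(all.end(), b.begin(), b.end());
  std::sort(all.begin(), all.end());
  return all;
}

int main() {
  const std::vector<std::vector<double>> blocks = {{0.9, 0.1}, {1.5, 0.3}, {0.2}};
  assert(sorted_block_singular_values(blocks) ==
         (std::vector<double>{0.1, 0.2, 0.3, 0.9, 1.5}));
}
```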