From 9ab00e2352e1eb3ac639b6350a11a815ac711d6b Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 00:02:20 +0800 Subject: [PATCH 01/13] changed behaviour of UniTensor.Transpose to exchange the roles of left and right legs for BlockUniTensor and tagged DenseUniTensor --- include/UniTensor.hpp | 22 +++++++++++++--------- src/BlockFermionicUniTensor.cpp | 8 +++----- src/BlockUniTensor.cpp | 14 +++++++++++--- src/DenseUniTensor.cpp | 20 ++++++++++++++------ 4 files changed, 41 insertions(+), 23 deletions(-) diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index f67b97cd2..5c178700e 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -4974,16 +4974,20 @@ namespace cytnx { /** @brief Take the transpose of the UniTensor. - @details This function will take the transpose of the UniTensor. If the UniTensor is - tagged (i.e. the Bonds are directional), it will swap the direction of the Bonds but - the rowrank will not change. If the UniTensor is untagged (i.e. the Bonds are - BondType::BD_REG), it will change the rowrank to the opposite side. - For fermionic UniTensors, the index order will be reversed without sign flips, and the - direction of all Bonds will swapped. - @return UniTensor + @details This function takes the transpose of a UniTensor: + 1) Swaps the roles of left and right indices: index numbers k < rowrank become k + rowrank, + indices k' >= rowrank become k' - rowrank. For fermions, the order of the indices is + inverted instead. + 2) Incoming legs become outgoing onces, and vice versa + 3) The rowrank is set to rank - old rowrank, such that left indices become right indices and + vice versa. + @return UniTensor @note Compared to Transpose_(), this function will return new UniTensor object. 
- @see Transpose_() - */ + @warning For fermionic UniTensors, the order of the indices is inverted, while for bosonic + UniTensors the role of the left-and right indices is exchanged without inverting the orders in + these two groups. + @see Transpose_() + */ UniTensor Transpose() const { UniTensor out; out._impl = this->_impl->Transpose(); diff --git a/src/BlockFermionicUniTensor.cpp b/src/BlockFermionicUniTensor.cpp index 095c1bea0..2b84b1cd4 100644 --- a/src/BlockFermionicUniTensor.cpp +++ b/src/BlockFermionicUniTensor.cpp @@ -1895,17 +1895,15 @@ namespace cytnx { }; void BlockFermionicUniTensor::Transpose_() { - //[21 Aug 2024] This is a copy from BlockUniTensor; - // modify tag // The index order is reversed without any sign flips! std::vector idxorder(this->_bonds.size()); - std::size_t idxnum = this->bonds().size() - 1; - for (int i = 0; i <= idxnum; i++) { + cytnx_int64 idxnum = this->bonds().size() - 1; + for (cytnx_int64 i = 0; i <= idxnum; i++) { this->bonds()[i].redirect_(); - // this->bonds()[i].qnums() = this->bonds()[i].calc_reverse_qnums(); idxorder[i] = idxnum - i; } this->permute_nosignflip_(idxorder); + this->_rowrank = idxnum + 1 - this->_rowrank; }; void BlockFermionicUniTensor::normalize_() { diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 9c1bdf75e..64674926f 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -1214,11 +1214,19 @@ namespace cytnx { }; void BlockUniTensor::Transpose_() { - // modify tag - for (int i = 0; i < this->bonds().size(); i++) { + cytnx_int64 rank = this->bonds().size(); + std::vector idxorder(rank); + cytnx_int64 rowrank = this->_rowrank; + for (cytnx_int64 i = 0; i < rowrank; i++) { this->bonds()[i].redirect_(); - // this->bonds()[i].qnums() = this->bonds()[i].calc_reverse_qnums(); + idxorder[i] = i + rowrank; } + for (cytnx_int64 i = rowrank; i < rank; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = i - rowrank; + } + this->permute_(idxorder); + this->_rowrank = rank - 
rowrank; }; void BlockUniTensor::normalize_() { diff --git a/src/DenseUniTensor.cpp b/src/DenseUniTensor.cpp index 28757f8ad..e2a51478d 100644 --- a/src/DenseUniTensor.cpp +++ b/src/DenseUniTensor.cpp @@ -1190,12 +1190,19 @@ namespace cytnx { void DenseUniTensor::Transpose_() { if (this->is_tag()) { - // this->_rowrank = this->rank() - this->_rowrank; - for (int i = 0; i < this->rank(); i++) { - this->_bonds[i].set_type((this->_bonds[i].type() == BD_KET) ? BD_BRA : BD_KET); + cytnx_int64 rank = this->bonds().size(); + std::vector idxorder(rank); + cytnx_int64 rowrank = this->_rowrank; + for (cytnx_int64 i = 0; i < rowrank; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = i + rowrank; } - this->_is_braket_form = this->_update_braket(); - + for (cytnx_int64 i = rowrank; i < rank; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = i - rowrank; + } + this->permute_(idxorder); + this->_rowrank = rank - rowrank; } else { std::vector new_permute = vec_concatenate(vec_range(this->rowrank(), this->rank()), @@ -1203,7 +1210,8 @@ namespace cytnx { this->permute_(new_permute); this->_rowrank = this->rank() - this->_rowrank; } - } + }; + void DenseUniTensor::normalize_() { this->_block /= linalg::Norm(this->_block); } void DenseUniTensor::_save_dispatch(std::fstream &f) const { this->_block._Save(f); } From a8f447ae88ee184b7d612a1910807e7bdd8c779d Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 00:08:01 +0800 Subject: [PATCH 02/13] fixed unit tests --- tests/BlockUniTensor_test.cpp | 321 +++++++++++++++++----------------- 1 file changed, 165 insertions(+), 156 deletions(-) diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index 4e406f92d..c109c2495 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -532,8 +532,8 @@ TEST_F(BlockUniTensorTest, contract1) { auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans1.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << 
outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -547,8 +547,8 @@ TEST_F(BlockUniTensorTest, contract2) { auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans2.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -562,8 +562,8 @@ TEST_F(BlockUniTensorTest, contract3) { auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans3.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -598,51 +598,51 @@ TEST_F(BlockUniTensorTest, Add) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Add(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + 
double(BUTpT2.at({i, j, k, l}).imag())); } BUT4.Add_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, Mul) { auto out = BUT4.Mul(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } BUT4.Mul_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 
1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } } @@ -669,51 +669,51 @@ TEST_F(BlockUniTensorTest, Sub) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Sub(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } BUT4.Sub_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l 
- 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, Div) { auto out = BUT4.Div(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } BUT4.Div_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for 
(cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } // BUT4 = BUT4.Load("OriginalBUT.cytnx"); @@ -833,29 +833,29 @@ TEST_F(BlockUniTensorTest, Pow) { TEST_F(BlockUniTensorTest, Conj) { auto tmp = BUT4.Conj(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); } tmp = BUT4.clone(); - BUT4.Conj_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp.Conj_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 
1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({i, j, k, l}).imag())); } tmp = UT_diag_cplx.Conj(); @@ -871,29 +871,29 @@ TEST_F(BlockUniTensorTest, Conj) { } TEST_F(BlockUniTensorTest, Transpose) { - auto tmp = BUT1.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + auto tmp = BUT1.Transpose().set_name("BUT1.Transpose"); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Transpose_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Transpose_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); 
EXPECT_EQ(BUT5.bonds()[1].qnums(), @@ -905,14 +905,12 @@ TEST_F(BlockUniTensorTest, Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout<>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Dagger_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Dagger_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); - tmp = BUT4.Dagger(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp = BUT4.Dagger().set_name("BUT4.Dagger"); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + // std::cout << " BUT4(i=" << i << ", j=" << j << ", k=" << k << ", l=" << l + // << ") = " << double(BUT4.at({i, j, k, l}).real()) << " + i * " + // << double(BUT4.at({i, j, k, l}).imag()) << 
std::endl; + // std::cout << "BUT4.Dagger(k=" << k << ", l=" << l << ", i=" << i << ", j=" << j + // << ") = " << double(tmp.at({k, l, i, j}).real()) << " + i * " + // << double(tmp.at({k, l, i, j}).imag()) << std::endl; + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } tmp = BUT4.clone(); - BUT4.Dagger_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp.Dagger_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({k, l, i, j}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({k, l, i, j}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } tmp = UT_diag_cplx.Dagger(); for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { @@ -1005,15 +1015,14 @@ TEST_F(BlockUniTensorTest, Dagger) { } TEST_F(BlockUniTensorTest, elem_exist) { - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.elem_exists({i - 1, j - 1, k - 1, l - 1})) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 
5; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.elem_exists({i, j, k, l})) { cytnx_int64 _a; std::vector _b; - ((BlockUniTensor*)BUT4._impl.get()) - ->_fx_locate_elem(_a, _b, {i - 1, j - 1, k - 1, l - 1}); + ((BlockUniTensor*)BUT4._impl.get())->_fx_locate_elem(_a, _b, {i, j, k, l}); std::vector qind = BUT4.get_qindices(_a); EXPECT_EQ(BUT4.bonds()[0].qnums()[qind[0]][0] - BUT4.bonds()[1].qnums()[qind[1]][0] + BUT4.bonds()[2].qnums()[qind[2]][0] - BUT4.bonds()[3].qnums()[qind[3]][0], From 623f8843a539ed869b74f0c85d6a7ba4252ca18d Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 00:09:46 +0800 Subject: [PATCH 03/13] cleaned up unused functions in the unit tests that would fail with the changes in this branch some cleanup, comments, helpful labels --- tests/BlockUniTensor_test.h | 8 ++++---- tests/DenseUniTensor_test.cpp | 12 ++++++------ tests/gpu/linalg_test/GeSvd_test.cpp | 10 ---------- tests/gpu/linalg_test/Svd_test.cpp | 10 ---------- tests/linalg_test/GeSvd_test.cpp | 10 ---------- tests/linalg_test/Gesvd_truncate_test.cpp | 10 ---------- tests/linalg_test/Rsvd_test.cpp | 10 ---------- tests/linalg_test/Rsvd_truncate_test.cpp | 10 ---------- tests/linalg_test/Svd_test.cpp | 10 ---------- tests/linalg_test/Svd_truncate_test.cpp | 10 ---------- 10 files changed, 10 insertions(+), 90 deletions(-) diff --git a/tests/BlockUniTensor_test.h b/tests/BlockUniTensor_test.h index f4b975a1b..794e126f6 100644 --- a/tests/BlockUniTensor_test.h +++ b/tests/BlockUniTensor_test.h @@ -15,17 +15,17 @@ class BlockUniTensorTest : public ::testing::Test { Bond B2 = Bond(BD_IN, {Qs(0), Qs(1)}, {3, 4}); Bond B3 = Bond(BD_OUT, {Qs(0) >> 2, Qs(1) >> 3}); Bond B4 = Bond(BD_OUT, {Qs(0), Qs(1)}, {1, 2}); - UniTensor BUT1 = UniTensor({B1, B2, B3, B4}); + UniTensor BUT1 = UniTensor({B1, B2, B3, B4}).set_name("BUT1"); Bond bd_sym_a = Bond(BD_KET, {{0, 2}, {3, 5}, {1, 6}, {4, 1}}, {4, 7, 2, 3}); Bond bd_sym_b = Bond(BD_BRA, {{0, 
2}, {3, 5}, {1, 6}, {4, 1}}, {4, 7, 2, 3}); - UniTensor BUT2 = UniTensor({bd_sym_a, bd_sym_b}); + UniTensor BUT2 = UniTensor({bd_sym_a, bd_sym_b}).set_name("BUT2"); Bond bd_sym_c = Bond(BD_KET, {{0, 2}, {1, 5}, {1, 6}, {0, 1}}, {4, 7, 2, 3}, {Symmetry::Zn(2), Symmetry::U1()}); Bond bd_sym_d = Bond(BD_BRA, {{0, 2}, {1, 5}, {1, 6}, {0, 1}}, {4, 7, 2, 3}, {Symmetry::Zn(2), Symmetry::U1()}); - UniTensor BUT3 = UniTensor({bd_sym_c, bd_sym_d}); + UniTensor BUT3 = UniTensor({bd_sym_c, bd_sym_d}).set_name("BUT3"); Bond B1p = Bond(BD_IN, {Qs(-1), Qs(0), Qs(1)}, {2, 1, 2}); Bond B2p = Bond(BD_OUT, {Qs(-1), Qs(0), Qs(1)}, {4, 3, 4}); @@ -114,7 +114,7 @@ class BlockUniTensorTest : public ::testing::Test { protected: void SetUp() override { - BUT4 = UniTensor::Load(data_dir + "OriginalBUT.cytnx"); + BUT4 = UniTensor::Load(data_dir + "OriginalBUT.cytnx").set_name("BUT4"); BUT4_2 = UniTensor::Load(data_dir + "OriginalBUT2.cytnx"); BUconjT4 = UniTensor::Load(data_dir + "BUconjT.cytnx"); BUtrT4 = UniTensor::Load(data_dir + "BUtrT.cytnx"); diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 5b4b57c7b..e951acf40 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -4004,7 +4004,7 @@ TEST_F(DenseUniTensorTest, Conj_utuninit) { } /*=====test info===== -describe:test Trnaspose +describe:test Transpose ====================*/ TEST_F(DenseUniTensorTest, Transpose) { auto row_rank = 2u; @@ -4018,7 +4018,7 @@ TEST_F(DenseUniTensorTest, Transpose) { for (size_t i = 0; i < ut_t.rank(); i++) { EXPECT_EQ(ut_t.bonds()[i].type(), BD_REG); } - // a, b; c -> c;a, b + // a, b; c -> c; a, b EXPECT_EQ(ut.labels(), std::vector({"a", "b", "c"})); EXPECT_EQ(ut_t.labels(), std::vector({"c", "a", "b"})); EXPECT_EQ(ut.rowrank(), row_rank); @@ -4035,7 +4035,7 @@ TEST_F(DenseUniTensorTest, Transpose) { } /*=====test info===== -describe:test Trnaspose with diagonal UniTensor +describe:test Transpose with diagonal UniTensor ====================*/ 
TEST_F(DenseUniTensorTest, Transpose_diag) { auto row_rank = 1u; @@ -4052,7 +4052,7 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { for (size_t i = 0; i < ut_t.rank(); i++) { EXPECT_EQ(ut_t.bonds()[i].type(), BD_REG); } - // a, b; c -> c;a, b + // a; b -> b; a EXPECT_EQ(ut_diag.labels(), std::vector({"a", "b"})); EXPECT_EQ(ut_t.labels(), std::vector({"b", "a"})); EXPECT_EQ(ut_diag.rowrank(), row_rank); @@ -4065,7 +4065,7 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { } /*=====test info===== -describe:test Trnaspose_ +describe:test Transpose_ ====================*/ TEST_F(DenseUniTensorTest, Transpose_) { auto row_rank = 2u; @@ -4081,7 +4081,7 @@ TEST_F(DenseUniTensorTest, Transpose_) { } /*=====test info===== -describe:test Trnaspose with uninitialized UniTensor +describe:test Transpose with uninitialized UniTensor ====================*/ TEST_F(DenseUniTensorTest, Transpose_uninit) { EXPECT_ANY_THROW(ut_uninit.Transpose()); diff --git a/tests/gpu/linalg_test/GeSvd_test.cpp b/tests/gpu/linalg_test/GeSvd_test.cpp index b77d065a5..3e0fdbafc 100644 --- a/tests/gpu/linalg_test/GeSvd_test.cpp +++ b/tests/gpu/linalg_test/GeSvd_test.cpp @@ -381,16 +381,6 @@ namespace GesvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/gpu/linalg_test/Svd_test.cpp b/tests/gpu/linalg_test/Svd_test.cpp index 139939fa8..a0f0f4f72 100644 --- a/tests/gpu/linalg_test/Svd_test.cpp +++ b/tests/gpu/linalg_test/Svd_test.cpp @@ -382,16 +382,6 @@ namespace SvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { 
- const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/GeSvd_test.cpp b/tests/linalg_test/GeSvd_test.cpp index 1c4974734..4ee747ce9 100644 --- a/tests/linalg_test/GeSvd_test.cpp +++ b/tests/linalg_test/GeSvd_test.cpp @@ -366,16 +366,6 @@ namespace GesvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/Gesvd_truncate_test.cpp b/tests/linalg_test/Gesvd_truncate_test.cpp index ab2998acd..494a65dc9 100644 --- a/tests/linalg_test/Gesvd_truncate_test.cpp +++ b/tests/linalg_test/Gesvd_truncate_test.cpp @@ -200,16 +200,6 @@ namespace GesvdTruncateTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/Rsvd_test.cpp b/tests/linalg_test/Rsvd_test.cpp index 735fac9ff..8df0798c7 100644 --- a/tests/linalg_test/Rsvd_test.cpp +++ b/tests/linalg_test/Rsvd_test.cpp @@ -181,16 +181,6 @@ namespace RsvdTest { 
return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim, const cytnx_uint64& power_iteration) { // test data source file diff --git a/tests/linalg_test/Rsvd_truncate_test.cpp b/tests/linalg_test/Rsvd_truncate_test.cpp index 7c2b1fdcc..741684629 100644 --- a/tests/linalg_test/Rsvd_truncate_test.cpp +++ b/tests/linalg_test/Rsvd_truncate_test.cpp @@ -181,16 +181,6 @@ namespace RsvdTruncateTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim, const cytnx_uint64& power_iteration) { // test data source file diff --git a/tests/linalg_test/Svd_test.cpp b/tests/linalg_test/Svd_test.cpp index 7bdf16369..ee3d01ff2 100644 --- a/tests/linalg_test/Svd_test.cpp +++ b/tests/linalg_test/Svd_test.cpp @@ -367,16 +367,6 @@ namespace SvdTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; diff --git a/tests/linalg_test/Svd_truncate_test.cpp b/tests/linalg_test/Svd_truncate_test.cpp index 962fa926e..3484bbe0a 100644 --- 
a/tests/linalg_test/Svd_truncate_test.cpp +++ b/tests/linalg_test/Svd_truncate_test.cpp @@ -184,16 +184,6 @@ namespace SvdTruncateTest { return (relative_err < tol); } - // no use - void Check_UU_VV_Identity(const UniTensor& Tin, const std::vector& Tout) { - const UniTensor& U = Tout[1]; - const UniTensor& V = Tout[2]; - auto UD = U.Dagger(); - UD.set_labels({"0", "1", "9"}); - UD.permute_({2, 0, 1}, 1); - auto UUD = Contract(U, UD); - } - bool CheckResult(const std::string& case_name, const cytnx_uint64& keepdim) { // test data source file std::string src_file_name = src_data_root + case_name + ".cytnx"; From b0503a427f29a717d0db28bb7d76c5fb60f8990a Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 01:37:01 +0800 Subject: [PATCH 04/13] fixed bug --- src/BlockUniTensor.cpp | 14 +++++++------- src/DenseUniTensor.cpp | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 64674926f..4257f7b9f 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -1214,19 +1214,19 @@ namespace cytnx { }; void BlockUniTensor::Transpose_() { - cytnx_int64 rank = this->bonds().size(); + const cytnx_int64 rank = this->bonds().size(); std::vector idxorder(rank); - cytnx_int64 rowrank = this->_rowrank; - for (cytnx_int64 i = 0; i < rowrank; i++) { + const cytnx_int64 oldrowrank = this->_rowrank; + this->_rowrank = rank - oldrowrank; + for (cytnx_int64 i = 0; i < this->_rowrank; i++) { this->bonds()[i].redirect_(); - idxorder[i] = i + rowrank; + idxorder[i] = i + oldrowrank; } - for (cytnx_int64 i = rowrank; i < rank; i++) { + for (cytnx_int64 i = this->_rowrank; i < rank; i++) { this->bonds()[i].redirect_(); - idxorder[i] = i - rowrank; + idxorder[i] = i - this->_rowrank; } this->permute_(idxorder); - this->_rowrank = rank - rowrank; }; void BlockUniTensor::normalize_() { diff --git a/src/DenseUniTensor.cpp b/src/DenseUniTensor.cpp index e2a51478d..a1cc60ff9 100644 --- 
a/src/DenseUniTensor.cpp +++ b/src/DenseUniTensor.cpp @@ -1190,19 +1190,19 @@ namespace cytnx { void DenseUniTensor::Transpose_() { if (this->is_tag()) { - cytnx_int64 rank = this->bonds().size(); + const cytnx_int64 rank = this->bonds().size(); std::vector idxorder(rank); - cytnx_int64 rowrank = this->_rowrank; - for (cytnx_int64 i = 0; i < rowrank; i++) { + const cytnx_int64 oldrowrank = this->_rowrank; + this->_rowrank = rank - oldrowrank; + for (cytnx_int64 i = 0; i < this->_rowrank; i++) { this->bonds()[i].redirect_(); - idxorder[i] = i + rowrank; + idxorder[i] = i + oldrowrank; } - for (cytnx_int64 i = rowrank; i < rank; i++) { + for (cytnx_int64 i = this->_rowrank; i < rank; i++) { this->bonds()[i].redirect_(); - idxorder[i] = i - rowrank; + idxorder[i] = i - this->_rowrank; } this->permute_(idxorder); - this->_rowrank = rank - rowrank; } else { std::vector new_permute = vec_concatenate(vec_range(this->rowrank(), this->rank()), From cf82d5fe982b7b688e016823ea61784500e0a339 Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 10:20:17 +0800 Subject: [PATCH 05/13] added unit test to cover the bug --- tests/BlockUniTensor_test.cpp | 99 ++++++++++++++++++++--------------- 1 file changed, 57 insertions(+), 42 deletions(-) diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index c109c2495..5b5f01b37 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -104,9 +104,9 @@ TEST_F(BlockUniTensorTest, is_blockform) { } TEST_F(BlockUniTensorTest, clone) { UniTensor cloned = UT_pB_ans.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(cloned.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (cloned.at({i, j, k}).exists()) EXPECT_EQ(cloned.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -261,9 +261,9 @@ 
TEST_F(BlockUniTensorTest, permute1) { // rank-3 tensor std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.permute(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -275,8 +275,8 @@ TEST_F(BlockUniTensorTest, permute2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.permute(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -288,9 +288,9 @@ TEST_F(BlockUniTensorTest, permute_1) { std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.clone(); permuted.permute_(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -302,8 +302,8 @@ TEST_F(BlockUniTensorTest, permute_2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.clone(); permuted.permute_(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), 
double(UT_permute_ans2.at({j, k}).real())); @@ -424,9 +424,9 @@ TEST_F(BlockUniTensorTest, put_block_byidx) { UT_pB.put_block(t1a, 1); UT_pB.put_block(t1b, 2); UT_pB.put_block(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -449,9 +449,9 @@ TEST_F(BlockUniTensorTest, put_block__byidx) { UT_pB.put_block_(t1a, 1); UT_pB.put_block_(t1b, 2); UT_pB.put_block_(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -474,9 +474,9 @@ TEST_F(BlockUniTensorTest, put_block_byqnum) { UT_pB.put_block(t1a, {0, 1, 1}, true); UT_pB.put_block(t1b, {1, 0, 1}, true); UT_pB.put_block(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -499,9 +499,9 @@ TEST_F(BlockUniTensorTest, put_block__byqnum) { UT_pB.put_block_(t1a, {0, 1, 1}, true); UT_pB.put_block_(t1b, {1, 0, 1}, true); UT_pB.put_block_(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) 
+ for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -768,7 +768,7 @@ TEST_F(BlockUniTensorTest, Norm) { cytnx_double tmp = double(UT_diag.Norm().at({0}).real()); cytnx_double ans = 0; - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; for (int j = 0; j < deg; j++) ans += (i + 1) * (i + 1); } @@ -789,10 +789,10 @@ TEST_F(BlockUniTensorTest, Inv) { tmp.Inv_(clip); // test inline version EXPECT_TRUE(AreEqUniTensor(BUT4.Inv(clip), tmp)); tmp = BUT4.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 11; j++) - for (size_t k = 0; k < 3; k++) - for (size_t l = 0; l < 5; l++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { auto proxy = tmp.at({i, j, k, l}); if (proxy.exists()) { Scalar val = proxy; @@ -816,10 +816,10 @@ TEST_F(BlockUniTensorTest, Pow) { EXPECT_TRUE(AreEqUniTensor(BUT4.Pow(2.3), tmp)); for (double p = 0.; p < 1.6; p += 0.5) { tmp = BUT4.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 11; j++) - for (size_t k = 0; k < 3; k++) - for (size_t l = 0; l < 5; l++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { auto proxy = tmp.at({i, j, k, l}); if (proxy.exists()) { Scalar val = proxy; @@ -859,9 +859,9 @@ TEST_F(BlockUniTensorTest, Conj) { } tmp = UT_diag_cplx.Conj(); - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j 
< deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -915,7 +915,7 @@ TEST_F(BlockUniTensorTest, Trace) { // std::cout<_degs[i]; for (int j = 0; j < deg; j++) ans += i + 1; } @@ -1002,10 +1002,25 @@ TEST_F(BlockUniTensorTest, Dagger) { } } + tmp = UT_pB.set_rowrank(2).Dagger().set_name("UT_pB.Dagger"); + EXPECT_EQ(tmp.rowrank(), 1); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 0; k < 30; k++) { + if (UT_pB.at({i, j, k}).exists()) { + EXPECT_DOUBLE_EQ(double(tmp.at({k, i, j}).real()), double(UT_pB.at({i, j, k}).real())); + } else { + EXPECT_FALSE(tmp.at({k, i, j}).exists()); + } + } + tmp = UT_diag_cplx.Dagger(); - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -1029,10 +1044,10 @@ TEST_F(BlockUniTensorTest, elem_exist) { 0); } - size_t offset = 0; - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + cytnx_int64 offset = 0; + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_TRUE(UT_diag_cplx.elem_exists({offset + j, offset + j})); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).real()), double(i + 
1)); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).imag()), double(i + 1)); From 557d6e1cd89b92d15e9c908fc3a246b671fb9e2e Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 10:20:45 +0800 Subject: [PATCH 06/13] some cleaning of GPU-test --- tests/gpu/BlockUniTensor_test.cpp | 394 +++++++++++++++--------------- 1 file changed, 200 insertions(+), 194 deletions(-) diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 2d42a592a..45dda38e0 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -6,8 +6,8 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout<_degs[i]; for (int j = 0; j < deg; j++) ans += i + 1; } @@ -76,22 +76,22 @@ TEST_F(BlockUniTensorTest, gpu_relabels_) { TEST_F(BlockUniTensorTest, gpu_relabel) { auto tmp = BUT1.clone(); - BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); + BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1 = BUT1.relabels({"1", "-1", "2", "1000"}); + BUT1 = BUT1.relabel({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel({"a", "b", "c", "d", 
"e"}), std::logic_error); BUT1 = tmp; BUT1 = BUT1.relabel("0", "a"); @@ -129,21 +129,21 @@ TEST_F(BlockUniTensorTest, gpu_relabel) { } TEST_F(BlockUniTensorTest, gpu_relabel_) { auto tmp = BUT1.clone(); - BUT1.relabels_({"a", "b", "cd", "d"}); + BUT1.relabel_({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); EXPECT_EQ(BUT1.labels()[1], "b"); EXPECT_EQ(BUT1.labels()[2], "cd"); EXPECT_EQ(BUT1.labels()[3], "d"); - BUT1.relabels_({"1", "-1", "2", "1000"}); + BUT1.relabel_({"1", "-1", "2", "1000"}); EXPECT_EQ(BUT1.labels()[0], "1"); EXPECT_EQ(BUT1.labels()[1], "-1"); EXPECT_EQ(BUT1.labels()[2], "2"); EXPECT_EQ(BUT1.labels()[3], "1000"); - EXPECT_THROW(BUT1.relabels_({"a", "a", "b", "c"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "1", "0", "-1"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"1", "2"}), std::logic_error); - EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "a", "b", "c"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "1", "0", "-1"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"1", "2"}), std::logic_error); + EXPECT_THROW(BUT1.relabel_({"a", "b", "c", "d", "e"}), std::logic_error); BUT1 = tmp; BUT1.relabel_("0", "a"); @@ -186,7 +186,7 @@ TEST_F(BlockUniTensorTest, gpu_Norm) { cytnx_double tmp = double(UT_diag.Norm().at({0}).real()); cytnx_double ans = 0; - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; for (int j = 0; j < deg; j++) ans += (i + 1) * (i + 1); } @@ -228,35 +228,35 @@ TEST_F(BlockUniTensorTest, gpu_Pow) { TEST_F(BlockUniTensorTest, gpu_Conj) { auto tmp = BUT4.Conj(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; 
l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); } tmp = BUT4.clone(); BUT4.Conj_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({i, j, k, l}).imag())); } tmp = UT_diag_cplx.Conj(); - for (size_t i 
= 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -267,28 +267,28 @@ TEST_F(BlockUniTensorTest, gpu_Conj) { TEST_F(BlockUniTensorTest, gpu_Transpose) { auto tmp = BUT1.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Transpose_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Transpose_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), @@ -297,63 +297,70 @@ 
TEST_F(BlockUniTensorTest, gpu_Transpose) { TEST_F(BlockUniTensorTest, gpu_Dagger) { auto tmp = BUT1.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Dagger_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Dagger_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); tmp = BUT4.Dagger(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - 
EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } + tmp = BUT4.clone(); - BUT4.Dagger_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({k, l, i, j}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({k, l, i, j}).imag())); + } else { + EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); } + } tmp = UT_diag_cplx.Dagger(); - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { 
cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -389,25 +396,24 @@ TEST_F(BlockUniTensorTest, gpu_truncate) { } TEST_F(BlockUniTensorTest, gpu_elem_exist) { - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.elem_exists({i - 1, j - 1, k - 1, l - 1})) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.elem_exists({i, j, k, l})) { cytnx_int64 _a; std::vector _b; - ((BlockUniTensor*)BUT4._impl.get()) - ->_fx_locate_elem(_a, _b, {i - 1, j - 1, k - 1, l - 1}); + ((BlockUniTensor*)BUT4._impl.get())->_fx_locate_elem(_a, _b, {i, j, k, l}); std::vector qind = BUT4.get_qindices(_a); EXPECT_EQ(BUT4.bonds()[0].qnums()[qind[0]][0] - BUT4.bonds()[1].qnums()[qind[1]][0] + BUT4.bonds()[2].qnums()[qind[2]][0] - BUT4.bonds()[3].qnums()[qind[3]][0], 0); } - size_t offset = 0; - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + cytnx_int64 offset = 0; + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_TRUE(UT_diag_cplx.elem_exists({offset + j, offset + j})); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).real()), double(i + 1)); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).imag()), double(i + 1)); @@ -615,9 +621,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block__byidx) { UT_pB.put_block_(t1a, 1); UT_pB.put_block_(t1b, 2); UT_pB.put_block_(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t 
j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -640,9 +646,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byidx) { UT_pB.put_block(t1a, 1); UT_pB.put_block(t1b, 2); UT_pB.put_block(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -665,9 +671,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block__byqnum) { UT_pB.put_block_(t1a, {0, 1, 1}, true); UT_pB.put_block_(t1b, {1, 0, 1}, true); UT_pB.put_block_(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -690,9 +696,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byqnum) { UT_pB.put_block(t1a, {0, 1, 1}, true); UT_pB.put_block(t1b, {1, 0, 1}, true); UT_pB.put_block(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), 
UT_pB_ans.at({i, j, k})); } @@ -712,9 +718,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byqnum) { TEST_F(BlockUniTensorTest, gpu_clone) { UniTensor cloned = UT_pB_ans.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(cloned.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (cloned.at({i, j, k}).exists()) EXPECT_EQ(cloned.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -724,9 +730,9 @@ TEST_F(BlockUniTensorTest, gpu_permute1) { // rank-3 tensor std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.permute(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -738,8 +744,8 @@ TEST_F(BlockUniTensorTest, gpu_permute2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.permute(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -751,9 +757,9 @@ TEST_F(BlockUniTensorTest, gpu_permute_1) { std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.clone(); permuted.permute_(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, 
k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -765,8 +771,8 @@ TEST_F(BlockUniTensorTest, gpu_permute_2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.clone(); permuted.permute_(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -832,26 +838,26 @@ TEST_F(BlockUniTensorTest, gpu_Add) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx").to(cytnx::Device.cuda); auto out2 = BUT4.Add(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } BUT4.Add_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 
1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } } @@ -878,76 +884,76 @@ TEST_F(BlockUniTensorTest, gpu_Sub) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx").to(cytnx::Device.cuda); auto out2 = BUT4.Sub(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } BUT4.Sub_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - 
double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, gpu_Mul) { auto out = BUT4.Mul(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } BUT4.Mul_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, 
j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, gpu_Div) { auto out = BUT4.Div(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } BUT4.Div_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } // BUT4 = 
BUT4.Load("OriginalBUT.cytnx"); From 016deea46daf26a4f0672852c8b9dd2d26d2e4e9 Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 12:59:49 +0800 Subject: [PATCH 07/13] fixed DMRG for BlockUniTensor to account for the new index order after Dagger --- example/DMRG/dmrg_two_sites_U1.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index 2a4e5fbc1..005bf895a 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -111,7 +111,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ "M: -2,0,-4,-5",\ - "A_Conj: -3,-5,2",\ + "A_Conj: 2,-3,-5",\ "TOUT: 0;1,2"]) for p in range(Nsites - 1): anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) @@ -143,7 +143,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.FromString(["R: -2,-1,-3",\ "B: 1,-4,-1",\ "M: 0,-2,-4,-5",\ - "B_Conj: 2,-5,-3",\ + "B_Conj: -3,2,-5",\ "TOUT: 0;1,2"]) anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Dagger()]) LR[p+1] = anet.Launch() @@ -177,7 +177,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ "M: -2,0,-4,-5",\ - "A_Conj: -3,-5,2",\ + "A_Conj: 2,-3,-5",\ "TOUT: 0;1,2"]) anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) LR[p+1] = anet.Launch() From 033ece386e4c93a6e23e9b8264631917adfe9e58 Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Fri, 5 Dec 2025 15:07:45 +0800 Subject: [PATCH 08/13] replaced A.Dagger() by Dagger().permute_(A.labels()) this achieves that the output of the Dagger is in the same form as the original tensor --- example/DMRG/dmrg_two_sites_U1.py | 15 +++++++++------ example/DMRG/dmrg_two_sites_dense.py | 9 ++++++--- example/TDVP/tdvp1_dense.py | 18 ++++++++++++------ 3 files changed, 27 insertions(+), 15 deletions(-) diff --git 
a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index 005bf895a..87d0e4ede 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -111,10 +111,11 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ "M: -2,0,-4,-5",\ - "A_Conj: 2,-3,-5",\ + "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) for p in range(Nsites - 1): - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() Ekeep = [] @@ -143,9 +144,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.FromString(["R: -2,-1,-3",\ "B: 1,-4,-1",\ "M: 0,-2,-4,-5",\ - "B_Conj: -3,2,-5",\ + "B_Conj: 2,-3,-5",\ "TOUT: 0;1,2"]) - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Dagger()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) LR[p+1] = anet.Launch() print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) @@ -177,9 +179,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ "M: -2,0,-4,-5",\ - "A_Conj: 2,-3,-5",\ + "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) diff --git a/example/DMRG/dmrg_two_sites_dense.py b/example/DMRG/dmrg_two_sites_dense.py index 72a6775e9..7a0cb6e07 100644 --- a/example/DMRG/dmrg_two_sites_dense.py +++ b/example/DMRG/dmrg_two_sites_dense.py @@ -92,7 +92,8 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - 
anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() # Recover the original MPS labels @@ -131,7 +132,8 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Conj()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) LR[p+1] = anet.Launch() print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) @@ -167,7 +169,8 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) diff --git a/example/TDVP/tdvp1_dense.py b/example/TDVP/tdvp1_dense.py index 7a55e3968..846ce9ee3 100644 --- a/example/TDVP/tdvp1_dense.py +++ b/example/TDVP/tdvp1_dense.py @@ -80,7 +80,8 @@ def get_energy(A, M): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") for p in range(0, N): - anet.PutUniTensors(["L","A","A_Conj","M"],[L,A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [L,A[p],A[p].Dagger().permute_(A[p].labels()),M]) L = anet.Launch() E = cytnx.Contract(L, R0).item() print('energy:', E) @@ -138,7 +139,8 @@ def get_energy(A, M): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() # Recover the original MPS 
labels @@ -175,7 +177,8 @@ def get_energy(A, M): "B_Conj: 2,-5,-3",\ "TOUT: ;0,1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+1],A[p],M,A[p].Conj()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+1],A[p],M,A[p].Dagger().permute_(A[p].labels())]) old_LR = LR[p].clone() if p != 0: LR[p] = anet.Launch() @@ -215,7 +218,8 @@ def get_energy(A, M): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) old_LR = LR[p+1].clone() @@ -246,7 +250,8 @@ def Local_meas(A, B, Op, site): "TOUT: 2;4"]) for i in range(0, N): if i != site: - anet.PutUniTensors(["l","A","B"],[l,A[i],B[i].Conj()]) + anet.PutUniTensors(["l","A","B"], \ + [l,A[i],B[i].Dagger().permute_(B[i].labels())]) l = anet.Launch() else: tmp = A[i].relabel(1, "_aux_up") @@ -254,7 +259,8 @@ def Local_meas(A, B, Op, site): tmp = cytnx.Contract(tmp, Op) tmp.relabel_("_aux_low", A[i].labels()[1]) tmp.permute_(A[i].labels()) - anet.PutUniTensors(["l","A","B"],[l,tmp,B[i].Conj()]) + anet.PutUniTensors(["l","A","B"], \ + [l,tmp,B[i].Dagger().permute_(B[i].labels())]) l = anet.Launch() return l.reshape(1).item() From cd99f8ff9ce606d5eaf53731c31fbf405b68226d Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Mon, 23 Feb 2026 15:06:16 +0900 Subject: [PATCH 09/13] added test for tagged DenseUniTensor --- tests/DenseUniTensor_test.cpp | 37 +++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index e951acf40..d92e54568 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -4064,6 +4064,43 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { EXPECT_TRUE(AreEqUniTensor(ut_diag, clone)); } +/*=====test info===== +describe:test Transpose with tagged UniTensor +====================*/ 
+TEST_F(DenseUniTensorTest, Transpose_tagged) { + auto Spcd_t = Spcd.Transpose(); + // test tag, rowrank, rank + EXPECT_TRUE(Spcd_t.is_tag()); + EXPECT_EQ(Spcd.rowrank(), 1); + EXPECT_EQ(Spcd_t.rowrank(), 2); + EXPECT_EQ(Spcd_t.rank(), 3); + // test bond types + std::vector bonds_t = Spcd_t.bonds(); + EXPECT_EQ(bonds_t[0].type(), BD_IN); + EXPECT_EQ(bonds_t[1].type(), BD_OUT); + EXPECT_EQ(bonds_t[2].type(), BD_OUT); + // test labels + std::vector labels = Spcd.labels(); + std::vector labels_t = Spcd_t.labels(); + EXPECT_EQ(labels_t[0], labels[1]); + EXPECT_EQ(labels_t[1], labels[2]); + EXPECT_EQ(labels_t[2], labels[0]); + // test shape + auto shape = Spcd.shape(); + auto shape_t = Spcd_t.shape(); + EXPECT_EQ(shape_t[0], shape[1]); + EXPECT_EQ(shape_t[1], shape[2]); + EXPECT_EQ(shape_t[2], shape[0]); + // test tensors + EXPECT_TRUE(AreEqUniTensor(Spcd_t.Transpose(), Spcd)); + auto Spcd_p = Spcd_t.permute(Spcd.labels()); + std::vector bonds_p = Spcd_p.bonds(); + for (auto bond : bonds_p) { + bond.redirect_(); + } + EXPECT_TRUE(AreEqUniTensor(Spcd_p, Spcd)); +} + /*=====test info===== describe:test Transpose_ ====================*/ From ba011f8878929f07ce3672fd4b9e2bbd5dcfd610 Mon Sep 17 00:00:00 2001 From: Manuel Schneider Date: Mon, 23 Feb 2026 16:38:49 +0900 Subject: [PATCH 10/13] dmrg_two_sites: fixed bug in network, added labels to UniTensors --- example/DMRG/dmrg_two_sites_U1.py | 19 +++++++++++++++-- example/DMRG/dmrg_two_sites_dense.py | 32 +++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 7 deletions(-) diff --git a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index 87d0e4ede..c801d1116 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -41,7 +41,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): return psivec, energy[0].item() - ## Initialiaze MPO + ## Initialize MPO ##>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> d = 2 s = 0.5 @@ -49,6 +49,7 @@ def 
optimize_psi(psi, functArgs, maxit=2, krydim=4): bd_phys = cytnx.Bond(cytnx.BD_KET,[[1],[-1]],[1,1]) M = cytnx.UniTensor([bd_inner,bd_inner.redirect(),bd_phys, bd_phys.redirect()],rowrank=2) + M.set_name("MPO") # I M.set_elem([0,0,0,0],1); @@ -69,7 +70,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): VbdL = cytnx.Bond(cytnx.BD_KET,[[0]],[1]) VbdR = cytnx.Bond(cytnx.BD_KET,[[q]],[1]) L0 = cytnx.UniTensor([bd_inner.redirect(),VbdL.redirect(),VbdL],rowrank=1) #Left boundary + L0.set_name("L0") R0 = cytnx.UniTensor([bd_inner,VbdR,VbdR.redirect()],rowrank=1) #Right boundary + R0.set_name("R0") L0.set_elem([0,0,0],1) R0.set_elem([3,0,0],1) @@ -83,6 +86,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[0] = cytnx.UniTensor([VbdL,bd_phys.redirect(),cytnx.Bond(cytnx.BD_BRA,[[qcntr]],[1])],rowrank=2) A[0].get_block_()[0] = 1 + A[0].set_name("A0") lbls = [] lbls.append(["0","1","2"]) # store the labels for later convinience. @@ -96,6 +100,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): B3 = cytnx.Bond(cytnx.BD_BRA,[[qcntr]],[1]) A[k] = cytnx.UniTensor([B1,B2,B3],rowrank=2) + A[k].set_name(f"A{k}") lbl = [str(2*k),str(2*k+1),str(2*k+2)] A[k].set_labels(lbl) @@ -117,6 +122,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.PutUniTensors(["L","A","A_Conj","M"], \ [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") Ekeep = [] for k in range(1, numsweeps+1): @@ -139,16 +145,20 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from right to left: anet = cytnx.Network() anet.FromString(["R: -2,-1,-3",\ "B: 1,-4,-1",\ "M: 0,-2,-4,-5",\ - "B_Conj: 2,-3,-5",\ + "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) anet.PutUniTensors(["R","B","M","B_Conj"], \ 
[LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) @@ -174,6 +184,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from left to right: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ @@ -184,12 +197,14 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.PutUniTensors(["L","A","A_Conj","M"], \ [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].set_name(f"A{Nsites-1}") return Ekeep diff --git a/example/DMRG/dmrg_two_sites_dense.py b/example/DMRG/dmrg_two_sites_dense.py index 7a0cb6e07..9dd8e5abb 100644 --- a/example/DMRG/dmrg_two_sites_dense.py +++ b/example/DMRG/dmrg_two_sites_dense.py @@ -53,25 +53,32 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): M[0,1] = M[2,3] = 2**0.5*sp.real() M[0,2] = M[1,3] = 2**0.5*sm.real() M = cytnx.UniTensor(M,0) + M.set_name("MPO") L0 = cytnx.UniTensor(cytnx.zeros([4,1,1]), rowrank = 0) #Left boundary R0 = cytnx.UniTensor(cytnx.zeros([4,1,1]), rowrank = 0) #Right boundary - L0[0,0,0] = 1.; R0[3,0,0] = 1. + L0.set_name("L0") + R0.set_name("R0") + L0[0,0,0] = 1. + R0[3,0,0] = 1. - lbls = [] # List for storing the MPS labels A = [None for i in range(Nsites)] A[0] = cytnx.UniTensor(cytnx.random.normal([1, d, min(chi, d)], 0., 1.), rowrank = 2) A[0].relabels_(["0","1","2"]) - lbls.append(["0","1","2"]) # store the labels for later convinience. 
+ A[0].set_name("A0") + + lbls = [] # List for storing the MPS labels + lbls.append(["0","1","2"]) # store the labels for later convenience. for k in range(1,Nsites): dim1 = A[k-1].shape()[2]; dim2 = d dim3 = min(min(chi, A[k-1].shape()[2] * d), d ** (Nsites - k - 1)) A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1.), rowrank = 2) + A[k].set_name(f"A{k}") lbl = [str(2*k),str(2*k+1),str(2*k+2)] A[k].relabels_(lbl) - lbls.append(lbl) # store the labels for later convinience. + lbls.append(lbl) # store the labels for later convenience. LR = [None for i in range(Nsites+1)] LR[0] = L0 @@ -84,7 +91,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): s, A[p] ,vt = cytnx.linalg.Gesvd(A[p]) A[p+1] = cytnx.Contract(cytnx.Contract(s,vt),A[p+1]) - ## Calculate enviroments: + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + + ## Calculate environments: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ @@ -95,6 +105,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.PutUniTensors(["L","A","A_Conj","M"], \ [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") # Recover the original MPS labels A[p].relabels_(lbls[p]) @@ -102,6 +113,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
A[-1].relabels_(lbls[-1]) # Recover the original MPS labels + A[-1].set_name(f"A{Nsites-1}") Ekeep = [] for k in range(1, numsweeps+1): @@ -124,6 +136,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor A[p].relabels_(lbls[p]); # set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from right to left: anet = cytnx.Network() anet.FromString(["R: -2,-1,-3",\ @@ -135,12 +150,14 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.PutUniTensors(["R","B","M","B_Conj"], \ [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].set_name("A0") for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -160,6 +177,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + # update LR from left to right: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ @@ -172,12 +192,14 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): anet.PutUniTensors(["L","A","A_Conj","M"], \ [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].set_name(f"A{Nsites-1}") return Ekeep if __name__ == '__main__': From 026b321c5915f2b5a4272fa4ab1477aa89bd11da Mon Sep 17 00:00:00 2001 From: Pochung Chen Date: Sat, 4 Apr 2026 00:48:15 +0800 Subject: [PATCH 11/13] fix issues from PR #725 review: tests, docs, and example comments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - tests/BlockUniTensor_test.cpp: fix loop bounds j<5 → j<11 for BUT4 second bond dim (Add, Mul, Sub, Div, Conj, Trace, Dagger, elem_exist) - tests/gpu/BlockUniTensor_test.cpp: fix gpu_Trace 0-indexed loop body using {j-1,k-1} → {j,k} - tests/BlockFermionicUniTensor_test.cpp: add Transpose test covering rowrank update, bond redirection, element preservation, and involution - tests/DenseUniTensor_test.cpp: remove dead bond redirect loop in Transpose_tagged (modified local copies, had no effect) - include/UniTensor.hpp: fix typo "onces" → "ones" in Transpose docstring - example/DMRG, example/TDVP: add comments explaining Dagger().permute_() pattern needed after PR #725 index-order change Co-Authored-By: Claude Sonnet 4.6 --- example/DMRG/dmrg_two_sites_U1.py | 5 +-- example/DMRG/dmrg_two_sites_dense.py | 5 +-- example/TDVP/tdvp1_dense.py | 11 +++--- include/UniTensor.hpp | 2 +- tests/BlockFermionicUniTensor_test.cpp | 48 ++++++++++++++++++++++++++ tests/BlockUniTensor_test.cpp | 28 +++++++-------- tests/DenseUniTensor_test.cpp | 4 --- tests/gpu/BlockUniTensor_test.cpp | 12 +++---- 8 files changed, 81 insertions(+), 34 deletions(-) diff --git a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index c801d1116..806231699 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -119,6 +119,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) for p in range(Nsites - 1): + # Dagger() swaps left/right index order; permute_ restores original label 
order anet.PutUniTensors(["L","A","A_Conj","M"], \ [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() @@ -156,7 +157,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) anet.PutUniTensors(["R","B","M","B_Conj"], \ - [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() LR[p+1].set_name(f"LR{p+1}") @@ -195,7 +196,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) anet.PutUniTensors(["L","A","A_Conj","M"], \ - [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() LR[p+1].set_name(f"LR{p+1}") diff --git a/example/DMRG/dmrg_two_sites_dense.py b/example/DMRG/dmrg_two_sites_dense.py index 9dd8e5abb..6c48ca6c0 100644 --- a/example/DMRG/dmrg_two_sites_dense.py +++ b/example/DMRG/dmrg_two_sites_dense.py @@ -102,6 +102,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") + # Dagger() swaps left/right index order; permute_ restores original label order anet.PutUniTensors(["L","A","A_Conj","M"], \ [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() @@ -148,7 +149,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "TOUT: 0;1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") anet.PutUniTensors(["R","B","M","B_Conj"], \ - [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() LR[p+1].set_name(f"LR{p+1}") @@ -190,7 +191,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): # or you can do: anet = 
cytnx.Network("L_AMAH.net") anet.PutUniTensors(["L","A","A_Conj","M"], \ - [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() LR[p+1].set_name(f"LR{p+1}") diff --git a/example/TDVP/tdvp1_dense.py b/example/TDVP/tdvp1_dense.py index 846ce9ee3..32f61470a 100644 --- a/example/TDVP/tdvp1_dense.py +++ b/example/TDVP/tdvp1_dense.py @@ -80,6 +80,7 @@ def get_energy(A, M): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") for p in range(0, N): + # Dagger() swaps left/right index order; permute_ restores original label order anet.PutUniTensors(["L","A","A_Conj","M"], \ [L,A[p],A[p].Dagger().permute_(A[p].labels()),M]) L = anet.Launch() @@ -140,7 +141,7 @@ def get_energy(A, M): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") anet.PutUniTensors(["L","A","A_Conj","M"], \ - [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() # Recover the original MPS labels @@ -178,7 +179,7 @@ def get_energy(A, M): "TOUT: ;0,1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") anet.PutUniTensors(["R","B","M","B_Conj"], \ - [LR[p+1],A[p],M,A[p].Dagger().permute_(A[p].labels())]) + [LR[p+1],A[p],M,A[p].Dagger().permute_(A[p].labels())]) # Dagger() swaps index order; permute_ restores it old_LR = LR[p].clone() if p != 0: LR[p] = anet.Launch() @@ -219,7 +220,7 @@ def get_energy(A, M): "TOUT: 0,1,2"]) anet.PutUniTensors(["L","A","A_Conj","M"], \ - [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it old_LR = LR[p+1].clone() @@ -251,7 +252,7 @@ def Local_meas(A, B, Op, site): for i in range(0, N): if i != site: anet.PutUniTensors(["l","A","B"], \ - 
[l,A[i],B[i].Dagger().permute_(B[i].labels())]) + [l,A[i],B[i].Dagger().permute_(B[i].labels())]) # Dagger() swaps index order; permute_ restores it l = anet.Launch() else: tmp = A[i].relabel(1, "_aux_up") @@ -260,7 +261,7 @@ def Local_meas(A, B, Op, site): tmp.relabel_("_aux_low", A[i].labels()[1]) tmp.permute_(A[i].labels()) anet.PutUniTensors(["l","A","B"], \ - [l,tmp,B[i].Dagger().permute_(B[i].labels())]) + [l,tmp,B[i].Dagger().permute_(B[i].labels())]) # Dagger() swaps index order; permute_ restores it l = anet.Launch() return l.reshape(1).item() diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index 5c178700e..f9b12a0b0 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -4978,7 +4978,7 @@ namespace cytnx { 1) Swaps the roles of left and right indices: index numbers k < rowrank become k + rowrank, indices k' >= rowrank become k' - rowrank. For fermions, the order of the indices is inverted instead. - 2) Incoming legs become outgoing onces, and vice versa + 2) Incoming legs become outgoing ones, and vice versa 3) The rowrank is set to rank - old rowrank, such that left indices become right indices and vice versa. @return UniTensor diff --git a/tests/BlockFermionicUniTensor_test.cpp b/tests/BlockFermionicUniTensor_test.cpp index dcbe1ff57..7dfd4b8eb 100644 --- a/tests/BlockFermionicUniTensor_test.cpp +++ b/tests/BlockFermionicUniTensor_test.cpp @@ -191,3 +191,51 @@ TEST_F(BlockFermionicUniTensorTest, SaveLoad) { UniTensor BFUTloaded = BFUTloaded.Load("BFUT1.cytnx"); EXPECT_TRUE(AreEqUniTensor(BFUT1, BFUTloaded)); } + +/*=====test info===== +describe:test Transpose and Transpose_ for BlockFermionicUniTensor: + rowrank is updated, index order is reversed, bonds are redirected, + and element values are preserved without sign flips. 
+====================*/ +TEST_F(BlockFermionicUniTensorTest, Transpose) { + // BFUT1: rank=3, rowrank=2, bonds=[BD_IN(a), BD_IN(b), BD_OUT(c)], shape=(2,2,4) + EXPECT_EQ(BFUT1.rowrank(), 2); + + auto tmp = BFUT1.Transpose(); + + // rowrank must be rank - old_rowrank = 3 - 2 = 1 + EXPECT_EQ(tmp.rowrank(), 1); + EXPECT_EQ(tmp.rank(), 3); + + // index order is reversed: new [0,1,2] = old [c,b,a] + EXPECT_EQ(tmp.labels()[0], "c"); + EXPECT_EQ(tmp.labels()[1], "b"); + EXPECT_EQ(tmp.labels()[2], "a"); + + // bonds are redirected: old BD_OUT(c)->BD_IN, old BD_IN(b)->BD_OUT, old BD_IN(a)->BD_OUT + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + + // element at old {a,b,c} appears at new {c,b,a}; no sign flips + EXPECT_DOUBLE_EQ(double(tmp.at({0, 0, 0}).real()), 1.); + EXPECT_DOUBLE_EQ(double(tmp.at({1, 0, 0}).real()), 2.); + EXPECT_DOUBLE_EQ(double(tmp.at({2, 1, 0}).real()), 3.); + EXPECT_DOUBLE_EQ(double(tmp.at({3, 1, 0}).real()), 4.); + EXPECT_DOUBLE_EQ(double(tmp.at({2, 0, 1}).real()), 5.); + EXPECT_DOUBLE_EQ(double(tmp.at({3, 0, 1}).real()), 6.); + EXPECT_DOUBLE_EQ(double(tmp.at({0, 1, 1}).real()), 7.); + EXPECT_DOUBLE_EQ(double(tmp.at({1, 1, 1}).real()), 8.); + + // Transpose is an involution: T.Transpose().Transpose() == T + EXPECT_TRUE(AreEqUniTensor(tmp.Transpose(), BFUT1)); + + // in-place version must match + auto tmp2 = BFUT1.clone(); + tmp2.Transpose_(); + EXPECT_EQ(tmp2.rowrank(), 1); + EXPECT_EQ(tmp2.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp2.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp2.bonds()[2].type(), BD_OUT); + EXPECT_TRUE(AreEqUniTensor(tmp2, tmp)); +} diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index 5b5f01b37..f8474a42e 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -599,7 +599,7 @@ TEST_F(BlockUniTensorTest, Add) { BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Add(BUT4_2); for 
(cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (out2.at({i, j, k, l}).exists()) { @@ -610,7 +610,7 @@ TEST_F(BlockUniTensorTest, Add) { } BUT4.Add_(BUT4_2); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (BUT4.at({i, j, k, l}).exists()) { @@ -624,7 +624,7 @@ TEST_F(BlockUniTensorTest, Add) { TEST_F(BlockUniTensorTest, Mul) { auto out = BUT4.Mul(9); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (out.at({i, j, k, l}).exists()) { @@ -635,7 +635,7 @@ TEST_F(BlockUniTensorTest, Mul) { } BUT4.Mul_(9); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (BUT4.at({i, j, k, l}).exists()) { @@ -670,7 +670,7 @@ TEST_F(BlockUniTensorTest, Sub) { BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Sub(BUT4_2); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (out2.at({i, j, k, l}).exists()) { @@ -681,7 +681,7 @@ TEST_F(BlockUniTensorTest, Sub) { } BUT4.Sub_(BUT4_2); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (BUT4.at({i, j, k, l}).exists()) { @@ -695,7 +695,7 @@ TEST_F(BlockUniTensorTest, Sub) { TEST_F(BlockUniTensorTest, Div) { auto out = BUT4.Div(9); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for 
(cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (out.at({i, j, k, l}).exists()) { @@ -706,7 +706,7 @@ TEST_F(BlockUniTensorTest, Div) { } BUT4.Div_(9); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (BUT4.at({i, j, k, l}).exists()) { @@ -834,7 +834,7 @@ TEST_F(BlockUniTensorTest, Pow) { TEST_F(BlockUniTensorTest, Conj) { auto tmp = BUT4.Conj(); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (BUT4.at({i, j, k, l}).exists()) { @@ -847,7 +847,7 @@ TEST_F(BlockUniTensorTest, Conj) { tmp = BUT4.clone(); tmp.Conj_(); for (cytnx_int64 i = 0; i < 5; i++) - for (cytnx_int64 j = 0; j < 5; j++) + for (cytnx_int64 j = 0; j < 11; j++) for (cytnx_int64 k = 0; k < 3; k++) for (cytnx_int64 l = 0; l < 5; l++) if (BUT4.at({i, j, k, l}).exists()) { @@ -905,7 +905,7 @@ TEST_F(BlockUniTensorTest, Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout< bonds_p = Spcd_p.bonds(); - for (auto bond : bonds_p) { - bond.redirect_(); - } EXPECT_TRUE(AreEqUniTensor(Spcd_p, Spcd)); } diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 45dda38e0..820d80ae0 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -8,12 +8,12 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { // std::cout< Date: Sat, 4 Apr 2026 10:10:37 +0800 Subject: [PATCH 12/13] clang-format: fix line wrapping in gpu/BlockUniTensor_test.cpp Co-Authored-By: Claude Sonnet 4.6 --- tests/gpu/BlockUniTensor_test.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 5c32fb874..9bffe63bb 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ 
b/tests/gpu/BlockUniTensor_test.cpp @@ -10,10 +10,8 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { for (cytnx_int64 k = 0; k < 3; k++) if (BUtrT4.at({j, k}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({j,k})-BUtrT4.at({j,k})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({j, k}).real()), - double(BUtrT4.at({j, k}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({j, k}).imag()), - double(BUtrT4.at({j, k}).imag())); + EXPECT_DOUBLE_EQ(double(tmp.at({j, k}).real()), double(BUtrT4.at({j, k}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({j, k}).imag()), double(BUtrT4.at({j, k}).imag())); } // std::cout< Date: Thu, 9 Apr 2026 12:15:26 +0800 Subject: [PATCH 13/13] Transpose inverts the index order for all UniTensor types --- include/Bond.hpp | 2 +- include/UniTensor.hpp | 34 +++++++++--------- misc_doc/version.log | 2 +- src/BlockFermionicUniTensor.cpp | 3 +- src/BlockUniTensor.cpp | 16 +++------ src/DenseUniTensor.cpp | 24 +++++-------- src/linalg/Lanczos_Exp.cpp | 13 +++---- tests/BlockUniTensor_test.cpp | 16 ++++----- tests/DenseUniTensor_test.cpp | 58 +++++++++++++++---------------- tests/gpu/BlockUniTensor_test.cpp | 12 +++---- tests/gpu/DenseUniTensor_test.cpp | 42 +++++++++++----------- 11 files changed, 104 insertions(+), 118 deletions(-) diff --git a/include/Bond.hpp b/include/Bond.hpp index 58a7497a8..8ee8ae46d 100644 --- a/include/Bond.hpp +++ b/include/Bond.hpp @@ -799,7 +799,7 @@ namespace cytnx { /** @brief Group the duplicated quantum number and return the new instance - of the Bond ojbect. + of the Bond object. @details This function will group the duplicated quantum number and return the new instance of the Bond object. It will also the \p mapper, where \p mapper is about the new index from old index via\n diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index c8266b4be..901bc64a6 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -5001,17 +5001,14 @@ namespace cytnx { /** @brief Take the transpose of the UniTensor. 
@details This function takes the transpose of a UniTensor: - 1) Swaps the roles of left and right indices: index numbers k < rowrank become k + rowrank, - indices k' >= rowrank become k' - rowrank. For fermions, the order of the indices is - inverted instead. - 2) Incoming legs become outgoing ones, and vice versa + 1) The order of the indices is inverted. + 2) Incoming legs become outgoing ones, and vice versa. 3) The rowrank is set to rank - old rowrank, such that left indices become right indices and vice versa. @return UniTensor + @note This function not only exchanges left and right indices, but also inverts the order of all + indices. @note Compared to Transpose_(), this function will return new UniTensor object. - @warning For fermionic UniTensors, the order of the indices is inverted, while for bosonic - UniTensors the role of the left-and right indices is exchanged without inverting the orders in - these two groups. @see Transpose_() */ UniTensor Transpose() const { @@ -5022,10 +5019,11 @@ namespace cytnx { /** @brief Take the transpose of the UniTensor, inplacely. - @return UniTensor + @return UniTensor + @note This function inverts the order of all indices. @note Compared to Transpose(), this function is an inplace function. - @see Transpose() - */ + @see Transpose() + */ UniTensor &Transpose_() { this->_impl->Transpose_(); return *this; @@ -5130,10 +5128,11 @@ namespace cytnx { /** @brief Take the conjugate transpose to the UniTensor. - @return UniTensor - @note Compared to Dagger_(), this function will create a new UniTensor ojbect. - @see Dagger_(), Transpose() - */ + @return UniTensor + @note This function inverts the order of all indices. + @note Compared to Dagger_(), this function will create a new UniTensor object. + @see Dagger_(), Transpose() + */ UniTensor Dagger() const { UniTensor out; out._impl = this->_impl->Dagger(); @@ -5142,10 +5141,11 @@ namespace cytnx { /** @brief Take the conjugate transpose to the UniTensor, inplacely.
- @return UniTensor& + @return UniTensor& + @note This function inverts the order of all indices. @note Compared to Dagger(), this is an inplace function. - @see Dagger() - */ + @see Dagger() + */ UniTensor &Dagger_() { this->_impl->Dagger_(); return *this; diff --git a/misc_doc/version.log b/misc_doc/version.log index 3dabaa2b0..a14b635b4 100644 --- a/misc_doc/version.log +++ b/misc_doc/version.log @@ -4,7 +4,7 @@ v1.0.0 3. [Change] Merge relabel and relabels into relabel, and relabel_ and relabels_ into relabel_. 4. [New] Add an optional argument min_blockdim to svd_truncate to define a minimum dimension for each block. 5. [New] Add Eig/Eigh functions for Block UniTensor. -6. [New] Add Lancos-like algoirthm, Lanczos_Exp, to approximate exponential operator acting on a state. +6. [New] Add Lanczos-like algorithm, Lanczos_Exp, to approximate exponential operator acting on a state. 7. [Change] Migrate cuTENSOR APIs to the version 2. 8. [Change] reshape_ and permute_ to return the object itself instead of None. 9. [Change] Remove the magma dependency.
diff --git a/src/BlockFermionicUniTensor.cpp b/src/BlockFermionicUniTensor.cpp index 2b84b1cd4..49422a329 100644 --- a/src/BlockFermionicUniTensor.cpp +++ b/src/BlockFermionicUniTensor.cpp @@ -1902,8 +1902,7 @@ namespace cytnx { this->bonds()[i].redirect_(); idxorder[i] = idxnum - i; } - this->permute_nosignflip_(idxorder); - this->_rowrank = idxnum + 1 - this->_rowrank; + this->permute_nosignflip_(idxorder, idxnum + 1 - this->_rowrank); }; void BlockFermionicUniTensor::normalize_() { diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 4257f7b9f..5de7c953f 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -1214,19 +1214,13 @@ namespace cytnx { }; void BlockUniTensor::Transpose_() { - const cytnx_int64 rank = this->bonds().size(); - std::vector idxorder(rank); - const cytnx_int64 oldrowrank = this->_rowrank; - this->_rowrank = rank - oldrowrank; - for (cytnx_int64 i = 0; i < this->_rowrank; i++) { + std::vector<cytnx_int64> idxorder(this->_bonds.size()); + cytnx_int64 idxnum = this->bonds().size() - 1; + for (cytnx_int64 i = 0; i <= idxnum; i++) { this->bonds()[i].redirect_(); - idxorder[i] = i + oldrowrank; + idxorder[i] = idxnum - i; } - for (cytnx_int64 i = this->_rowrank; i < rank; i++) { - this->bonds()[i].redirect_(); - idxorder[i] = i - this->_rowrank; - } - this->permute_(idxorder); + this->permute_(idxorder, idxnum + 1 - this->_rowrank); }; void BlockUniTensor::normalize_() { diff --git a/src/DenseUniTensor.cpp b/src/DenseUniTensor.cpp index c3cdcf87f..532421153 100644 --- a/src/DenseUniTensor.cpp +++ b/src/DenseUniTensor.cpp @@ -1189,27 +1189,19 @@ namespace cytnx { } void DenseUniTensor::Transpose_() { + std::vector<cytnx_int64> idxorder(this->_bonds.size()); + cytnx_int64 idxnum = this->bonds().size() - 1; if (this->is_tag()) { - const cytnx_int64 rank = this->bonds().size(); - std::vector idxorder(rank); - const cytnx_int64 oldrowrank = this->_rowrank; - this->_rowrank = rank - oldrowrank; - for (cytnx_int64 i = 0; i < this->_rowrank; i++) { +
for (cytnx_int64 i = 0; i <= idxnum; i++) { this->bonds()[i].redirect_(); - idxorder[i] = i + oldrowrank; + idxorder[i] = idxnum - i; } - for (cytnx_int64 i = this->_rowrank; i < rank; i++) { - this->bonds()[i].redirect_(); - idxorder[i] = i - this->_rowrank; - } - this->permute_(idxorder); } else { - std::vector new_permute = - vec_concatenate(vec_range(this->rowrank(), this->rank()), - vec_range(0, this->rowrank())); - this->permute_(new_permute); - this->_rowrank = this->rank() - this->_rowrank; + for (cytnx_int64 i = 0; i <= idxnum; i++) { + idxorder[i] = idxnum - i; + } } + this->permute_(idxorder, idxnum + 1 - this->_rowrank); }; void DenseUniTensor::normalize_() { this->_block /= linalg::Norm(this->_block); } diff --git a/src/linalg/Lanczos_Exp.cpp b/src/linalg/Lanczos_Exp.cpp index d07508128..fd77db46c 100644 --- a/src/linalg/Lanczos_Exp.cpp +++ b/src/linalg/Lanczos_Exp.cpp @@ -172,7 +172,7 @@ namespace cytnx { // Let V_k be the n × (k + 1) matrix whose columns are v[0],...,v[k] respectively. UniTensor Vk_ut(Vk); Vk_ut.set_rowrank_(1); - auto VkDag_ut = Vk_ut.Dagger(); // left and right indices are exchanged here! + auto VkDag_ut = Vk_ut.Dagger(); // index order is inverted here! // Let T_k be the (k + 1) × (k + 1) matrix a[i,j] i,j is {0,...,k} and Tk_hat = 1 / 2 // (Tk^Dagger + Tk). 
auto asT = as.permute({1, 0}).Conj().contiguous(); @@ -213,7 +213,8 @@ namespace cytnx { auto Vk_labels = v0.labels(); Vk_labels.insert(Vk_labels.begin(), label_kl); Vk_ut.relabel_(Vk_labels); - auto VkDag_labels = v0.labels(); + auto VkDag_labels = + std::vector(v0.labels().rbegin(), v0.labels().rend()); // inverted order VkDag_labels.push_back(label_kr); VkDag_ut.relabel_(VkDag_labels); @@ -259,7 +260,7 @@ namespace cytnx { for (int i = 1; i < imp_maxiter; ++i) { if (verbose) { - std::cout << "Lancos iteration:" << i << std::endl; + std::cout << "Lanczos iteration:" << i << std::endl; } auto beta = std::sqrt(double(Dot_internal(w, w).real())); v_old = v.clone(); @@ -313,7 +314,7 @@ namespace cytnx { // Let V_k be the n × (k + 1) matrix whose columns are v[0],...,v[k] respectively. UniTensor Vk_ut(Vk); Vk_ut.set_rowrank_(1); - auto VkDag_ut = Vk_ut.Dagger(); // left and right indices are exchanged here! + auto VkDag_ut = Vk_ut.Dagger(); // Index order is inverted here! /* * ||| * |-----| @@ -348,10 +349,10 @@ namespace cytnx { auto Vk_labels = v.labels(); Vk_labels.insert(Vk_labels.begin(), label_kl); Vk_ut.relabel_(Vk_labels); - auto VkDag_labels = v.labels(); + auto VkDag_labels = + std::vector(v.labels().rbegin(), v.labels().rend()); // inverted order VkDag_labels.push_back(label_kr); VkDag_ut.relabel_(VkDag_labels); - out = Contracts({T, VkDag_ut, B}, "", true); out = Contract(out, Vk_ut); out.set_rowrank_(v.rowrank()); diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index f8474a42e..425d9f03d 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -977,12 +977,12 @@ TEST_F(BlockUniTensorTest, Dagger) { // std::cout << "BUT4.Dagger(k=" << k << ", l=" << l << ", i=" << i << ", j=" << j // << ") = " << double(tmp.at({k, l, i, j}).real()) << " + i * " // << double(tmp.at({k, l, i, j}).imag()) << std::endl; - EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).real()), + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, 
i}).real()), double(BUT4.at({i, j, k, l}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).imag()), + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).imag()), -double(BUT4.at({i, j, k, l}).imag())); } else { - EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } } tmp = BUT4.clone(); @@ -994,11 +994,11 @@ TEST_F(BlockUniTensorTest, Dagger) { if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), - double(tmp.at({k, l, i, j}).real())); + double(tmp.at({l, k, j, i}).real())); EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), - -double(tmp.at({k, l, i, j}).imag())); + -double(tmp.at({l, k, j, i}).imag())); } else { - EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } } @@ -1011,9 +1011,9 @@ TEST_F(BlockUniTensorTest, Dagger) { for (cytnx_int64 j = 0; j < 9; j++) for (cytnx_int64 k = 0; k < 30; k++) { if (UT_pB.at({i, j, k}).exists()) { - EXPECT_DOUBLE_EQ(double(tmp.at({k, i, j}).real()), double(UT_pB.at({i, j, k}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({k, j, i}).real()), double(UT_pB.at({i, j, k}).real())); } else { - EXPECT_FALSE(tmp.at({k, i, j}).exists()); + EXPECT_FALSE(tmp.at({k, j, i}).exists()); } } diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 65345d332..42e92775d 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -4112,14 +4112,14 @@ TEST_F(DenseUniTensorTest, Transpose) { } // a, b; c -> c; a, b EXPECT_EQ(ut.labels(), std::vector({"a", "b", "c"})); - EXPECT_EQ(ut_t.labels(), std::vector({"c", "a", "b"})); + EXPECT_EQ(ut_t.labels(), std::vector({"c", "b", "a"})); EXPECT_EQ(ut.rowrank(), row_rank); EXPECT_EQ(ut_t.rowrank(), ut_t.rank() - row_rank); auto shape = ut.shape(); for (cytnx_uint64 i = 0; i < shape[0]; i++) { for (cytnx_uint64 j = 0; j < shape[1]; j++) { for 
(cytnx_uint64 k = 0; k < shape[2]; k++) { - EXPECT_EQ(ut.at({i, j, k}), ut_t.at({k, i, j})); + EXPECT_EQ(ut.at({i, j, k}), ut_t.at({k, j, i})); } } } @@ -4168,20 +4168,20 @@ TEST_F(DenseUniTensorTest, Transpose_tagged) { EXPECT_EQ(Spcd_t.rank(), 3); // test bond types std::vector bonds_t = Spcd_t.bonds(); - EXPECT_EQ(bonds_t[0].type(), BD_IN); - EXPECT_EQ(bonds_t[1].type(), BD_OUT); + EXPECT_EQ(bonds_t[0].type(), BD_OUT); + EXPECT_EQ(bonds_t[1].type(), BD_IN); EXPECT_EQ(bonds_t[2].type(), BD_OUT); // test labels std::vector labels = Spcd.labels(); std::vector labels_t = Spcd_t.labels(); - EXPECT_EQ(labels_t[0], labels[1]); - EXPECT_EQ(labels_t[1], labels[2]); + EXPECT_EQ(labels_t[0], labels[2]); + EXPECT_EQ(labels_t[1], labels[1]); EXPECT_EQ(labels_t[2], labels[0]); // test shape auto shape = Spcd.shape(); auto shape_t = Spcd_t.shape(); - EXPECT_EQ(shape_t[0], shape[1]); - EXPECT_EQ(shape_t[1], shape[2]); + EXPECT_EQ(shape_t[0], shape[2]); + EXPECT_EQ(shape_t[1], shape[1]); EXPECT_EQ(shape_t[2], shape[0]); // test tensors EXPECT_TRUE(AreEqUniTensor(Spcd_t.Transpose(), Spcd)); @@ -4381,29 +4381,29 @@ TEST_F(DenseUniTensorTest, Dagger) { EXPECT_EQ(utzero3456.bonds()[3].type(), BD_REG); tmp = utarcomplex3456.Dagger(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + 
EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } tmp = utarcomplex3456.clone(); - utarcomplex3456.Dagger_(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(utarcomplex3456.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } } /*=====test info===== diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 9bffe63bb..8e3c460fa 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -336,12 +336,12 @@ TEST_F(BlockUniTensorTest, gpu_Dagger) { for (cytnx_int64 l = 0; l < 5; l++) { if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).real()), + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).real()), double(BUT4.at({i, j, k, l}).real())); - 
EXPECT_DOUBLE_EQ(double(tmp.at({k, l, i, j}).imag()), + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).imag()), -double(BUT4.at({i, j, k, l}).imag())); } else { - EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } } @@ -354,11 +354,11 @@ TEST_F(BlockUniTensorTest, gpu_Dagger) { if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), - double(tmp.at({k, l, i, j}).real())); + double(tmp.at({l, k, j, i}).real())); EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), - -double(tmp.at({k, l, i, j}).imag())); + -double(tmp.at({l, k, j, i}).imag())); } else { - EXPECT_FALSE(tmp.at({k, l, i, j}).exists()); + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } } diff --git a/tests/gpu/DenseUniTensor_test.cpp b/tests/gpu/DenseUniTensor_test.cpp index c562c051d..5c5e55132 100644 --- a/tests/gpu/DenseUniTensor_test.cpp +++ b/tests/gpu/DenseUniTensor_test.cpp @@ -211,29 +211,29 @@ TEST_F(DenseUniTensorTest, gpu_Dagger) { EXPECT_EQ(utzero3456.bonds()[3].type(), BD_REG); tmp = utarcomplex3456.Dagger(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + 
EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } tmp = utarcomplex3456.clone(); - utarcomplex3456.Dagger_(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(utarcomplex3456.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } }