diff --git a/example/DMRG/dmrg_two_sites_U1.py b/example/DMRG/dmrg_two_sites_U1.py index e99747bc3..806231699 100644 --- a/example/DMRG/dmrg_two_sites_U1.py +++ b/example/DMRG/dmrg_two_sites_U1.py @@ -19,7 +19,7 @@ def matvec(self, v): lbl = v.labels() self.anet.PutUniTensor("psi",v) out = self.anet.Launch() - out.relabel_(lbl) + out.relabels_(lbl) return out def optimize_psi(psi, functArgs, maxit=2, krydim=4): @@ -41,7 +41,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): return psivec, energy[0].item() - ## Initialiaze MPO + ## Initialize MPO ##>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> d = 2 s = 0.5 @@ -49,6 +49,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): bd_phys = cytnx.Bond(cytnx.BD_KET,[[1],[-1]],[1,1]) M = cytnx.UniTensor([bd_inner,bd_inner.redirect(),bd_phys, bd_phys.redirect()],rowrank=2) + M.set_name("MPO") # I M.set_elem([0,0,0,0],1); @@ -69,7 +70,9 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): VbdL = cytnx.Bond(cytnx.BD_KET,[[0]],[1]) VbdR = cytnx.Bond(cytnx.BD_KET,[[q]],[1]) L0 = cytnx.UniTensor([bd_inner.redirect(),VbdL.redirect(),VbdL],rowrank=1) #Left boundary + L0.set_name("L0") R0 = cytnx.UniTensor([bd_inner,VbdR,VbdR.redirect()],rowrank=1) #Right boundary + R0.set_name("R0") L0.set_elem([0,0,0],1) R0.set_elem([3,0,0],1) @@ -83,6 +86,7 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): A[0] = cytnx.UniTensor([VbdL,bd_phys.redirect(),cytnx.Bond(cytnx.BD_BRA,[[qcntr]],[1])],rowrank=2) A[0].get_block_()[0] = 1 + A[0].set_name("A0") lbls = [] lbls.append(["0","1","2"]) # store the labels for later convinience. @@ -96,9 +100,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): B3 = cytnx.Bond(cytnx.BD_BRA,[[qcntr]],[1]) A[k] = cytnx.UniTensor([B1,B2,B3],rowrank=2) + A[k].set_name(f"A{k}") lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].relabel_(lbl) + A[k].set_labels(lbl) A[k].get_block_()[0] = 1 lbls.append(lbl) # store the labels for later convinience. 
@@ -114,8 +119,11 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) for p in range(Nsites - 1): - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) + # Dagger() swaps left/right index order; permute_ restores original label order + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") Ekeep = [] for k in range(1, numsweeps+1): @@ -131,12 +139,15 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabel_(lbls[p]); # set the label back to be consistent + A[p].relabels_(lbls[p]); # set the label back to be consistent + + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") # update LR from right to left: anet = cytnx.Network() @@ -145,14 +156,16 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "M: 0,-2,-4,-5",\ "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Dagger()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabel_(lbls[0]); #set the label back to be consistent + A[0].relabels_(lbls[0]); #set the label back to be consistent for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -165,12 +178,15 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): 
psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabel_(lbls[p]); #set the label back to be consistent + A[p].relabels_(lbls[p]); #set the label back to be consistent s = s/s.Norm().item() # normalize s A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. - A[p+1].relabel_(lbls[p+1]); #set the label back to be consistent + A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") # update LR from left to right: anet = cytnx.Network() @@ -179,14 +195,17 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "M: -2,0,-4,-5",\ "A_Conj: -3,-5,2",\ "TOUT: 0;1,2"]) - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Dagger(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
- A[-1].relabel_(lbls[-1]); #set the label back to be consistent + A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].set_name(f"A{Nsites-1}") return Ekeep diff --git a/example/DMRG/dmrg_two_sites_dense.py b/example/DMRG/dmrg_two_sites_dense.py index 4bbe2109f..6c48ca6c0 100644 --- a/example/DMRG/dmrg_two_sites_dense.py +++ b/example/DMRG/dmrg_two_sites_dense.py @@ -18,7 +18,7 @@ def matvec(self, v): lbl = v.labels() self.anet.PutUniTensor("psi",v) out = self.anet.Launch() - out.relabel_(lbl) + out.relabels_(lbl) return out def optimize_psi(psi, functArgs, maxit=2, krydim=4): @@ -53,25 +53,32 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): M[0,1] = M[2,3] = 2**0.5*sp.real() M[0,2] = M[1,3] = 2**0.5*sm.real() M = cytnx.UniTensor(M,0) + M.set_name("MPO") L0 = cytnx.UniTensor(cytnx.zeros([4,1,1]), rowrank = 0) #Left boundary R0 = cytnx.UniTensor(cytnx.zeros([4,1,1]), rowrank = 0) #Right boundary - L0[0,0,0] = 1.; R0[3,0,0] = 1. + L0.set_name("L0") + R0.set_name("R0") + L0[0,0,0] = 1. + R0[3,0,0] = 1. - lbls = [] # List for storing the MPS labels A = [None for i in range(Nsites)] A[0] = cytnx.UniTensor(cytnx.random.normal([1, d, min(chi, d)], 0., 1.), rowrank = 2) - A[0].relabel_(["0","1","2"]) - lbls.append(["0","1","2"]) # store the labels for later convinience. + A[0].relabels_(["0","1","2"]) + A[0].set_name("A0") + + lbls = [] # List for storing the MPS labels + lbls.append(["0","1","2"]) # store the labels for later convenience. for k in range(1,Nsites): dim1 = A[k-1].shape()[2]; dim2 = d dim3 = min(min(chi, A[k-1].shape()[2] * d), d ** (Nsites - k - 1)) A[k] = cytnx.UniTensor(cytnx.random.normal([dim1, dim2, dim3],0.,1.), rowrank = 2) + A[k].set_name(f"A{k}") lbl = [str(2*k),str(2*k+1),str(2*k+2)] - A[k].relabel_(lbl) - lbls.append(lbl) # store the labels for later convinience. + A[k].relabels_(lbl) + lbls.append(lbl) # store the labels for later convenience. 
LR = [None for i in range(Nsites+1)] LR[0] = L0 @@ -84,7 +91,10 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): s, A[p] ,vt = cytnx.linalg.Gesvd(A[p]) A[p+1] = cytnx.Contract(cytnx.Contract(s,vt),A[p+1]) - ## Calculate enviroments: + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") + + ## Calculate environments: anet = cytnx.Network() anet.FromString(["L: -2,-1,-3",\ "A: -1,-4,1",\ @@ -92,15 +102,19 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + # Dagger() swaps left/right index order; permute_ restores original label order + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") # Recover the original MPS labels - A[p].relabel_(lbls[p]) - A[p+1].relabel_(lbls[p+1]) + A[p].relabels_(lbls[p]) + A[p+1].relabels_(lbls[p+1]) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. 
- A[-1].relabel_(lbls[-1]) # Recover the original MPS labels + A[-1].relabels_(lbls[-1]) # Recover the original MPS labels + A[-1].set_name(f"A{Nsites-1}") Ekeep = [] for k in range(1, numsweeps+1): @@ -116,12 +130,15 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p+1].relabel_(lbls[p+1]); # set the label back to be consistent + A[p+1].relabels_(lbls[p+1]); # set the label back to be consistent s = s/s.Norm().item() # normalize s A[p] = cytnx.Contract(A[p],s) # absorb s into next neighbor - A[p].relabel_(lbls[p]); # set the label back to be consistent + A[p].relabels_(lbls[p]); # set the label back to be consistent + + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") # update LR from right to left: anet = cytnx.Network() @@ -131,14 +148,17 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "B_Conj: 2,-5,-3",\ "TOUT: 0;1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+2],A[p+1],M,A[p+1].Conj()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+2],A[p+1],M,A[p+1].Dagger().permute_(A[p+1].labels())]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[r->l]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[0].set_rowrank_(1) _,A[0] = cytnx.linalg.Gesvd(A[0],is_U=False, is_vT=True) - A[0].relabel_(lbls[0]); #set the label back to be consistent + A[0].relabels_(lbls[0]); #set the label back to be consistent + A[0].set_name("A0") for p in range(Nsites-1): dim_l = A[p].shape()[0] @@ -151,12 +171,15 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): psi.set_rowrank_(2) # maintain rowrank to perform the svd s,A[p],A[p+1] = cytnx.linalg.Svd_truncate(psi,new_dim) - A[p].relabel_(lbls[p]); #set the label back to be consistent + A[p].relabels_(lbls[p]); #set the label back to be consistent s = 
s/s.Norm().item() # normalize s A[p+1] = cytnx.Contract(s,A[p+1]) ## absorb s into next neighbor. - A[p+1].relabel_(lbls[p+1]); #set the label back to be consistent + A[p+1].relabels_(lbls[p+1]); #set the label back to be consistent + + A[p].set_name(f"A{p}") + A[p+1].set_name(f"A{p+1}") # update LR from left to right: anet = cytnx.Network() @@ -167,14 +190,17 @@ def optimize_psi(psi, functArgs, maxit=2, krydim=4): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() + LR[p+1].set_name(f"LR{p+1}") print('Sweep[l->r]: %d/%d, Loc: %d,Energy: %f' % (k, numsweeps, p, Ekeep[-1])) A[-1].set_rowrank_(2) _,A[-1] = cytnx.linalg.Gesvd(A[-1],is_U=True,is_vT=False) ## last one. - A[-1].relabel_(lbls[-1]); #set the label back to be consistent + A[-1].relabels_(lbls[-1]); #set the label back to be consistent + A[-1].set_name(f"A{Nsites-1}") return Ekeep if __name__ == '__main__': diff --git a/example/TDVP/tdvp1_dense.py b/example/TDVP/tdvp1_dense.py index 480f5535c..bb91d7685 100644 --- a/example/TDVP/tdvp1_dense.py +++ b/example/TDVP/tdvp1_dense.py @@ -80,7 +80,9 @@ def get_energy(A, M): "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") for p in range(0, N): - anet.PutUniTensors(["L","A","A_Conj","M"],[L,A[p],A[p].Conj(),M]) + # Dagger() swaps left/right index order; permute_ restores original label order + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [L,A[p],A[p].Dagger().permute_(A[p].labels()),M]) L = anet.Launch() E = cytnx.Contract(L, R0).item() print('energy:', E) @@ -138,7 +140,8 @@ def get_energy(A, M): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) # or you can do: anet = cytnx.Network("L_AMAH.net") - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + 
anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it LR[p+1] = anet.Launch() # Recover the original MPS labels @@ -175,7 +178,8 @@ def get_energy(A, M): "B_Conj: 2,-5,-3",\ "TOUT: ;0,1,2"]) # or you can do: anet = cytnx.Network("R_AMAH.net") - anet.PutUniTensors(["R","B","M","B_Conj"],[LR[p+1],A[p],M,A[p].Conj()]) + anet.PutUniTensors(["R","B","M","B_Conj"], \ + [LR[p+1],A[p],M,A[p].Dagger().permute_(A[p].labels())]) # Dagger() swaps index order; permute_ restores it old_LR = LR[p].clone() if p != 0: LR[p] = anet.Launch() @@ -215,7 +219,8 @@ def get_energy(A, M): "A_Conj: -3,-5,2",\ "TOUT: 0,1,2"]) - anet.PutUniTensors(["L","A","A_Conj","M"],[LR[p],A[p],A[p].Conj(),M]) + anet.PutUniTensors(["L","A","A_Conj","M"], \ + [LR[p],A[p],A[p].Dagger().permute_(A[p].labels()),M]) # Dagger() swaps index order; permute_ restores it old_LR = LR[p+1].clone() @@ -246,7 +251,8 @@ def Local_meas(A, B, Op, site): "TOUT: 2;4"]) for i in range(0, N): if i != site: - anet.PutUniTensors(["l","A","B"],[l,A[i],B[i].Conj()]) + anet.PutUniTensors(["l","A","B"], \ + [l,A[i],B[i].Dagger().permute_(B[i].labels())]) # Dagger() swaps index order; permute_ restores it l = anet.Launch() else: tmp = A[i].relabel(1, "_aux_up") @@ -254,7 +260,8 @@ def Local_meas(A, B, Op, site): tmp = cytnx.Contract(tmp, Op) tmp.relabel_("_aux_low", A[i].labels()[1]) tmp.permute_(A[i].labels()) - anet.PutUniTensors(["l","A","B"],[l,tmp,B[i].Conj()]) + anet.PutUniTensors(["l","A","B"], \ + [l,tmp,B[i].Dagger().permute_(B[i].labels())]) # Dagger() swaps index order; permute_ restores it l = anet.Launch() return l.reshape(1).item() diff --git a/include/Bond.hpp b/include/Bond.hpp index 58a7497a8..8ee8ae46d 100644 --- a/include/Bond.hpp +++ b/include/Bond.hpp @@ -799,7 +799,7 @@ namespace cytnx { /** @brief Group the duplicated quantum number and return the new instance - of the Bond ojbect. + of the Bond object. 
@details This function will group the duplicated quantum number and return the new instance of the Bond object. It will also the \p mapper, where \p mapper is about the new index from old index via\n diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index 14a77fd6a..901bc64a6 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -5000,16 +5000,17 @@ namespace cytnx { /** @brief Take the transpose of the UniTensor. - @details This function will take the transpose of the UniTensor. If the UniTensor is - tagged (i.e. the Bonds are directional), it will swap the direction of the Bonds but - the rowrank will not change. If the UniTensor is untagged (i.e. the Bonds are - BondType::BD_REG), it will change the rowrank to the opposite side. - For fermionic UniTensors, the index order will be reversed without sign flips, and the - direction of all Bonds will swapped. - @return UniTensor + @details This function takes the transpose of a UniTensor: + 1) The order of the indices is inverted. + 2) Incoming legs become outgoing ones, and vice versa. + 3) The rowrank is set to rank - old rowrank, such that left indices become right indices and + vice versa. + @return UniTensor + @note This function does not only exchange left- and right indices, but inverts the order of all + indices. @note Compared to Transpose_(), this function will return new UniTensor object. - @see Transpose_() - */ + @see Transpose_() + */ UniTensor Transpose() const { UniTensor out; out._impl = this->_impl->Transpose(); @@ -5018,10 +5019,11 @@ namespace cytnx { /** @brief Take the transpose of the UniTensor, inplacely. - @return UniTensor + @return UniTensor + @note This function inverts the order of all indices. @note Compared to Transpose(), this function is an inplace function. 
- @see Transpose() - */ + @see Transpose() + */ UniTensor &Transpose_() { this->_impl->Transpose_(); return *this; } @@ -5126,10 +5128,11 @@ namespace cytnx { /** @brief Take the conjugate transpose to the UniTensor. - @return UniTensor - @note Compared to Dagger_(), this function will create a new UniTensor ojbect. - @see Dagger_(), Transpose() - */ + @return UniTensor + @note This function inverts the order of all indices. + @note Compared to Dagger_(), this function will create a new UniTensor object. + @see Dagger_(), Transpose() + */ UniTensor Dagger() const { UniTensor out; out._impl = this->_impl->Dagger(); @@ -5138,10 +5141,11 @@ namespace cytnx { /** @brief Take the conjugate transpose to the UniTensor, inplacely. - @return UniTensor& + @return UniTensor& + @note This function inverts the order of all indices. @note Compared to Dagger(), this is an inplace function. - @see Dagger() - */ + @see Dagger() + */ UniTensor &Dagger_() { this->_impl->Dagger_(); return *this; diff --git a/misc_doc/version.log b/misc_doc/version.log index 3dabaa2b0..a14b635b4 100644 --- a/misc_doc/version.log +++ b/misc_doc/version.log @@ -4,7 +4,7 @@ v1.0.0 3. [Change] Merge relabel and relabels into relabel, and relabel_ and relabels_ into relabel_. 4. [New] Add an optional argument min_blockdim to svd_truncate to define a minimum dimension for each block. 5. [New] Add Eig/Eigh functions for Block UniTensor. -6. [New] Add Lancos-like algoirthm, Lanczos_Exp, to approximate exponential operator acting on a state. +6. [New] Add Lanczos-like algorithm, Lanczos_Exp, to approximate exponential operator acting on a state. 7. [Change] Migrate cuTENSOR APIs to the version 2. 8. [Change] reshape_ and permute_ to return the object itself instead of None. 9. [Change] Remove the magma dependency. 
diff --git a/src/BlockFermionicUniTensor.cpp b/src/BlockFermionicUniTensor.cpp index 095c1bea0..49422a329 100644 --- a/src/BlockFermionicUniTensor.cpp +++ b/src/BlockFermionicUniTensor.cpp @@ -1895,17 +1895,14 @@ namespace cytnx { }; void BlockFermionicUniTensor::Transpose_() { - //[21 Aug 2024] This is a copy from BlockUniTensor; - // modify tag // The index order is reversed without any sign flips! std::vector idxorder(this->_bonds.size()); - std::size_t idxnum = this->bonds().size() - 1; - for (int i = 0; i <= idxnum; i++) { + cytnx_int64 idxnum = this->bonds().size() - 1; + for (cytnx_int64 i = 0; i <= idxnum; i++) { this->bonds()[i].redirect_(); - // this->bonds()[i].qnums() = this->bonds()[i].calc_reverse_qnums(); idxorder[i] = idxnum - i; } - this->permute_nosignflip_(idxorder); + this->permute_nosignflip_(idxorder, idxnum + 1 - this->_rowrank); }; void BlockFermionicUniTensor::normalize_() { diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 9c1bdf75e..5de7c953f 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -1214,11 +1214,13 @@ namespace cytnx { }; void BlockUniTensor::Transpose_() { - // modify tag - for (int i = 0; i < this->bonds().size(); i++) { + std::vector idxorder(this->_bonds.size()); + cytnx_int64 idxnum = this->bonds().size() - 1; + for (cytnx_int64 i = 0; i <= idxnum; i++) { this->bonds()[i].redirect_(); - // this->bonds()[i].qnums() = this->bonds()[i].calc_reverse_qnums(); + idxorder[i] = idxnum - i; } + this->permute_(idxorder, idxnum + 1 - this->_rowrank); }; void BlockUniTensor::normalize_() { diff --git a/src/DenseUniTensor.cpp b/src/DenseUniTensor.cpp index 3686ecd26..532421153 100644 --- a/src/DenseUniTensor.cpp +++ b/src/DenseUniTensor.cpp @@ -1189,21 +1189,21 @@ namespace cytnx { } void DenseUniTensor::Transpose_() { + std::vector idxorder(this->_bonds.size()); + cytnx_int64 idxnum = this->bonds().size() - 1; if (this->is_tag()) { - // this->_rowrank = this->rank() - this->_rowrank; - for (int i = 
0; i < this->rank(); i++) { - this->_bonds[i].set_type((this->_bonds[i].type() == BD_KET) ? BD_BRA : BD_KET); + for (cytnx_int64 i = 0; i <= idxnum; i++) { + this->bonds()[i].redirect_(); + idxorder[i] = idxnum - i; } - this->_is_braket_form = this->_update_braket(); - } else { - std::vector new_permute = - vec_concatenate(vec_range(this->rowrank(), this->rank()), - vec_range(0, this->rowrank())); - this->permute_(new_permute); - this->_rowrank = this->rank() - this->_rowrank; + for (cytnx_int64 i = 0; i <= idxnum; i++) { + idxorder[i] = idxnum - i; + } } - } + this->permute_(idxorder, idxnum + 1 - this->_rowrank); + }; + void DenseUniTensor::normalize_() { this->_block /= linalg::Norm(this->_block); } void DenseUniTensor::_save_dispatch(std::fstream &f) const { this->_block._Save(f); } diff --git a/src/linalg/Lanczos_Exp.cpp b/src/linalg/Lanczos_Exp.cpp index d07508128..fd77db46c 100644 --- a/src/linalg/Lanczos_Exp.cpp +++ b/src/linalg/Lanczos_Exp.cpp @@ -172,7 +172,7 @@ namespace cytnx { // Let V_k be the n × (k + 1) matrix whose columns are v[0],...,v[k] respectively. UniTensor Vk_ut(Vk); Vk_ut.set_rowrank_(1); - auto VkDag_ut = Vk_ut.Dagger(); // left and right indices are exchanged here! + auto VkDag_ut = Vk_ut.Dagger(); // index order is inverted here! // Let T_k be the (k + 1) × (k + 1) matrix a[i,j] i,j is {0,...,k} and Tk_hat = 1 / 2 // (Tk^Dagger + Tk). 
auto asT = as.permute({1, 0}).Conj().contiguous(); @@ -213,7 +213,8 @@ namespace cytnx { auto Vk_labels = v0.labels(); Vk_labels.insert(Vk_labels.begin(), label_kl); Vk_ut.relabel_(Vk_labels); - auto VkDag_labels = v0.labels(); + auto VkDag_labels = + std::vector(v0.labels().rbegin(), v0.labels().rend()); // inverted order VkDag_labels.push_back(label_kr); VkDag_ut.relabel_(VkDag_labels); @@ -259,7 +260,7 @@ namespace cytnx { for (int i = 1; i < imp_maxiter; ++i) { if (verbose) { - std::cout << "Lancos iteration:" << i << std::endl; + std::cout << "Lanczos iteration:" << i << std::endl; } auto beta = std::sqrt(double(Dot_internal(w, w).real())); v_old = v.clone(); @@ -313,7 +314,7 @@ namespace cytnx { // Let V_k be the n × (k + 1) matrix whose columns are v[0],...,v[k] respectively. UniTensor Vk_ut(Vk); Vk_ut.set_rowrank_(1); - auto VkDag_ut = Vk_ut.Dagger(); // left and right indices are exchanged here! + auto VkDag_ut = Vk_ut.Dagger(); // Index order is inverted here! /* * ||| * |-----| @@ -348,10 +349,10 @@ namespace cytnx { auto Vk_labels = v.labels(); Vk_labels.insert(Vk_labels.begin(), label_kl); Vk_ut.relabel_(Vk_labels); - auto VkDag_labels = v.labels(); + auto VkDag_labels = + std::vector(v.labels().rbegin(), v.labels().rend()); // inverted order VkDag_labels.push_back(label_kr); VkDag_ut.relabel_(VkDag_labels); - out = Contracts({T, VkDag_ut, B}, "", true); out = Contract(out, Vk_ut); out.set_rowrank_(v.rowrank()); diff --git a/tests/BlockFermionicUniTensor_test.cpp b/tests/BlockFermionicUniTensor_test.cpp index 1a9a30912..04b6480fc 100644 --- a/tests/BlockFermionicUniTensor_test.cpp +++ b/tests/BlockFermionicUniTensor_test.cpp @@ -198,3 +198,51 @@ TEST_F(BlockFermionicUniTensorTest, SaveLoad) { UniTensor BFUT1_loaded_char_load = BFUT1_loaded_char_load.Load(fname); EXPECT_TRUE(AreEqUniTensor(BFUT1, BFUT1_loaded_char_load)); } + +/*=====test info===== +describe:test Transpose and Transpose_ for BlockFermionicUniTensor: + rowrank is updated, index order is 
reversed, bonds are redirected, + and element values are preserved without sign flips. +====================*/ +TEST_F(BlockFermionicUniTensorTest, Transpose) { + // BFUT1: rank=3, rowrank=2, bonds=[BD_IN(a), BD_IN(b), BD_OUT(c)], shape=(2,2,4) + EXPECT_EQ(BFUT1.rowrank(), 2); + + auto tmp = BFUT1.Transpose(); + + // rowrank must be rank - old_rowrank = 3 - 2 = 1 + EXPECT_EQ(tmp.rowrank(), 1); + EXPECT_EQ(tmp.rank(), 3); + + // index order is reversed: new [0,1,2] = old [c,b,a] + EXPECT_EQ(tmp.labels()[0], "c"); + EXPECT_EQ(tmp.labels()[1], "b"); + EXPECT_EQ(tmp.labels()[2], "a"); + + // bonds are redirected: old BD_OUT(c)->BD_IN, old BD_IN(b)->BD_OUT, old BD_IN(a)->BD_OUT + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + + // element at old {a,b,c} appears at new {c,b,a}; no sign flips + EXPECT_DOUBLE_EQ(double(tmp.at({0, 0, 0}).real()), 1.); + EXPECT_DOUBLE_EQ(double(tmp.at({1, 0, 0}).real()), 2.); + EXPECT_DOUBLE_EQ(double(tmp.at({2, 1, 0}).real()), 3.); + EXPECT_DOUBLE_EQ(double(tmp.at({3, 1, 0}).real()), 4.); + EXPECT_DOUBLE_EQ(double(tmp.at({2, 0, 1}).real()), 5.); + EXPECT_DOUBLE_EQ(double(tmp.at({3, 0, 1}).real()), 6.); + EXPECT_DOUBLE_EQ(double(tmp.at({0, 1, 1}).real()), 7.); + EXPECT_DOUBLE_EQ(double(tmp.at({1, 1, 1}).real()), 8.); + + // Transpose is an involution: T.Transpose().Transpose() == T + EXPECT_TRUE(AreEqUniTensor(tmp.Transpose(), BFUT1)); + + // in-place version must match + auto tmp2 = BFUT1.clone(); + tmp2.Transpose_(); + EXPECT_EQ(tmp2.rowrank(), 1); + EXPECT_EQ(tmp2.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp2.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp2.bonds()[2].type(), BD_OUT); + EXPECT_TRUE(AreEqUniTensor(tmp2, tmp)); +} diff --git a/tests/BlockUniTensor_test.cpp b/tests/BlockUniTensor_test.cpp index 6631883e0..425d9f03d 100644 --- a/tests/BlockUniTensor_test.cpp +++ b/tests/BlockUniTensor_test.cpp @@ -52,22 +52,6 @@ TEST_F(BlockUniTensorTest, Init) { 
EXPECT_ANY_THROW(BkUt.Init({phy, phy}, {"a", "b"}, 1, Type.Float, Device.cpu, true, false)); } -/*=====test info===== -describe:write to disc -====================*/ -TEST_F(BlockUniTensorTest, SaveLoad) { - BUT1.Save(temp_file_path); - UniTensor BUT1_loaded = BUT1_loaded.Load(temp_file_path); - EXPECT_TRUE(AreEqUniTensor(BUT1, BUT1_loaded)); - // for char* - const char *fname = temp_file_path.c_str(); - BUT1.Save(fname); - UniTensor BUT1_loaded_char_save = BUT1_loaded_char_save.Load(temp_file_path); - EXPECT_TRUE(AreEqUniTensor(BUT1, BUT1_loaded_char_save)); - UniTensor BUT1_loaded_char_load = BUT1_loaded_char_load.Load(fname); - EXPECT_TRUE(AreEqUniTensor(BUT1, BUT1_loaded_char_load)); -} - TEST_F(BlockUniTensorTest, set_rowrank) { // Spf is a rank-3 tensor EXPECT_ANY_THROW(Spf.set_rowrank(-2)); // set_rowrank cannot be negative! @@ -120,19 +104,14 @@ TEST_F(BlockUniTensorTest, is_blockform) { } TEST_F(BlockUniTensorTest, clone) { UniTensor cloned = UT_pB_ans.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(cloned.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (cloned.at({i, j, k}).exists()) EXPECT_EQ(cloned.at({i, j, k}), UT_pB_ans.at({i, j, k})); } } -// Deprecated-function tests: suppress warnings so the compiler does not error -// on [[deprecated]] calls. These tests verify backward compatibility. 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" - TEST_F(BlockUniTensorTest, relabels) { BUT1 = BUT1.relabels({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); @@ -151,7 +130,6 @@ TEST_F(BlockUniTensorTest, relabels) { EXPECT_THROW(BUT1.relabels({"1", "2"}), std::logic_error); EXPECT_THROW(BUT1.relabels({"a", "b", "c", "d", "e"}), std::logic_error); } - TEST_F(BlockUniTensorTest, relabels_) { BUT1.relabels_({"a", "b", "cd", "d"}); EXPECT_EQ(BUT1.labels()[0], "a"); @@ -170,8 +148,6 @@ TEST_F(BlockUniTensorTest, relabels_) { EXPECT_THROW(BUT1.relabels_({"a", "b", "c", "d", "e"}), std::logic_error); } -#pragma GCC diagnostic pop - TEST_F(BlockUniTensorTest, relabel) { auto tmp = BUT1.clone(); BUT1 = BUT1.relabel({"a", "b", "cd", "d"}); @@ -285,9 +261,9 @@ TEST_F(BlockUniTensorTest, permute1) { // rank-3 tensor std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.permute(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -299,8 +275,8 @@ TEST_F(BlockUniTensorTest, permute2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.permute(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -312,9 +288,9 @@ TEST_F(BlockUniTensorTest, permute_1) { std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.clone(); permuted.permute_(a, -1); - for (size_t i = 0; i < 10; i++) - 
for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -326,8 +302,8 @@ TEST_F(BlockUniTensorTest, permute_2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.clone(); permuted.permute_(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -448,9 +424,9 @@ TEST_F(BlockUniTensorTest, put_block_byidx) { UT_pB.put_block(t1a, 1); UT_pB.put_block(t1b, 2); UT_pB.put_block(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -473,9 +449,9 @@ TEST_F(BlockUniTensorTest, put_block__byidx) { UT_pB.put_block_(t1a, 1); UT_pB.put_block_(t1b, 2); UT_pB.put_block_(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -498,9 +474,9 @@ TEST_F(BlockUniTensorTest, put_block_byqnum) { 
UT_pB.put_block(t1a, {0, 1, 1}, true); UT_pB.put_block(t1b, {1, 0, 1}, true); UT_pB.put_block(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -523,9 +499,9 @@ TEST_F(BlockUniTensorTest, put_block__byqnum) { UT_pB.put_block_(t1a, {0, 1, 1}, true); UT_pB.put_block_(t1b, {1, 0, 1}, true); UT_pB.put_block_(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -550,14 +526,14 @@ TEST_F(BlockUniTensorTest, reshape_) { EXPECT_ANY_THROW(Spf.reshape_({4, 1}, 1)) TEST_F(BlockUniTensorTest, contract1) { // two sparse matrix - UT_contract_L1.relabel_({"a", "b"}); - UT_contract_R1.relabel_({"b", "c"}); + UT_contract_L1.set_labels({"a", "b"}); + UT_contract_R1.set_labels({"b", "c"}); UniTensor out = UT_contract_L1.contract(UT_contract_R1); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans1.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -565,14 +541,14 @@ TEST_F(BlockUniTensorTest, contract1) { TEST_F(BlockUniTensorTest, contract2) { // two sparse matrix with degeneracy - UT_contract_L2.relabel_({"a", "b"}); - 
UT_contract_R2.relabel_({"b", "c"}); + UT_contract_L2.set_labels({"a", "b"}); + UT_contract_R2.set_labels({"b", "c"}); UniTensor out = UT_contract_L2.contract(UT_contract_R2); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans2.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -580,14 +556,14 @@ TEST_F(BlockUniTensorTest, contract2) { TEST_F(BlockUniTensorTest, contract3) { //// two 3 legs tensor - UT_contract_L3.relabel_({"a", "b", "c"}); - UT_contract_R3.relabel_({"c", "d", "e"}); + UT_contract_L3.set_labels({"a", "b", "c"}); + UT_contract_R3.set_labels({"c", "d", "e"}); UniTensor out = UT_contract_L3.contract(UT_contract_R3); auto outbks = out.get_blocks(); auto ansbks = UT_contract_ans3.get_blocks(); for (int i = 0; i < ansbks.size(); i++) { - std::cout << outbks[i] << std::endl; - std::cout << ansbks[i] << std::endl; + // std::cout << outbks[i] << std::endl; + // std::cout << ansbks[i] << std::endl; EXPECT_EQ(AreNearlyEqTensor(outbks[i], ansbks[i], 1e-5), true); } } @@ -622,51 +598,51 @@ TEST_F(BlockUniTensorTest, Add) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Add(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if 
(out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } BUT4.Add_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, Mul) { auto out = BUT4.Mul(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, 
l}).imag())); } BUT4.Mul_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } } @@ -693,51 +669,51 @@ TEST_F(BlockUniTensorTest, Sub) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx"); auto out2 = BUT4.Sub(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } BUT4.Sub_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - 
for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, Div) { auto out = BUT4.Div(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } BUT4.Div_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - 
EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } // BUT4 = BUT4.Load("OriginalBUT.cytnx"); @@ -792,7 +768,7 @@ TEST_F(BlockUniTensorTest, Norm) { cytnx_double tmp = double(UT_diag.Norm().at({0}).real()); cytnx_double ans = 0; - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; for (int j = 0; j < deg; j++) ans += (i + 1) * (i + 1); } @@ -813,10 +789,10 @@ TEST_F(BlockUniTensorTest, Inv) { tmp.Inv_(clip); // test inline version EXPECT_TRUE(AreEqUniTensor(BUT4.Inv(clip), tmp)); tmp = BUT4.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 11; j++) - for (size_t k = 0; k < 3; k++) - for (size_t l = 0; l < 5; l++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { auto proxy = tmp.at({i, j, k, l}); if (proxy.exists()) { Scalar val = proxy; @@ -840,10 +816,10 @@ TEST_F(BlockUniTensorTest, Pow) { EXPECT_TRUE(AreEqUniTensor(BUT4.Pow(2.3), tmp)); for (double p = 0.; p < 1.6; p += 0.5) { tmp = BUT4.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 11; j++) - for (size_t k = 0; k < 3; k++) - for (size_t l = 0; l < 5; l++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { auto proxy = tmp.at({i, j, k, l}); if (proxy.exists()) { 
Scalar val = proxy; @@ -857,35 +833,35 @@ TEST_F(BlockUniTensorTest, Pow) { TEST_F(BlockUniTensorTest, Conj) { auto tmp = BUT4.Conj(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); } tmp = BUT4.clone(); - BUT4.Conj_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp.Conj_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, 
k, l}).imag()), + -double(tmp.at({i, j, k, l}).imag())); } tmp = UT_diag_cplx.Conj(); - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -895,29 +871,29 @@ TEST_F(BlockUniTensorTest, Conj) { } TEST_F(BlockUniTensorTest, Transpose) { - auto tmp = BUT1.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + auto tmp = BUT1.Transpose().set_name("BUT1.Transpose"); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Transpose_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Transpose_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), 
BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), @@ -929,19 +905,17 @@ TEST_F(BlockUniTensorTest, Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout<_degs[i]; for (int j = 0; j < deg; j++) ans += i + 1; } @@ -963,63 +937,90 @@ TEST_F(BlockUniTensorTest, Trace) { TEST_F(BlockUniTensorTest, Dagger) { auto tmp = BUT1.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Dagger_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Dagger_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); - tmp = BUT4.Dagger(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; 
l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp = BUT4.Dagger().set_name("BUT4.Dagger"); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + // std::cout << " BUT4(i=" << i << ", j=" << j << ", k=" << k << ", l=" << l + // << ") = " << double(BUT4.at({i, j, k, l}).real()) << " + i * " + // << double(BUT4.at({i, j, k, l}).imag()) << std::endl; + // std::cout << "BUT4.Dagger(k=" << k << ", l=" << l << ", i=" << i << ", j=" << j + // << ") = " << double(tmp.at({k, l, i, j}).real()) << " + i * " + // << double(tmp.at({k, l, i, j}).imag()) << std::endl; + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); + } else { + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } + } tmp = BUT4.clone(); - BUT4.Dagger_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { + tmp.Dagger_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 
1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); + } else { + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } + } + + tmp = UT_pB.set_rowrank(2).Dagger().set_name("UT_pB.Dagger"); + EXPECT_EQ(tmp.rowrank(), 1); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 0; k < 30; k++) { + if (UT_pB.at({i, j, k}).exists()) { + EXPECT_DOUBLE_EQ(double(tmp.at({k, j, i}).real()), double(UT_pB.at({i, j, k}).real())); + } else { + EXPECT_FALSE(tmp.at({k, j, i}).exists()); + } + } tmp = UT_diag_cplx.Dagger(); - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -1029,25 +1030,24 @@ TEST_F(BlockUniTensorTest, Dagger) { } TEST_F(BlockUniTensorTest, elem_exist) { - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.elem_exists({i - 1, j - 1, k - 1, l - 1})) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.elem_exists({i, j, k, l})) { cytnx_int64 _a; std::vector _b; - ((BlockUniTensor *)BUT4._impl.get()) - 
->_fx_locate_elem(_a, _b, {i - 1, j - 1, k - 1, l - 1}); + ((BlockUniTensor*)BUT4._impl.get())->_fx_locate_elem(_a, _b, {i, j, k, l}); std::vector qind = BUT4.get_qindices(_a); EXPECT_EQ(BUT4.bonds()[0].qnums()[qind[0]][0] - BUT4.bonds()[1].qnums()[qind[1]][0] + BUT4.bonds()[2].qnums()[qind[2]][0] - BUT4.bonds()[3].qnums()[qind[3]][0], 0); } - size_t offset = 0; - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + cytnx_int64 offset = 0; + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_TRUE(UT_diag_cplx.elem_exists({offset + j, offset + j})); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).real()), double(i + 1)); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).imag()), double(i + 1)); diff --git a/tests/BlockUniTensor_test.h b/tests/BlockUniTensor_test.h index d25e382f0..7741a7191 100644 --- a/tests/BlockUniTensor_test.h +++ b/tests/BlockUniTensor_test.h @@ -16,17 +16,17 @@ class BlockUniTensorTest : public ::testing::Test { Bond B2 = Bond(BD_IN, {Qs(0), Qs(1)}, {3, 4}); Bond B3 = Bond(BD_OUT, {Qs(0) >> 2, Qs(1) >> 3}); Bond B4 = Bond(BD_OUT, {Qs(0), Qs(1)}, {1, 2}); - UniTensor BUT1 = UniTensor({B1, B2, B3, B4}); + UniTensor BUT1 = UniTensor({B1, B2, B3, B4}).set_name("BUT1"); Bond bd_sym_a = Bond(BD_KET, {{0, 2}, {3, 5}, {1, 6}, {4, 1}}, {4, 7, 2, 3}); Bond bd_sym_b = Bond(BD_BRA, {{0, 2}, {3, 5}, {1, 6}, {4, 1}}, {4, 7, 2, 3}); - UniTensor BUT2 = UniTensor({bd_sym_a, bd_sym_b}); + UniTensor BUT2 = UniTensor({bd_sym_a, bd_sym_b}).set_name("BUT2"); Bond bd_sym_c = Bond(BD_KET, {{0, 2}, {1, 5}, {1, 6}, {0, 1}}, {4, 7, 2, 3}, {Symmetry::Zn(2), Symmetry::U1()}); Bond bd_sym_d = Bond(BD_BRA, {{0, 2}, {1, 5}, {1, 6}, {0, 1}}, {4, 7, 2, 3}, {Symmetry::Zn(2), Symmetry::U1()}); - UniTensor BUT3 = UniTensor({bd_sym_c, bd_sym_d}); + UniTensor 
BUT3 = UniTensor({bd_sym_c, bd_sym_d}).set_name("BUT3"); Bond B1p = Bond(BD_IN, {Qs(-1), Qs(0), Qs(1)}, {2, 1, 2}); Bond B2p = Bond(BD_OUT, {Qs(-1), Qs(0), Qs(1)}, {4, 3, 4}); @@ -115,7 +115,7 @@ class BlockUniTensorTest : public ::testing::Test { protected: void SetUp() override { - BUT4 = UniTensor::Load(data_dir + "OriginalBUT.cytnx"); + BUT4 = UniTensor::Load(data_dir + "OriginalBUT.cytnx").set_name("BUT4"); BUT4_2 = UniTensor::Load(data_dir + "OriginalBUT2.cytnx"); BUconjT4 = UniTensor::Load(data_dir + "BUconjT.cytnx"); BUtrT4 = UniTensor::Load(data_dir + "BUtrT.cytnx"); diff --git a/tests/DenseUniTensor_test.cpp b/tests/DenseUniTensor_test.cpp index 2653edc91..42e92775d 100644 --- a/tests/DenseUniTensor_test.cpp +++ b/tests/DenseUniTensor_test.cpp @@ -4096,7 +4096,7 @@ TEST_F(DenseUniTensorTest, Conj_utuninit) { } /*=====test info===== -describe:test Trnaspose +describe:test Transpose ====================*/ TEST_F(DenseUniTensorTest, Transpose) { auto row_rank = 2u; @@ -4110,16 +4110,16 @@ TEST_F(DenseUniTensorTest, Transpose) { for (size_t i = 0; i < ut_t.rank(); i++) { EXPECT_EQ(ut_t.bonds()[i].type(), BD_REG); } - // a, b; c -> c;a, b + // a, b; c -> c; a, b EXPECT_EQ(ut.labels(), std::vector({"a", "b", "c"})); - EXPECT_EQ(ut_t.labels(), std::vector({"c", "a", "b"})); + EXPECT_EQ(ut_t.labels(), std::vector({"c", "b", "a"})); EXPECT_EQ(ut.rowrank(), row_rank); EXPECT_EQ(ut_t.rowrank(), ut_t.rank() - row_rank); auto shape = ut.shape(); for (cytnx_uint64 i = 0; i < shape[0]; i++) { for (cytnx_uint64 j = 0; j < shape[1]; j++) { for (cytnx_uint64 k = 0; k < shape[2]; k++) { - EXPECT_EQ(ut.at({i, j, k}), ut_t.at({k, i, j})); + EXPECT_EQ(ut.at({i, j, k}), ut_t.at({k, j, i})); } } } @@ -4127,7 +4127,7 @@ TEST_F(DenseUniTensorTest, Transpose) { } /*=====test info===== -describe:test Trnaspose with diagonal UniTensor +describe:test Transpose with diagonal UniTensor ====================*/ TEST_F(DenseUniTensorTest, Transpose_diag) { auto row_rank = 1u; @@ -4144,7 
+4144,7 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { for (size_t i = 0; i < ut_t.rank(); i++) { EXPECT_EQ(ut_t.bonds()[i].type(), BD_REG); } - // a, b; c -> c;a, b + // a; b -> b; a EXPECT_EQ(ut_diag.labels(), std::vector({"a", "b"})); EXPECT_EQ(ut_t.labels(), std::vector({"b", "a"})); EXPECT_EQ(ut_diag.rowrank(), row_rank); @@ -4157,7 +4157,40 @@ TEST_F(DenseUniTensorTest, Transpose_diag) { } /*=====test info===== -describe:test Trnaspose_ +describe:test Transpose with tagged UniTensor +====================*/ +TEST_F(DenseUniTensorTest, Transpose_tagged) { + auto Spcd_t = Spcd.Transpose(); + // test tag, rowrank, rank + EXPECT_TRUE(Spcd_t.is_tag()); + EXPECT_EQ(Spcd.rowrank(), 1); + EXPECT_EQ(Spcd_t.rowrank(), 2); + EXPECT_EQ(Spcd_t.rank(), 3); + // test bond types + std::vector bonds_t = Spcd_t.bonds(); + EXPECT_EQ(bonds_t[0].type(), BD_OUT); + EXPECT_EQ(bonds_t[1].type(), BD_IN); + EXPECT_EQ(bonds_t[2].type(), BD_OUT); + // test labels + std::vector labels = Spcd.labels(); + std::vector labels_t = Spcd_t.labels(); + EXPECT_EQ(labels_t[0], labels[2]); + EXPECT_EQ(labels_t[1], labels[1]); + EXPECT_EQ(labels_t[2], labels[0]); + // test shape + auto shape = Spcd.shape(); + auto shape_t = Spcd_t.shape(); + EXPECT_EQ(shape_t[0], shape[2]); + EXPECT_EQ(shape_t[1], shape[1]); + EXPECT_EQ(shape_t[2], shape[0]); + // test tensors + EXPECT_TRUE(AreEqUniTensor(Spcd_t.Transpose(), Spcd)); + auto Spcd_p = Spcd_t.permute(Spcd.labels()); + EXPECT_TRUE(AreEqUniTensor(Spcd_p, Spcd)); +} + +/*=====test info===== +describe:test Transpose_ ====================*/ TEST_F(DenseUniTensorTest, Transpose_) { auto row_rank = 2u; @@ -4173,7 +4206,7 @@ TEST_F(DenseUniTensorTest, Transpose_) { } /*=====test info===== -describe:test Trnaspose with uninitialized UniTensor +describe:test Transpose with uninitialized UniTensor ====================*/ TEST_F(DenseUniTensorTest, Transpose_uninit) { EXPECT_ANY_THROW(ut_uninit.Transpose()); @@ -4348,29 +4381,29 @@ TEST_F(DenseUniTensorTest, Dagger) 
{ EXPECT_EQ(utzero3456.bonds()[3].type(), BD_REG); tmp = utarcomplex3456.Dagger(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } tmp = utarcomplex3456.clone(); - utarcomplex3456.Dagger_(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(utarcomplex3456.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, 
i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } } /*=====test info===== diff --git a/tests/gpu/BlockUniTensor_test.cpp b/tests/gpu/BlockUniTensor_test.cpp index 165630f73..8e3c460fa 100644 --- a/tests/gpu/BlockUniTensor_test.cpp +++ b/tests/gpu/BlockUniTensor_test.cpp @@ -6,19 +6,17 @@ TEST_F(BlockUniTensorTest, gpu_Trace) { auto tmp = BUT4.Trace(0, 3); // std::cout<_degs[i]; for (int j = 0; j < deg; j++) ans += i + 1; } @@ -193,7 +191,7 @@ TEST_F(BlockUniTensorTest, gpu_Norm) { cytnx_double tmp = double(UT_diag.Norm().at({0}).real()); cytnx_double ans = 0; - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; for (int j = 0; j < deg; j++) ans += (i + 1) * (i + 1); } @@ -235,35 +233,35 @@ TEST_F(BlockUniTensorTest, gpu_Pow) { TEST_F(BlockUniTensorTest, gpu_Conj) { auto tmp = BUT4.Conj(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, 
l}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({i, j, k, l}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); } tmp = BUT4.clone(); BUT4.Conj_(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({i, j, k, l}).imag())); } tmp = UT_diag_cplx.Conj(); - for (size_t i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -274,28 +272,28 @@ TEST_F(BlockUniTensorTest, gpu_Conj) { TEST_F(BlockUniTensorTest, gpu_Transpose) { auto tmp = BUT1.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + 
EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Transpose(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Transpose_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Transpose_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), @@ -304,63 +302,70 @@ TEST_F(BlockUniTensorTest, gpu_Transpose) { TEST_F(BlockUniTensorTest, gpu_Dagger) { auto tmp = BUT1.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[1].type(), BD_OUT); - EXPECT_EQ(tmp.bonds()[2].type(), BD_IN); - EXPECT_EQ(tmp.bonds()[3].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[0].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[1].type(), BD_IN); + EXPECT_EQ(tmp.bonds()[2].type(), BD_OUT); + EXPECT_EQ(tmp.bonds()[3].type(), BD_OUT); tmp = BUT5.Dagger(); - EXPECT_EQ(tmp.bonds()[0].type(), BD_BRA); - EXPECT_EQ(tmp.bonds()[1].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[0].type(), BD_KET); + EXPECT_EQ(tmp.bonds()[1].type(), BD_BRA); EXPECT_EQ(tmp.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); 
EXPECT_EQ(tmp.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); BUT1.Dagger_(); - EXPECT_EQ(BUT1.bonds()[0].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[1].type(), BD_OUT); - EXPECT_EQ(BUT1.bonds()[2].type(), BD_IN); - EXPECT_EQ(BUT1.bonds()[3].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[0].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[1].type(), BD_IN); + EXPECT_EQ(BUT1.bonds()[2].type(), BD_OUT); + EXPECT_EQ(BUT1.bonds()[3].type(), BD_OUT); BUT5.Dagger_(); - EXPECT_EQ(BUT5.bonds()[0].type(), BD_BRA); - EXPECT_EQ(BUT5.bonds()[1].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[0].type(), BD_KET); + EXPECT_EQ(BUT5.bonds()[1].type(), BD_BRA); EXPECT_EQ(BUT5.bonds()[0].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); EXPECT_EQ(BUT5.bonds()[1].qnums(), std::vector>({{0, 2}, {1, 5}, {1, 6}, {0, 1}})); tmp = BUT4.Dagger(); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(tmp.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).real()), + double(BUT4.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(tmp.at({l, k, j, i}).imag()), + -double(BUT4.at({i, j, k, l}).imag())); + } else { + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } + } + tmp = BUT4.clone(); - BUT4.Dagger_(); - for (size_t i = 1; i <= 5; i++) 
- for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(BUT4.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) { + if (BUT4.at({i, j, k, l}).exists()) { + // EXPECT_TRUE(Scalar(BUT4.at({i, j, k, l})-BUconjT4.at({i, j, k, l})).abs()<1e-5); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); + } else { + EXPECT_FALSE(tmp.at({l, k, j, i}).exists()); } + } tmp = UT_diag_cplx.Dagger(); - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).real()), double(UT_diag_cplx.get_block_(i).at({j}).real())); EXPECT_DOUBLE_EQ(double(tmp.get_block_(i).at({j}).imag()), @@ -396,25 +401,24 @@ TEST_F(BlockUniTensorTest, gpu_truncate) { } TEST_F(BlockUniTensorTest, gpu_elem_exist) { - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.elem_exists({i - 1, j - 1, k - 1, l - 1})) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l 
= 0; l < 5; l++) + if (BUT4.elem_exists({i, j, k, l})) { cytnx_int64 _a; std::vector _b; - ((BlockUniTensor*)BUT4._impl.get()) - ->_fx_locate_elem(_a, _b, {i - 1, j - 1, k - 1, l - 1}); + ((BlockUniTensor*)BUT4._impl.get())->_fx_locate_elem(_a, _b, {i, j, k, l}); std::vector qind = BUT4.get_qindices(_a); EXPECT_EQ(BUT4.bonds()[0].qnums()[qind[0]][0] - BUT4.bonds()[1].qnums()[qind[1]][0] + BUT4.bonds()[2].qnums()[qind[2]][0] - BUT4.bonds()[3].qnums()[qind[3]][0], 0); } - size_t offset = 0; - for (size_t i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { + cytnx_int64 offset = 0; + for (cytnx_int64 i = 0; i < UT_diag_cplx.bonds()[0].qnums().size(); i++) { cytnx_uint64 deg = UT_diag_cplx.bonds()[0]._impl->_degs[i]; - for (size_t j = 0; j < deg; j++) { + for (cytnx_int64 j = 0; j < deg; j++) { EXPECT_TRUE(UT_diag_cplx.elem_exists({offset + j, offset + j})); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).real()), double(i + 1)); EXPECT_DOUBLE_EQ(double(UT_diag_cplx.at({offset + j, offset + j}).imag()), double(i + 1)); @@ -622,9 +626,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block__byidx) { UT_pB.put_block_(t1a, 1); UT_pB.put_block_(t1b, 2); UT_pB.put_block_(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -647,9 +651,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byidx) { UT_pB.put_block(t1a, 1); UT_pB.put_block(t1b, 2); UT_pB.put_block(t2, 3); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), 
UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -672,9 +676,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block__byqnum) { UT_pB.put_block_(t1a, {0, 1, 1}, true); UT_pB.put_block_(t1b, {1, 0, 1}, true); UT_pB.put_block_(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -697,9 +701,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byqnum) { UT_pB.put_block(t1a, {0, 1, 1}, true); UT_pB.put_block(t1b, {1, 0, 1}, true); UT_pB.put_block(t2, {1, 1, 2}, true); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(UT_pB.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (UT_pB.at({i, j, k}).exists()) EXPECT_EQ(UT_pB.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -719,9 +723,9 @@ TEST_F(BlockUniTensorTest, gpu_put_block_byqnum) { TEST_F(BlockUniTensorTest, gpu_clone) { UniTensor cloned = UT_pB_ans.clone(); - for (size_t i = 0; i < 5; i++) - for (size_t j = 0; j < 9; j++) - for (size_t k = 1; k < 30; k++) { + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 9; j++) + for (cytnx_int64 k = 1; k < 30; k++) { EXPECT_EQ(cloned.at({i, j, k}).exists(), UT_pB_ans.at({i, j, k}).exists()); if (cloned.at({i, j, k}).exists()) EXPECT_EQ(cloned.at({i, j, k}), UT_pB_ans.at({i, j, k})); } @@ -731,9 +735,9 @@ TEST_F(BlockUniTensorTest, gpu_permute1) { // rank-3 tensor std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.permute(a, -1); - for (size_t i = 0; i < 10; 
i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -745,8 +749,8 @@ TEST_F(BlockUniTensorTest, gpu_permute2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.permute(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -758,9 +762,9 @@ TEST_F(BlockUniTensorTest, gpu_permute_1) { std::vector a = {1, 2, 0}; auto permuted = UT_permute_1.clone(); permuted.permute_(a, -1); - for (size_t i = 0; i < 10; i++) - for (size_t j = 0; j < 6; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 i = 0; i < 10; i++) + for (cytnx_int64 j = 0; j < 6; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({i, j, k}).exists(), UT_permute_ans1.at({i, j, k}).exists()); if (permuted.at({i, j, k}).exists()) EXPECT_EQ(double(permuted.at({i, j, k}).real()), @@ -772,8 +776,8 @@ TEST_F(BlockUniTensorTest, gpu_permute_2) { std::vector a = {1, 0}; auto permuted = UT_permute_2.clone(); permuted.permute_(a, -1); - for (size_t j = 0; j < 10; j++) - for (size_t k = 0; k < 10; k++) { + for (cytnx_int64 j = 0; j < 10; j++) + for (cytnx_int64 k = 0; k < 10; k++) { EXPECT_EQ(permuted.at({j, k}).exists(), UT_permute_ans2.at({j, k}).exists()); if (permuted.at({j, k}).exists()) EXPECT_EQ(double(permuted.at({j, k}).real()), double(UT_permute_ans2.at({j, k}).real())); @@ -839,26 +843,26 @@ TEST_F(BlockUniTensorTest, gpu_Add) { // } BUT4 = BUT4.Load(data_dir + 
"OriginalBUT.cytnx").to(cytnx::Device.cuda); auto out2 = BUT4.Add(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } BUT4.Add_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTpT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTpT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTpT2.at({i, j, k, l}).imag())); } } @@ -885,76 +889,76 @@ TEST_F(BlockUniTensorTest, gpu_Sub) { // } BUT4 = BUT4.Load(data_dir + "OriginalBUT.cytnx").to(cytnx::Device.cuda); auto out2 = BUT4.Sub(BUT4_2); - for (size_t i = 1; i <= 5; 
i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out2.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out2.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out2.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out2.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } BUT4.Sub_(BUT4_2); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTsT2.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTsT2.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTsT2.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, gpu_Mul) { auto out = BUT4.Mul(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k 
- 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } BUT4.Mul_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTm9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTm9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTm9.at({i, j, k, l}).imag())); } } TEST_F(BlockUniTensorTest, gpu_Div) { auto out = BUT4.Div(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (out.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(out.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i 
= 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (out.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(out.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } BUT4.Div_(9); - for (size_t i = 1; i <= 5; i++) - for (size_t j = 1; j <= 11; j++) - for (size_t k = 1; k <= 3; k++) - for (size_t l = 1; l <= 5; l++) - if (BUT4.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(BUT4.at({i - 1, j - 1, k - 1, l - 1}).imag()), - double(BUTd9.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (cytnx_int64 i = 0; i < 5; i++) + for (cytnx_int64 j = 0; j < 11; j++) + for (cytnx_int64 k = 0; k < 3; k++) + for (cytnx_int64 l = 0; l < 5; l++) + if (BUT4.at({i, j, k, l}).exists()) { + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).real()), + double(BUTd9.at({i, j, k, l}).real())); + EXPECT_DOUBLE_EQ(double(BUT4.at({i, j, k, l}).imag()), + double(BUTd9.at({i, j, k, l}).imag())); } // BUT4 = BUT4.Load("OriginalBUT.cytnx"); diff --git a/tests/gpu/DenseUniTensor_test.cpp b/tests/gpu/DenseUniTensor_test.cpp index c562c051d..5c5e55132 100644 --- a/tests/gpu/DenseUniTensor_test.cpp +++ b/tests/gpu/DenseUniTensor_test.cpp @@ -211,29 +211,29 @@ TEST_F(DenseUniTensorTest, gpu_Dagger) { EXPECT_EQ(utzero3456.bonds()[3].type(), BD_REG); tmp = utarcomplex3456.Dagger(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(tmp.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real()), - 
double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag())); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } tmp = utarcomplex3456.clone(); - utarcomplex3456.Dagger_(); - for (size_t i = 1; i <= 3; i++) - for (size_t j = 1; j <= 4; j++) - for (size_t k = 1; k <= 5; k++) - for (size_t l = 1; l <= 6; l++) - if (utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).exists()) { - // EXPECT_TRUE(Scalar(utarcomplex3456.at({i-1,j-1,k-1,l-1})-BUconjT4.at({i-1,j-1,k-1,l-1})).abs()<1e-5); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).real()), - double(tmp.at({i - 1, j - 1, k - 1, l - 1}).real())); - EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i - 1, j - 1, k - 1, l - 1}).imag()), - -double(tmp.at({i - 1, j - 1, k - 1, l - 1}).imag())); + tmp.Dagger_(); + for (size_t i = 0; i < 3; i++) + for (size_t j = 0; j < 4; j++) + for (size_t k = 0; k < 5; k++) + for (size_t l = 0; l < 6; l++) + if (utarcomplex3456.at({i, j, k, l}).exists()) { + EXPECT_TRUE(tmp.at({l, k, j, i}).exists()); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).real()), + double(tmp.at({l, k, j, i}).real())); + EXPECT_DOUBLE_EQ(double(utarcomplex3456.at({i, j, k, l}).imag()), + -double(tmp.at({l, k, j, i}).imag())); } }