diff --git a/CMakeLists.txt b/CMakeLists.txt index 4d91a9d3b..4b9d70dbb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -497,7 +497,7 @@ endif() # ##################################################################### # Final Information Messages # ##################################################################### -message(STATUS "|= Final FLAGS infomation for install >>>>> ") +message(STATUS "|= Final FLAGS information for install >>>>> ") message(STATUS " CXX Compiler: ${CMAKE_CXX_COMPILER}") message(STATUS " CXX Flags: ${CMAKE_CXX_FLAGS}") message(STATUS " BLAS and LAPACK Libraries: ${LAPACK_LIBRARIES}") diff --git a/include/UniTensor.hpp b/include/UniTensor.hpp index f67b97cd2..e7b32a04c 100644 --- a/include/UniTensor.hpp +++ b/include/UniTensor.hpp @@ -257,9 +257,13 @@ namespace cytnx { const cytnx_int64 &rowrank = -1); virtual void permute_nosignflip_(const std::vector &mapper, const cytnx_int64 &rowrank = -1); - // virtual void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = // -1); + + virtual void twist_(const cytnx_int64 &idx); + virtual void twist_(const std::string label); + virtual void fermion_twists_(); + virtual boost::intrusive_ptr contiguous_(); virtual boost::intrusive_ptr contiguous(); virtual boost::intrusive_ptr apply_(); @@ -553,6 +557,20 @@ namespace cytnx { void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1); void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1); + void twist_(const cytnx_int64 &idx) override { + // do nothing for bosonic UniTensor + return; + } + void twist_(const std::string label) override { + // do nothing for bosonic UniTensor + return; + } + + void fermion_twists_() override { + // do nothing for bosonic UniTensor + return; + } + boost::intrusive_ptr relabel(const std::vector &new_labels); boost::intrusive_ptr relabels(const std::vector &new_labels); @@ -1381,6 +1399,19 @@ namespace cytnx { void permute_(const std::vector &mapper, const cytnx_int64 
&rowrank = -1); void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1); + void twist_(const cytnx_int64 &idx) override { + // do nothing for bosonic UniTensor + return; + } + void fermion_twists_() override { + // do nothing for bosonic UniTensor + return; + } + void twist_(const std::string label) override { + // do nothing for bosonic UniTensor + return; + } + boost::intrusive_ptr contiguous_() { for (unsigned int b = 0; b < this->_blocks.size(); b++) this->_blocks[b].contiguous_(); return boost::intrusive_ptr(this); @@ -2136,17 +2167,21 @@ namespace cytnx { boost::intrusive_ptr permute(const std::vector &mapper, const cytnx_int64 &rowrank = -1); - void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1); - void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1); + void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1) override; + void permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1) override; - boost::intrusive_ptr permute_nosignflip(const std::vector &mapper, - const cytnx_int64 &rowrank = -1); - boost::intrusive_ptr permute_nosignflip(const std::vector &mapper, - const cytnx_int64 &rowrank = -1); + boost::intrusive_ptr permute_nosignflip( + const std::vector &mapper, const cytnx_int64 &rowrank = -1) override; + boost::intrusive_ptr permute_nosignflip( + const std::vector &mapper, const cytnx_int64 &rowrank = -1) override; void permute_nosignflip_(const std::vector &mapper, - const cytnx_int64 &rowrank = -1); + const cytnx_int64 &rowrank = -1) override; void permute_nosignflip_(const std::vector &mapper, - const cytnx_int64 &rowrank = -1); + const cytnx_int64 &rowrank = -1) override; + + void twist_(const cytnx_int64 &idx) override; + void twist_(const std::string label) override; + void fermion_twists_() override; // Helper function; implements the sign flips when permuting indices std::vector _swapsigns_(const std::vector &mapper) const; @@ -3128,7 +3163,7 @@ 
namespace cytnx { * @details Length is the number of blocks in the UniTensor. If the return is true, the sign of * the elements of the corresponding block needs to be flipped. * @warning This is an inline version which returns a reference. Changes to the reference affect - * the tensor! + * the UniTensor! * @return std::vector & */ std::vector &signflip_() { return this->_impl->signflip_(); } @@ -3521,7 +3556,7 @@ namespace cytnx { } /** - @brief Return a new UniTensor that cast to different data type. + @brief Return a new UniTensor whose elements are casted to a different data type. @param[in] new_type the new data type. It an be any type defined in cytnx::Type. @return UniTensor @attention If the \p new_type is same as dtype of the original UniTensor, return self. @@ -3612,10 +3647,10 @@ namespace cytnx { * usually not intended, since fermionic permutations create sign flips! This should typically * only be used to compare tensors in different sign conventions with each other. * @param[in] mapper the mapper of the permutation. This mapper is mapped by bond index if - * \p by_label is false, otherwise it is mapped by bond label. * @param[in] rowrank the new rowrank after the permutation * @return UniTensor - * @warning \p by_label will be deprecated! + * @warning Usually, the signs should change when permuting a fermionic UniTensor. Use permute + * instead, unless you are really sure you want to permute without signflips! */ UniTensor permute_nosignflip(const std::vector &mapper, const cytnx_int64 &rowrank = -1) const { @@ -3632,6 +3667,8 @@ namespace cytnx { * @param[in] mapper the mapper by babels * @param[in] rowrank the row rank * @return UniTensor + * @warning Usually, the signs should change when permuting a fermionic UniTensor. Use permute_ + * instead, unless you are really sure you want to permute without signflips! 
*/ UniTensor permute_nosignflip(const std::vector &mapper, const cytnx_int64 &rowrank = -1) const { @@ -3663,7 +3700,8 @@ namespace cytnx { permute_(const std::vector &mapper, const cytnx_int64 &rowrank = -1) @param[in] mapper the mapper by labels @param[in] rowrank the row rank after the permutation - @warning \p by_label will be deprecated! + @warning Usually, the signs should change when permuting a fermionic UniTensor. Use permute_ + instead, unless you are really sure you want to permute without signflips! */ UniTensor &permute_nosignflip_(const std::vector &mapper, const cytnx_int64 &rowrank = -1) { @@ -3679,6 +3717,8 @@ namespace cytnx { @param[in] mapper the mapper by labels @param[in] rowrank the row rank after the permutation @see permute(const std::vector &mapper, const cytnx_int64 &rowrank = -1) + @warning Usually, the signs should change when permuting a fermionic UniTensor. Use permute + instead, unless you are really sure you want to permute without signflips! */ UniTensor &permute_nosignflip_(const std::vector &mapper, const cytnx_int64 &rowrank = -1) { @@ -3699,6 +3739,80 @@ namespace cytnx { // this->_impl->permute_(mapper, rowrank); // } + /** + @brief Apply a twist (or braids/self-swap) operation to a given bond; No effect for bosonic + tensors; for a fermionic tensor, this means that a signflip occurs for all blocks where the + bond has odd fermion parity + @param[in] label bond label on which the twist shall be applied + @note This always applies the twist to the bond, ignoring its direction or whether they are + incoming or outgoing bonds. 
+ */ + UniTensor twist(const std::string label) const { + UniTensor out = this->clone(); + out._impl->twist_(label); + return out; + } + /** + @brief Apply a twist (or braids/self-swap) operation to a given bond; No effect for bosonic + tensors; for a fermionic tensor, this means that a signflip occurs for all blocks where the + bond has odd fermion parity + @param[in] idx bond index on which the twist shall be applied + @note This always applies the twist to the bond, ignoring its direction or whether they are + incoming or outgoing bonds. + */ + UniTensor twist(const cytnx_int64 &idx) const { + UniTensor out = this->clone(); + out._impl->twist_(idx); + return out; + } + /** + @brief Inline version + @param[in] label bond label on which the twist shall be applied + @see twist(const std::string label) + */ + UniTensor &twist_(const std::string label) { + this->_impl->twist_(label); + return *this; + } + /** + @brief Inline version + @param[in] idx bond index on which the twist shall be applied + @see twist(const cytnx_int64 &idx) + */ + UniTensor &twist_(const cytnx_int64 &idx) { + this->_impl->twist_(idx); + return *this; + } + + /** + @brief Apply twists to all bra bonds with type BD_KET + @details For bosonic tensors, nothing changes. For fermions, this makes sure that bra- and + ket-states can be contracted correctly. For example, a scalar product between ket states A + and B represented by fermionic tensors with incoming and outgoing legs, can be calculated + correctly by contract(Adag.fermion_twists_(), B). This example assumes that Adag is the dagger of + A and has rowrank=0, while B has maximal rowrank. Applying fermion_twists_ to B would have no + effect in this case and is thus safe to do. Similarly, this can be applied to linear operators. + If M is an operator, then its first rowrank indices correspond to ket bonds and are not affected + by fermion_twists_. For the remaining bonds, a twist is applied if they are not of type BD_BRA. 
+ The combination of this method to bra states, ket states and linear operators in a Hilbert + space, together with contractions, ensures that scalar products can be executed as desired. + @warning The rowrank must be set correctly before applying this method. + */ + UniTensor fermion_twists() const { + UniTensor out = this->clone(); + out._impl->fermion_twists_(); + return out; + } + /** + @brief Inline version + @see fermion_twists() + @warning The rowrank must be set correctly before applying this method. + */ + UniTensor &fermion_twists_() { + this->_impl->fermion_twists_(); + return *this; + } + /** @brief Make the UniTensor contiguous by coalescing the memory (storage). @see contiguous_() @@ -3753,13 +3867,13 @@ namespace cytnx { } /** - @brief Print all of the blocks in the UniTensor. + @brief Print all blocks of the UniTensor. @param[in] full_info whether need to print the full information of the blocks */ void print_blocks(const bool &full_info = true) const { this->_impl->print_blocks(full_info); } /** - @brief Given a index and print out the corresponding block of the UniTensor. + @brief Print out the block of the UniTensor with a given block index number. @param[in] idx the input index @param[in] full_info whether need to print the full information of the block */ @@ -3785,9 +3899,12 @@ namespace cytnx { } /** - @brief Get an element at specific location. + @brief Get an element at a specific location. @param[in] locator the location of the element we want to access. @note this API is only for C++. + @warning For fermions, the signflip is not included and has to be multiplied by the user! + The reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ template T &at(const std::vector &locator) { @@ -3811,9 +3928,12 @@ namespace cytnx { } /** - @brief Get an element at specific location. + @brief Get an element at a specific location. 
@param[in] locator the location of the element we want to access. @note this API is only for C++. + @warning For fermions, the signflip is not included and has to be multiplied by the user! + The reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ template const T &at(const std::vector &locator) const { @@ -3881,8 +4001,11 @@ namespace cytnx { } /** - @brief Get an element at specific location. + @brief Get an element at a specific location. @details see more information at user guide 6.3.5. + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ const Scalar::Sproxy at(const std::vector &locator) const { if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) { @@ -3900,8 +4023,11 @@ namespace cytnx { } /** - @brief Get an element at specific location. + @brief Get an element at a specific location. @details see more information at user guide 6.3.5. + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ Scalar::Sproxy at(const std::vector &locator) { if (this->uten_type() == UTenType.Block || this->uten_type() == UTenType.BlockFermionic) { @@ -3962,11 +4088,15 @@ namespace cytnx { return this->at(new_locator); } - // return a clone of block + // return a copy of the block with the given block index /** - @brief Get the block of the UniTensor for a given index. + @brief Get the block of the UniTensor for a given block index. 
@param[in] idx the index of the block we want to get @return Tensor + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign + structure for each block. */ Tensor get_block(const cytnx_uint64 &idx = 0) const { return this->_impl->get_block(idx); }; //================================ @@ -3978,6 +4108,9 @@ namespace cytnx { corresponding block is empty, it will return void type tensor if \p force is set as true. Otherwise, it will trow the exception.) @return Tensor + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ Tensor get_block(const std::vector &qidx, const bool &force = false) const { return this->_impl->get_block(qidx, force); @@ -4036,9 +4169,12 @@ namespace cytnx { } /** - @brief Get the shared view of block for the given index. + @brief Get the shared view of the block for the given block index. @param[in] idx input the index you want to get the corresponding block @return const Tensor& + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ const Tensor &get_block_(const cytnx_uint64 &idx = 0) const { return this->_impl->get_block_(idx); @@ -4051,29 +4187,34 @@ namespace cytnx { Tensor &get_block_(const cytnx_uint64 &idx = 0) { return this->_impl->get_block_(idx); } /** - @brief Get the shared view of block for the given quantum indices. + @brief Get the shared view of the block for the given quantum indices. 
@param[in] qidx input the quantum indices you want to get the corresponding block. @param[in] force If force is true, it will return the tensor anyway (Even the corresponding block is empty, it will return void type tensor if \p force is set as true. Otherwise, it will trow the exception.) @return Tensor& + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ Tensor &get_block_(const std::vector &qidx, const bool &force = false) { return this->_impl->get_block_(qidx, force); } /** - @brief Get the shared (data) view of block for the given quantum indices on given labels + @brief Get the shared (data) view of the block for the given quantum indices on given labels @param[in] labels the labels of the bonds. @param[in] qidx input the quantum indices you want to get the corresponding block. @param[in] force If force is true, it will return the tensor anyway (Even the corresponding block is empty, it will return void type tensor if \p force is set as true. Otherwise, it will trow the exception.) @return Tensor& - @note labels and qidx forming one to one pairs. e.g. it means get `qidx[i]` qnum at Bond `labels[i]`. Also note that the return Tensor will have axes in the same order specified by labels. + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ // developer note: Tensor is not the same object (Thus Tensor instead of Tensor& ), @@ -4165,6 +4306,9 @@ namespace cytnx { deep copy of blocks. 2. For non-symmetric UniTensor, it will return the deep copy of blocks. 
@return std::vector + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ //[dev] std::vector get_blocks() const { return this->_impl->get_blocks(); } @@ -4173,6 +4317,9 @@ namespace cytnx { @brief Get all the blocks of the UniTensor, inplacely. @see get_blocks() @param[in] silent whether need to print out the warning messages. + @warning For fermions, the signflip is not included and has to be multiplied by the user! The + reason behind this is that several UniTensors in different permutations can share the same + memory. Use signflip() to get the sign structure for each block. */ //[dev] const std::vector &get_blocks_(const bool &silent = false) const { @@ -4191,6 +4338,10 @@ namespace cytnx { @brief Put the block into the UniTensor with given index. @param[in] in the block you want to put into UniTensor @param[in] in the index of the UniTensor you want to put the block \p in in. + @warning For fermions, the signflip is not included and has to be multiplied by the user! Use + signflip() to get the sign, and multiply the block by -1 if the corresponding signflip is + true/ODD. The reason behind this is that several UniTensors in different permutations can share + the same memory. */ UniTensor &put_block(const Tensor &in, const cytnx_uint64 &idx = 0) { this->_impl->put_block(in, idx); @@ -4203,6 +4354,10 @@ namespace cytnx { @param[in] qidx the quantum indices of the UniTensor you want to put the block \p in_tens in. @warning @p force will be deprecated soon! + @warning For fermions, the signflip is not included and has to be multiplied by the user! Use + signflip() to get the sign, and multiply the block by -1 if the corresponding signflip is + true/ODD. The reason behind this is that several UniTensors in different permutations can share + the same memory. 
*/ UniTensor &put_block(const Tensor &in_tens, const std::vector &qidx, const bool &force) { @@ -4213,6 +4368,10 @@ namespace cytnx { /** * @brief Put the block into the UniTensor with given quantum indices, will copy the input * tensor. + * @warning For fermions, the signflip is not included and has to be multiplied by the user! Use + * signflip() to get the sign, and multiply the block by -1 if the corresponding signflip is + * true/ODD. The reason behind this is that several UniTensors in different permutations can + * share the same memory. */ UniTensor &put_block(Tensor &in, const std::vector &lbls, const std::vector &qidx, const bool &force = false) { @@ -4246,6 +4405,10 @@ namespace cytnx { @brief Put the block into the UniTensor with given index, inplacely. @note the put block will have shared view with the internal block, i.e. non-clone. @see put_block(const Tensor &in, const cytnx_uint64 &idx) + @warning For fermions, the signflip is not included and has to be multiplied by the user! Use + signflip() to get the sign, and multiply the block by -1 if the corresponding signflip is + true/ODD. The reason behind this is that several UniTensors in different permutations can share + the same memory. */ UniTensor &put_block_(Tensor &in, const cytnx_uint64 &idx = 0) { this->_impl->put_block_(in, idx); @@ -4256,7 +4419,11 @@ namespace cytnx { @brief Put the block into the UniTensor with given quantum indices, inplacely. @note the put block will have shared view with the internal block, i.e. non-clone. @see put_block(const Tensor &in, const cytnx_uint64 &idx) - @warning @p force will be deprecated soon! + @warning @p force will be deprecated soon! + @warning For fermions, the signflip is not included and has to be multiplied by the user! Use + signflip() to get the sign, and multiply the block by -1 if the corresponding signflip is + true/ODD. The reason behind this is that several UniTensors in different permutations can share + the same memory. 
*/ UniTensor &put_block_(Tensor &in, const std::vector &qidx, const bool &force) { this->_impl->put_block_(in, qidx, force); @@ -4266,6 +4433,10 @@ namespace cytnx { /** * @brief Put the block into the UniTensor with given quantum indices, inplacely. * @note the put block will have shared view with the internal block, i.e. non-clone. + * @warning For fermions, the signflip is not included and has to be multiplied by the user! Use + * signflip() to get the sign, and multiply the block by -1 if the corresponding signflip is + * true/ODD. The reason behind this is that several UniTensors in different permutations can + * share the same memory. */ UniTensor &put_block_(Tensor &in, const std::vector &lbls, const std::vector &qidx, const bool &force = false) { @@ -4979,7 +5150,7 @@ namespace cytnx { the rowrank will not change. If the UniTensor is untagged (i.e. the Bonds are BondType::BD_REG), it will change the rowrank to the opposite side. For fermionic UniTensors, the index order will be reversed without sign flips, and the - direction of all Bonds will swapped. + direction of all Bonds will be swapped. @return UniTensor @note Compared to Transpose_(), this function will return new UniTensor object. @see Transpose_() @@ -5102,6 +5273,8 @@ namespace cytnx { @brief Take the conjugate transpose to the UniTensor. @return UniTensor @note Compared to Dagger_(), this function will create a new UniTensor ojbect. + @note For fermionic UniTensors, the index order will be reversed without sign flips, and the + direction of all Bonds will be swapped. @see Dagger_(), Transpose() */ UniTensor Dagger() const { @@ -5114,6 +5287,8 @@ namespace cytnx { @brief Take the conjugate transpose to the UniTensor, inplacely. @return UniTensor& @note Compared to Dagger(), this is an inplace function. + @note For fermionic UniTensors, the index order will be reversed without sign flips, and the + direction of all Bonds will be swapped. 
@see Dagger() */ UniTensor &Dagger_() { @@ -5391,7 +5566,7 @@ namespace cytnx { } /** - @brief Generate a identity UniTensor. + @brief Generate an identity UniTensor. @param[in] dim the dimension of the diagnal. @param[in] in_labels the labels of the UniTensor. @param[in] is_diag determine if the UniTensor is diagonal or not. Default is false. @@ -5730,7 +5905,7 @@ namespace cytnx { /// @endcond /** - @brief Contract multiple UniTensor by tracing the ranks with common labels with pairwise + @brief Contract multiple UniTensor by tracing the indices with common labels with pairwise operation. @param in the Tensors. @param args the Tensors. diff --git a/pybind/unitensor_py.cpp b/pybind/unitensor_py.cpp index eb08890b0..49d8f5a01 100644 --- a/pybind/unitensor_py.cpp +++ b/pybind/unitensor_py.cpp @@ -594,7 +594,7 @@ void unitensor_binding(py::module &m) { [](UniTensor &self, const cytnx_int64 &device) { cytnx_error_msg(self.device() == device, "[ERROR][pybind][to_diffferent_device] same device for to() should be " - "handle in python side.%s", + "handled on the Python side.%s", "\n"); return self.to(device); }, @@ -644,15 +644,27 @@ void unitensor_binding(py::module &m) { return self.permute_nosignflip(mapper,rowrank); },py::arg("mapper"), py::arg("rowrank")=(cytnx_int64)(-1)) .def("permute_nosignflip_", [](UniTensor &self, const std::vector &mapper, const cytnx_int64 &rowrank){ - self.permute_nosignflip_(mapper,rowrank); + return self.permute_nosignflip_(mapper,rowrank); },py::arg("mapper"), py::arg("rowrank")=(cytnx_int64)(-1)) .def("permute_nosignflip_", [](UniTensor &self, const std::vector &mapper, const cytnx_int64 &rowrank){ - self.permute_nosignflip_(mapper,rowrank); + return self.permute_nosignflip_(mapper,rowrank); },py::arg("mapper"), py::arg("rowrank")=(cytnx_int64)(-1)) - - + .def("twist", [](UniTensor &self, const cytnx_int64 &idx){ + return self.twist(idx); + },py::arg("idx")) + .def("twist", [](UniTensor &self, const std::string label){ + return 
self.twist(label); + },py::arg("label")) + .def("twist_", [](UniTensor &self, const cytnx_int64 &idx){ + return self.twist_(idx); + },py::arg("idx")) + .def("twist_", [](UniTensor &self, const std::string label){ + return self.twist_(label); + },py::arg("label")) + .def("fermion_twists", &UniTensor::fermion_twists) + .def("fermion_twists_", &UniTensor::fermion_twists_) .def("make_contiguous", &UniTensor::contiguous) .def("contiguous_", &UniTensor::contiguous_) diff --git a/src/BlockFermionicUniTensor.cpp b/src/BlockFermionicUniTensor.cpp index 095c1bea0..4990beac6 100644 --- a/src/BlockFermionicUniTensor.cpp +++ b/src/BlockFermionicUniTensor.cpp @@ -573,7 +573,7 @@ namespace cytnx { } boost::intrusive_ptr BlockFermionicUniTensor::contiguous() { - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; changed to create a BlockFermionicUniTensor if (this->is_contiguous()) { boost::intrusive_ptr out(this); return out; @@ -672,7 +672,7 @@ namespace cytnx { boost::intrusive_ptr BlockFermionicUniTensor::permute( const std::vector &mapper, const cytnx_int64 &rowrank) { //[21 Aug 2024] This is a copy from BlockUniTensor; calls permute(std::vector) - // which takes care of the fermionic sign struture + // which takes care of the fermionic sign struture; also, creates a BlockFermionicUniTensor BlockFermionicUniTensor *out_raw = this->clone_meta(true, true); out_raw->_blocks.resize(this->_blocks.size()); @@ -910,6 +910,7 @@ namespace cytnx { std::vector(mapper.begin(), mapper.end()); cytnx_int64 actind; fermionParity actparity; + // permute; we exchange i with permutation[i], until permutation[i] == i for (cytnx_int64 qnum = 0; qnum < qindices.size(); qnum++) { // cout << "[DEBUG] permutation[" << qnum << "] = " << permutation[qnum] << endl; while (permutation[qnum] != qnum) { // exchange until the correct qindex is here @@ -1005,8 +1006,8 @@ namespace cytnx { actind = permutation[qnum]; actparity = 
parities[permutation[actind]]; // cout << "[DEBUG] parities[" << permutation[actind] << "] = " << actparity << "; - // parities[" << actind << "] = " << parities[actind] << endl; find the sign flips of the - // exchange, depending on the statistics of qnum and actind + // parities[" << actind << "] = " << parities[actind] << endl; + // find the sign flips of the exchange, depending on the statistics of qnum and actind if (actparity == ODD) { if (parities[actind] == ODD) { // both fermionic, one sign flip signs[b] = !signs[b]; @@ -1044,8 +1045,8 @@ namespace cytnx { } // cout << "[DEBUG] permutation before permute: " << endl << permutation << "; signs // before permute: " << endl << parities << endl; cout << "qnum = " << qnum << "; actind = - // " << actind << "; permutation[actind] = " << permutation[actind] << endl; exchange the - // sites + // " << actind << "; permutation[actind] = " << permutation[actind] << endl; + // exchange the sites permutation[qnum] = permutation[actind]; permutation[actind] = actind; // parities[qnum] = parities[actind]; @@ -1279,8 +1280,44 @@ namespace cytnx { return signs; } + void BlockFermionicUniTensor::twist_(const cytnx_int64 &idx) { + // apply fermion twist on bond with given index; this means that the signs of all blocks with + // negative parity for this index are swapped + + // Check for invalid index + cytnx_error_msg(idx >= this->_labels.size() || idx < 0, + "[ERROR][BlockFermionicUniTensor::twist_] index %d out of bounds [0, %d].\n", + idx, this->_labels.size() - 1); + Bond bnd = this->_bonds[idx]; + + for (cytnx_int64 b = 0; b < this->_inner_to_outer_idx.size(); b++) { + // this->_inner_to_outer_idx[blockindex][bondindex] contains the quantum index for the given + // indices bnd.qnums()[quantum index] or bnd._impl->_qnums[quantum index] gives the quantum + // number for a given quantum index + // bnd.get_fermion_parity(qnum) contains the fermion parity + if 
(bnd.get_fermion_parity(bnd._impl->_qnums[this->_inner_to_outer_idx[b][idx]])) + this->_signflip[b] = !this->_signflip[b]; + } + } + void BlockFermionicUniTensor::twist_(const std::string label) { + std::vector::iterator it; + it = std::find(this->_labels.begin(), this->_labels.end(), label); + cytnx_error_msg(it == this->_labels.end(), + "[ERROR] label %s does not exist in current UniTensor.\n", label.c_str()); + this->twist_(std::distance(this->_labels.begin(), it)); + } + + void BlockFermionicUniTensor::fermion_twists_() { + // apply fermion twists on all bra bonds (bond index >= rowrank), which are of type BD_KET + for (cytnx_int64 idx = this->_rowrank; idx < this->_bonds.size(); idx++) { + if (this->_bonds[idx]._impl->_type == BD_KET) { + this->twist_(idx); + } + } + } + boost::intrusive_ptr BlockFermionicUniTensor::relabel( - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; creates a BlockFermionicUniTensor const std::vector &new_labels) { BlockFermionicUniTensor *tmp = this->clone_meta(true, true); tmp->_blocks = this->_blocks; @@ -1290,7 +1327,7 @@ namespace cytnx { } boost::intrusive_ptr BlockFermionicUniTensor::relabel( - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; creates a BlockFermionicUniTensor const std::vector &old_labels, const std::vector &new_labels) { BlockFermionicUniTensor *tmp = this->clone_meta(true, true); tmp->_blocks = this->_blocks; @@ -1300,7 +1337,7 @@ namespace cytnx { } boost::intrusive_ptr BlockFermionicUniTensor::relabels( - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; creates a BlockFermionicUniTensor const std::vector &new_labels) { BlockFermionicUniTensor *tmp = this->clone_meta(true, true); tmp->_blocks = this->_blocks; @@ -1310,7 +1347,7 @@ namespace cytnx { } boost::intrusive_ptr BlockFermionicUniTensor::relabels( - //[21 Aug 2024] This is a copy 
from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; creates a BlockFermionicUniTensor const std::vector &old_labels, const std::vector &new_labels) { BlockFermionicUniTensor *tmp = this->clone_meta(true, true); tmp->_blocks = this->_blocks; @@ -1321,7 +1358,7 @@ namespace cytnx { boost::intrusive_ptr BlockFermionicUniTensor::relabel(const cytnx_int64 &inx, const string &new_label) { - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; creates a BlockFermionicUniTensor BlockFermionicUniTensor *tmp = this->clone_meta(true, true); tmp->_blocks = this->_blocks; tmp->set_label(inx, new_label); @@ -1331,7 +1368,7 @@ namespace cytnx { boost::intrusive_ptr BlockFermionicUniTensor::relabel(const string &inx, const string &new_label) { - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; creates a BlockFermionicUniTensor BlockFermionicUniTensor *tmp = this->clone_meta(true, true); tmp->_blocks = this->_blocks; tmp->set_label(inx, new_label); @@ -1398,14 +1435,6 @@ namespace cytnx { auto IDL = vec_argwhere(this->_inner_to_outer_idx, Lidx); auto IDR = vec_argwhere(Rtn->_inner_to_outer_idx, Ridx); - /* - cout << b << endl; - //vec_print_simple(std::cout,tmp->_inner_to_outer_idx[b]); - //vec_print_simple(std::cout,Lidx); - //vec_print_simple(std::cout,Ridx); - vec_print_simple(std::cout,IDL); - vec_print_simple(std::cout,IDR); - */ if (User_debug) { if (IDL.size() == IDR.size()) { cytnx_error_msg( @@ -1475,12 +1504,8 @@ namespace cytnx { // sign flip for this tensor is computed explictly, then a permutation without signflip is // performed; sign flip of rhs is accounted for in usual permutation - // std::cout << "[DEBUG] Contract; signs of lhs before lhssigns: " << this->_signflip - // << std::endl; std::vector signfliplhs = this->_lhssigns_(_shadow_comm_idx1_reversed, comm_idx1.size()); - // std::cout << "[DEBUG] Contract; signs of lhs after 
lhssigns: " << this->_signflip - // << std::endl; boost::intrusive_ptr Lperm = this->permute_nosignflip(_shadow_comm_idx1); boost::intrusive_ptr Rperm = rhs->permute(_shadow_comm_idx2); @@ -1491,7 +1516,6 @@ namespace cytnx { // pair the block and contract using vectordot! // naive way! std::vector signfliprhs = Rperm_raw->_signflip; - // cout << "Lsigns = " << Lsigns << "; Rsigns = " << Rsigns << endl; for (unsigned int b = 0; b < Lperm_raw->_blocks.size(); b++) { for (unsigned int a = 0; a < Rperm_raw->_blocks.size(); a++) { if (Lperm_raw->_inner_to_outer_idx[b] == Rperm_raw->_inner_to_outer_idx[a]) { @@ -1510,7 +1534,6 @@ namespace cytnx { tmp->_block += linalg::Vectordot(Lperm_raw->_blocks[b].flatten(), Rperm_raw->_blocks[a].flatten()); } - // std::cout << "b=" << b << "; a=" << a << "; sum=" << tmp->_block.at({0}) << endl; } } } @@ -1630,15 +1653,9 @@ namespace cytnx { } // fermion signs - // std::cout << "[DEBUG] Contract; signs of lhs before lhssigns: " << this->_signflip - // << std::endl; std::vector signfliplhs = this->_lhssigns_(mapperL_reversed, comm_idx1.size()); - // std::cout << "[DEBUG] Contract; signs of lhs after lhssigns: " << signfliplhs << - // std::endl; std::vector signfliprhs = Rtn->_swapsigns_(mapperR); - // cout << "signfliplhs = " << signfliplhs << "; signfliprhs = " << signfliprhs << endl; - if (this->is_diag() != Rtn->is_diag()) { for (cytnx_int64 a = 0; a < this->_blocks.size(); a++) { cytnx_int64 comm_dim = 1; @@ -1896,8 +1913,10 @@ namespace cytnx { void BlockFermionicUniTensor::Transpose_() { //[21 Aug 2024] This is a copy from BlockUniTensor; + // The bonds are redirected, as in the bosonic case. Additionally, the order of the indices is + // reversed and a permutation WITHOUT signflips is applied. + // modify tag - // The index order is reversed without any sign flips! 
std::vector idxorder(this->_bonds.size()); std::size_t idxnum = this->bonds().size() - 1; for (int i = 0; i <= idxnum; i++) { @@ -1906,6 +1925,7 @@ namespace cytnx { idxorder[i] = idxnum - i; } this->permute_nosignflip_(idxorder); + this->_rowrank = this->_bonds.size() - this->_rowrank; }; void BlockFermionicUniTensor::normalize_() { @@ -2076,7 +2096,7 @@ namespace cytnx { void BlockFermionicUniTensor::_fx_locate_elem(cytnx_int64 &bidx, std::vector &loc_in_T, const std::vector &locator) const { - //[21 Aug 2024] This is a copy from BlockUniTensor; + //[21 Aug 2024] This is a copy from BlockUniTensor; error message differs // 1. check if out of range: cytnx_error_msg(locator.size() != this->_bonds.size(), "[ERROR] len(locator) does not match the rank of tensor.%s", "\n"); @@ -2241,8 +2261,8 @@ namespace cytnx { const Scalar::Sproxy BlockFermionicUniTensor::at_for_sparse( const std::vector &locator) const { //[21 Aug 2024] This is a copy from BlockUniTensor; - // TODO: one might want to store the sign in Sproxy as well in the future (here and in other - // instances) + // TODOfermions: one might want to store the sign in Sproxy as well in the future (here and in + // other instances) cytnx_int64 bidx; std::vector loc_in_T; this->_fx_locate_elem(bidx, loc_in_T, locator); @@ -2563,7 +2583,7 @@ namespace cytnx { // 2) dirty way: change _signflip; this is dirty because other BlockFermionicUniTensors // could depend on the block and are not aware of this sign flip! // this->_signflip[b] = this->_signflip[b] ? 
true : false; - // 3) fast way: TODOfermion: implement Tensor.negmul, which does the multiplication and + // 3) fast way: TODOfermions: implement Tensor.negmul, which does the multiplication and // sign flip in one step } break; @@ -2613,7 +2633,8 @@ void BlockFermionicUniTensor::_fx_group_duplicates( const std::vector &dup_bond_idxs, const std::vector> &idx_mappers) { - //[21 Aug 2024] This is a copy from BlockUniTensor; adds signflips by taking -Tensor + //[21 Aug 2024] This is a copy from BlockUniTensor; adds signflips by taking -Tensor; might be + // more efficient if it is done in the contiguous() step. // checking the bonds that are duplicates // auto mod_idxs = dup_bond_idxs; std::sort(mod_idx.begin(),mod_idx.end()); @@ -2652,7 +2673,7 @@ if (mask[a] == 1) continue; if (tmp_inner_to_outer_idx[a] == tmp_inner_to_outer_idx[b]) { // need to combine two! - // checking which bonds does not need to combine! + // checking which bonds do not need to be combined!
mask[a] = 1; /* std::cout << "CALL DS:\n"; @@ -2898,6 +2919,8 @@ namespace cytnx { } void _bkf_from_dn(BlockFermionicUniTensor *ths, DenseUniTensor *rhs, const bool &force) { + //[21 Aug 2024] This is a copy from BlockUniTensor; The name is changed (BKF instead of BK); + // signflips are initialized to be all EVEN if (!force) { // more checking: if (int(rhs->bond_(0).type()) != bondType::BD_NONE) { @@ -2947,6 +2970,9 @@ namespace cytnx { void BlockFermionicUniTensor::from_(const boost::intrusive_ptr &rhs, const bool &force) { + //[21 Aug 2024] This is a copy from BlockUniTensor; calls the right functions (with BKS instead + // of BK) + // checking shape: cytnx_error_msg(this->shape() != rhs->shape(), "[ERROR][from_] shape does not match.%s", "\n"); diff --git a/src/BlockUniTensor.cpp b/src/BlockUniTensor.cpp index 9c1bdf75e..002a0b1c0 100644 --- a/src/BlockUniTensor.cpp +++ b/src/BlockUniTensor.cpp @@ -776,14 +776,6 @@ namespace cytnx { auto IDL = vec_argwhere(this->_inner_to_outer_idx, Lidx); auto IDR = vec_argwhere(Rtn->_inner_to_outer_idx, Ridx); - /* - cout << b << endl; - //vec_print_simple(std::cout,tmp->_inner_to_outer_idx[b]); - //vec_print_simple(std::cout,Lidx); - //vec_print_simple(std::cout,Ridx); - vec_print_simple(std::cout,IDL); - vec_print_simple(std::cout,IDR); - */ if (User_debug) { if (IDL.size() == IDR.size()) { cytnx_error_msg(IDL.size() > 1, @@ -864,8 +856,6 @@ namespace cytnx { else tmp->_block += linalg::Vectordot(Lperm_raw->_blocks[b].flatten(), Rperm_raw->_blocks[a].flatten()); - - // std::cout << b << " " << a << endl; } } } @@ -1833,23 +1823,9 @@ namespace cytnx { if (mask[a] == 1) continue; if (tmp_inner_to_outer_idx[a] == tmp_inner_to_outer_idx[b]) { // need to combine two! - // checking which bonds does not need to combine! + // checking which bonds do not need to be combined! 
mask[a] = 1; - /* - std::cout << "CALL DS:\n"; - std::cout << no_combine << std::endl; - std::cout << "targ: old/new itoi:\n"; - std::cout << this->_inner_to_outer_idx[b] << std::endl; - std::cout << tmp_inner_to_outer_idx[b] << std::endl; - std::cout << "----------\n" << std::endl; - std::cout << "src: old/new itoi:\n"; - std::cout << this->_inner_to_outer_idx[a] << std::endl; - std::cout << tmp_inner_to_outer_idx[a] << std::endl; - std::cout << "----------\n" << std::endl; - std::cout << new_blocks.back().shape() << std::endl; - std::cout << this->_blocks[a].shape() << std::endl; - std::cout << "=============\n" << std::endl; - */ + new_blocks.back() = linalg::Directsum(new_blocks.back(), this->_blocks[a], no_combine); } } diff --git a/src/UniTensor_base.cpp b/src/UniTensor_base.cpp index f207db3b8..be5aab010 100644 --- a/src/UniTensor_base.cpp +++ b/src/UniTensor_base.cpp @@ -195,6 +195,25 @@ namespace cytnx { "\n"); } + void UniTensor_base::twist_(const cytnx_int64 &idx) { + // do nothing for bosonic UniTensor, override for fermionic! + cytnx_error_msg( + true, "[ERROR] fatal internal, cannot call on an un-initialized UniTensor_base%s", "\n"); + return; + } + void UniTensor_base::twist_(const std::string label) { + // do nothing for bosonic UniTensor, override for fermionic! + cytnx_error_msg( + true, "[ERROR] fatal internal, cannot call on an un-initialized UniTensor_base%s", "\n"); + return; + } + void UniTensor_base::fermion_twists_() { + // do nothing for bosonic UniTensor, override for fermionic! 
+ cytnx_error_msg( + true, "[ERROR] fatal internal, cannot call on an un-initialized UniTensor_base%s", "\n"); + return; + } + boost::intrusive_ptr UniTensor_base::contiguous_() { cytnx_error_msg( true, "[ERROR] fatal internal, cannot call on an un-initialized UniTensor_base%s", "\n"); diff --git a/src/linalg/Eig.cpp b/src/linalg/Eig.cpp index fe994e9d2..0168cb73a 100644 --- a/src/linalg/Eig.cpp +++ b/src/linalg/Eig.cpp @@ -1,15 +1,21 @@ #include "linalg.hpp" -#include "algo.hpp" + #include +#include +#include + #include "Tensor.hpp" -using namespace std; +#include "UniTensor.hpp" +#include "algo.hpp" #ifdef BACKEND_TORCH #else #include "backend/linalg_internal_interface.hpp" + namespace cytnx { namespace linalg { + std::vector Eig(const Tensor &Tin, const bool &is_V, const bool &row_v) { cytnx_error_msg(Tin.shape().size() != 2, "[Eig] error, Eig can only operate on rank-2 Tensor.%s", "\n"); @@ -74,9 +80,9 @@ namespace cytnx { std::vector out; out.push_back(S); - if(is_V){ + if (is_V) { out.push_back(V); - if(!row_v) out.back().permute_({1,0}).contiguous_(); + if (!row_v) out.back().permute_({1, 0}).contiguous_(); } */ cytnx_error_msg(true, "[ERROR]currently Eig for non-symmetric matrix is not supported.%s", @@ -91,12 +97,12 @@ namespace cytnx { } // actual impls: - void _Eig_Dense_UT(std::vector &outCyT, const UniTensor &Tin, - const bool &is_V, const bool &row_v) { + static void Eig_Dense_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_V, const bool &row_v) { //[Note] outCyT must be empty! 
// DenseUniTensor: - // cout << "entry Dense UT" << endl; + // std::cout << "entry Dense UT" << std::endl; Tensor tmp; if (Tin.is_contiguous()) @@ -106,17 +112,17 @@ namespace cytnx { tmp.contiguous_(); } - vector tmps = tmp.shape(); - vector oldshape(tmps.begin(), tmps.end()); + std::vector tmps = tmp.shape(); + std::vector oldshape(tmps.begin(), tmps.end()); tmps.clear(); - vector oldlabel = Tin.labels(); + std::vector oldlabel = Tin.labels(); // collapse as Matrix: cytnx_int64 rowdim = 1; for (cytnx_uint64 i = 0; i < Tin.rowrank(); i++) rowdim *= tmp.shape()[i]; tmp.reshape_({rowdim, -1}); - vector outT = cytnx::linalg::Eig(tmp, is_V, row_v); + std::vector outT = cytnx::linalg::Eig(tmp, is_V, row_v); if (Tin.is_contiguous()) tmp.reshape_(oldshape); int t = 0; @@ -127,8 +133,8 @@ namespace cytnx { cytnx::Bond newBond(outT[t].shape()[0]); Cy_S.Init({newBond, newBond}, {std::string("0"), std::string("1")}, 1, Type.Double, - Device.cpu, true); // it is just reference so no hurt to alias ^^. All eigvals are - // real for eigh so Type.Double. + Tin.device(), true); // it is just reference so no hurt to alias ^^. All eigvals + // are real for eigh so Type.Double. Cy_S.put_block_(outT[t]); t++; @@ -136,10 +142,10 @@ namespace cytnx { cytnx::UniTensor &Cy_U = outCyT[t]; Cy_U.Init(outT[t], false, 1); // Tin is a rowrank = 1 square UniTensor. } // V - } + } // Eig_Dense_UT_internal - void _Eig_Block_UT(std::vector &outCyT, const UniTensor &Tin, - const bool &is_V, const bool &row_v) { + static void Eig_Block_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_V, const bool &row_v) { // outCyT must be empty and Tin must be checked with proper rowrank! // 1) getting the combineBond L and combineBond R for qnum list without grouping: @@ -196,9 +202,9 @@ namespace cytnx { } // 4) for each qcharge in key, combining the blocks into a big chunk! - // ->a initialize an empty shell of UniTensor! + // ->a initialize an empty shell of UniTensors! 
vec2d aux_qnums; // for sharing bond - std::vector aux_degs; // forsharing bond + std::vector aux_degs; // for sharing bond std::vector e_blocks; // for eigenvalues vec2d v_itoi; // for eigen vectors @@ -209,10 +215,10 @@ namespace cytnx { for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; // + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; - // std::cout << new_itoi[x.second[i]] << std::endl; // + // std::cout << new_itoi[x.second[i]] << std::endl; } auto order = vec_sort(itoi_indicators, true); std::vector Tlist(itoi_indicators.size()); @@ -270,13 +276,13 @@ namespace cytnx { v_itoi.back().resize(Tin.rowrank() + 1); } } // is_V - } + } // for each qcharge // process e: Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); BlockUniTensor *e_ptr = new BlockUniTensor(); e_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! e_ptr->_blocks = e_blocks; @@ -302,7 +308,183 @@ namespace cytnx { V._impl = boost::intrusive_ptr(v_ptr); outCyT.push_back(V); } - } + } // Eig_Block_UT_internal + + static void Eig_BlockFermionic_UT_internal(std::vector &outCyT, + const UniTensor &Tin, const bool &is_V, + const bool &row_v) { + // outCyT must be empty and Tin must be checked with proper rowrank! 
+ + // 1) getting the combineBond L and combineBond R for qnum list without grouping: + // + // BDLeft -[ ]- BDRight + // + std::vector strides; + std::vector signflip = Tin.signflip(); + strides.reserve(Tin.rank()); + auto BdLeft = Tin.bonds()[0].clone(); + for (int i = 1; i < Tin.rowrank(); i++) { + strides.push_back(Tin.bonds()[i].qnums().size()); + BdLeft._impl->force_combineBond_(Tin.bonds()[i]._impl, false); // no grouping + } + // std::cout << BdLeft << std::endl; + strides.push_back(1); + auto BdRight = Tin.bonds()[Tin.rowrank()].clone(); + for (int i = Tin.rowrank() + 1; i < Tin.rank(); i++) { + strides.push_back(Tin.bonds()[i].qnums().size()); + BdRight._impl->force_combineBond_(Tin.bonds()[i]._impl, false); // no grouping + } + strides.push_back(1); + // std::cout << BdRight << std::endl; + // std::cout << strides << std::endl; + + // 2) making new inner_to_outer_idx lists for each block: + // -> a. get stride: + for (int i = Tin.rowrank() - 2; i >= 0; i--) { + strides[i] *= strides[i + 1]; + } + for (int i = Tin.rank() - 2; i >= Tin.rowrank(); i--) { + strides[i] *= strides[i + 1]; + } + // std::cout << strides << std::endl; + // ->b. calc new inner_to_outer_idx! + vec2d new_itoi(Tin.Nblocks(), std::vector(2)); + + int cnt; + for (cytnx_uint64 b = 0; b < Tin.Nblocks(); b++) { + const std::vector &tmpv = Tin.get_qindices(b); + for (cnt = 0; cnt < Tin.rowrank(); cnt++) { + new_itoi[b][0] += tmpv[cnt] * strides[cnt]; + } + for (cnt = Tin.rowrank(); cnt < Tin.rank(); cnt++) { + new_itoi[b][1] += tmpv[cnt] * strides[cnt]; + } + } + // std::cout << new_itoi << std::endl; + + // 3) categorize: + // key = qnum, val = list of block locations: + std::map, std::vector> mgrp; + for (cytnx_uint64 b = 0; b < Tin.Nblocks(); b++) { + mgrp[BdLeft.qnums()[new_itoi[b][0]]].push_back(b); + } + + // 4) for each qcharge in key, combining the blocks into a big chunk! + // ->a initialize an empty shell of UniTensors! 
+ vec2d aux_qnums; // for sharing bond + std::vector aux_degs; // for sharing bond + std::vector e_blocks; // for eigenvalues + + vec2d v_itoi; // for eigen vectors + std::vector v_blocks; + + // vec2d vT_itoi; // for vT + // std::vector vT_blocks; + + for (auto const &x : mgrp) { + vec2d itoi_indicators(x.second.size()); + // std::cout << x.second.size() << "-------" << std::endl; + for (int i = 0; i < x.second.size(); i++) { + itoi_indicators[i] = new_itoi[x.second[i]]; + // std::cout << new_itoi[x.second[i]] << std::endl; + } + auto order = vec_sort(itoi_indicators, true); + std::vector Tlist(itoi_indicators.size()); + std::vector row_szs(order.size(), 1); + cytnx_uint64 Rblk_dim = 0; + cytnx_int64 tmp = -1; + cytnx_int64 current_block; + for (int i = 0; i < order.size(); i++) { + current_block = x.second[order[i]]; + if (itoi_indicators[i][0] != tmp) { + tmp = itoi_indicators[i][0]; + Rblk_dim++; + } + Tlist[i] = Tin.get_blocks_()[current_block]; + for (int j = 0; j < Tin.rowrank(); j++) { + row_szs[i] *= Tlist[i].shape()[j]; + } + if (signflip[current_block]) { + Tlist[i] = -Tlist[i]; // copies Tensor + // Tlist[i] = Tlist[i].Mul(-1); // copies Tensor + Tlist[i].reshape_({row_szs[i], -1}); + } else + Tlist[i] = Tlist[i].reshape({row_szs[i], -1}); + } + cytnx_error_msg(Tlist.size() % Rblk_dim, "[Internal ERROR] Tlist is not complete!%s", "\n"); + // BTen is the big block!! + cytnx_uint64 Cblk_dim = Tlist.size() / Rblk_dim; + Tensor BTen = algo::_fx_Matric_combine(Tlist, Rblk_dim, Cblk_dim); + // std::cout << BTen; + // Now we can perform linalg! 
+ aux_qnums.push_back(x.first); + auto out = linalg::Eig(BTen, is_V, row_v); + aux_degs.push_back(out[0].shape()[0]); + e_blocks.push_back(out[0]); + + if (is_V) { + // std::cout << row_szs << std::endl; + // std::cout << out[tr].shape() << std::endl; + std::vector split_dims; + for (int i = 0; i < Rblk_dim; i++) { + split_dims.push_back(row_szs[i * Cblk_dim]); + } + std::vector blks; + // std::cout< new_shape(Tin.rowrank() + 1); + new_shape.back() = -1; + for (int ti = 0; ti < blks.size(); ti++) { + v_blocks.push_back(blks[ti]); + v_itoi.push_back(Tin.get_qindices(x.second[order[ti * Cblk_dim]])); + + // reshaping: + for (int i = 0; i < Tin.rowrank(); i++) { + new_shape[i] = + Tin.bonds()[i] + .getDegeneracies()[Tin.get_qindices(x.second[order[ti * Cblk_dim]])[i]]; + } + v_blocks.back().reshape_(new_shape); + + v_itoi.back()[Tin.rowrank()] = e_blocks.size() - 1; + v_itoi.back().resize(Tin.rowrank() + 1); + } + } // is_V + } // for each qcharge + + // process e: + Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); + BlockFermionicUniTensor *e_ptr = new BlockFermionicUniTensor(); + e_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, + Device.cpu, // these two will be overwritten later, so doesnt matter. + true, // is_diag! + true); // no_alloc! 
+ e_ptr->_blocks = e_blocks; + UniTensor e; + e._impl = boost::intrusive_ptr(e_ptr); + + outCyT.push_back(e); + + if (is_V) { + BlockFermionicUniTensor *v_ptr = new BlockFermionicUniTensor(); + for (int i = 0; i < Tin.rowrank(); i++) { + v_ptr->_bonds.push_back(Tin.bonds()[i].clone()); + v_ptr->_labels.push_back(Tin.labels()[i]); + } + v_ptr->_bonds.push_back(Bd_aux.redirect()); + v_ptr->_labels.push_back("_aux_L"); + v_ptr->_rowrank = Tin.rowrank(); + v_ptr->_is_diag = false; + v_ptr->_is_braket_form = v_ptr->_update_braket(); + v_ptr->_inner_to_outer_idx = v_itoi; + v_ptr->_blocks = v_blocks; + v_ptr->_signflip = std::vector(v_blocks.size(), false); + UniTensor V; + V._impl = boost::intrusive_ptr(v_ptr); + outCyT.push_back(V); + } + } // Eig_BlockFermionic_UT_internal std::vector Eig(const UniTensor &Tin, const bool &is_V, const bool &row_v) { // using rowrank to split the bond to form a matrix. @@ -316,20 +498,22 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _Eig_Dense_UT(outCyT, Tin, is_V, row_v); + Eig_Dense_UT_internal(outCyT, Tin, is_V, row_v); } else if (Tin.uten_type() == UTenType.Block) { - _Eig_Block_UT(outCyT, Tin, is_V, row_v); + Eig_Block_UT_internal(outCyT, Tin, is_V, row_v); + + } else if (Tin.uten_type() == UTenType.BlockFermionic) { + Eig_BlockFermionic_UT_internal(outCyT, Tin, is_V, row_v); + } else { - cytnx_error_msg(true, - "[ERROR] Eig, unsupported type of UniTensor only support (Dense, Block). 
" - "something wrong internal%s", + cytnx_error_msg(true, "[ERROR] Eig only supports Dense/Block/BlockFermionic UniTensors.%s", "\n"); } // ut type return outCyT; - }; // Eig + } // Eig } // namespace linalg } // namespace cytnx diff --git a/src/linalg/Eigh.cpp b/src/linalg/Eigh.cpp index 1d518f3a3..67e739c06 100644 --- a/src/linalg/Eigh.cpp +++ b/src/linalg/Eigh.cpp @@ -1,8 +1,12 @@ #include "linalg.hpp" -#include "algo.hpp" + #include +#include +#include + #include "Tensor.hpp" -using namespace std; +#include "UniTensor.hpp" +#include "algo.hpp" #ifdef BACKEND_TORCH #else @@ -10,6 +14,7 @@ using namespace std; namespace cytnx { namespace linalg { + std::vector Eigh(const Tensor &Tin, const bool &is_V, const bool &row_v) { cytnx_error_msg(Tin.shape().size() != 2, "[Eigh] error, Eigh can only operate on rank-2 Tensor.%s", "\n"); @@ -44,7 +49,6 @@ namespace cytnx { out.back().dtype() == Type.ComplexDouble) { out.back().permute_({1, 0}).contiguous_(); out.back().Conj_(); - // std::cout << "ok"; } else out.back().permute_({1, 0}).contiguous_(); } @@ -84,12 +88,12 @@ namespace cytnx { } // actual impls: - void _Eigh_Dense_UT(std::vector &outCyT, const UniTensor &Tin, - const bool &is_V, const bool &row_v) { + static void Eigh_Dense_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_V, const bool &row_v) { //[Note] outCyT must be empty! 
// DenseUniTensor: - // cout << "entry Dense UT" << endl; + // std::cout << "entry Dense UT" << std::endl; Tensor tmp; if (Tin.is_contiguous()) @@ -99,17 +103,17 @@ namespace cytnx { tmp.contiguous_(); } - vector tmps = tmp.shape(); - vector oldshape(tmps.begin(), tmps.end()); + std::vector tmps = tmp.shape(); + std::vector oldshape(tmps.begin(), tmps.end()); tmps.clear(); - vector oldlabel = Tin.labels(); + std::vector oldlabel = Tin.labels(); // collapse as Matrix: cytnx_int64 rowdim = 1; for (cytnx_uint64 i = 0; i < Tin.rowrank(); i++) rowdim *= tmp.shape()[i]; tmp.reshape_({rowdim, -1}); - vector outT = cytnx::linalg::Eigh(tmp, is_V, row_v); + std::vector outT = cytnx::linalg::Eigh(tmp, is_V, row_v); if (Tin.is_contiguous()) tmp.reshape_(oldshape); int t = 0; @@ -120,8 +124,8 @@ namespace cytnx { cytnx::Bond newBond(outT[t].shape()[0]); Cy_S.Init({newBond, newBond}, {std::string("0"), std::string("1")}, 1, Type.Double, - Device.cpu, true); // it is just reference so no hurt to alias ^^. All eigvals are - // real for eigh so Type.Double. + Tin.device(), true); // it is just reference so no hurt to alias ^^. All eigvals + // are real for eigh so Type.Double. Cy_S.put_block_(outT[t]); t++; @@ -129,10 +133,11 @@ namespace cytnx { cytnx::UniTensor &Cy_U = outCyT[t]; Cy_U.Init(outT[t], false, 1); // Tin is a rowrank = 1 square UniTensor. } // V - } //_Eigh_Dense_UT + } // Eigh_Dense_UT_internal - void _Eigh_Block_UT(std::vector &outCyT, const UniTensor &Tin, - const bool &is_V, const bool &row_v) { + static void Eigh_Block_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_V, const bool &row_v) { + // Exactly the same as Eig_Block_UT_internal with Eig() -> Eigh() // outCyT must be empty and Tin must be checked with proper rowrank! // 1) getting the combineBond L and combineBond R for qnum list without grouping: @@ -189,9 +194,9 @@ namespace cytnx { } // 4) for each qcharge in key, combining the blocks into a big chunk! 
- // ->a initialize an empty shell of UniTensor! + // ->a initialize an empty shell of UniTensors! vec2d aux_qnums; // for sharing bond - std::vector aux_degs; // forsharing bond + std::vector aux_degs; // for sharing bond std::vector e_blocks; // for eigenvalues vec2d v_itoi; // for eigen vectors @@ -202,10 +207,10 @@ namespace cytnx { for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; // + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; - // std::cout << new_itoi[x.second[i]] << std::endl; // + // std::cout << new_itoi[x.second[i]] << std::endl; } auto order = vec_sort(itoi_indicators, true); std::vector Tlist(itoi_indicators.size()); @@ -263,13 +268,13 @@ namespace cytnx { v_itoi.back().resize(Tin.rowrank() + 1); } } // is_V - } + } // for each qcharge // process e: Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); BlockUniTensor *e_ptr = new BlockUniTensor(); e_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! e_ptr->_blocks = e_blocks; @@ -295,8 +300,184 @@ namespace cytnx { V._impl = boost::intrusive_ptr(v_ptr); outCyT.push_back(V); } + } // Eigh_Block_UT_internal - } //_Eigh_Block_UT + static void Eigh_BlockFermionic_UT_internal(std::vector &outCyT, + const UniTensor &Tin, const bool &is_V, + const bool &row_v) { + // Exactly the same as Eig_BlockFermionic_UT_internal with Eig() -> Eigh() + // outCyT must be empty and Tin must be checked with proper rowrank! 
+ + // 1) getting the combineBond L and combineBond R for qnum list without grouping: + // + // BDLeft -[ ]- BDRight + // + std::vector strides; + std::vector signflip = Tin.signflip(); + strides.reserve(Tin.rank()); + auto BdLeft = Tin.bonds()[0].clone(); + for (int i = 1; i < Tin.rowrank(); i++) { + strides.push_back(Tin.bonds()[i].qnums().size()); + BdLeft._impl->force_combineBond_(Tin.bonds()[i]._impl, false); // no grouping + } + // std::cout << BdLeft << std::endl; + strides.push_back(1); + auto BdRight = Tin.bonds()[Tin.rowrank()].clone(); + for (int i = Tin.rowrank() + 1; i < Tin.rank(); i++) { + strides.push_back(Tin.bonds()[i].qnums().size()); + BdRight._impl->force_combineBond_(Tin.bonds()[i]._impl, false); // no grouping + } + strides.push_back(1); + // std::cout << BdRight << std::endl; + // std::cout << strides << std::endl; + + // 2) making new inner_to_outer_idx lists for each block: + // -> a. get stride: + for (int i = Tin.rowrank() - 2; i >= 0; i--) { + strides[i] *= strides[i + 1]; + } + for (int i = Tin.rank() - 2; i >= Tin.rowrank(); i--) { + strides[i] *= strides[i + 1]; + } + // std::cout << strides << std::endl; + // ->b. calc new inner_to_outer_idx! + vec2d new_itoi(Tin.Nblocks(), std::vector(2)); + + int cnt; + for (cytnx_uint64 b = 0; b < Tin.Nblocks(); b++) { + const std::vector &tmpv = Tin.get_qindices(b); + for (cnt = 0; cnt < Tin.rowrank(); cnt++) { + new_itoi[b][0] += tmpv[cnt] * strides[cnt]; + } + for (cnt = Tin.rowrank(); cnt < Tin.rank(); cnt++) { + new_itoi[b][1] += tmpv[cnt] * strides[cnt]; + } + } + // std::cout << new_itoi << std::endl; + + // 3) categorize: + // key = qnum, val = list of block locations: + std::map, std::vector> mgrp; + for (cytnx_uint64 b = 0; b < Tin.Nblocks(); b++) { + mgrp[BdLeft.qnums()[new_itoi[b][0]]].push_back(b); + } + + // 4) for each qcharge in key, combining the blocks into a big chunk! + // ->a initialize an empty shell of UniTensors! 
+ vec2d aux_qnums; // for sharing bond + std::vector aux_degs; // for sharing bond + std::vector e_blocks; // for eigenvalues + + vec2d v_itoi; // for eigen vectors + std::vector v_blocks; + + // vec2d vT_itoi; // for vT + // std::vector vT_blocks; + + for (auto const &x : mgrp) { + vec2d itoi_indicators(x.second.size()); + // std::cout << x.second.size() << "-------" << std::endl; + for (int i = 0; i < x.second.size(); i++) { + itoi_indicators[i] = new_itoi[x.second[i]]; + // std::cout << new_itoi[x.second[i]] << std::endl; + } + auto order = vec_sort(itoi_indicators, true); + std::vector Tlist(itoi_indicators.size()); + std::vector row_szs(order.size(), 1); + cytnx_uint64 Rblk_dim = 0; + cytnx_int64 tmp = -1; + cytnx_int64 current_block; + for (int i = 0; i < order.size(); i++) { + current_block = x.second[order[i]]; + if (itoi_indicators[i][0] != tmp) { + tmp = itoi_indicators[i][0]; + Rblk_dim++; + } + Tlist[i] = Tin.get_blocks_()[current_block]; + for (int j = 0; j < Tin.rowrank(); j++) { + row_szs[i] *= Tlist[i].shape()[j]; + } + if (signflip[current_block]) { + Tlist[i] = -Tlist[i]; // copies Tensor + // Tlist[i] = Tlist[i].Mul(-1); // copies Tensor + Tlist[i].reshape_({row_szs[i], -1}); + } else + Tlist[i] = Tlist[i].reshape({row_szs[i], -1}); + } + cytnx_error_msg(Tlist.size() % Rblk_dim, "[Internal ERROR] Tlist is not complete!%s", "\n"); + // BTen is the big block!! + cytnx_uint64 Cblk_dim = Tlist.size() / Rblk_dim; + Tensor BTen = algo::_fx_Matric_combine(Tlist, Rblk_dim, Cblk_dim); + // std::cout << BTen; + // Now we can perform linalg! 
+ aux_qnums.push_back(x.first); + auto out = linalg::Eigh(BTen, is_V, row_v); + aux_degs.push_back(out[0].shape()[0]); + e_blocks.push_back(out[0]); + + if (is_V) { + // std::cout << row_szs << std::endl; + // std::cout << out[tr].shape() << std::endl; + std::vector split_dims; + for (int i = 0; i < Rblk_dim; i++) { + split_dims.push_back(row_szs[i * Cblk_dim]); + } + std::vector blks; + // std::cout< new_shape(Tin.rowrank() + 1); + new_shape.back() = -1; + for (int ti = 0; ti < blks.size(); ti++) { + v_blocks.push_back(blks[ti]); + v_itoi.push_back(Tin.get_qindices(x.second[order[ti * Cblk_dim]])); + + // reshaping: + for (int i = 0; i < Tin.rowrank(); i++) { + new_shape[i] = + Tin.bonds()[i] + .getDegeneracies()[Tin.get_qindices(x.second[order[ti * Cblk_dim]])[i]]; + } + v_blocks.back().reshape_(new_shape); + + v_itoi.back()[Tin.rowrank()] = e_blocks.size() - 1; + v_itoi.back().resize(Tin.rowrank() + 1); + } + } // is_V + } // for each qcharge + + // process e: + Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); + BlockFermionicUniTensor *e_ptr = new BlockFermionicUniTensor(); + e_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, + Device.cpu, // these two will be overwritten later, so doesnt matter. + true, // is_diag! + true); // no_alloc! 
+ e_ptr->_blocks = e_blocks; + UniTensor e; + e._impl = boost::intrusive_ptr(e_ptr); + + outCyT.push_back(e); + + if (is_V) { + BlockFermionicUniTensor *v_ptr = new BlockFermionicUniTensor(); + for (int i = 0; i < Tin.rowrank(); i++) { + v_ptr->_bonds.push_back(Tin.bonds()[i].clone()); + v_ptr->_labels.push_back(Tin.labels()[i]); + } + v_ptr->_bonds.push_back(Bd_aux.redirect()); + v_ptr->_labels.push_back("_aux_L"); + v_ptr->_rowrank = Tin.rowrank(); + v_ptr->_is_diag = false; + v_ptr->_is_braket_form = v_ptr->_update_braket(); + v_ptr->_inner_to_outer_idx = v_itoi; + v_ptr->_blocks = v_blocks; + v_ptr->_signflip = std::vector(v_blocks.size(), false); + UniTensor V; + V._impl = boost::intrusive_ptr(v_ptr); + outCyT.push_back(V); + } + } // Eigh_BlockFermionic_UT_internal std::vector Eigh(const UniTensor &Tin, const bool &is_V, const bool &row_v) { // using rowrank to split the bond to form a matrix. @@ -310,21 +491,22 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _Eigh_Dense_UT(outCyT, Tin, is_V, row_v); + Eigh_Dense_UT_internal(outCyT, Tin, is_V, row_v); } else if (Tin.uten_type() == UTenType.Block) { - _Eigh_Block_UT(outCyT, Tin, is_V, row_v); + Eigh_Block_UT_internal(outCyT, Tin, is_V, row_v); + + } else if (Tin.uten_type() == UTenType.BlockFermionic) { + Eigh_BlockFermionic_UT_internal(outCyT, Tin, is_V, row_v); + } else { - cytnx_error_msg( - true, - "[ERROR] Eigh, unsupported type of UniTensor only support (Dense and Block). " - "something wrong internal%s", - "\n"); - } // is block form ? 
+ cytnx_error_msg(true, "[ERROR] Eigh only supports Dense/Block/BlockFermionic UniTensors.%s", + "\n"); + } // ut type return outCyT; - }; // Eigh + } // Eigh } // namespace linalg } // namespace cytnx diff --git a/src/linalg/Gesvd.cpp b/src/linalg/Gesvd.cpp index d6d1bb86d..37191a561 100644 --- a/src/linalg/Gesvd.cpp +++ b/src/linalg/Gesvd.cpp @@ -1,3 +1,5 @@ +#include "linalg.hpp" + #include #include #include @@ -5,8 +7,6 @@ #include "Tensor.hpp" #include "UniTensor.hpp" #include "algo.hpp" -#include "linalg.hpp" -using namespace std; #ifdef BACKEND_TORCH #else @@ -77,16 +77,10 @@ namespace cytnx { } } - } // namespace linalg - -} // namespace cytnx - -namespace cytnx { - namespace linalg { - // actual impls: - void _Gesvd_Dense_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const bool &is_U, const bool &is_vT) { + static void Gesvd_Dense_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, const bool &is_U, + const bool &is_vT) { //[Note] outCyT must be empty! // DenseUniTensor: @@ -100,17 +94,17 @@ namespace cytnx { tmp.contiguous_(); } - vector tmps = tmp.shape(); - vector oldshape(tmps.begin(), tmps.end()); + std::vector tmps = tmp.shape(); + std::vector oldshape(tmps.begin(), tmps.end()); tmps.clear(); - vector oldlabel = Tin.labels(); + std::vector oldlabel = Tin.labels(); // collapse as Matrix: cytnx_int64 rowdim = 1; for (cytnx_uint64 i = 0; i < Tin.rowrank(); i++) rowdim *= tmp.shape()[i]; tmp.reshape_({rowdim, -1}); - vector outT = cytnx::linalg::Gesvd(tmp, is_U, is_vT); + std::vector outT = cytnx::linalg::Gesvd(tmp, is_U, is_vT); if (Tin.is_contiguous()) tmp.reshape_(oldshape); int t = 0; @@ -123,35 +117,35 @@ namespace cytnx { Cy_S.Init({newBond, newBond}, {std::string("_aux_L"), std::string("_aux_R")}, 1, Type.Double, Tin.device(), true); // it is just reference so no hurt to alias ^^ - // cout << "[AFTER INIT]" << endl; + // std::cout << "[AFTER INIT]" << std::endl; Cy_S.put_block_(outT[t]); t++; if (is_U) { cytnx::UniTensor &Cy_U 
= outCyT[t]; cytnx_error_msg(Tin.rowrank() > oldshape.size(), - "[ERROR] The rowrank of the input unitensor is larger than the rank of the " + "[ERROR] The rowrank of the input UniTensor is larger than the rank of the " "contained tensor.%s", "\n"); - vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); + std::vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); shapeU.push_back(-1); outT[t].reshape_(shapeU); Cy_U.Init(outT[t], false, Tin.rowrank()); - vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); + std::vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); Cy_U.set_labels(labelU); t++; // U } if (is_vT) { cytnx::UniTensor &Cy_vT = outCyT[t]; - vector shapevT(Tin.rank() - Tin.rowrank() + 1); + std::vector shapevT(Tin.rank() - Tin.rowrank() + 1); shapevT[0] = -1; memcpy(&shapevT[1], &oldshape[Tin.rowrank()], sizeof(cytnx_int64) * (shapevT.size() - 1)); outT[t].reshape_(shapevT); Cy_vT.Init(outT[t], false, 1); - // cout << shapevT.size() << endl; - vector labelvT(shapevT.size()); + // std::cout << shapevT.size() << std::endl; + std::vector labelvT(shapevT.size()); labelvT[0] = Cy_S.labels()[1]; // memcpy(&labelvT[1], &oldlabel[Tin.rowrank()], sizeof(cytnx_int64) * (labelvT.size() - // 1)); @@ -185,10 +179,11 @@ namespace cytnx { } } // if tag - } + } // Gesvd_Dense_UT_internal - void _Gesvd_Block_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const bool &is_U, const bool &is_vT) { + static void Gesvd_Block_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, const bool &is_U, + const bool &is_vT) { // outCyT must be empty and Tin must be checked with proper rowrank! // 1) getting the combineBond L and combineBond R for qnum list without grouping: @@ -245,9 +240,9 @@ namespace cytnx { } // 4) for each qcharge in key, combining the blocks into a big chunk! - // ->a initialize an empty shell of UniTensor! + // ->a initialize an empty shell of UniTensors! 
vec2d aux_qnums; // for sharing bond - std::vector aux_degs; // forsharing bond + std::vector aux_degs; // for sharing bond std::vector S_blocks; vec2d U_itoi; // for U @@ -259,7 +254,7 @@ namespace cytnx { int tr; for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; // std::cout << new_itoi[x.second[i]] << std::endl; @@ -350,13 +345,13 @@ namespace cytnx { tr++; } // is_vT - } + } // for each qcharge // process S: Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); BlockUniTensor *S_ptr = new BlockUniTensor(); S_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! S_ptr->_blocks = S_blocks; @@ -402,11 +397,11 @@ namespace cytnx { outCyT.push_back(vT); } - } // _Gesvd_Block_UT + } // Gesvd_Block_UT_internal - void _Gesvd_BlockFermionic_UT(std::vector &outCyT, - const cytnx::UniTensor &Tin, const bool &is_U, - const bool &is_vT) { + static void Gesvd_BlockFermionic_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, const bool &is_U, + const bool &is_vT) { //[8 Oct 2024] This is a copy from BlockUniTensor; signflips included // outCyT must be empty and Tin must be checked with proper rowrank! 
@@ -479,7 +474,7 @@ namespace cytnx { int tr; for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; // std::cout << new_itoi[x.second[i]] << std::endl; @@ -583,7 +578,7 @@ namespace cytnx { Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); BlockFermionicUniTensor *S_ptr = new BlockFermionicUniTensor(); S_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! S_ptr->_blocks = S_blocks; @@ -631,7 +626,7 @@ namespace cytnx { outCyT.push_back(vT); } - } // _Gesvd_BlockFermionic_UT + } // Gesvd_BlockFermionic_UT_internal std::vector Gesvd(const cytnx::UniTensor &Tin, const bool &is_U, const bool &is_vT) { @@ -642,23 +637,23 @@ namespace cytnx { cytnx_error_msg(Tin.is_diag(), "[Gesvd][ERROR] SVD for diagonal UniTensor is trivial and currently not " - "support. Use other manipulation.%s", + "supported. Use other manipulations.%s", "\n"); std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _Gesvd_Dense_UT(outCyT, Tin, is_U, is_vT); + Gesvd_Dense_UT_internal(outCyT, Tin, is_U, is_vT); } else if (Tin.uten_type() == UTenType.Block) { - _Gesvd_Block_UT(outCyT, Tin, is_U, is_vT); + Gesvd_Block_UT_internal(outCyT, Tin, is_U, is_vT); } else if (Tin.uten_type() == UTenType.BlockFermionic) { - _Gesvd_BlockFermionic_UT(outCyT, Tin, is_U, is_vT); + Gesvd_BlockFermionic_UT_internal(outCyT, Tin, is_U, is_vT); } else { cytnx_error_msg( true, "[ERROR] Gesvd only supports Dense/Block/BlockFermionic UniTensors.%s", "\n"); - } // is block form ? 
+ } // ut type return outCyT; diff --git a/src/linalg/Gesvd_truncate.cpp b/src/linalg/Gesvd_truncate.cpp index 4b0c29995..ea16c4523 100644 --- a/src/linalg/Gesvd_truncate.cpp +++ b/src/linalg/Gesvd_truncate.cpp @@ -1,10 +1,13 @@ +#include "linalg.hpp" + +#include +#include #include -#include "Accessor.hpp" #include "Tensor.hpp" #include "UniTensor.hpp" #include "algo.hpp" -#include "linalg.hpp" +#include "Accessor.hpp" #ifdef BACKEND_TORCH #else @@ -99,10 +102,12 @@ namespace cytnx { } } - void _gesvd_truncate_Dense_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const cytnx_uint64 &keepdim, const double &err, const bool &is_U, - const bool &is_vT, const unsigned int &return_err, - const cytnx_uint64 &mindim) { + static void Gesvd_truncate_Dense_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, + const cytnx_uint64 &keepdim, const double &err, + const bool &is_U, const bool &is_vT, + const unsigned int &return_err, + const cytnx_uint64 &mindim) { // DenseUniTensor: cytnx_uint64 keep_dim = keepdim; @@ -144,7 +149,7 @@ namespace cytnx { cytnx::UniTensor &Cy_U = outCyT[t]; // shape cytnx_error_msg(Tin.rowrank() > oldshape.size(), - "[ERROR] The rowrank of the input unitensor is larger than the rank of the " + "[ERROR] The rowrank of the input UniTensor is larger than the rank of the " "contained tensor.%s", "\n"); std::vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); @@ -205,12 +210,17 @@ namespace cytnx { } // if tag if (return_err) outCyT.back().Init(outT.back(), false, 0); - }; // svdt Dense - - void _gesvd_truncate_Block_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const cytnx_uint64 &keepdim, const double &err, const bool &is_U, - const bool &is_vT, const unsigned int &return_err, - const cytnx_uint64 &mindim) { + } // Gesvd_truncate_Dense_UT_internal + + static void Gesvd_truncate_Block_UTs_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, + const cytnx_uint64 &keepdim, const double &err, + const bool 
&is_U, const bool &is_vT, + const unsigned int &return_err, + const cytnx_uint64 &mindim) { + // currently, Gesvd is used as a standard for the full SVD before truncation + // handles BlockFermionicUniTensor as well: elements of _signflip are removed if blocks are + // erased cytnx_uint64 keep_dim = keepdim; outCyT = linalg::Gesvd(Tin, is_U, is_vT); @@ -283,8 +293,13 @@ namespace cytnx { // remove: // vec_erase_(S.get_itoi(),to_be_removed); S.get_itoi() = new_itoi; - vec_erase_(S.get_blocks_(), to_be_removed); - vec_erase_(S.bonds()[0].qnums(), to_be_removed); + if (!to_be_removed.empty()) { + vec_erase_(S.get_blocks_(), to_be_removed); + vec_erase_(S.bonds()[0].qnums(), to_be_removed); + if (Tin.uten_type() == UTenType.BlockFermionic) { + vec_erase_(S.signflip_(), to_be_removed); + } + } S.bonds()[0]._impl->_degs = new_dims; S.bonds()[0]._impl->_dim = tot_dim; S.bonds()[1] = S.bonds()[0].redirect(); @@ -311,8 +326,13 @@ namespace cytnx { U.get_qindices(b).back() = new_qid[U.get_qindices(b).back()]; } } - vec_erase_(U.get_itoi(), to_be_removed); - vec_erase_(U.get_blocks_(), to_be_removed); + if (!to_be_removed.empty()) { + vec_erase_(U.get_itoi(), to_be_removed); + vec_erase_(U.get_blocks_(), to_be_removed); + if (Tin.uten_type() == UTenType.BlockFermionic) { + vec_erase_(U.signflip_(), to_be_removed); + } + } t++; } @@ -337,168 +357,14 @@ namespace cytnx { vT.get_qindices(b)[0] = new_qid[vT.get_qindices(b)[0]]; } } - vec_erase_(vT.get_itoi(), to_be_removed); - vec_erase_(vT.get_blocks_(), to_be_removed); - t++; - } - - // handle return_err! 
- if (return_err == 1) { - outCyT.push_back(UniTensor(Tensor({1}, Smin.dtype()))); - outCyT.back().get_block_().storage().at(0) = Smin; - } else if (return_err) { - outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)}))); - } - } // _gesvd_truncate_Block_UT - - void _gesvd_truncate_BlockFermionic_UT(std::vector &outCyT, - const cytnx::UniTensor &Tin, const cytnx_uint64 &keepdim, - const double &err, const bool &is_U, const bool &is_vT, - const unsigned int &return_err, - const unsigned int &mindim) { - //[9 Oct 2024] This is a copy from _gesvd_truncate_Block_UT; - // TODOfermionic: remove signs if blocks are deleted - cytnx_error_msg(true, - "[ERROR][_gesvd_truncate_BlockFermionic_UT] not implemented yet. The " - "signflips need to be removed if blocks are deleted in the truncation.%s", - "\n") cytnx_uint64 keep_dim = keepdim; - - outCyT = linalg::Gesvd(Tin, is_U, is_vT); - - // process truncate: - // 1) concate all s vals from all blk - Tensor Sall = outCyT[0].get_block_(0); - for (int i = 1; i < outCyT[0].Nblocks(); i++) { - Sall = algo::Concatenate(Sall, outCyT[0].get_block_(i)); - } - Sall = algo::Sort(Sall); - - // 2) get the minimum base on the args input. - Scalar Smin; - cytnx_uint64 smidx; - if (keep_dim < Sall.shape()[0]) { - smidx = Sall.shape()[0] - keep_dim; - Smin = Sall.storage()(smidx); - while ((Smin < err) and keep_dim - 1 > mindim) { - keep_dim -= 1; - if (keep_dim == 0) break; - smidx = Sall.shape()[0] - keep_dim; - Smin = Sall.storage()(smidx); - } - - } else { - keep_dim = Sall.shape()[0]; - Smin = Sall.storage()(0); - smidx = 0; - while ((Smin < err)) { - keep_dim -= 1; - if (keep_dim == 0) break; - smidx = Sall.shape()[0] - keep_dim; - Smin = Sall.storage()(smidx); - } - } - - // traversal each block and truncate! - UniTensor &S = outCyT[0]; - std::vector new_dims; // keep_dims for each block! 
- std::vector keep_dims; - keep_dims.reserve(S.Nblocks()); - std::vector new_qid; - new_qid.reserve(S.Nblocks()); - - std::vector> new_itoi; // assume S block is in same order as qnum: - std::vector to_be_removed; - - cytnx_uint64 tot_dim = 0; - cytnx_uint64 cnt = 0; - for (int b = 0; b < S.Nblocks(); b++) { - Storage stmp = S.get_block_(b).storage(); - cytnx_int64 kdim = 0; - for (int i = stmp.size() - 1; i >= 0; i--) { - if (stmp(i) >= Smin) { - kdim = i + 1; - break; - } - } - keep_dims.push_back(kdim); - if (kdim == 0) { - to_be_removed.push_back(b); - new_qid.push_back(-1); - - } else { - new_qid.push_back(new_dims.size()); - new_itoi.push_back({new_dims.size(), new_dims.size()}); - new_dims.push_back(kdim); - tot_dim += kdim; - if (kdim != S.get_blocks_()[b].shape()[0]) - S.get_blocks_()[b] = S.get_blocks_()[b].get({Accessor::range(0, kdim)}); - } - } - - // remove: - // vec_erase_(S.get_itoi(),to_be_removed); - S.get_itoi() = new_itoi; - vec_erase_(S.get_blocks_(), to_be_removed); - // TODOfermionic: remove signs for this block - // vec_erase_(S.signflip(), to_be_removed); - vec_erase_(S.bonds()[0].qnums(), to_be_removed); - S.bonds()[0]._impl->_degs = new_dims; - S.bonds()[0]._impl->_dim = tot_dim; - S.bonds()[1] = S.bonds()[0].redirect(); - - int t = 1; - if (is_U) { - UniTensor &U = outCyT[t]; - to_be_removed.clear(); - U.bonds().back() = S.bonds()[1].clone(); - std::vector acs(U.rank()); - for (int i = 0; i < U.rowrank(); i++) acs[i] = Accessor::all(); - - for (int b = 0; b < U.Nblocks(); b++) { - if (keep_dims[U.get_qindices(b).back()] == 0) - to_be_removed.push_back(b); - else { - /// process blocks: - if (keep_dims[U.get_qindices(b).back()] != U.get_blocks_()[b].shape().back()) { - acs.back() = Accessor::range(0, keep_dims[U.get_qindices(b).back()]); - U.get_blocks_()[b] = U.get_blocks_()[b].get(acs); - } - - // change to new qindices: - U.get_qindices(b).back() = new_qid[U.get_qindices(b).back()]; + if (!to_be_removed.empty()) { + 
vec_erase_(vT.get_itoi(), to_be_removed); + vec_erase_(vT.get_blocks_(), to_be_removed); + if (Tin.uten_type() == UTenType.BlockFermionic) { + vec_erase_(vT.signflip_(), to_be_removed); } } - vec_erase_(U.get_itoi(), to_be_removed); - vec_erase_(U.get_blocks_(), to_be_removed); - // TODOfermionic: remove signs for this block - // vec_erase_(U.signflip(), to_be_removed); - t++; - } - - if (is_vT) { - UniTensor &vT = outCyT[t]; - to_be_removed.clear(); - vT.bonds().front() = S.bonds()[0].clone(); - std::vector acs(vT.rank()); - for (int i = 1; i < vT.rank(); i++) acs[i] = Accessor::all(); - for (int b = 0; b < vT.Nblocks(); b++) { - if (keep_dims[vT.get_qindices(b)[0]] == 0) - to_be_removed.push_back(b); - else { - /// process blocks: - if (keep_dims[vT.get_qindices(b)[0]] != vT.get_blocks_()[b].shape()[0]) { - acs[0] = Accessor::range(0, keep_dims[vT.get_qindices(b)[0]]); - vT.get_blocks_()[b] = vT.get_blocks_()[b].get(acs); - } - // change to new qindices: - vT.get_qindices(b)[0] = new_qid[vT.get_qindices(b)[0]]; - } - } - vec_erase_(vT.get_itoi(), to_be_removed); - vec_erase_(vT.get_blocks_(), to_be_removed); - // TODOfermionic: remove signs for this block - // vec_erase_(vT.signflip(), to_be_removed); t++; } @@ -509,7 +375,7 @@ namespace cytnx { } else if (return_err) { outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)}))); } - } // _gesvd_truncate_BlockFermionic_UT + } // Gesvd_truncate_Block_UTs_internal std::vector Gesvd_truncate(const cytnx::UniTensor &Tin, const cytnx_uint64 &keepdim, const double &err, @@ -535,11 +401,11 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _gesvd_truncate_Dense_UT(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, mindim); - } else if (Tin.uten_type() == UTenType.Block) { - _gesvd_truncate_Block_UT(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, mindim); - } else if (Tin.uten_type() == UTenType.BlockFermionic) { - _gesvd_truncate_BlockFermionic_UT(outCyT, Tin, keepdim, err, 
is_U, is_vT, return_err, + Gesvd_truncate_Dense_UT_internal(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, + mindim); + } else if ((Tin.uten_type() == UTenType.Block) || + (Tin.uten_type() == UTenType.BlockFermionic)) { + Gesvd_truncate_Block_UTs_internal(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, mindim); } else { cytnx_error_msg( @@ -550,12 +416,13 @@ namespace cytnx { } // Gesvd_truncate - void _gesvd_truncate_Block_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const cytnx_uint64 &keepdim, - std::vector min_blockdim, const double &err, - const bool &is_U, const bool &is_vT, - const unsigned int &return_err, const cytnx_uint64 &mindim) { + static void Gesvd_truncate_Block_UTs_internal( + std::vector &outCyT, const cytnx::UniTensor &Tin, const cytnx_uint64 &keepdim, + std::vector min_blockdim, const double &err, const bool &is_U, + const bool &is_vT, const unsigned int &return_err, const cytnx_uint64 &mindim) { // currently, Gesvd is used as a standard for the full SVD before truncation + // handles BlockFermionicUniTensor as well: elements of _signflip are removed if blocks are + // erased cytnx_int64 keep_dim = keepdim; // these must be signed int, because they can become // negative! cytnx_int64 min_dim = (mindim < 1 ? 
1 : mindim); @@ -563,11 +430,10 @@ namespace cytnx { outCyT = linalg::Gesvd(Tin, is_U, is_vT); if (min_blockdim.size() == 1) // if only one element given, make it a vector min_blockdim.resize(outCyT[0].Nblocks(), min_blockdim.front()); - cytnx_error_msg( - min_blockdim.size() != outCyT[0].Nblocks(), - "[ERROR][Gesvd_truncate] min_blockdim must have the same number of elements as " - "blocks in the singular value UniTensor%s", - "\n"); + cytnx_error_msg(min_blockdim.size() != outCyT[0].Nblocks(), + "[ERROR][Gesvd_truncate] min_blockdim must have the same number of elements " + "as blocks in the singular value UniTensor%s", + "\n"); // process truncation: // 1) concate all S vals from all blk but exclude the first min_blockdim Svals in each block @@ -659,7 +525,7 @@ namespace cytnx { std::vector> new_itoi; // assume S block is in same order as qnum: - std::vector to_be_remove; + std::vector to_be_removed; cytnx_uint64 tot_dim = 0; cytnx_uint64 cnt = 0; @@ -680,7 +546,7 @@ namespace cytnx { } keep_dims.push_back(kdim); if (kdim == 0) { - to_be_remove.push_back(b); + to_be_removed.push_back(b); new_qid.push_back(-1); } else { new_qid.push_back(new_dims.size()); @@ -693,10 +559,15 @@ namespace cytnx { } // remove: - // vec_erase_(S.get_itoi(),to_be_remove); + // vec_erase_(S.get_itoi(),to_be_removed); S.get_itoi() = new_itoi; - vec_erase_(S.get_blocks_(), to_be_remove); - vec_erase_(S.bonds()[0].qnums(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(S.get_blocks_(), to_be_removed); + vec_erase_(S.bonds()[0].qnums(), to_be_removed); + if (Tin.uten_type() == UTenType.BlockFermionic) { + vec_erase_(S.signflip_(), to_be_removed); + } + } S.bonds()[0]._impl->_degs = new_dims; S.bonds()[0]._impl->_dim = tot_dim; S.bonds()[1] = S.bonds()[0].redirect(); @@ -704,14 +575,14 @@ namespace cytnx { int t = 1; if (is_U) { UniTensor &U = outCyT[t]; - to_be_remove.clear(); + to_be_removed.clear(); U.bonds().back() = S.bonds()[1].clone(); std::vector acs(U.rank()); for (int 
i = 0; i < U.rowrank(); i++) acs[i] = Accessor::all(); for (int b = 0; b < U.Nblocks(); b++) { if (keep_dims[U.get_qindices(b).back()] == 0) - to_be_remove.push_back(b); + to_be_removed.push_back(b); else { /// process blocks: if (keep_dims[U.get_qindices(b).back()] != U.get_blocks_()[b].shape().back()) { @@ -723,22 +594,27 @@ namespace cytnx { U.get_qindices(b).back() = new_qid[U.get_qindices(b).back()]; } } - vec_erase_(U.get_itoi(), to_be_remove); - vec_erase_(U.get_blocks_(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(U.get_itoi(), to_be_removed); + vec_erase_(U.get_blocks_(), to_be_removed); + if (Tin.uten_type() == UTenType.BlockFermionic) { + vec_erase_(U.signflip_(), to_be_removed); + } + } t++; } if (is_vT) { UniTensor &vT = outCyT[t]; - to_be_remove.clear(); + to_be_removed.clear(); vT.bonds().front() = S.bonds()[0].clone(); std::vector acs(vT.rank()); for (int i = 1; i < vT.rank(); i++) acs[i] = Accessor::all(); for (int b = 0; b < vT.Nblocks(); b++) { if (keep_dims[vT.get_qindices(b)[0]] == 0) - to_be_remove.push_back(b); + to_be_removed.push_back(b); else { /// process blocks: if (keep_dims[vT.get_qindices(b)[0]] != vT.get_blocks_()[b].shape()[0]) { @@ -749,12 +625,18 @@ namespace cytnx { vT.get_qindices(b)[0] = new_qid[vT.get_qindices(b)[0]]; } } - vec_erase_(vT.get_itoi(), to_be_remove); - vec_erase_(vT.get_blocks_(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(vT.get_itoi(), to_be_removed); + vec_erase_(vT.get_blocks_(), to_be_removed); + if (Tin.uten_type() == UTenType.BlockFermionic) { + vec_erase_(vT.signflip_(), to_be_removed); + } + } + t++; } } - } // _gesvd_truncate_Block_UT + } // Gesvd_truncate_Block_UTs_internal std::vector Gesvd_truncate(const cytnx::UniTensor &Tin, const cytnx_uint64 &keepdim, @@ -784,16 +666,18 @@ namespace cytnx { cytnx_error_msg( min_blockdim.size() != 1, "[ERROR][Gesvd_truncate] min_blockdim must have one element for dense UniTensor%s", "\n"); - _gesvd_truncate_Dense_UT(outCyT, Tin, 
keepdim, err, is_U, is_vT, return_err, - std::max(mindim, min_blockdim[0])); + Gesvd_truncate_Dense_UT_internal(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, + std::max(mindim, min_blockdim[0])); - } else if (Tin.uten_type() == UTenType.Block) { - _gesvd_truncate_Block_UT(outCyT, Tin, keepdim, min_blockdim, err, is_U, is_vT, return_err, - mindim); + } else if ((Tin.uten_type() == UTenType.Block) || + (Tin.uten_type() == UTenType.BlockFermionic)) { + Gesvd_truncate_Block_UTs_internal(outCyT, Tin, keepdim, min_blockdim, err, is_U, is_vT, + return_err, mindim); } else { cytnx_error_msg( - true, "[ERROR][Gesvd_truncate] only Dense/Block UniTensors are supported.%s", "\n"); + true, "[ERROR] Gesvd_truncate only supports Dense/Block/BlockFermionic UniTensors.%s", + "\n"); } return outCyT; diff --git a/src/linalg/Qr.cpp b/src/linalg/Qr.cpp index 01bd9bd8a..231ac2965 100644 --- a/src/linalg/Qr.cpp +++ b/src/linalg/Qr.cpp @@ -1,10 +1,13 @@ #include "linalg.hpp" + +#include +#include +#include + #include "Tensor.hpp" #include "UniTensor.hpp" #include "algo.hpp" -#include -#include -using namespace std; + typedef cytnx::Accessor ac; #ifdef BACKEND_TORCH @@ -29,7 +32,7 @@ namespace cytnx { Tensor tau, Q, R, D; // D is not used here. 
tau.Init({n_tau}, in.dtype(), in.device()); - tau.storage().set_zeros(); // if type is complex, S should be real + tau.storage().set_zeros(); R.Init({n_tau, Tin.shape()[1]}, in.dtype(), in.device()); R.storage().set_zeros(); @@ -87,14 +90,8 @@ namespace cytnx { } } - } // namespace linalg - -} // namespace cytnx - -namespace cytnx { - namespace linalg { - - void _qr_Dense_UT(std::vector &outCyT, const UniTensor &Tin, const bool &is_tau) { + static void Qr_Dense_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_tau) { Tensor tmp; if (Tin.is_contiguous()) tmp = Tin.get_block_(); @@ -103,27 +100,27 @@ namespace cytnx { tmp.contiguous_(); } - vector tmps = tmp.shape(); - vector oldshape(tmps.begin(), tmps.end()); + std::vector tmps = tmp.shape(); + std::vector oldshape(tmps.begin(), tmps.end()); tmps.clear(); - vector oldlabel = Tin.labels(); + std::vector oldlabel = Tin.labels(); // collapse as Matrix: cytnx_int64 rowdim = 1; for (cytnx_uint64 i = 0; i < Tin.rowrank(); i++) rowdim *= tmp.shape()[i]; tmp.reshape_({rowdim, -1}); - vector outT = cytnx::linalg::Qr(tmp, is_tau); + std::vector outT = cytnx::linalg::Qr(tmp, is_tau); if (Tin.is_contiguous()) tmp.reshape_(oldshape); int t = 0; outCyT.resize(outT.size()); - string newlbl = "_aux_"; + std::string newlbl = "_aux_"; // Q - vector Qshape; - vector Qlbl; + std::vector Qshape; + std::vector Qlbl; for (int i = 0; i < Tin.rowrank(); i++) { Qshape.push_back(oldshape[i]); Qlbl.push_back(oldlabel[i]); @@ -166,10 +163,11 @@ namespace cytnx { outCyT[1].bonds()[i].set_type(Tin.bonds()[Tin.rowrank() + i - 1].type()); } outCyT[1]._impl->_is_braket_form = outCyT[1]._impl->_update_braket(); - } - }; + } // if tag + } // QR_Dense_UT_internal - void _qr_Block_UT(std::vector &outCyT, const UniTensor &Tin, const bool &is_tau) { + static void Qr_Block_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_tau) { // outCyT must be empty and Tin must be checked with proper rowrank! 
// 1) getting the combineBond L and combineBond R for qnum list without grouping: @@ -226,10 +224,9 @@ namespace cytnx { } // 4) for each qcharge in key, combining the blocks into a big chunk! - // ->a initialize an empty shell of UniTensor! + // ->a initialize an empty shell of UniTensors! vec2d aux_qnums; // for sharing bond - std::vector aux_degs; // forsharing bond - + std::vector aux_degs; // for sharing bond std::vector tau_blocks; vec2d Q_itoi; // for Q @@ -241,7 +238,7 @@ namespace cytnx { cytnx_uint64 trcntr = 0; for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; // std::cout << new_itoi[x.second[i]] << std::endl; @@ -370,7 +367,229 @@ namespace cytnx { if (is_tau) { BlockUniTensor *tau_ptr = new BlockUniTensor(); tau_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_tau_L", "_tau_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. + true, // is_diag! + true); // no_alloc! + tau_ptr->_blocks = tau_blocks; + UniTensor tau; + tau._impl = boost::intrusive_ptr(tau_ptr); + + outCyT.push_back(tau); + } + } // Qr_Block_UT_internal + + static void Qr_BlockFermionic_UT_internal(std::vector &outCyT, const UniTensor &Tin, + const bool &is_tau) { + // outCyT must be empty and Tin must be checked with proper rowrank! 
+ + // 1) getting the combineBond L and combineBond R for qnum list without grouping: + // + // BDLeft -[ ]- BDRight + // + std::vector strides; + std::vector signflip = Tin.signflip(); + strides.reserve(Tin.rank()); + auto BdLeft = Tin.bonds()[0].clone(); + for (int i = 1; i < Tin.rowrank(); i++) { + strides.push_back(Tin.bonds()[i].qnums().size()); + BdLeft._impl->force_combineBond_(Tin.bonds()[i]._impl, false); // no grouping + } + // std::cout << BdLeft << std::endl; + strides.push_back(1); + auto BdRight = Tin.bonds()[Tin.rowrank()].clone(); + for (int i = Tin.rowrank() + 1; i < Tin.rank(); i++) { + strides.push_back(Tin.bonds()[i].qnums().size()); + BdRight._impl->force_combineBond_(Tin.bonds()[i]._impl, false); // no grouping + } + strides.push_back(1); + // std::cout << BdRight << std::endl; + // std::cout << strides << std::endl; + + // 2) making new inner_to_outer_idx lists for each block: + // -> a. get stride: + for (int i = Tin.rowrank() - 2; i >= 0; i--) { + strides[i] *= strides[i + 1]; + } + for (int i = Tin.rank() - 2; i >= Tin.rowrank(); i--) { + strides[i] *= strides[i + 1]; + } + // std::cout << strides << std::endl; + // ->b. calc new inner_to_outer_idx! + vec2d new_itoi(Tin.Nblocks(), std::vector(2)); + + int cnt; + for (cytnx_uint64 b = 0; b < Tin.Nblocks(); b++) { + const std::vector &tmpv = Tin.get_qindices(b); + for (cnt = 0; cnt < Tin.rowrank(); cnt++) { + new_itoi[b][0] += tmpv[cnt] * strides[cnt]; + } + for (cnt = Tin.rowrank(); cnt < Tin.rank(); cnt++) { + new_itoi[b][1] += tmpv[cnt] * strides[cnt]; + } + } + // std::cout << new_itoi << std::endl; + + // 3) categorize: + // key = qnum, val = list of block locations: + std::map, std::vector> mgrp; + for (cytnx_uint64 b = 0; b < Tin.Nblocks(); b++) { + mgrp[BdLeft.qnums()[new_itoi[b][0]]].push_back(b); + } + + // 4) for each qcharge in key, combining the blocks into a big chunk! + // ->a initialize an empty shell of UniTensors! 
+ vec2d aux_qnums; // for sharing bond + std::vector aux_degs; // for sharing bond + std::vector tau_blocks; + + vec2d Q_itoi; // for Q + std::vector Q_blocks; + + vec2d R_itoi; // for R + std::vector R_blocks; + + cytnx_uint64 trcntr = 0; + for (auto const &x : mgrp) { + vec2d itoi_indicators(x.second.size()); + // std::cout << x.second.size() << "-------" << std::endl; + for (int i = 0; i < x.second.size(); i++) { + itoi_indicators[i] = new_itoi[x.second[i]]; + // std::cout << new_itoi[x.second[i]] << std::endl; + } + auto order = vec_sort(itoi_indicators, true); + std::vector Tlist(itoi_indicators.size()); + std::vector row_szs(order.size(), 1); + cytnx_uint64 Rblk_dim = 0; + cytnx_int64 tmp = -1; + cytnx_int64 current_block; + for (int i = 0; i < order.size(); i++) { + current_block = x.second[order[i]]; + if (itoi_indicators[i][0] != tmp) { + tmp = itoi_indicators[i][0]; + Rblk_dim++; + } + Tlist[i] = Tin.get_blocks_()[current_block]; + for (int j = 0; j < Tin.rowrank(); j++) { + row_szs[i] *= Tlist[i].shape()[j]; + } + if (signflip[current_block]) { + Tlist[i] = -Tlist[i]; // copies Tensor + // Tlist[i] = Tlist[i].Mul(-1); // copies Tensor + Tlist[i].reshape_({row_szs[i], -1}); + } else + Tlist[i] = Tlist[i].reshape({row_szs[i], -1}); // copies Tensor + } + cytnx_error_msg(Tlist.size() % Rblk_dim, "[Internal ERROR] Tlist is not complete!%s", "\n"); + // BTen is the big block!! + cytnx_uint64 Cblk_dim = Tlist.size() / Rblk_dim; + Tensor BTen = algo::_fx_Matric_combine(Tlist, Rblk_dim, Cblk_dim); + + // Now we can perform linalg! 
+ aux_qnums.push_back(x.first); + auto out = linalg::Qr(BTen, is_tau); + aux_degs.push_back(out[0].shape().back()); + + // Q + std::vector split_dims; + for (int i = 0; i < Rblk_dim; i++) { + split_dims.push_back(row_szs[i * Cblk_dim]); + } + std::vector blks; + algo::Vsplit_(blks, out[0], split_dims); + out[0] = Tensor(); + std::vector new_shape(Tin.rowrank() + 1); + new_shape.back() = -1; + for (int ti = 0; ti < blks.size(); ti++) { + Q_blocks.push_back(blks[ti]); + Q_itoi.push_back(Tin.get_qindices(x.second[order[ti * Cblk_dim]])); + + // reshaping: + for (int i = 0; i < Tin.rowrank(); i++) { + new_shape[i] = + Tin.bonds()[i].getDegeneracies()[Tin.get_qindices(x.second[order[ti * Cblk_dim]])[i]]; + } + Q_blocks.back().reshape_(new_shape); + + Q_itoi.back()[Tin.rowrank()] = trcntr; + Q_itoi.back().resize(Tin.rowrank() + 1); + } + + // R + split_dims.clear(); + for (int i = 0; i < Cblk_dim; i++) { + split_dims.push_back(Tlist[i].shape().back()); + } + blks.clear(); + algo::Hsplit_(blks, out[1], split_dims); + out[1] = Tensor(); + + new_shape.resize(Tin.rank() - Tin.rowrank() + 1); + new_shape[0] = -1; + for (int ti = 0; ti < blks.size(); ti++) { + R_blocks.push_back(blks[ti]); + auto &tpitoi = Tin.get_qindices(x.second[order[ti]]); + R_itoi.push_back({trcntr}); + for (int i = Tin.rowrank(); i < Tin.rank(); i++) { + R_itoi.back().push_back(tpitoi[i]); + } + + // reshaping: + for (int i = Tin.rowrank(); i < Tin.rank(); i++) { + new_shape[i - Tin.rowrank() + 1] = Tin.bonds()[i].getDegeneracies()[tpitoi[i]]; + } + R_blocks.back().reshape_(new_shape); + } + + if (is_tau) { + tau_blocks.push_back(out[2]); + } + + trcntr++; + + } // for each qcharge + + Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); + + // process Q + BlockFermionicUniTensor *Q_ptr = new BlockFermionicUniTensor(); + for (int i = 0; i < Tin.rowrank(); i++) { + Q_ptr->_bonds.push_back(Tin.bonds()[i].clone()); + Q_ptr->_labels.push_back(Tin.labels()[i]); + } + 
Q_ptr->_bonds.push_back(Bd_aux.redirect()); + Q_ptr->_labels.push_back("_aux_"); + Q_ptr->_rowrank = Tin.rowrank(); + Q_ptr->_is_diag = false; + Q_ptr->_is_braket_form = Q_ptr->_update_braket(); + Q_ptr->_inner_to_outer_idx = Q_itoi; + Q_ptr->_blocks = Q_blocks; + Q_ptr->_signflip = std::vector(Q_blocks.size(), false); + UniTensor Q; + Q._impl = boost::intrusive_ptr(Q_ptr); + outCyT.push_back(Q); + + // process R: + BlockFermionicUniTensor *R_ptr = new BlockFermionicUniTensor(); + R_ptr->_bonds.push_back(Bd_aux); + R_ptr->_labels.push_back("_aux_"); + for (int i = Tin.rowrank(); i < Tin.rank(); i++) { + R_ptr->_bonds.push_back(Tin.bonds()[i].clone()); + R_ptr->_labels.push_back(Tin.labels()[i]); + } + R_ptr->_rowrank = 1; + R_ptr->_is_diag = false; + R_ptr->_is_braket_form = R_ptr->_update_braket(); + R_ptr->_inner_to_outer_idx = R_itoi; + R_ptr->_blocks = R_blocks; + R_ptr->_signflip = std::vector(R_blocks.size(), false); + UniTensor R; + R._impl = boost::intrusive_ptr(R_ptr); + outCyT.push_back(R); + + if (is_tau) { + BlockFermionicUniTensor *tau_ptr = new BlockFermionicUniTensor(); + tau_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_tau_L", "_tau_R"}, 1, Type.Double, + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! tau_ptr->_blocks = tau_blocks; @@ -379,7 +598,7 @@ namespace cytnx { outCyT.push_back(tau); } - }; + } // Qr_BlockFermionic_UT_internal std::vector Qr(const UniTensor &Tin, const bool &is_tau) { // using rowrank to split the bond to form a matrix. 
@@ -389,16 +608,16 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _qr_Dense_UT(outCyT, Tin, is_tau); - return outCyT; + Qr_Dense_UT_internal(outCyT, Tin, is_tau); } else if (Tin.uten_type() == UTenType.Block) { - _qr_Block_UT(outCyT, Tin, is_tau); - return outCyT; - + Qr_Block_UT_internal(outCyT, Tin, is_tau); + } else if (Tin.uten_type() == UTenType.BlockFermionic) { + Qr_BlockFermionic_UT_internal(outCyT, Tin, is_tau); } else { cytnx_error_msg(true, "[QR for sparse UniTensor is developling%s]", "\n"); } - }; + return outCyT; + } } // namespace linalg } // namespace cytnx diff --git a/src/linalg/Rsvd.cpp b/src/linalg/Rsvd.cpp index 0699a9ef0..6f4c3781f 100644 --- a/src/linalg/Rsvd.cpp +++ b/src/linalg/Rsvd.cpp @@ -143,7 +143,7 @@ namespace cytnx { if (is_U) { cytnx::UniTensor &Cy_U = outCyT[t]; cytnx_error_msg(Tin.rowrank() > oldshape.size(), - "[ERROR] The rowrank of the input unitensor is larger than the rank of the " + "[ERROR] The rowrank of the input UniTensor is larger than the rank of the " "contained tensor.%s", "\n"); std::vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); diff --git a/src/linalg/Rsvd_truncate.cpp b/src/linalg/Rsvd_truncate.cpp index d251edc3e..f702ac0d9 100644 --- a/src/linalg/Rsvd_truncate.cpp +++ b/src/linalg/Rsvd_truncate.cpp @@ -1,11 +1,13 @@ +#include "linalg.hpp" + +#include #include #include -#include "Accessor.hpp" #include "Tensor.hpp" #include "UniTensor.hpp" #include "algo.hpp" -#include "linalg.hpp" +#include "Accessor.hpp" #ifdef BACKEND_TORCH #else @@ -135,11 +137,13 @@ namespace cytnx { } } // Rsvd_truncate(Tensor) - void _rsvd_truncate_Dense_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - cytnx_uint64 keepdim, double err, bool is_U, bool is_vT, - unsigned int return_err, cytnx_uint64 mindim, - cytnx_uint64 oversampling_summand, double oversampling_factor, - cytnx_uint64 power_iteration, unsigned int seed) { + static void 
Rsvd_truncate_Dense_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, cytnx_uint64 keepdim, + double err, bool is_U, bool is_vT, + unsigned int return_err, cytnx_uint64 mindim, + cytnx_uint64 oversampling_summand, + double oversampling_factor, + cytnx_uint64 power_iteration, unsigned int seed) { // DenseUniTensor: cytnx_uint64 keep_dim = keepdim; @@ -180,7 +184,7 @@ namespace cytnx { cytnx::UniTensor &Cy_U = outCyT[t]; // shape cytnx_error_msg(Tin.rowrank() > oldshape.size(), - "[ERROR] The rowrank of the input unitensor is larger than the rank of the " + "[ERROR] The rowrank of the input UniTensor is larger than the rank of the " "contained tensor.%s", "\n"); std::vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); @@ -241,7 +245,7 @@ namespace cytnx { } // if tag if (return_err) outCyT.back().Init(outT.back(), false, 0); - }; // _rsvd_truncate_Dense_UT + }; // Rsvd_truncate_Dense_UT_internal std::vector Rsvd_truncate(const cytnx::UniTensor &Tin, cytnx_uint64 keepdim, double err, bool is_U, bool is_vT, @@ -268,14 +272,14 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _rsvd_truncate_Dense_UT(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, mindim, - oversampling_summand, oversampling_factor, power_iteration, seed); + Rsvd_truncate_Dense_UT_internal(outCyT, Tin, keepdim, err, is_U, is_vT, return_err, mindim, + oversampling_summand, oversampling_factor, power_iteration, + seed); // } else if (Tin.uten_type() == UTenType.Block) { - // _rsvd_truncate_Block_UT(outCyT, Tin, keepdim, err, is_U, is_vT, + // Rsvd_truncate_Block_UT_internal(outCyT, Tin, keepdim, err, is_U, is_vT, // return_err, mindim); } else { - cytnx_error_msg(true, "[ERROR][Rsvd_truncate] only Dense UniTensors are supported.%s", - "\n"); + cytnx_error_msg(true, "[ERROR] Rsvd_truncate only supports Dense UniTensors.%s", "\n"); } return outCyT; diff --git a/src/linalg/Svd.cpp b/src/linalg/Svd.cpp index db5fa338c..182d35462 100644 --- 
a/src/linalg/Svd.cpp +++ b/src/linalg/Svd.cpp @@ -1,3 +1,5 @@ +#include "linalg.hpp" + #include #include #include @@ -5,16 +7,12 @@ #include "Tensor.hpp" #include "UniTensor.hpp" #include "algo.hpp" -#include "linalg.hpp" -using namespace std; #ifdef BACKEND_TORCH #else #include "backend/linalg_internal_interface.hpp" - namespace cytnx { namespace linalg { - std::vector Svd(const Tensor &Tin, const bool &is_UvT) { cytnx_error_msg(Tin.shape().size() != 2, "[Svd] error, Svd can only operate on rank-2 Tensor.%s", "\n"); @@ -68,11 +66,11 @@ namespace cytnx { std::vector out; out.push_back(S); if (is_UvT) { - // cout << "Original:\n" << in << endl; - // cout << "S:\n" << S << endl; - // cout << "Recompose1!:\n" << Matmul(Matmul(U, Diag(S)), vT) << endl; - // cout << "Recompose2!:\n" - // << Tensordot(Tensordot(U, Diag(S), {1}, {0}), vT, {1}, {0}) << endl; + // std::cout << "Original:\n" << in << std::endl; + // std::cout << "S:\n" << S << std::endl; + // std::cout << "Recompose1!:\n" << Matmul(Matmul(U, Diag(S)), vT) << std::endl; + // std::cout << "Recompose2!:\n" + // << Tensordot(Tensordot(U, Diag(S), {1}, {0}), vT, {1}, {0}) << std::endl; out.push_back(U); out.push_back(vT); } @@ -86,20 +84,13 @@ namespace cytnx { } } - } // namespace linalg - -} // namespace cytnx - -namespace cytnx { - namespace linalg { - // actual impls: - void _svd_Dense_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const bool &compute_uv) { + static void Svd_Dense_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, const bool &compute_uv) { //[Note] outCyT must be empty! 
// DenseUniTensor: - // cout << "entry Dense UT" << endl; + // std::cout << "entry Dense UT" << std::endl; Tensor tmp; if (Tin.is_contiguous()) @@ -109,17 +100,17 @@ namespace cytnx { tmp.contiguous_(); } - vector tmps = tmp.shape(); - vector oldshape(tmps.begin(), tmps.end()); + std::vector tmps = tmp.shape(); + std::vector oldshape(tmps.begin(), tmps.end()); tmps.clear(); - vector oldlabel = Tin.labels(); + std::vector oldlabel = Tin.labels(); // collapse as Matrix: cytnx_int64 rowdim = 1; for (cytnx_uint64 i = 0; i < Tin.rowrank(); i++) rowdim *= tmp.shape()[i]; tmp.reshape_({rowdim, -1}); - vector outT = cytnx::linalg::Svd(tmp, compute_uv); + std::vector outT = cytnx::linalg::Svd(tmp, compute_uv); if (Tin.is_contiguous()) tmp.reshape_(oldshape); int t = 0; @@ -132,35 +123,35 @@ namespace cytnx { Cy_S.Init({newBond, newBond}, {std::string("_aux_L"), std::string("_aux_R")}, 1, Type.Double, Tin.device(), true); // it is just reference so no hurt to alias ^^ - // cout << "[AFTER INIT]" << endl; + // std::cout << "[AFTER INIT]" << std::endl; Cy_S.put_block_(outT[t]); t++; if (compute_uv) { cytnx::UniTensor &Cy_U = outCyT[t]; cytnx_error_msg(Tin.rowrank() > oldshape.size(), - "[ERROR] The rowrank of the input unitensor is larger than the rank of the " + "[ERROR] The rowrank of the input UniTensor is larger than the rank of the " "contained tensor.%s", "\n"); - vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); + std::vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); shapeU.push_back(-1); outT[t].reshape_(shapeU); Cy_U.Init(outT[t], false, Tin.rowrank()); - vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); + std::vector labelU(oldlabel.begin(), oldlabel.begin() + Tin.rowrank()); labelU.push_back(Cy_S.labels()[0]); Cy_U.set_labels(labelU); t++; // U } if (compute_uv) { cytnx::UniTensor &Cy_vT = outCyT[t]; - vector shapevT(Tin.rank() - Tin.rowrank() + 1); + std::vector shapevT(Tin.rank() - Tin.rowrank() + 1); shapevT[0] 
= -1; memcpy(&shapevT[1], &oldshape[Tin.rowrank()], sizeof(cytnx_int64) * (shapevT.size() - 1)); outT[t].reshape_(shapevT); Cy_vT.Init(outT[t], false, 1); - // cout << shapevT.size() << endl; - vector labelvT(shapevT.size()); + // std::cout << shapevT.size() << std::endl; + std::vector labelvT(shapevT.size()); labelvT[0] = Cy_S.labels()[1]; // memcpy(&labelvT[1], &oldlabel[Tin.rowrank()], sizeof(cytnx_int64) * (labelvT.size() - // 1)); @@ -194,10 +185,10 @@ namespace cytnx { } } // if tag - } + } // Svd_Dense_UT_internal - void _svd_Block_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const bool &compute_uv) { + static void Svd_Block_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, const bool &compute_uv) { // outCyT must be empty and Tin must be checked with proper rowrank! // 1) getting the combineBond L and combineBond R for qnum list without grouping: @@ -268,7 +259,7 @@ namespace cytnx { int tr; for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; // std::cout << new_itoi[x.second[i]] << std::endl; @@ -359,13 +350,13 @@ namespace cytnx { tr++; } // is_vT - } + } // for each qcharge // process S: Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); BlockUniTensor *S_ptr = new BlockUniTensor(); S_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! 
S_ptr->_blocks = S_blocks; @@ -411,10 +402,11 @@ namespace cytnx { outCyT.push_back(vT); } - } // _svd_Block_UT + } // Svd_Block_UT_internal - void _svd_BlockFermionic_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const bool &compute_uv) { + static void Svd_BlockFermionic_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, + const bool &compute_uv) { //[8 Oct 2024] This is a copy from BlockUniTensor; signflips included // outCyT must be empty and Tin must be checked with proper rowrank! @@ -487,7 +479,7 @@ namespace cytnx { int tr; for (auto const &x : mgrp) { vec2d itoi_indicators(x.second.size()); - // cout << x.second.size() << "-------" << endl; + // std::cout << x.second.size() << "-------" << std::endl; for (int i = 0; i < x.second.size(); i++) { itoi_indicators[i] = new_itoi[x.second[i]]; // std::cout << new_itoi[x.second[i]] << std::endl; @@ -585,13 +577,13 @@ namespace cytnx { tr++; } // is_vT - } + } // for each qcharge // process S: Bond Bd_aux = Bond(BD_IN, aux_qnums, aux_degs, Tin.syms()); BlockFermionicUniTensor *S_ptr = new BlockFermionicUniTensor(); S_ptr->Init({Bd_aux, Bd_aux.redirect()}, {"_aux_L", "_aux_R"}, 1, Type.Double, - Device.cpu, // this two will be overwrite later, so doesnt matter. + Device.cpu, // these two will be overwritten later, so doesnt matter. true, // is_diag! true); // no_alloc! S_ptr->_blocks = S_blocks; @@ -638,8 +630,7 @@ namespace cytnx { vT._impl = boost::intrusive_ptr(vT_ptr); outCyT.push_back(vT); } - - } // _svd_BlockFermionic_UT + } // Svd_BlockFermionic_UT_internal std::vector Svd(const cytnx::UniTensor &Tin, const bool &is_UvT) { // using rowrank to split the bond to form a matrix. 
@@ -653,19 +644,19 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _svd_Dense_UT(outCyT, Tin, is_UvT); + Svd_Dense_UT_internal(outCyT, Tin, is_UvT); } else if (Tin.uten_type() == UTenType.Block) { - _svd_Block_UT(outCyT, Tin, is_UvT); + Svd_Block_UT_internal(outCyT, Tin, is_UvT); } else if (Tin.uten_type() == UTenType.BlockFermionic) { - _svd_BlockFermionic_UT(outCyT, Tin, is_UvT); + Svd_BlockFermionic_UT_internal(outCyT, Tin, is_UvT); } else { cytnx_error_msg(true, "[ERROR] Svd only supports Dense/Block/BlockFermionic UniTensors.%s", "\n"); - } // is block form ? + } // ut type return outCyT; diff --git a/src/linalg/Svd_truncate.cpp b/src/linalg/Svd_truncate.cpp index 19147ae2c..04567999b 100644 --- a/src/linalg/Svd_truncate.cpp +++ b/src/linalg/Svd_truncate.cpp @@ -1,10 +1,13 @@ +#include "linalg.hpp" + +#include +#include #include -#include "Accessor.hpp" #include "Tensor.hpp" #include "UniTensor.hpp" #include "algo.hpp" -#include "linalg.hpp" +#include "Accessor.hpp" #ifdef BACKEND_TORCH #else @@ -68,9 +71,11 @@ namespace cytnx { } } - void _svd_truncate_Dense_UT(std::vector &outCyT, const cytnx::UniTensor &Tin, - const cytnx_uint64 &keepdim, const double &err, const bool &is_UvT, - const unsigned int &return_err, const cytnx_uint64 &mindim) { + static void Svd_truncate_Dense_UT_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, + const cytnx_uint64 &keepdim, const double &err, + const bool &is_UvT, const unsigned int &return_err, + const cytnx_uint64 &mindim) { // DenseUniTensor: cytnx_uint64 keep_dim = keepdim; @@ -112,7 +117,7 @@ namespace cytnx { cytnx::UniTensor &Cy_U = outCyT[t]; // shape cytnx_error_msg(Tin.rowrank() > oldshape.size(), - "[ERROR] The rowrank of the input unitensor is larger than the rank of the " + "[ERROR] The rowrank of the input UniTensor is larger than the rank of the " "contained tensor.%s", "\n"); std::vector shapeU(oldshape.begin(), oldshape.begin() + Tin.rowrank()); @@ -173,11 +178,13 
@@ namespace cytnx { } // if tag if (return_err) outCyT.back().Init(outT.back(), false, 0); - }; // svdt Dense + } // Svd_truncate_Dense_UT_internal - void _svd_truncate_Block_UTs(std::vector &outCyT, const cytnx::UniTensor &Tin, - const cytnx_uint64 &keepdim, const double &err, const bool &is_UvT, - const int &return_err, const cytnx_uint64 &mindim) { + static void Svd_truncate_Block_UTs_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, + const cytnx_uint64 &keepdim, const double &err, + const bool &is_UvT, const int &return_err, + const cytnx_uint64 &mindim) { // currently, Gesvd is used as a standard for the full SVD before truncation // handles BlockFermionicUniTensor as well: elements of _signflip are removed if blocks are // erased @@ -222,7 +229,7 @@ namespace cytnx { new_qid.reserve(S.Nblocks()); std::vector> new_itoi; // assume S block is in same order as qnum: - std::vector to_be_remove; + std::vector to_be_removed; cytnx_uint64 tot_dim = 0; cytnx_uint64 cnt = 0; @@ -237,7 +244,7 @@ namespace cytnx { } keep_dims.push_back(kdim); if (kdim == 0) { - to_be_remove.push_back(b); + to_be_removed.push_back(b); new_qid.push_back(-1); } else { @@ -251,13 +258,13 @@ namespace cytnx { } // remove: - // vec_erase_(S.get_itoi(),to_be_remove); + // vec_erase_(S.get_itoi(),to_be_removed); S.get_itoi() = new_itoi; - if (!to_be_remove.empty()) { - vec_erase_(S.get_blocks_(), to_be_remove); - vec_erase_(S.bonds()[0].qnums(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(S.get_blocks_(), to_be_removed); + vec_erase_(S.bonds()[0].qnums(), to_be_removed); if (Tin.uten_type() == UTenType.BlockFermionic) { - vec_erase_(S.signflip_(), to_be_remove); + vec_erase_(S.signflip_(), to_be_removed); } } S.bonds()[0]._impl->_degs = new_dims; @@ -268,14 +275,14 @@ namespace cytnx { if (is_UvT) { // depends on S.bonds()[1], keep_dims, new_qid UniTensor &U = outCyT[t]; - to_be_remove.clear(); + to_be_removed.clear(); U.bonds().back() = S.bonds()[1].clone(); 
std::vector acs(U.rank()); for (int i = 0; i < U.rowrank(); i++) acs[i] = Accessor::all(); for (int b = 0; b < U.Nblocks(); b++) { if (keep_dims[U.get_qindices(b).back()] == 0) - to_be_remove.push_back(b); + to_be_removed.push_back(b); else { /// process blocks: if (keep_dims[U.get_qindices(b).back()] != U.get_blocks_()[b].shape().back()) { @@ -287,11 +294,11 @@ namespace cytnx { U.get_qindices(b).back() = new_qid[U.get_qindices(b).back()]; } } - if (!to_be_remove.empty()) { - vec_erase_(U.get_itoi(), to_be_remove); - vec_erase_(U.get_blocks_(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(U.get_itoi(), to_be_removed); + vec_erase_(U.get_blocks_(), to_be_removed); if (Tin.uten_type() == UTenType.BlockFermionic) { - vec_erase_(U.signflip_(), to_be_remove); + vec_erase_(U.signflip_(), to_be_removed); } } @@ -300,14 +307,14 @@ namespace cytnx { if (is_UvT) { UniTensor &vT = outCyT[t]; - to_be_remove.clear(); + to_be_removed.clear(); vT.bonds().front() = S.bonds()[0].clone(); std::vector acs(vT.rank()); for (int i = 1; i < vT.rank(); i++) acs[i] = Accessor::all(); for (int b = 0; b < vT.Nblocks(); b++) { if (keep_dims[vT.get_qindices(b)[0]] == 0) - to_be_remove.push_back(b); + to_be_removed.push_back(b); else { /// process blocks: if (keep_dims[vT.get_qindices(b)[0]] != vT.get_blocks_()[b].shape()[0]) { @@ -318,11 +325,11 @@ namespace cytnx { vT.get_qindices(b)[0] = new_qid[vT.get_qindices(b)[0]]; } } - if (!to_be_remove.empty()) { - vec_erase_(vT.get_itoi(), to_be_remove); - vec_erase_(vT.get_blocks_(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(vT.get_itoi(), to_be_removed); + vec_erase_(vT.get_blocks_(), to_be_removed); if (Tin.uten_type() == UTenType.BlockFermionic) { - vec_erase_(vT.signflip_(), to_be_remove); + vec_erase_(vT.signflip_(), to_be_removed); } } @@ -336,7 +343,7 @@ namespace cytnx { } else if (return_err) { outCyT.push_back(UniTensor(Sall.get({Accessor::tilend(smidx)}))); } - } // _svd_truncate_Block_UTs + } // 
Svd_truncate_Block_UTs_internal std::vector Svd_truncate(const cytnx::UniTensor &Tin, const cytnx_uint64 &keepdim, const double &err, @@ -361,11 +368,11 @@ namespace cytnx { std::vector outCyT; if (Tin.uten_type() == UTenType.Dense) { - _svd_truncate_Dense_UT(outCyT, Tin, keepdim, err, is_UvT, return_err, mindim); + Svd_truncate_Dense_UT_internal(outCyT, Tin, keepdim, err, is_UvT, return_err, mindim); } else if ((Tin.uten_type() == UTenType.Block) || (Tin.uten_type() == UTenType.BlockFermionic)) { - _svd_truncate_Block_UTs(outCyT, Tin, keepdim, err, is_UvT, return_err, mindim); + Svd_truncate_Block_UTs_internal(outCyT, Tin, keepdim, err, is_UvT, return_err, mindim); } else { cytnx_error_msg( true, "[ERROR] Svd_truncate only supports Dense/Block/BlockFermionic UniTensors.%s", @@ -375,11 +382,12 @@ namespace cytnx { } // Svd_truncate - void _svd_truncate_Block_UTs(std::vector &outCyT, const cytnx::UniTensor &Tin, - const cytnx_uint64 &keepdim, - std::vector min_blockdim, const double &err, - const bool &is_UvT, const int &return_err, - const cytnx_uint64 &mindim) { + static void Svd_truncate_Block_UTs_internal(std::vector &outCyT, + const cytnx::UniTensor &Tin, + const cytnx_uint64 &keepdim, + std::vector min_blockdim, + const double &err, const bool &is_UvT, + const int &return_err, const cytnx_uint64 &mindim) { // currently, Gesvd is used as a standard for the full SVD before truncation // handles BlockFermionicUniTensor as well: elements of _signflip are removed if blocks are // erased @@ -485,7 +493,7 @@ namespace cytnx { std::vector> new_itoi; // assume S block is in same order as qnum: - std::vector to_be_remove; + std::vector to_be_removed; cytnx_uint64 tot_dim = 0; cytnx_uint64 cnt = 0; @@ -506,7 +514,7 @@ namespace cytnx { } keep_dims.push_back(kdim); if (kdim == 0) { - to_be_remove.push_back(b); + to_be_removed.push_back(b); new_qid.push_back(-1); } else { new_qid.push_back(new_dims.size()); @@ -519,13 +527,13 @@ namespace cytnx { } // remove: - // 
vec_erase_(S.get_itoi(),to_be_remove); + // vec_erase_(S.get_itoi(),to_be_removed); S.get_itoi() = new_itoi; - if (!to_be_remove.empty()) { - vec_erase_(S.get_blocks_(), to_be_remove); - vec_erase_(S.bonds()[0].qnums(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(S.get_blocks_(), to_be_removed); + vec_erase_(S.bonds()[0].qnums(), to_be_removed); if (Tin.uten_type() == UTenType.BlockFermionic) { - vec_erase_(S.signflip_(), to_be_remove); + vec_erase_(S.signflip_(), to_be_removed); } } S.bonds()[0]._impl->_degs = new_dims; @@ -535,14 +543,14 @@ namespace cytnx { int t = 1; if (is_UvT) { UniTensor &U = outCyT[t]; - to_be_remove.clear(); + to_be_removed.clear(); U.bonds().back() = S.bonds()[1].clone(); std::vector acs(U.rank()); for (int i = 0; i < U.rowrank(); i++) acs[i] = Accessor::all(); for (int b = 0; b < U.Nblocks(); b++) { if (keep_dims[U.get_qindices(b).back()] == 0) - to_be_remove.push_back(b); + to_be_removed.push_back(b); else { /// process blocks: if (keep_dims[U.get_qindices(b).back()] != U.get_blocks_()[b].shape().back()) { @@ -554,11 +562,11 @@ namespace cytnx { U.get_qindices(b).back() = new_qid[U.get_qindices(b).back()]; } } - if (!to_be_remove.empty()) { - vec_erase_(U.get_itoi(), to_be_remove); - vec_erase_(U.get_blocks_(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(U.get_itoi(), to_be_removed); + vec_erase_(U.get_blocks_(), to_be_removed); if (Tin.uten_type() == UTenType.BlockFermionic) { - vec_erase_(U.signflip_(), to_be_remove); + vec_erase_(U.signflip_(), to_be_removed); } } @@ -567,14 +575,14 @@ namespace cytnx { if (is_UvT) { UniTensor &vT = outCyT[t]; - to_be_remove.clear(); + to_be_removed.clear(); vT.bonds().front() = S.bonds()[0].clone(); std::vector acs(vT.rank()); for (int i = 1; i < vT.rank(); i++) acs[i] = Accessor::all(); for (int b = 0; b < vT.Nblocks(); b++) { if (keep_dims[vT.get_qindices(b)[0]] == 0) - to_be_remove.push_back(b); + to_be_removed.push_back(b); else { /// process blocks: if 
(keep_dims[vT.get_qindices(b)[0]] != vT.get_blocks_()[b].shape()[0]) { @@ -585,18 +593,18 @@ namespace cytnx { vT.get_qindices(b)[0] = new_qid[vT.get_qindices(b)[0]]; } } - if (!to_be_remove.empty()) { - vec_erase_(vT.get_itoi(), to_be_remove); - vec_erase_(vT.get_blocks_(), to_be_remove); + if (!to_be_removed.empty()) { + vec_erase_(vT.get_itoi(), to_be_removed); + vec_erase_(vT.get_blocks_(), to_be_removed); if (Tin.uten_type() == UTenType.BlockFermionic) { - vec_erase_(vT.signflip_(), to_be_remove); + vec_erase_(vT.signflip_(), to_be_removed); } } t++; } } - } // _svd_truncate_Block_UTs + } // Svd_truncate_Block_UTs_internal std::vector Svd_truncate(const cytnx::UniTensor &Tin, const cytnx_uint64 &keepdim, @@ -626,13 +634,13 @@ namespace cytnx { cytnx_error_msg( min_blockdim.size() != 1, "[ERROR][Svd_truncate] min_blockdim must have one element for dense UniTensor%s", "\n"); - _svd_truncate_Dense_UT(outCyT, Tin, keepdim, err, is_UvT, return_err, - std::max(mindim, min_blockdim[0])); + Svd_truncate_Dense_UT_internal(outCyT, Tin, keepdim, err, is_UvT, return_err, + std::max(mindim, min_blockdim[0])); } else if ((Tin.uten_type() == UTenType.Block) || (Tin.uten_type() == UTenType.BlockFermionic)) { - _svd_truncate_Block_UTs(outCyT, Tin, keepdim, min_blockdim, err, is_UvT, return_err, - mindim); + Svd_truncate_Block_UTs_internal(outCyT, Tin, keepdim, min_blockdim, err, is_UvT, return_err, + mindim); } else { cytnx_error_msg( true, "[ERROR] Svd_truncate only supports Dense/Block/BlockFermionic UniTensors.%s",