diff --git a/src/AFQMC/Estimators/BackPropagatedEstimator.hpp b/src/AFQMC/Estimators/BackPropagatedEstimator.hpp index c55f208c2c..96e4df05b5 100644 --- a/src/AFQMC/Estimators/BackPropagatedEstimator.hpp +++ b/src/AFQMC/Estimators/BackPropagatedEstimator.hpp @@ -187,15 +187,16 @@ class BackPropagatedEstimator : public EstimatorBase int ncol(NAEA + ((walker_type == CLOSED) ? 0 : NAEB)); int nx((walker_type == COLLINEAR) ? 2 : 1); + using std::get; // 1. check structures - if (std::get<0>(Refs.sizes()) != wset.size() || std::get<1>(Refs.sizes()) != nrefs || std::get<2>(Refs.sizes()) != nrow * ncol) + if (get<0>(Refs.sizes()) != wset.size() || get<1>(Refs.sizes()) != nrefs || get<2>(Refs.sizes()) != nrow * ncol) Refs = mpi3CTensor({wset.size(), nrefs, nrow * ncol}, Refs.get_allocator()); DeviceBufferManager buffer_manager; StaticMatrix detR({wset.size(), nrefs * nx}, buffer_manager.get_generator().template get_allocator()); int n0, n1; - std::tie(n0, n1) = FairDivideBoundary(TG.getLocalTGRank(), int(std::get<2>(Refs.sizes())), TG.getNCoresPerTG()); + std::tie(n0, n1) = FairDivideBoundary(TG.getLocalTGRank(), int(get<2>(Refs.sizes())), TG.getNCoresPerTG()); boost::multi::array_ref Refs_(to_address(Refs.origin()), Refs.extensions()); // 2. 
setup back propagated references diff --git a/src/AFQMC/Estimators/EnergyEstimator.h b/src/AFQMC/Estimators/EnergyEstimator.h index 24ba850d05..bef526fc0b 100644 --- a/src/AFQMC/Estimators/EnergyEstimator.h +++ b/src/AFQMC/Estimators/EnergyEstimator.h @@ -59,11 +59,13 @@ class EnergyEstimator : public EstimatorBase { ScopedTimer local_timer(AFQMCTimers[energy_timer]); size_t nwalk = wset.size(); - if (std::get<0>(eloc.sizes()) != nwalk || std::get<1>(eloc.sizes()) != 3) + + using std::get; + if (get<0>(eloc.sizes()) != nwalk || get<1>(eloc.sizes()) != 3) eloc.reextent({static_cast(nwalk), 3}); - if (std::get<0>(ovlp.sizes()) != nwalk) + if (get<0>(ovlp.sizes()) != nwalk) ovlp.reextent(iextensions<1u>(nwalk)); - if (std::get<0>(wprop.sizes()) != 4 || std::get<1>(wprop.sizes()) != nwalk) + if (get<0>(wprop.sizes()) != 4 || get<1>(wprop.sizes()) != nwalk) wprop.reextent({4, static_cast(nwalk)}); ComplexType dum, et; diff --git a/src/AFQMC/Estimators/FullObsHandler.hpp b/src/AFQMC/Estimators/FullObsHandler.hpp index b09d80d005..b47db8b32d 100644 --- a/src/AFQMC/Estimators/FullObsHandler.hpp +++ b/src/AFQMC/Estimators/FullObsHandler.hpp @@ -193,10 +193,12 @@ class FullObsHandler : public AFQMCInfo APP_ABORT("Runtime Error: iav out of range in full1rdm::accumulate. 
\n\n\n"); int nw(wset.size()); - int nrefs(std::get<1>(Refs.sizes())); + + using std::get; + int nrefs(get<1>(Refs.sizes())); double LogOverlapFactor(wset.getLogOverlapFactor()); LocalTGBufferManager shm_buffer_manager; - StaticSHM4Tensor G4D({nw, nspins, std::get<0>(Gdims), std::get<1>(Gdims)}, + StaticSHM4Tensor G4D({nw, nspins, get<0>(Gdims), get<1>(Gdims)}, shm_buffer_manager.get_generator().template get_allocator()); StaticSHMVector DevOv(iextensions<1u>{2 * nw}, shm_buffer_manager.get_generator().template get_allocator()); diff --git a/src/AFQMC/Estimators/MixedRDMEstimator.h b/src/AFQMC/Estimators/MixedRDMEstimator.h index 5bd42764d9..1074735120 100644 --- a/src/AFQMC/Estimators/MixedRDMEstimator.h +++ b/src/AFQMC/Estimators/MixedRDMEstimator.h @@ -92,9 +92,11 @@ class MixedRDMEstimator : public EstimatorBase wset.getProperty(WEIGHT, wgt); int nx((wset.getWalkerType() == COLLINEAR) ? 2 : 1); - if (std::get<0>(wDMsum.sizes()) != wset.size() || std::get<1>(wDMsum.sizes()) != nx) + + using std::get; + if (get<0>(wDMsum.sizes()) != wset.size() || get<1>(wDMsum.sizes()) != nx) wDMsum.reextent({wset.size(), nx}); - if (std::get<0>(wOvlp.sizes()) != wset.size() || std::get<1>(wOvlp.sizes()) != nx) + if (get<0>(wOvlp.sizes()) != wset.size() || get<1>(wOvlp.sizes()) != nx) wOvlp.reextent({wset.size(), nx}); if (!importanceSampling) @@ -126,8 +128,10 @@ class MixedRDMEstimator : public EstimatorBase denom_average[0] /= block_size; dump.push("Mixed"); std::string padded_iblock = std::string(n_zero - std::to_string(iblock).length(), '0') + std::to_string(iblock); - boost::multi::array_ref wOvlp_(wOvlp.origin(), {std::get<0>(wOvlp.sizes()) * std::get<1>(wOvlp.sizes())}); - boost::multi::array_ref wDMsum_(wDMsum.origin(), {std::get<0>(wDMsum.sizes()) * std::get<1>(wDMsum.sizes())}); + + using std::get; + boost::multi::array_ref wOvlp_(wOvlp.origin(), {get<0>(wOvlp.sizes()) * get<1>(wOvlp.sizes())}); + boost::multi::array_ref wDMsum_(wDMsum.origin(), 
{get<0>(wDMsum.sizes()) * get<1>(wDMsum.sizes())}); dump.write(DMAverage, "one_rdm_" + padded_iblock); dump.write(denom_average, "one_rdm_denom_" + padded_iblock); dump.write(wOvlp_, "one_rdm_walker_overlaps_" + padded_iblock); diff --git a/src/AFQMC/Estimators/Observables/atomcentered_correlators.hpp b/src/AFQMC/Estimators/Observables/atomcentered_correlators.hpp index 8165f77f35..28986513c0 100644 --- a/src/AFQMC/Estimators/Observables/atomcentered_correlators.hpp +++ b/src/AFQMC/Estimators/Observables/atomcentered_correlators.hpp @@ -253,6 +253,7 @@ class atomcentered_correlators : public AFQMCInfo else nsp = 2; + using std::get; // check structure dimensions if (iref == 0) { @@ -260,19 +261,19 @@ class atomcentered_correlators : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork1D.sizes()) != nw || std::get<1>(DMWork1D.sizes()) != 3 || std::get<2>(DMWork1D.sizes()) != nsites) + if (get<0>(DMWork1D.sizes()) != nw || get<1>(DMWork1D.sizes()) != 3 || get<2>(DMWork1D.sizes()) != nsites) { DMWork1D = mpi3CTensor({nw, 3, nsites}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork2D.sizes()) != nw || std::get<1>(DMWork2D.sizes()) != 3 || std::get<2>(DMWork2D.sizes()) != ns2) + if (get<0>(DMWork2D.sizes()) != nw || get<1>(DMWork2D.sizes()) != 3 || get<2>(DMWork2D.sizes()) != ns2) { DMWork2D = mpi3CTensor({nw, 3, ns2}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(NwIJ.sizes()) != nsp || std::get<1>(NwIJ.sizes()) != nw || std::get<2>(NwIJ.sizes()) != nsites || std::get<3>(NwIJ.sizes()) != nsites) + if (get<0>(NwIJ.sizes()) != nsp || get<1>(NwIJ.sizes()) != nw || get<2>(NwIJ.sizes()) != nsites || get<3>(NwIJ.sizes()) != nsites) { NwIJ = mpi3C4Tensor({nsp, nw, nsites, nsites}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(NwI.sizes()) != nsp || std::get<1>(NwI.sizes()) != nw || std::get<2>(NwI.sizes()) != nsites) + if (get<0>(NwI.sizes()) != nsp || get<1>(NwI.sizes()) != nw || 
get<2>(NwI.sizes()) != nsites) { NwI = mpi3CTensor({nsp, nw, nsites}, shared_allocator{TG.TG_local()}); } @@ -284,12 +285,12 @@ class atomcentered_correlators : public AFQMCInfo } else { - if (std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork1D.sizes()) != nw || std::get<1>(DMWork1D.sizes()) != 3 || std::get<2>(DMWork1D.sizes()) != nsites || - std::get<0>(DMWork2D.sizes()) != nw || std::get<1>(DMWork2D.sizes()) != 3 || std::get<2>(DMWork2D.sizes()) != ns2 || std::get<0>(NwI.sizes()) != nsp || - std::get<1>(NwI.sizes()) != nw || std::get<2>(NwI.sizes()) != nsites || std::get<0>(NwIJ.sizes()) != nsp || std::get<1>(NwIJ.sizes()) != nw || - std::get<2>(NwIJ.sizes()) != nsites || std::get<3>(NwIJ.sizes()) != nsites || std::get<0>(DMAverage1D.sizes()) != nave || std::get<1>(DMAverage1D.sizes()) != 3 || - std::get<2>(DMAverage1D.sizes()) != nsites || std::get<0>(DMAverage2D.sizes()) != nave || std::get<1>(DMAverage2D.sizes()) != 3 || - std::get<2>(DMAverage2D.sizes()) != ns2) + if (get<0>(denom.sizes()) != nw || get<0>(DMWork1D.sizes()) != nw || get<1>(DMWork1D.sizes()) != 3 || get<2>(DMWork1D.sizes()) != nsites || + get<0>(DMWork2D.sizes()) != nw || get<1>(DMWork2D.sizes()) != 3 || get<2>(DMWork2D.sizes()) != ns2 || get<0>(NwI.sizes()) != nsp || + get<1>(NwI.sizes()) != nw || get<2>(NwI.sizes()) != nsites || get<0>(NwIJ.sizes()) != nsp || get<1>(NwIJ.sizes()) != nw || + get<2>(NwIJ.sizes()) != nsites || get<3>(NwIJ.sizes()) != nsites || get<0>(DMAverage1D.sizes()) != nave || get<1>(DMAverage1D.sizes()) != 3 || + get<2>(DMAverage1D.sizes()) != nsites || get<0>(DMAverage2D.sizes()) != nave || get<1>(DMAverage2D.sizes()) != 3 || + get<2>(DMAverage2D.sizes()) != ns2) APP_ABORT(" Error: Invalid state in accumulate_reference. 
\n\n\n"); } diff --git a/src/AFQMC/Estimators/Observables/diagonal2rdm.hpp b/src/AFQMC/Estimators/Observables/diagonal2rdm.hpp index ab75a4fcda..c7d5002dc7 100644 --- a/src/AFQMC/Estimators/Observables/diagonal2rdm.hpp +++ b/src/AFQMC/Estimators/Observables/diagonal2rdm.hpp @@ -133,6 +133,7 @@ class diagonal2rdm : public AFQMCInfo assert(G.num_elements() == G_host.num_elements()); assert(G.extensions() == G_host.extensions()); + using std::get; // check structure dimensions if (iref == 0) { @@ -140,7 +141,7 @@ class diagonal2rdm : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size) + if (get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size) { DMWork = mpi3CMatrix({nw, dm_size}, shared_allocator{TG.TG_local()}); } @@ -149,8 +150,8 @@ class diagonal2rdm : public AFQMCInfo } else { - if (std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size || std::get<0>(DMAverage.sizes()) != nave || - std::get<1>(DMAverage.sizes()) != dm_size) + if (get<0>(denom.sizes()) != nw || get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size || get<0>(DMAverage.sizes()) != nave || + get<1>(DMAverage.sizes()) != dm_size) APP_ABORT(" Error: Invalid state in accumulate_reference. 
\n\n\n"); } diff --git a/src/AFQMC/Estimators/Observables/full1rdm.hpp b/src/AFQMC/Estimators/Observables/full1rdm.hpp index cf93020cd7..6b5b4aa2d9 100644 --- a/src/AFQMC/Estimators/Observables/full1rdm.hpp +++ b/src/AFQMC/Estimators/Observables/full1rdm.hpp @@ -131,7 +131,8 @@ class full1rdm : public AFQMCInfo stdCMatrix R; if (!dump.readEntry(R, "RotationMatrix")) APP_ABORT("Error reading RotationMatrix.\n"); - if (std::get<1>(R.sizes()) != NMO) + using std::get; + if (get<1>(R.sizes()) != NMO) APP_ABORT("Error Wrong dimensions in RotationMatrix.\n"); dim[0] = R.size(); dim[1] = 0; @@ -143,9 +144,10 @@ class full1rdm : public AFQMCInfo { if (!dump.readEntry(I, "Indices")) APP_ABORT("Error reading Indices.\n"); - if (std::get<1>(I.sizes()) != 2) + using std::get; + if (get<1>(I.sizes()) != 2) APP_ABORT("Error Wrong dimensions in Indices.\n"); - dim[1] = std::get<0>(I.sizes()); + dim[1] = get<0>(I.sizes()); } TG.Node().broadcast_n(dim, 2, 0); XRot = sharedCMatrix({dim[0], NMO}, make_node_allocator(TG)); @@ -244,6 +246,7 @@ class full1rdm : public AFQMCInfo assert(G.num_elements() == G_host.num_elements()); assert(G.extensions() == G_host.extensions()); + using std::get; // check structure dimensions if (iref == 0) { @@ -251,7 +254,7 @@ class full1rdm : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size) + if (get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size) { DMWork = mpi3CMatrix({nw, dm_size}, shared_allocator{TG.TG_local()}); } @@ -260,8 +263,8 @@ class full1rdm : public AFQMCInfo } else { - if (std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size || std::get<0>(DMAverage.sizes()) != nave || - std::get<1>(DMAverage.sizes()) != dm_size) + if (get<0>(denom.sizes()) != nw || get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size || 
get<0>(DMAverage.sizes()) != nave || + get<1>(DMAverage.sizes()) != dm_size) APP_ABORT(" Error: Invalid state in accumulate_reference. \n\n\n"); } @@ -427,9 +430,11 @@ class full1rdm : public AFQMCInfo template void acc_with_rotation(MatG&& G, CVec&& Xw) { + using std::get; + int nw(G.size()); - assert(std::get<2>(G.sizes()) == std::get<3>(G.sizes())); - assert(std::get<2>(G.sizes()) == std::get<1>(XRot.sizes())); + assert(get<2>(G.sizes()) == get<3>(G.sizes())); + assert(get<2>(G.sizes()) == get<1>(XRot.sizes())); if (walker_type == NONCOLLINEAR) APP_ABORT("Error: Not yet implemented: acc_with_rotation && noncollinear.\n"); diff --git a/src/AFQMC/Estimators/Observables/full2rdm.hpp b/src/AFQMC/Estimators/Observables/full2rdm.hpp index 1886966a9e..3836b21927 100644 --- a/src/AFQMC/Estimators/Observables/full2rdm.hpp +++ b/src/AFQMC/Estimators/Observables/full2rdm.hpp @@ -114,6 +114,8 @@ class full2rdm : public AFQMCInfo apply_rotation = true; int dim[2]; + using std::get; + hdf_archive dump; if (TG.Node().root()) { @@ -124,7 +126,7 @@ class full2rdm : public AFQMCInfo stdCMatrix R; if (!dump.readEntry(R, "RotationMatrix")) APP_ABORT("Error reading RotationMatrix.\n"); - if (std::get<1>(R.sizes()) != NMO) + if (get<1>(R.sizes()) != NMO) APP_ABORT("Error Wrong dimensions in RotationMatrix.\n"); dim[0] = R.size(); dim[1] = 0; @@ -196,6 +198,7 @@ class full2rdm : public AFQMCInfo assert(G.num_elements() == G_host.num_elements()); assert(G.extensions() == G_host.extensions()); + using std::get; // check structure dimensions if (iref == 0) { @@ -203,7 +206,7 @@ class full2rdm : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size) + if (get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size) { DMWork = mpi3CMatrix({nw, dm_size}, shared_allocator{TG.TG_local()}); } @@ -212,8 +215,8 @@ class full2rdm : public AFQMCInfo } else { - if 
(std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size || std::get<0>(DMAverage.sizes()) != nave || - std::get<1>(DMAverage.sizes()) != dm_size) + if (get<0>(denom.sizes()) != nw || get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size || get<0>(DMAverage.sizes()) != nave || + get<1>(DMAverage.sizes()) != dm_size) APP_ABORT(" Error: Invalid state in accumulate_reference. \n\n\n"); } diff --git a/src/AFQMC/Estimators/Observables/generalizedFockMatrix.hpp b/src/AFQMC/Estimators/Observables/generalizedFockMatrix.hpp index 26614fc29c..98df7f20d3 100644 --- a/src/AFQMC/Estimators/Observables/generalizedFockMatrix.hpp +++ b/src/AFQMC/Estimators/Observables/generalizedFockMatrix.hpp @@ -133,6 +133,7 @@ class generalizedFockMatrix : public AFQMCInfo assert(G.extensions() == G_host.extensions()); assert(G[0].num_elements() == dm_size); + using std::get; // check structure dimensions if (iref == 0) { @@ -140,7 +141,7 @@ class generalizedFockMatrix : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork.sizes()) != 3 || std::get<1>(DMWork.sizes()) != nw || std::get<2>(DMWork.sizes()) != dm_size) + if (get<0>(DMWork.sizes()) != 3 || get<1>(DMWork.sizes()) != nw || get<2>(DMWork.sizes()) != dm_size) { DMWork = mpi3CTensor({3, nw, dm_size}, shared_allocator{TG.TG_local()}); } @@ -149,8 +150,8 @@ class generalizedFockMatrix : public AFQMCInfo } else { - if (std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork.sizes()) != 2 || std::get<1>(DMWork.sizes()) != nw || std::get<2>(DMWork.sizes()) != dm_size || - std::get<0>(DMAverage.sizes()) != 2 || std::get<1>(DMAverage.sizes()) != nave || std::get<2>(DMAverage.sizes()) != dm_size) + if (get<0>(denom.sizes()) != nw || get<0>(DMWork.sizes()) != 2 || get<1>(DMWork.sizes()) != nw || get<2>(DMWork.sizes()) != dm_size || + get<0>(DMAverage.sizes()) != 2 || get<1>(DMAverage.sizes()) != nave || 
get<2>(DMAverage.sizes()) != dm_size) APP_ABORT(" Error: Invalid state in accumulate_reference. \n\n\n"); } diff --git a/src/AFQMC/Estimators/Observables/n2r.hpp b/src/AFQMC/Estimators/Observables/n2r.hpp index 60490231e9..8ee083a44e 100644 --- a/src/AFQMC/Estimators/Observables/n2r.hpp +++ b/src/AFQMC/Estimators/Observables/n2r.hpp @@ -224,6 +224,7 @@ class n2r : public AFQMCInfo else nsp = 2; + using std::get; // check structure dimensions if (iref == 0) { @@ -231,7 +232,7 @@ class n2r : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size) + if (get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size) { DMWork = mpi3CMatrix({nw, dm_size}, shared_allocator{TG.TG_local()}); } @@ -240,8 +241,8 @@ class n2r : public AFQMCInfo } else { - if (std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != dm_size || std::get<0>(DMAverage.sizes()) != nave || - std::get<1>(DMAverage.sizes()) != dm_size) + if (get<0>(denom.sizes()) != nw || get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != dm_size || get<0>(DMAverage.sizes()) != nave || + get<1>(DMAverage.sizes()) != dm_size) APP_ABORT(" Error: Invalid state in accumulate_reference. 
\n\n\n"); } diff --git a/src/AFQMC/Estimators/Observables/realspace_correlators.hpp b/src/AFQMC/Estimators/Observables/realspace_correlators.hpp index 597fb0e381..011581317c 100644 --- a/src/AFQMC/Estimators/Observables/realspace_correlators.hpp +++ b/src/AFQMC/Estimators/Observables/realspace_correlators.hpp @@ -224,7 +224,9 @@ class realspace_correlators : public AFQMCInfo using std::fill_n; // assumes G[nwalk][spin][M][M] int nw(G.size()); - int npts(std::get<1>(Orbitals.sizes())); + + using std::get; + int npts(get<1>(Orbitals.sizes())); assert(G.size() == wgt.size()); assert(wgt.size() == nw); assert(Xw.size() == nw); @@ -245,11 +247,11 @@ class realspace_correlators : public AFQMCInfo { denom = mpi3CVector(iextensions<1u>{nw}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != 3 || std::get<2>(DMWork.sizes()) != dm_size) + if (get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != 3 || get<2>(DMWork.sizes()) != dm_size) { DMWork = mpi3CTensor({nw, 3, dm_size}, shared_allocator{TG.TG_local()}); } - if (std::get<0>(Gr_host.sizes()) != nw || std::get<1>(Gr_host.sizes()) != nsp || std::get<2>(Gr_host.sizes()) != npts || std::get<3>(Gr_host.sizes()) != npts) + if (get<0>(Gr_host.sizes()) != nw || get<1>(Gr_host.sizes()) != nsp || get<2>(Gr_host.sizes()) != npts || get<3>(Gr_host.sizes()) != npts) { Gr_host = mpi3C4Tensor({nw, nsp, npts, npts}, shared_allocator{TG.TG_local()}); } @@ -258,9 +260,9 @@ class realspace_correlators : public AFQMCInfo } else { - if (std::get<0>(denom.sizes()) != nw || std::get<0>(DMWork.sizes()) != nw || std::get<1>(DMWork.sizes()) != 3 || std::get<2>(DMWork.sizes()) != dm_size || - std::get<0>(Gr_host.sizes()) != nw || std::get<1>(Gr_host.sizes()) != nsp || std::get<2>(Gr_host.sizes()) != npts || std::get<3>(Gr_host.sizes()) != npts || - std::get<0>(DMAverage.sizes()) != nave || std::get<1>(DMAverage.sizes()) != 3 || std::get<2>(DMAverage.sizes()) != dm_size) + if 
(get<0>(denom.sizes()) != nw || get<0>(DMWork.sizes()) != nw || get<1>(DMWork.sizes()) != 3 || get<2>(DMWork.sizes()) != dm_size || + get<0>(Gr_host.sizes()) != nw || get<1>(Gr_host.sizes()) != nsp || get<2>(Gr_host.sizes()) != npts || get<3>(Gr_host.sizes()) != npts || + get<0>(DMAverage.sizes()) != nave || get<1>(DMAverage.sizes()) != 3 || get<2>(DMAverage.sizes()) != dm_size) APP_ABORT(" Error: Invalid state in accumulate_reference. \n\n\n"); } @@ -277,7 +279,7 @@ class realspace_correlators : public AFQMCInfo // T1[iw][ispin][i][r] = sum_j G[iw][ispin][i][j] * Psi(j,r) int i0, iN; - std::tie(i0, iN) = FairDivideBoundary(TG.TG_local().rank(), int(std::get<0>(G2D.sizes())), TG.TG_local().size()); + std::tie(i0, iN) = FairDivideBoundary(TG.TG_local().rank(), int(get<0>(G2D.sizes())), TG.TG_local().size()); ma::product(G2D.sliced(i0, iN), Orbitals, T.sliced(i0, iN)); TG.TG_local().barrier(); diff --git a/src/AFQMC/Estimators/tests/test_estimators.cpp b/src/AFQMC/Estimators/tests/test_estimators.cpp index 518096769e..3995e6a0a2 100644 --- a/src/AFQMC/Estimators/tests/test_estimators.cpp +++ b/src/AFQMC/Estimators/tests/test_estimators.cpp @@ -144,9 +144,11 @@ void reduced_density_matrix(boost::mpi3::communicator& world) WalkerSet wset(TG, doc3.getRoot(), InfoMap["info0"], rng); auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); wset.resize(nwalk, initial_guess[0], initial_guess[0]); using EstimPtr = std::shared_ptr; std::vector estimators; diff --git a/src/AFQMC/HamiltonianOperations/KP3IndexFactorization.hpp b/src/AFQMC/HamiltonianOperations/KP3IndexFactorization.hpp index f305229e22..9a23a78b49 100644 --- 
a/src/AFQMC/HamiltonianOperations/KP3IndexFactorization.hpp +++ b/src/AFQMC/HamiltonianOperations/KP3IndexFactorization.hpp @@ -317,11 +317,13 @@ class KP3IndexFactorization bool addEJ = true, bool addEXX = true) { + using std::get; // for C++17 compatibility + int nkpts = nopk.size(); - assert(std::get<1>(E.sizes()) >= 3); + assert(get<1>(E.sizes()) >= 3); assert(nd >= 0 && nd < nelpk.size()); - int nwalk = std::get<1>(Gc.sizes()); + int nwalk = get<1>(Gc.sizes()); int nspin = (walker_type == COLLINEAR ? 2 : 1); int npol = (walker_type == NONCOLLINEAR ? 2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); @@ -334,7 +336,7 @@ class KP3IndexFactorization noccb_tot = std::accumulate(nelpk[nd].begin() + nkpts, nelpk[nd].begin() + 2 * nkpts, 0); int getKr = KEright != nullptr; int getKl = KEleft != nullptr; - if (std::get<0>(E.sizes()) != nwalk || std::get<1>(E.sizes()) < 3) + if (get<0>(E.sizes()) != nwalk || get<1>(E.sizes()) < 3) APP_ABORT( " Error in AFQMC/HamiltonianOperations/KP3IndexFactorization::energy(). 
Incorrect matrix dimensions \n"); @@ -361,17 +363,18 @@ class KP3IndexFactorization Knr = nwalk; Knc = local_nCV; cnt = 0; + using std::get; // for C++17 compatibility #if defined(MIXED_PRECISION) if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride() == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride() == get<1>(KEright->sizes())); } #else if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride() == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride() == get<1>(KEright->sizes())); Krptr = to_address(KEright->origin()); } else @@ -383,14 +386,14 @@ class KP3IndexFactorization #if defined(MIXED_PRECISION) if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride() == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride() == get<1>(KEleft->sizes())); } #else if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride() == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride() == get<1>(KEleft->sizes())); Klptr = to_address(KEleft->origin()); } else @@ -687,11 +690,13 @@ class KP3IndexFactorization APP_ABORT(" Error: Incomplete implementation. 
\n"); // need to finish modifications for distribution of Q + using std::get; // for C++17 compatibility + int nkpts = nopk.size(); - assert(std::get<1>(E.sizes()) >= 3); + assert(get<1>(E.sizes()) >= 3); assert(nd >= 0 && nd < nelpk.size()); - int nwalk = std::get<1>(Gc.sizes()); + int nwalk = get<1>(Gc.sizes()); int nspin = (walker_type == COLLINEAR ? 2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); int nmo_max = *std::max_element(nopk.begin(), nopk.end()); @@ -703,7 +708,7 @@ class KP3IndexFactorization noccb_tot = std::accumulate(nelpk[nd].begin() + nkpts, nelpk[nd].begin() + 2 * nkpts, 0); int getKr = KEright != nullptr; int getKl = KEleft != nullptr; - if (std::get<0>(E.sizes()) != nwalk || std::get<1>(E.sizes()) < 3) + if (get<0>(E.sizes()) != nwalk || get<1>(E.sizes()) < 3) APP_ABORT(" Error in AFQMC/HamiltonianOperations/KP3IndexFactorization::energy(). Incorrect matrix dimensions\n"); size_t mem_needs(nwalk * nkpts * nkpts * nspin * nocca_max * nmo_max); @@ -732,14 +737,14 @@ class KP3IndexFactorization #if defined(MIXED_PRECISION) if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride() == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride() == get<1>(KEright->sizes())); } #else if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride() == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride() == get<1>(KEright->sizes())); Krptr = to_address(KEright->origin()); } else @@ -751,14 +756,14 @@ class KP3IndexFactorization #if defined(MIXED_PRECISION) if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride() == 
std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride() == get<1>(KEleft->sizes())); } #else if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride() == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride() == get<1>(KEleft->sizes())); Klptr = to_address(KEleft->origin()); } else @@ -842,9 +847,11 @@ class KP3IndexFactorization // move calculation of H1 here // NOTE: For CLOSED/NONCOLLINEAR, can do all walkers simultaneously to improve perf. of GEMM // Not sure how to do it for COLLINEAR. + + using std::get; // for C++17 compatibility if (addEXX) { - if (std::get<0>(Qwn.sizes()) != nwalk || std::get<1>(Qwn.sizes()) != nsampleQ) + if (get<0>(Qwn.sizes()) != nwalk || get<1>(Qwn.sizes()) != nsampleQ) Qwn.reextent({nwalk, nsampleQ}); comm->barrier(); if (comm->root()) @@ -1125,8 +1132,9 @@ class KP3IndexFactorization typename = typename std::enable_if_t<(std::decay::type::dimensionality == 2)>> void vHS(MatA& Xw, MatB&& v, double a = 1., double c = 0.) { + using std::get; // for C++17 compatibility int nkpts = nopk.size(); - int nwalk = std::get<1>(Xw.sizes()); + int nwalk = get<1>(Xw.sizes()); assert(v.size() == nwalk); int nspin = (walker_type == COLLINEAR ? 
2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); @@ -1326,14 +1334,15 @@ class KP3IndexFactorization typename = typename std::enable_if_t<(std::decay::type::dimensionality == 2)>> void vbias(const MatA& Gw, MatB&& v, double a = 1., double c = 0., int nd = 0) { + using std::get; // for C++17 compatibility using std::copy_n; using GType = typename std::decay_t; using vType = typename std::decay::type::element; int nkpts = nopk.size(); assert(nd >= 0 && nd < nelpk.size()); - int nwalk = std::get<1>(Gw.sizes()); - assert(std::get<0>(v.sizes()) == 2 * local_nCV); - assert(std::get<1>(v.sizes()) == nwalk); + int nwalk = get<1>(Gw.sizes()); + assert(get<0>(v.sizes()) == 2 * local_nCV); + assert(get<1>(v.sizes()) == nwalk); int nspin = (walker_type == COLLINEAR ? 2 : 1); int npol = (walker_type == NONCOLLINEAR ? 2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); @@ -1662,9 +1671,10 @@ class KP3IndexFactorization template void GKaKjw_to_GKKwaj(int nd, MatA const& GKaKj, MatB&& GKKaj, int nocca_tot, int noccb_tot, int nmo_tot, int akmax) { + using std::get; // for C++17 compatibility int nspin = (walker_type == COLLINEAR ? 2 : 1); int npol = (walker_type == NONCOLLINEAR ? 
2 : 1); - int nwalk = std::get<1>(GKaKj.sizes()); + int nwalk = get<1>(GKaKj.sizes()); int nkpts = nopk.size(); assert(GKaKj.num_elements() == (nocca_tot + noccb_tot) * npol * nmo_tot * nwalk); assert(GKKaj.num_elements() == nspin * nkpts * nkpts * npol * akmax * nwalk); @@ -1750,8 +1760,9 @@ class KP3IndexFactorization template void GwAK_to_GAKw(MatA const& GwAK, MatB&& GAKw) { - int nwalk = std::get<0>(GwAK.sizes()); - int nAK = std::get<1>(GwAK.sizes()); + using std::get; // for C++17 compatibility + int nwalk = get<0>(GwAK.sizes()); + int nAK = get<1>(GwAK.sizes()); for (int w = 0; w < nwalk; w++) for (int AK = 0; AK < nAK; AK++) GAKw[AK][w] = GwAK[w][AK]; diff --git a/src/AFQMC/HamiltonianOperations/KP3IndexFactorization_batched.hpp b/src/AFQMC/HamiltonianOperations/KP3IndexFactorization_batched.hpp index 7f642ab1a6..9936ac8e98 100644 --- a/src/AFQMC/HamiltonianOperations/KP3IndexFactorization_batched.hpp +++ b/src/AFQMC/HamiltonianOperations/KP3IndexFactorization_batched.hpp @@ -40,6 +40,8 @@ namespace afqmc // testing the use of dynamic data transfer during execution to reduce memory in GPU // when an approach is found, integrate in original class through additional template parameter +using std::get; // for C++17 compatibility + template class KP3IndexFactorization_batched { @@ -181,10 +183,11 @@ class KP3IndexFactorization_batched dev_Q2vbias(typename IVector::extensions_type{nopk.size()}, IAllocator{allocator_}), dev_Qmap(Qmap), dev_nelpk(nelpk), - dev_a0pk(typename IMatrix::extensions_type{std::get<0>(nelpk.sizes()), std::get<1>(nelpk.sizes())}, IAllocator{allocator_}), + dev_a0pk(typename IMatrix::extensions_type{get<0>(nelpk.sizes()), get<1>(nelpk.sizes())}, IAllocator{allocator_}), dev_QKToK2(QKToK2), EQ(nopk.size() + 2) { + using std::get; using std::copy_n; using std::fill_n; nocc_max = *std::max_element(nelpk.origin(), nelpk.origin() + nelpk.num_elements()); @@ -478,13 +481,15 @@ class KP3IndexFactorization_batched bool addEJ = true, bool addEXX = 
true) { + using std::get; // for C++17 compatibility + using std::copy_n; using std::fill_n; int nkpts = nopk.size(); - assert(std::get<1>(E.sizes()) >= 3); + assert(get<1>(E.sizes()) >= 3); assert(nd >= 0 && nd < nelpk.size()); - int nwalk = std::get<1>(Gc.sizes()); + int nwalk = get<1>(Gc.sizes()); int nspin = (walker_type == COLLINEAR ? 2 : 1); int npol = (walker_type == NONCOLLINEAR ? 2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); @@ -497,7 +502,7 @@ class KP3IndexFactorization_batched noccb_tot = std::accumulate(nelpk[nd].begin() + nkpts, nelpk[nd].begin() + 2 * nkpts, 0); int getKr = KEright != nullptr; int getKl = KEleft != nullptr; - if (std::get<0>(E.sizes()) != nwalk || std::get<1>(E.sizes()) < 3) + if (get<0>(E.sizes()) != nwalk || get<1>(E.sizes()) < 3) APP_ABORT(" Error in AFQMC/HamiltonianOperations/sparse_matrix_energy::calculate_energy(). Incorrect matrix " "dimensions \n"); @@ -523,13 +528,13 @@ class KP3IndexFactorization_batched Knc = local_nCV; if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride(0) == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride(0) == get<1>(KEright->sizes())); } if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride(0) == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride(0) == get<1>(KEleft->sizes())); } } else if (getKr or getKl) @@ -938,7 +943,7 @@ class KP3IndexFactorization_batched } } - // move calculation of H1 here + // move calculation of H1 here // NOTE: For CLOSED/NONCOLLINEAR, can do all walkers simultaneously to improve perf. of GEMM // Not sure how to do it for COLLINEAR. 
if(addEXX) { @@ -1198,8 +1203,9 @@ class KP3IndexFactorization_batched > void vHS(MatA& X, MatB&& v, double a = 1., double c = 0.) { + using std::get; // for C++17 compatibility int nkpts = nopk.size(); - int nwalk = std::get<1>(X.sizes()); + int nwalk = get<1>(X.sizes()); assert(v.size() == nwalk); int nspin = (walker_type == COLLINEAR ? 2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); @@ -1367,13 +1373,14 @@ class KP3IndexFactorization_batched > void vbias(const MatA& G, MatB&& v, double a = 1., double c = 0., int nd = 0) { + using std::get; // for C++17 compatibility using ma::gemmBatched; int nkpts = nopk.size(); assert(nd >= 0 && nd < nelpk.size()); - int nwalk = std::get<1>(G.sizes()); - assert(std::get<0>(v.sizes()) == 2 * local_nCV); - assert(std::get<1>(v.sizes()) == nwalk); + int nwalk = get<1>(G.sizes()); + assert(get<0>(v.sizes()) == 2 * local_nCV); + assert(get<1>(v.sizes()) == nwalk); int nspin = (walker_type == COLLINEAR ? 2 : 1); int npol = (walker_type == NONCOLLINEAR ? 2 : 1); int nmo_tot = std::accumulate(nopk.begin(), nopk.end(), 0); @@ -1583,11 +1590,13 @@ class KP3IndexFactorization_batched template void GKaKjw_to_GKKwaj(MatA const& GKaKj, MatB&& GKKaj, IVec&& nocc, IVec2&& dev_no, IVec2&& dev_a0) { + using std::get; // for C++17 compatibility + int npol = (walker_type == NONCOLLINEAR) ? 2 : 1; int nmo_max = *std::max_element(nopk.begin(), nopk.end()); // int nocc_max = *std::max_element(nocc.begin(),nocc.end()); - int nmo_tot = std::get<1>(GKaKj.sizes()); - int nwalk = std::get<2>(GKaKj.sizes()); + int nmo_tot = get<1>(GKaKj.sizes()); + int nwalk = get<2>(GKaKj.sizes()); int nkpts = nopk.size(); assert(GKKaj.num_elements() >= nkpts * nkpts * nwalk * nocc_max * npol * nmo_max); @@ -1602,8 +1611,10 @@ class KP3IndexFactorization_batched int npol = (walker_type == NONCOLLINEAR) ? 
2 : 1; int nmo_max = *std::max_element(nopk.begin(), nopk.end()); // int nocc_max = *std::max_element(nocc.begin(),nocc.end()); - int nmo_tot = std::get<1>(GKaKj.sizes()); - int nwalk = std::get<2>(GKaKj.sizes()); + + using std::get; // for C++17 compatibility + int nmo_tot = get<1>(GKaKj.sizes()); + int nwalk = get<2>(GKaKj.sizes()); int nkpts = nopk.size(); assert(GQKaj.num_elements() >= nkpts * nkpts * nwalk * nocc_max * npol * nmo_max); @@ -1620,9 +1631,11 @@ class KP3IndexFactorization_batched template void vKKwij_to_vwKiKj(MatA const& vKK, MatB&& vKiKj) { + using std::get; + int nmo_max = *std::max_element(nopk.begin(), nopk.end()); - int nwalk = std::get<0>(vKiKj.sizes()); - int nmo_tot = std::get<1>(vKiKj.sizes()); + int nwalk = get<0>(vKiKj.sizes()); + int nmo_tot = get<1>(vKiKj.sizes()); int nkpts = nopk.size(); using ma::vKKwij_to_vwKiKj; @@ -1633,8 +1646,10 @@ class KP3IndexFactorization_batched template void vbias_from_v1(ComplexType a, MatA const& v1, MatB&& vbias) { + using std::get; + using BType = typename std::decay::type::element; - int nwalk = std::get<1>(vbias.sizes()); + int nwalk = get<1>(vbias.sizes()); int nkpts = nopk.size(); int nchol_max = *std::max_element(ncholpQ.begin(), ncholpQ.end()); diff --git a/src/AFQMC/HamiltonianOperations/Real3IndexFactorization.hpp b/src/AFQMC/HamiltonianOperations/Real3IndexFactorization.hpp index 6301de667d..54171bd651 100644 --- a/src/AFQMC/HamiltonianOperations/Real3IndexFactorization.hpp +++ b/src/AFQMC/HamiltonianOperations/Real3IndexFactorization.hpp @@ -116,7 +116,8 @@ class Real3IndexFactorization vn0(std::move(vn0_)), SM_TMats({1, 1}, shared_allocator{TG.TG_local()}) { - local_nCV = std::get<1>(Likn.sizes()); + using std::get; + local_nCV = get<1>(Likn.sizes()); TG.Node().barrier(); } @@ -185,7 +186,8 @@ class Real3IndexFactorization bool addEJ = true, bool addEXX = true) { - assert(std::get<1>(E.sizes()) >= 3); + using std::get; + assert(get<1>(E.sizes()) >= 3); assert(nd >= 0); assert(nd < 
haj.size()); if (walker_type == COLLINEAR) @@ -199,18 +201,21 @@ class Real3IndexFactorization int nel[2]; nel[0] = Lank[nspin * nd].size(); nel[1] = ((nspin == 2) ? Lank[nspin * nd + 1].size() : 0); - assert(std::get<1>(Lank[nspin * nd].sizes()) == local_nCV); - assert(std::get<2>(Lank[nspin * nd].sizes()) == NMO); + using std::get; + assert(get<1>(Lank[nspin * nd].sizes()) == local_nCV); + assert(get<2>(Lank[nspin * nd].sizes()) == NMO); if (nspin == 2) { - assert(std::get<1>(Lank[nspin * nd + 1].sizes()) == local_nCV); - assert(std::get<2>(Lank[nspin * nd + 1].sizes()) == NMO); + assert(get<1>(Lank[nspin * nd + 1].sizes()) == local_nCV); + assert(get<2>(Lank[nspin * nd + 1].sizes()) == NMO); } assert(Gc.num_elements() == nwalk * (nel[0] + nel[1]) * NMO); int getKr = KEright != nullptr; int getKl = KEleft != nullptr; - if (std::get<0>(E.sizes()) != nwalk || std::get<1>(E.sizes()) < 3) + + using std::get; + if (get<0>(E.sizes()) != nwalk || get<1>(E.sizes()) < 3) APP_ABORT( " Error in AFQMC/HamiltonianOperations/Real3IndexFactorization::energy(...). 
Incorrect matrix dimensions \n"); @@ -241,26 +246,28 @@ class Real3IndexFactorization // messy SPComplexType* Klptr(nullptr); long Knr = 0, Knc = 0; + + using std::get; if (addEJ) { Knr = nwalk; Knc = local_nCV; if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride(0) == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride(0) == get<1>(KEright->sizes())); } #if defined(MIXED_PRECISION) if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride(0) == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride(0) == get<1>(KEleft->sizes())); } #else if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride(0) == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride(0) == get<1>(KEleft->sizes())); Klptr = to_address(KEleft->origin()); } else @@ -423,8 +430,10 @@ class Real3IndexFactorization { using BType = typename std::decay::type::element; using AType = typename std::decay::type::element; - boost::multi::array_ref< BType, 2> v_(to_address(v.origin()), {std::get<0>(v.sizes()), 1}); - boost::multi::array_ref X_(to_address(X.origin()), {std::get<0>(X.sizes()), 1}); + + using std::get; + boost::multi::array_ref< BType, 2> v_(to_address(v.origin()), {get<0>(v.sizes()), 1}); + boost::multi::array_ref X_(to_address(X.origin()), {get<0>(X.sizes()), 1}); return vHS(X_, v_, a, c); } @@ -436,9 +445,11 @@ class Real3IndexFactorization { using XType = typename std::decay_t; using vType = typename std::decay::type::element; - assert(std::get<1>(Likn.sizes()) == std::get<0>(X.sizes())); - 
assert(std::get<0>(Likn.sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(X.sizes()) == std::get<1>(v.sizes())); + + using std::get; + assert(get<1>(Likn.sizes()) == get<0>(X.sizes())); + assert(get<0>(Likn.sizes()) == get<0>(v.sizes())); + assert(get<1>(X.sizes()) == get<1>(v.sizes())); long ik0, ikN; std::tie(ik0, ikN) = FairDivideBoundary(long(TG.TG_local().rank()), long(Likn.size()), long(TG.TG_local().size())); // setup buffer space if changing precision in X or v @@ -484,9 +495,10 @@ class Real3IndexFactorization ma::product(SPValueType(a), Likn.sliced(ik0, ikN), Xsp, SPValueType(c), vsp.sliced(ik0, ikN)); + using std::get; if (not std::is_same::value) { - copy_n_cast(to_address(vsp[ik0].origin()), std::get<1>(vsp.sizes()) * (ikN - ik0), to_address(v[ik0].origin())); + copy_n_cast(to_address(vsp[ik0].origin()), get<1>(vsp.sizes()) * (ikN - ik0), to_address(v[ik0].origin())); } TG.TG_local().barrier(); } @@ -498,10 +510,12 @@ class Real3IndexFactorization typename = void> void vbias(const MatA& G, MatB&& v, double a = 1., double c = 0., int k = 0) { + using std::get; + using BType = typename std::decay::type::element; using AType = typename std::decay::type::element; - boost::multi::array_ref v_(to_address(v.origin()), {std::get<0>(v.sizes()), 1}); - boost::multi::array_cref G_(to_address(G.origin()), {std::get<0>(G.sizes()), 1}); + boost::multi::array_ref v_(to_address(v.origin()), {get<0>(v.sizes()), 1}); + boost::multi::array_cref G_(to_address(G.origin()), {get<0>(G.sizes()), 1}); return vbias(G_, v_, a, c, k); } @@ -556,13 +570,14 @@ class Real3IndexFactorization boost::multi::array_ref vsp(vptr, v.extensions()); TG.TG_local().barrier(); + using std::get; if (haj.size() == 1) { - assert(std::get<0>(Lakn.sizes()) == std::get<0>(G.sizes())); - assert(std::get<1>(Lakn.sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + assert(get<0>(Lakn.sizes()) == get<0>(G.sizes())); + assert(get<1>(Lakn.sizes()) == 
get<0>(v.sizes())); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); std::tie(ic0, icN) = - FairDivideBoundary(long(TG.TG_local().rank()), long(std::get<1>(Lakn.sizes())), long(TG.TG_local().size())); + FairDivideBoundary(long(TG.TG_local().rank()), long(get<1>(Lakn.sizes())), long(TG.TG_local().size())); if (walker_type == CLOSED) a *= 2.0; @@ -572,11 +587,11 @@ class Real3IndexFactorization else { // multideterminant is not half-rotated, so use Likn - assert(std::get<0>(Likn.sizes()) == std::get<0>(G.sizes())); - assert(std::get<1>(Likn.sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + assert(get<0>(Likn.sizes()) == get<0>(G.sizes())); + assert(get<1>(Likn.sizes()) == get<0>(v.sizes())); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); std::tie(ic0, icN) = - FairDivideBoundary(long(TG.TG_local().rank()), long(std::get<1>(Likn.sizes())), long(TG.TG_local().size())); + FairDivideBoundary(long(TG.TG_local().rank()), long(get<1>(Likn.sizes())), long(TG.TG_local().size())); if (walker_type == CLOSED) a *= 2.0; @@ -584,9 +599,10 @@ class Real3IndexFactorization vsp.sliced(ic0, icN)); } // copy data back if changing precision + using std::get; if (not std::is_same::value) { - copy_n_cast(to_address(vsp[ic0].origin()), std::get<1>(vsp.sizes()) * (icN - ic0), to_address(v[ic0].origin())); + copy_n_cast(to_address(vsp[ic0].origin()), get<1>(vsp.sizes()) * (icN - ic0), to_address(v[ic0].origin())); } TG.TG_local().barrier(); } diff --git a/src/AFQMC/HamiltonianOperations/Real3IndexFactorization_batched_v2.hpp b/src/AFQMC/HamiltonianOperations/Real3IndexFactorization_batched_v2.hpp index e2e375bca8..d407a66332 100644 --- a/src/AFQMC/HamiltonianOperations/Real3IndexFactorization_batched_v2.hpp +++ b/src/AFQMC/HamiltonianOperations/Real3IndexFactorization_batched_v2.hpp @@ -123,13 +123,14 @@ class Real3IndexFactorization_batched_v2 Lnak(std::move(move_vector(std::move(vnak)))), vn0(std::move(vn0_)) { - local_nCV = 
std::get<1>(Likn.sizes()); + using std::get; + local_nCV = get<1>(Likn.sizes()); size_t lnak(0); for (auto& v : Lnak) lnak += v.num_elements(); - for (int i = 0; i < std::get<0>(hij.sizes()); i++) + for (int i = 0; i < get<0>(hij.sizes()); i++) { - for (int j = 0; j < std::get<1>(hij.sizes()); j++) + for (int j = 0; j < get<1>(hij.sizes()); j++) { hij_dev[i][j] = ComplexType(hij[i][j]); } @@ -210,7 +211,8 @@ class Real3IndexFactorization_batched_v2 bool addEJ = true, bool addEXX = true) { - assert(std::get<1>(E.sizes()) >= 3); + using std::get; + assert(get<1>(E.sizes()) >= 3); assert(nd >= 0); assert(nd < haj.size()); if (walker_type == COLLINEAR) @@ -222,20 +224,21 @@ class Real3IndexFactorization_batched_v2 int nspin = (walker_type == COLLINEAR ? 2 : 1); int NMO = hij.size(); int nel[2]; - nel[0] = std::get<1>(Lnak[nspin * nd].sizes()); - nel[1] = ((nspin == 2) ? std::get<1>(Lnak[nspin * nd + 1].sizes()) : 0); - assert(std::get<0>(Lnak[nspin * nd].sizes()) == local_nCV); - assert(std::get<2>(Lnak[nspin * nd].sizes()) == NMO); + + nel[0] = get<1>(Lnak[nspin * nd].sizes()); + nel[1] = ((nspin == 2) ? get<1>(Lnak[nspin * nd + 1].sizes()) : 0); + assert(get<0>(Lnak[nspin * nd].sizes()) == local_nCV); + assert(get<2>(Lnak[nspin * nd].sizes()) == NMO); if (nspin == 2) { - assert(std::get<0>(Lnak[nspin * nd + 1].sizes()) == local_nCV); - assert(std::get<2>(Lnak[nspin * nd + 1].sizes()) == NMO); + assert(get<0>(Lnak[nspin * nd + 1].sizes()) == local_nCV); + assert(get<2>(Lnak[nspin * nd + 1].sizes()) == NMO); } assert(Gc.num_elements() == nwalk * (nel[0] + nel[1]) * NMO); int getKr = KEright != nullptr; int getKl = KEleft != nullptr; - if (std::get<0>(E.sizes()) != nwalk || std::get<1>(E.sizes()) < 3) + if (get<0>(E.sizes()) != nwalk || get<1>(E.sizes()) < 3) APP_ABORT(" Error in AFQMC/HamiltonianOperations/Real3IndexFactorization_batched_v2::energy(...). 
Incorrect " "matrix dimensions \n"); @@ -255,13 +258,13 @@ class Real3IndexFactorization_batched_v2 Knc = local_nCV; if (getKr) { - assert(std::get<0>(KEright->sizes()) == nwalk && std::get<1>(KEright->sizes()) == local_nCV); - assert(KEright->stride(0) == std::get<1>(KEright->sizes())); + assert(get<0>(KEright->sizes()) == nwalk && get<1>(KEright->sizes()) == local_nCV); + assert(KEright->stride(0) == get<1>(KEright->sizes())); } if (getKl) { - assert(std::get<0>(KEleft->sizes()) == nwalk && std::get<1>(KEleft->sizes()) == local_nCV); - assert(KEleft->stride(0) == std::get<1>(KEleft->sizes())); + assert(get<0>(KEleft->sizes()) == nwalk && get<1>(KEleft->sizes()) == local_nCV); + assert(KEleft->stride(0) == get<1>(KEleft->sizes())); } } else if (getKr or getKl) @@ -387,8 +390,10 @@ class Real3IndexFactorization_batched_v2 { using BType = typename std::decay::type::element; using AType = typename std::decay::type::element; - boost::multi::array_ref v_(v.origin(), {std::get<0>(v.sizes()), 1}); - boost::multi::array_ref X_(X.origin(), {std::get<0>(X.sizes()), 1}); + + using std::get; + boost::multi::array_ref v_(v.origin(), {get<0>(v.sizes()), 1}); + boost::multi::array_ref X_(X.origin(), {get<0>(X.sizes()), 1}); return vHS(X_, v_, a, c); } @@ -400,9 +405,11 @@ class Real3IndexFactorization_batched_v2 { using XType = typename std::decay_t; using vType = typename std::decay::type::element; - assert(std::get<1>(Likn.sizes()) == std::get<0>(X.sizes())); - assert(std::get<0>(Likn.sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(X.sizes()) == std::get<1>(v.sizes())); + + using std::get; + assert(get<1>(Likn.sizes()) == get<0>(X.sizes())); + assert(get<0>(Likn.sizes()) == get<0>(v.sizes())); + assert(get<1>(X.sizes()) == get<1>(v.sizes())); // setup buffer space if changing precision in X or v size_t vmem(0), Xmem(0); if (not std::is_same::value) @@ -506,25 +513,27 @@ class Real3IndexFactorization_batched_v2 boost::multi::array_cref Gsp(Gptr, G.extensions()); 
boost::multi::array_ref vsp(vptr, v.extensions()); + using std::get; + if (haj.size() == 1) { - int nwalk = std::get<1>(v.sizes()); + int nwalk = get<1>(v.sizes()); if (walker_type == COLLINEAR) { - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); int NMO, nel[2]; - NMO = std::get<2>(Lnak[0].sizes()); - nel[0] = std::get<1>(Lnak[0].sizes()); - nel[1] = std::get<1>(Lnak[1].sizes()); + NMO = get<2>(Lnak[0].sizes()); + nel[0] = get<1>(Lnak[0].sizes()); + nel[1] = get<1>(Lnak[1].sizes()); double c_[2]; c_[0] = c; c_[1] = c; if (std::abs(c) < 1e-8) c_[1] = 1.0; - assert((nel[0]+nel[1])*NMO == std::get<0>(G.sizes())); + assert((nel[0]+nel[1])*NMO == get<0>(G.sizes())); for (int ispin = 0, is0 = 0; ispin < 2; ispin++) { - assert(std::get<0>(Lnak[ispin].sizes()) == std::get<0>(v.sizes())); + assert(get<0>(Lnak[ispin].sizes()) == get<0>(v.sizes())); SpCMatrix_ref Ln(make_device_ptr(Lnak[ispin].origin()), {local_nCV, nel[ispin] * NMO}); ma::product(SPComplexType(a), Ln, Gsp.sliced(is0, is0 + nel[ispin] * NMO), SPComplexType(c_[ispin]), vsp); is0 += nel[ispin] * NMO; @@ -532,19 +541,19 @@ class Real3IndexFactorization_batched_v2 } else { - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); - assert(std::get<1>(Lnak[0].sizes()) * std::get<2>(Lnak[0].sizes()) == std::get<0>(G.sizes())); - assert(std::get<0>(Lnak[0].sizes()) == std::get<0>(v.sizes())); - SpCMatrix_ref Ln(make_device_ptr(Lnak[0].origin()), {local_nCV, std::get<1>(Lnak[0].sizes()) * std::get<2>(Lnak[0].sizes())}); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); + assert(get<1>(Lnak[0].sizes()) * get<2>(Lnak[0].sizes()) == get<0>(G.sizes())); + assert(get<0>(Lnak[0].sizes()) == get<0>(v.sizes())); + SpCMatrix_ref Ln(make_device_ptr(Lnak[0].origin()), {local_nCV, get<1>(Lnak[0].sizes()) * get<2>(Lnak[0].sizes())}); ma::product(SPComplexType(a), Ln, Gsp, SPComplexType(c), vsp); } } else { // multideterminant is not half-rotated, so use Likn - 
assert(std::get<0>(Likn.sizes()) == std::get<0>(G.sizes())); - assert(std::get<1>(Likn.sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + assert(get<0>(Likn.sizes()) == get<0>(G.sizes())); + assert(get<1>(Likn.sizes()) == get<0>(v.sizes())); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); ma::product(SPValueType(a), ma::T(Likn), Gsp, SPValueType(c), vsp); } diff --git a/src/AFQMC/HamiltonianOperations/SparseTensor.hpp b/src/AFQMC/HamiltonianOperations/SparseTensor.hpp index 6dee540a10..58a02350f8 100644 --- a/src/AFQMC/HamiltonianOperations/SparseTensor.hpp +++ b/src/AFQMC/HamiltonianOperations/SparseTensor.hpp @@ -188,25 +188,27 @@ class SparseTensor bool addEJ = true, bool addEXX = true) { - assert(std::get<1>(E.sizes()) >= 3); + using std::get; + assert(get<1>(E.sizes()) >= 3); assert(k >= 0 && k < haj.size()); assert(k >= 0 && k < Vakbl_view.size()); - if (Gcloc.num_elements() < std::get<1>(Gc.sizes()) * std::get<0>(Vakbl_view[k].sizes())) - Gcloc.reextent(iextensions<1u>(std::get<0>(Vakbl_view[k].sizes()) * std::get<1>(Gc.sizes()))); - boost::multi::array_ref buff(Gcloc.data(), {long(std::get<0>(Vakbl_view[k].sizes())), long(std::get<1>(Gc.sizes()))}); + using std::get; + if (Gcloc.num_elements() < get<1>(Gc.sizes()) * get<0>(Vakbl_view[k].sizes())) + Gcloc.reextent(iextensions<1u>(get<0>(Vakbl_view[k].sizes()) * get<1>(Gc.sizes()))); + boost::multi::array_ref buff(Gcloc.data(), {long(get<0>(Vakbl_view[k].sizes())), long(get<1>(Gc.sizes()))}); - int nwalk = std::get<1>(Gc.sizes()); + int nwalk = get<1>(Gc.sizes()); int getKr = Kr != nullptr; int getKl = Kl != nullptr; - if (std::get<0>(E.sizes()) != nwalk || std::get<1>(E.sizes()) < 3) + if (get<0>(E.sizes()) != nwalk || get<1>(E.sizes()) < 3) APP_ABORT(" Error in AFQMC/HamiltonianOperations/sparse_matrix_energy::calculate_energy(). 
Incorrect matrix " "dimensions \n"); for (int n = 0; n < nwalk; n++) std::fill_n(E[n].origin(), 3, ComplexType(0.)); if (addEJ and getKl) - assert(std::get<0>(Kl->sizes()) == nwalk && std::get<1>(Kl->sizes()) == std::get<0>(SpvnT[k].sizes())); + assert(get<0>(Kl->sizes()) == nwalk && get<1>(Kl->sizes()) == get<0>(SpvnT[k].sizes())); if (addEJ and getKr) - assert(std::get<0>(Kr->sizes()) == nwalk && std::get<1>(Kr->sizes()) == std::get<0>(SpvnT[k].sizes())); + assert(get<0>(Kr->sizes()) == nwalk && get<1>(Kr->sizes()) == get<0>(SpvnT[k].sizes())); #if defined(MIXED_PRECISION) size_t mem_needs = Gc.num_elements(); @@ -236,20 +238,23 @@ class SparseTensor shm::calculate_energy(std::forward(E), Gsp, buff, Vakbl_view[k]); } + using std::get; if (separateEJ && addEJ) { using ma::T; - if (Gcloc.num_elements() < std::get<0>(SpvnT[k].sizes()) * std::get<1>(Gc.sizes())) - Gcloc.reextent(iextensions<1u>(std::get<0>(SpvnT[k].sizes()) * std::get<1>(Gc.sizes()))); - assert(std::get<1>(SpvnT_view[k].sizes()) == std::get<0>(Gc.sizes())); + if (Gcloc.num_elements() < get<0>(SpvnT[k].sizes()) * get<1>(Gc.sizes())) + Gcloc.reextent(iextensions<1u>(get<0>(SpvnT[k].sizes()) * get<1>(Gc.sizes()))); + assert(get<1>(SpvnT_view[k].sizes()) == get<0>(Gc.sizes())); RealType scl = (walker_type == CLOSED ? 
4.0 : 1.0); + + using std::get; // SpvnT*G - boost::multi::array_ref v_(Gcloc.origin() + SpvnT_view[k].local_origin()[0] * std::get<1>(Gc.sizes()), - {long(std::get<0>(SpvnT_view[k].sizes())), long(std::get<1>(Gc.sizes()))}); + boost::multi::array_ref v_(Gcloc.origin() + SpvnT_view[k].local_origin()[0] * get<1>(Gc.sizes()), + {long(get<0>(SpvnT_view[k].sizes())), long(get<1>(Gc.sizes()))}); ma::product(SpvnT_view[k], Gsp, v_); if (getKl || getKr) { - for (int wi = 0; wi < std::get<1>(Gc.sizes()); wi++) + for (int wi = 0; wi < get<1>(Gc.sizes()); wi++) { auto _v_ = v_(v_.extension(0), wi); if (getKl) @@ -266,7 +271,7 @@ class SparseTensor } } } - for (int wi = 0; wi < std::get<1>(Gc.sizes()); wi++) + for (int wi = 0; wi < get<1>(Gc.sizes()); wi++) E[wi][2] = 0.5 * scl * static_cast(ma::dot(v_(v_.extension(0), wi), v_(v_.extension(0), wi))); } #if defined(MIXED_PRECISION) @@ -301,9 +306,11 @@ class SparseTensor { using vType = typename std::decay::type::element; using XType = typename std::decay_t; - assert(std::get<1>(Spvn.sizes()) == std::get<0>(X.sizes())); - assert(std::get<0>(Spvn.sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(X.sizes()) == std::get<1>(v.sizes())); + + using std::get; + assert(get<1>(Spvn.sizes()) == get<0>(X.sizes())); + assert(get<0>(Spvn.sizes()) == get<0>(v.sizes())); + assert(get<1>(X.sizes()) == get<1>(v.sizes())); // setup buffer space if changing precision in X or v size_t vmem(0), Xmem(0); @@ -344,8 +351,9 @@ class SparseTensor boost::multi::array_ref vsp(vptr, v.extensions()); comm->barrier(); + using std::get; boost::multi::array_ref v_(to_address(vsp[Spvn_view.local_origin()[0]].origin()), - {long(std::get<0>(Spvn_view.sizes())), long(std::get<1>(vsp.sizes()))}); + {long(get<0>(Spvn_view.sizes())), long(get<1>(vsp.sizes()))}); ma::product(SPValueType(a), Spvn_view, Xsp, SPValueType(c), v_); // copy data back if changing precision @@ -363,10 +371,12 @@ class SparseTensor typename = void> void vbias(const MatA& G, MatB&& v, 
double a = 1., double c = 0., int k = 0) { + using std::get; + using BType = typename std::decay::type::element; using AType = typename std::decay::type::element; - boost::multi::array_ref v_(v.origin(), {std::get<0>(v.sizes()), 1}); - boost::multi::array_cref G_(G.origin(), {std::get<0>(G.sizes()), 1}); + boost::multi::array_ref v_(v.origin(), {get<0>(v.sizes()), 1}); + boost::multi::array_cref G_(G.origin(), {get<0>(G.sizes()), 1}); return vbias(G_, v_, a, c, k); } @@ -382,9 +392,11 @@ class SparseTensor k = 0; if (walker_type == CLOSED) a *= 2.0; - assert(std::get<1>(SpvnT[k].sizes()) == std::get<0>(G.sizes())); - assert(std::get<0>(SpvnT[k].sizes()) == std::get<0>(v.sizes())); - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + + using std::get; + assert(get<1>(SpvnT[k].sizes()) == get<0>(G.sizes())); + assert(get<0>(SpvnT[k].sizes()) == get<0>(v.sizes())); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); // setup buffer space if changing precision in G or v size_t vmem(0), Gmem(0); @@ -424,8 +436,10 @@ class SparseTensor boost::multi::array_cref Gsp(Gptr, G.extensions()); boost::multi::array_ref vsp(vptr, v.extensions()); comm->barrier(); + + using std::get; boost::multi::array_ref v_(to_address(vsp[SpvnT_view[k].local_origin()[0]].origin()), - {long(std::get<0>(SpvnT_view[k].sizes())), long(std::get<1>(vsp.sizes()))}); + {long(get<0>(SpvnT_view[k].sizes())), long(get<1>(vsp.sizes()))}); ma::product(SpT2(a), SpvnT_view[k], Gsp, SpT2(c), v_); // copy data back if changing precision diff --git a/src/AFQMC/HamiltonianOperations/THCOps.hpp b/src/AFQMC/HamiltonianOperations/THCOps.hpp index 631937eda5..ee9fb78160 100644 --- a/src/AFQMC/HamiltonianOperations/THCOps.hpp +++ b/src/AFQMC/HamiltonianOperations/THCOps.hpp @@ -133,33 +133,36 @@ class THCOps vn0(std::move(v0_)), E0(e0_) { - gnmu = std::get<1>(Luv.sizes()); - grotnmu = std::get<1>(rotMuv.sizes()); + using std::get; + gnmu = get<1>(Luv.sizes()); + grotnmu = get<1>(rotMuv.sizes()); if (haj.size() > 
1) APP_ABORT(" Error: THC not yet implemented for multiple references.\n"); assert(comm); // current partition over 'u' for L/Piu - assert(Luv.size() == std::get<1>(Piu.sizes())); + assert(Luv.size() == get<1>(Piu.sizes())); + + using std::get; for (int i = 0; i < rotcPua.size(); i++) { // rot Ps are not yet distributed - assert(rotcPua[i].size() == std::get<1>(rotPiu.sizes())); + assert(rotcPua[i].size() == get<1>(rotPiu.sizes())); if (walker_type == CLOSED) - assert(std::get<1>(rotcPua[i].sizes()) == nup); + assert(get<1>(rotcPua[i].sizes()) == nup); else if (walker_type == COLLINEAR) - assert(std::get<1>(rotcPua[i].sizes()) == nup + ndown); + assert(get<1>(rotcPua[i].sizes()) == nup + ndown); else if (walker_type == NONCOLLINEAR) - assert(std::get<1>(rotcPua[i].sizes()) == nup + ndown); + assert(get<1>(rotcPua[i].sizes()) == nup + ndown); } for (int i = 0; i < cPua.size(); i++) { assert(cPua[i].size() == Luv.size()); if (walker_type == CLOSED) - assert(std::get<1>(cPua[i].sizes()) == nup); + assert(get<1>(cPua[i].sizes()) == nup); else if (walker_type == COLLINEAR) - assert(std::get<1>(cPua[i].sizes()) == nup + ndown); + assert(get<1>(cPua[i].sizes()) == nup + ndown); else if (walker_type == NONCOLLINEAR) - assert(std::get<1>(cPua[i].sizes()) == nup + ndown); + assert(get<1>(cPua[i].sizes()) == nup + ndown); } if (walker_type == NONCOLLINEAR) { @@ -257,8 +260,10 @@ class THCOps if (k > 0) APP_ABORT(" Error: THC not yet implemented for multiple references.\n"); // G[nel][nmo] - assert(std::get<0>(E.sizes()) == std::get<0>(G.sizes())); - assert(std::get<1>(E.sizes()) == 3); + + using std::get; + assert(get<0>(E.sizes()) == get<0>(G.sizes())); + assert(get<1>(E.sizes()) == 3); int nwalk = G.size(); int getKr = Kr != nullptr; int getKl = Kl != nullptr; @@ -274,17 +279,19 @@ class THCOps if (not(addEJ || addEXX)) return; + using std::get; + int nmo_ = rotPiu.size(); int nu = rotMuv.size(); int nu0 = rotnmu0; - int nv = std::get<1>(rotMuv.sizes()); - int nel_ = 
std::get<1>(rotcPua[0].sizes()); + int nv = get<1>(rotMuv.sizes()); + int nel_ = get<1>(rotcPua[0].sizes()); int nspin = (walker_type == COLLINEAR) ? 2 : 1; - assert(std::get<1>(G.sizes()) == nel_ * nmo_); + assert(get<1>(G.sizes()) == nel_ * nmo_); if (addEJ and getKl) - assert(std::get<0>(Kl->sizes()) == nwalk && std::get<1>(Kl->sizes()) == nu); + assert(get<0>(Kl->sizes()) == nwalk && get<1>(Kl->sizes()) == nu); if (addEJ and getKr) - assert(std::get<0>(Kr->sizes()) == nwalk && std::get<1>(Kr->sizes()) == nu); + assert(get<0>(Kr->sizes()) == nwalk && get<1>(Kr->sizes()) == nu); using ma::T; int u0, uN; std::tie(u0, uN) = FairDivideBoundary(comm->rank(), nu, comm->size()); @@ -681,18 +688,20 @@ class THCOps using ma::T; using XType = typename std::decay_t; using vType = typename std::decay::type::element; - int nwalk = std::get<1>(X.sizes()); + + using std::get; + int nwalk = get<1>(X.sizes()); #if defined(QMC_COMPLEX) - int nchol = 2 * std::get<1>(Luv.sizes()); + int nchol = 2 * get<1>(Luv.sizes()); #else - int nchol = std::get<1>(Luv.sizes()); + int nchol = get<1>(Luv.sizes()); #endif - int nmo_ = std::get<0>(Piu.sizes()); - int nu = std::get<1>(Piu.sizes()); - assert(std::get<0>(Luv.sizes()) == nu); - assert(std::get<0>(X.sizes()) == nchol); - assert(std::get<0>(v.sizes()) == nwalk); - assert(std::get<1>(v.sizes()) == nmo_ * nmo_); + int nmo_ = get<0>(Piu.sizes()); + int nu = get<1>(Piu.sizes()); + assert(get<0>(Luv.sizes()) == nu); + assert(get<0>(X.sizes()) == nchol); + assert(get<0>(v.sizes()) == nwalk); + assert(get<1>(v.sizes()) == nmo_ * nmo_); size_t memory_needs = nu * nwalk; if (not std::is_same::value) @@ -748,11 +757,13 @@ class THCOps std::tie(u0, uN) = FairDivideBoundary(comm->rank(), nu, comm->size()); Array_ref Tuw(make_device_ptr(SM_TMats.origin()) + cnt, {nu, nwalk}); // O[nwalk * nmu * nmu] + + using std::get; #if defined(QMC_COMPLEX) // reinterpret as RealType matrices with 2x the columns Array_ref 
Luv_R(pointer_cast(make_device_ptr(Luv.origin())), - {std::get<0>(Luv.sizes()), 2 * std::get<1>(Luv.sizes())}); - Array_cref X_R(pointer_cast(Xsp.origin()), {std::get<0>(Xsp.sizes()), 2 * std::get<1>(Xsp.sizes())}); + {get<0>(Luv.sizes()), 2 * get<1>(Luv.sizes())}); + Array_cref X_R(pointer_cast(Xsp.origin()), {get<0>(Xsp.sizes()), 2 * get<1>(Xsp.sizes())}); Array_ref Tuw_R(pointer_cast(Tuw.origin()), {nu, 2 * nwalk}); ma::product(Luv_R.sliced(u0, uN), X_R, Tuw_R.sliced(u0, uN)); #else @@ -822,10 +833,11 @@ class THCOps typename = void> void vbias(MatA const& G, MatB&& v, double a = 1., double c = 0., int k = 0) { + using std::get; using GType = typename std::decay_t; using vType = typename std::decay::type::element; - boost::multi::array_ref v_(v.origin(), {std::get<0>(v.sizes()), 1}); - boost::multi::array_ref G_(G.origin(), {1, std::get<0>(G.sizes())}); + boost::multi::array_ref v_(v.origin(), {get<0>(v.sizes()), 1}); + boost::multi::array_ref G_(G.origin(), {1, get<0>(G.sizes())}); vbias(G_, v_, a, c, k); } @@ -839,17 +851,19 @@ class THCOps using vType = typename std::decay::type::element; if (k > 0) APP_ABORT(" Error: THC not yet implemented for multiple references.\n"); - int nwalk = std::get<0>(G.sizes()); - int nmo_ = std::get<0>(Piu.sizes()); - int nu = std::get<1>(Piu.sizes()); - int nel_ = std::get<1>(cPua[0].sizes()); + + using std::get; + int nwalk = get<0>(G.sizes()); + int nmo_ = get<0>(Piu.sizes()); + int nu = get<1>(Piu.sizes()); + int nel_ = get<1>(cPua[0].sizes()); #if defined(QMC_COMPLEX) - int nchol = 2 * std::get<1>(Luv.sizes()); + int nchol = 2 * get<1>(Luv.sizes()); #else - int nchol = std::get<1>(Luv.sizes()); + int nchol = get<1>(Luv.sizes()); #endif - assert(std::get<1>(v.sizes()) == nwalk); - assert(std::get<0>(v.sizes()) == nchol); + assert(get<1>(v.sizes()) == nwalk); + assert(get<0>(v.sizes()) == nchol); using ma::T; int c0, cN; std::tie(c0, cN) = FairDivideBoundary(comm->rank(), nchol, comm->size()); @@ -895,6 +909,7 @@ class THCOps 
Array_cref Gsp(Gptr, G.extensions()); Array_ref vsp(vptr, v.extensions()); + using std::get; if (haj.size() == 1) { Array_ref Guu(make_device_ptr(SM_TMats.origin()) + cnt, {nu, nwalk}); @@ -902,9 +917,9 @@ class THCOps #if defined(QMC_COMPLEX) // reinterpret as RealType matrices with 2x the columns Array_ref Luv_R(pointer_cast(make_device_ptr(Luv.origin())), - {std::get<0>(Luv.sizes()), 2 * std::get<1>(Luv.sizes())}); + {get<0>(Luv.sizes()), 2 * get<1>(Luv.sizes())}); Array_ref Guu_R(pointer_cast(Guu.origin()), {nu, 2 * nwalk}); - Array_ref vsp_R(pointer_cast(vsp.origin()), {std::get<0>(vsp.sizes()), 2 * std::get<1>(vsp.sizes())}); + Array_ref vsp_R(pointer_cast(vsp.origin()), {get<0>(vsp.sizes()), 2 * get<1>(vsp.sizes())}); ma::product(SPRealType(a), T(Luv_R(Luv_R.extension(0), {c0, cN})), Guu_R, SPRealType(c), vsp_R.sliced(c0, cN)); #else ma::product(SPRealType(a), T(Luv(Luv.extension(0), {c0, cN})), Guu, SPRealType(c), vsp.sliced(c0, cN)); @@ -917,9 +932,9 @@ class THCOps #if defined(QMC_COMPLEX) // reinterpret as RealType matrices with 2x the columns Array_ref Luv_R(pointer_cast(make_device_ptr(Luv.origin())), - {std::get<0>(Luv.sizes()), 2 * std::get<1>(Luv.sizes())}); + {get<0>(Luv.sizes()), 2 * get<1>(Luv.sizes())}); Array_ref Guu_R(pointer_cast(Guu.origin()), {nu, 2 * nwalk}); - Array_ref vsp_R(pointer_cast(vsp.origin()), {std::get<0>(vsp.sizes()), 2 * std::get<1>(vsp.sizes())}); + Array_ref vsp_R(pointer_cast(vsp.origin()), {get<0>(vsp.sizes()), 2 * get<1>(vsp.sizes())}); ma::product(SPRealType(a), T(Luv_R(Luv_R.extension(0), {c0, cN})), Guu_R, SPRealType(c), vsp_R.sliced(c0, cN)); #else ma::product(SPRealType(a), T(Luv(Luv.extension(0), {c0, cN})), Guu, SPRealType(c), vsp.sliced(c0, cN)); @@ -927,7 +942,7 @@ class THCOps } if (not std::is_same::value) { - copy_n_cast(make_device_ptr(vsp[c0].origin()), std::get<1>(vsp.sizes()) * (cN - c0), make_device_ptr(v[c0].origin())); + copy_n_cast(make_device_ptr(vsp[c0].origin()), get<1>(vsp.sizes()) * (cN - c0), 
make_device_ptr(v[c0].origin())); } comm->barrier(); } @@ -939,13 +954,13 @@ class THCOps } bool distribution_over_cholesky_vectors() const { return false; } - int number_of_ke_vectors() const { return std::get<0>(rotMuv.sizes()); } + int number_of_ke_vectors() const { using std::get; return get<0>(rotMuv.sizes()); } #if defined(QMC_COMPLEX) - int local_number_of_cholesky_vectors() const { return 2 * std::get<1>(Luv.sizes()); } - int global_number_of_cholesky_vectors() const { return 2 * std::get<1>(Luv.sizes()); } + int local_number_of_cholesky_vectors() const { using std::get; return 2 * get<1>(Luv.sizes()); } + int global_number_of_cholesky_vectors() const { using std::get; return 2 * get<1>(Luv.sizes()); } #else - int local_number_of_cholesky_vectors() const { return std::get<1>(Luv.sizes()); } - int global_number_of_cholesky_vectors() const { return std::get<1>(Luv.sizes()); } + int local_number_of_cholesky_vectors() const { using std::get; return get<1>(Luv.sizes()); } + int global_number_of_cholesky_vectors() const { using std::get; return get<1>(Luv.sizes()); } #endif int global_origin_cholesky_vector() const { return 0; } @@ -964,16 +979,17 @@ class THCOps template void Guu_from_compact(MatA const& G, MatB&& Guu) { - int nmo_ = int(std::get<0>(Piu.sizes())); - int nu = int(std::get<1>(Piu.sizes())); - int nel_ = std::get<1>(cPua[0].sizes()); + using std::get; + int nmo_ = int(get<0>(Piu.sizes())); + int nu = int(get<1>(Piu.sizes())); + int nel_ = get<1>(cPua[0].sizes()); int u0, uN; std::tie(u0, uN) = FairDivideBoundary(comm->rank(), nu, comm->size()); - int nw = std::get<0>(G.sizes()); + int nw = get<0>(G.sizes()); - assert(std::get<0>(G.sizes()) == std::get<1>(Guu.sizes())); - assert(std::get<1>(G.sizes()) == nel_ * nmo_); - assert(std::get<0>(Guu.sizes()) == nu); + assert(get<0>(G.sizes()) == get<1>(Guu.sizes())); + assert(get<1>(G.sizes()) == nel_ * nmo_); + assert(get<0>(Guu.sizes()) == nu); ComplexType a = (walker_type == CLOSED) ? 
ComplexType(2.0) : ComplexType(1.0); Array T1({(uN - u0), nw * nel_}, @@ -1007,15 +1023,16 @@ class THCOps void Guu_from_full(MatA const& G, MatB&& Guu) { using std::fill_n; - int nmo_ = int(std::get<0>(Piu.sizes())); - int nu = int(std::get<1>(Piu.sizes())); + using std::get; + int nmo_ = int(get<0>(Piu.sizes())); + int nu = int(get<1>(Piu.sizes())); int u0, uN; std::tie(u0, uN) = FairDivideBoundary(comm->rank(), nu, comm->size()); int nwalk = G.size(); - assert(std::get<0>(G.sizes()) == std::get<1>(Guu.sizes())); - assert(std::get<0>(Guu.sizes()) == nu); - assert(std::get<1>(G.sizes()) == nmo_ * nmo_); + assert(get<0>(G.sizes()) == get<1>(Guu.sizes())); + assert(get<0>(Guu.sizes()) == nu); + assert(get<1>(G.sizes()) == nmo_ * nmo_); // calculate how many walkers can be done concurrently long Bytes = default_buffer_size_in_MB * 1024L * 1024L; @@ -1058,11 +1075,13 @@ class THCOps static_assert(std::decay::type::dimensionality == 3, "Wrong dimensionality"); static_assert(std::decay::type::dimensionality == 2, "Wrong dimensionality"); static_assert(std::decay::type::dimensionality == 3, "Wrong dimensionality"); - int nmo_ = int(std::get<0>(rotPiu.sizes())); - int nu = int(std::get<0>(rotMuv.sizes())); // potentially distributed over nodes - int nv = int(std::get<1>(rotMuv.sizes())); // not distributed over nodes + + using std::get; + int nmo_ = int(get<0>(rotPiu.sizes())); + int nu = int(get<0>(rotMuv.sizes())); // potentially distributed over nodes + int nv = int(get<1>(rotMuv.sizes())); // not distributed over nodes int nw = int(G.size()); - assert(std::get<1>(rotPiu.sizes()) == nv); + assert(get<1>(rotPiu.sizes()) == nv); int v0, vN; std::tie(v0, vN) = FairDivideBoundary(comm->rank(), nv, comm->size()); int k0, kN; diff --git a/src/AFQMC/HamiltonianOperations/THCOpsIO.hpp b/src/AFQMC/HamiltonianOperations/THCOpsIO.hpp index 365ff26673..07ca636118 100644 --- a/src/AFQMC/HamiltonianOperations/THCOpsIO.hpp +++ b/src/AFQMC/HamiltonianOperations/THCOpsIO.hpp @@ -295,8 
+295,9 @@ inline void writeTHCOps(hdf_archive& dump, shmCMatrix& vn0, ValueType E0) { - size_t gnmu(std::get<1>(Luv.sizes())); - size_t grotnmu(std::get<1>(rotMuv.sizes())); + using std::get; + size_t gnmu(get<1>(Luv.sizes())); + size_t grotnmu(get<1>(rotMuv.sizes())); if (TGwfn.Global().root()) { dump.push("HamiltonianOperations"); diff --git a/src/AFQMC/HamiltonianOperations/sparse_matrix_energy.hpp b/src/AFQMC/HamiltonianOperations/sparse_matrix_energy.hpp index 51ff5d5276..36819d8a85 100644 --- a/src/AFQMC/HamiltonianOperations/sparse_matrix_energy.hpp +++ b/src/AFQMC/HamiltonianOperations/sparse_matrix_energy.hpp @@ -98,15 +98,16 @@ inline void calculate_energy(EMat&& locV, const MatA& Gc, MatB&& Gcloc, const Sp { // W[nwalk][2][NMO][NAEA] + using std::get; assert(locV.dimensionality == 2); - assert(std::get<1>(Gc.sizes()) == std::get<1>(Gcloc.sizes())); - assert(Vakbl.size(0) == std::get<0>(Gcloc.sizes())); - assert(std::get<0>(Gc.sizes()) == Vakbl.size(1)); + assert(get<1>(Gc.sizes()) == get<1>(Gcloc.sizes())); + assert(Vakbl.size(0) == get<0>(Gcloc.sizes())); + assert(get<0>(Gc.sizes()) == Vakbl.size(1)); using Type = typename std::decay::type::element; const Type half = Type(0.5); - int nwalk = std::get<1>(Gc.sizes()); + int nwalk = get<1>(Gc.sizes()); // Vakbl * Gc(bl,nw) = Gcloc(ak,nw) ma::product(Vakbl, Gc, std::forward(Gcloc)); diff --git a/src/AFQMC/HamiltonianOperations/tests/test_hamiltonian_operations.cpp b/src/AFQMC/HamiltonianOperations/tests/test_hamiltonian_operations.cpp index 7a227812c8..6e88f6a6b8 100644 --- a/src/AFQMC/HamiltonianOperations/tests/test_hamiltonian_operations.cpp +++ b/src/AFQMC/HamiltonianOperations/tests/test_hamiltonian_operations.cpp @@ -255,14 +255,15 @@ void ham_ops_basic_serial(boost::mpi3::communicator& world) HOps.vHS(X, vHS, sqrtdt); TG.local_barrier(); ComplexType Vsum = 0; + using std::get; if (HOps.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); 
i++) Vsum += vHS[0][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][0]; } if (std::abs(file_data.Vsum) > 1e-8) diff --git a/src/AFQMC/Hamiltonians/KPFactorizedHamiltonian.cpp b/src/AFQMC/Hamiltonians/KPFactorizedHamiltonian.cpp index 5dc2547b77..f80c2e0f38 100644 --- a/src/AFQMC/Hamiltonians/KPFactorizedHamiltonian.cpp +++ b/src/AFQMC/Hamiltonians/KPFactorizedHamiltonian.cpp @@ -163,13 +163,15 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b APP_ABORT(""); } E0 = E_[0] + E_[1]; + + using std::get; if (nmo_per_kp.size() != nkpts || nchol_per_kp.size() != nkpts || kminus.size() != nkpts || - std::get<0>(QKtok2.sizes()) != nkpts || std::get<1>(QKtok2.sizes()) != nkpts) + get<0>(QKtok2.sizes()) != nkpts || get<1>(QKtok2.sizes()) != nkpts) { app_error() << " Error in KPFactorizedHamiltonian::getHamiltonianOperations():" << " Inconsistent dimension (NMOPerKP,NCholPerKP,QKtTok2): " << nkpts << " " << nmo_per_kp.size() - << " " << nchol_per_kp.size() << " " << kminus.size() << " " << std::get<0>(QKtok2.sizes()) << " " - << std::get<1>(QKtok2.sizes()) << std::endl; + << " " << nchol_per_kp.size() << " " << kminus.size() << " " << get<0>(QKtok2.sizes()) << " " + << get<1>(QKtok2.sizes()) << std::endl; APP_ABORT(""); } } @@ -265,11 +267,11 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b << " Problems reading /Hamiltonian/KPFactorized/L" << Q << ". \n"; APP_ABORT(""); } - if (std::get<0>(LQKikn[Q].sizes()) != nkpts || std::get<1>(LQKikn[Q].sizes()) != nmo_max * nmo_max * nchol_per_kp[Q]) + if (get<0>(LQKikn[Q].sizes()) != nkpts || get<1>(LQKikn[Q].sizes()) != nmo_max * nmo_max * nchol_per_kp[Q]) { app_error() << " Error in KPFactorizedHamiltonian::getHamiltonianOperations():" << " Problems reading /Hamiltonian/KPFactorized/L" << Q << ". 
\n" - << " Unexpected dimensins: " << std::get<0>(LQKikn[Q].sizes()) << " " << std::get<1>(LQKikn[Q].sizes()) << std::endl; + << " Unexpected dimensins: " << get<0>(LQKikn[Q].sizes()) << " " << get<1>(LQKikn[Q].sizes()) << std::endl; APP_ABORT(""); } } @@ -441,14 +443,14 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b { { // Alpha auto Psi = get_PsiK>(nmo_per_kp, PsiT[2 * nd], K); - assert(std::get<0>(Psi.sizes()) == na); + assert(get<0>(Psi.sizes()) == na); boost::multi::array_ref haj_r(to_address(haj[nd * nkpts + K].origin()), {na, ni}); if (na > 0) ma::product(Psi, H1[K]({0, ni}, {0, ni}), haj_r); } { // Beta auto Psi = get_PsiK>(nmo_per_kp, PsiT[2 * nd + 1], K); - assert(std::get<0>(Psi.sizes()) == nb); + assert(get<0>(Psi.sizes()) == nb); boost::multi::array_ref haj_r(to_address(haj[nd * nkpts + K].origin()) + na * ni, {nb, ni}); if (nb > 0) ma::product(Psi, H1[K]({0, ni}, {0, ni}), haj_r); @@ -458,7 +460,7 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b { RealType scl = (type == CLOSED ? 
2.0 : 1.0); auto Psi = get_PsiK>(nmo_per_kp, PsiT[nd], K, npol == 2); - assert(std::get<0>(Psi.sizes()) == na); + assert(get<0>(Psi.sizes()) == na); boost::multi::array_ref haj_r(to_address(haj[nd * nkpts + K].origin()), {na, npol * ni}); if (na > 0) ma::product(ComplexType(scl), Psi, H1[K]({0, npol * ni}, {0, npol * ni}), ComplexType(0.0), haj_r); @@ -485,11 +487,13 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b int ni = nmo_per_kp[K]; int nk = nmo_per_kp[QK]; int nchol = nchol_per_kp[Q]; + + using std::get; if (type == COLLINEAR) { { // Alpha auto Psi = get_PsiK>(nmo_per_kp, PsiT[2 * nd], K); - assert(std::get<0>(Psi.sizes()) == nocc_per_kp[nd][K]); + assert(get<0>(Psi.sizes()) == nocc_per_kp[nd][K]); if (Q <= Qm) { Sp3Tensor_ref Likn(to_address(LQKikn[Q][K].origin()), {ni, nk, nchol}); @@ -505,7 +509,7 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b } { // Beta auto Psi = get_PsiK>(nmo_per_kp, PsiT[2 * nd + 1], K); - assert(std::get<0>(Psi.sizes()) == nb); + assert(get<0>(Psi.sizes()) == nb); if (Q <= Qm) { Sp3Tensor_ref Likn(to_address(LQKikn[Q][K].origin()), {ni, nk, nchol}); @@ -523,7 +527,8 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b else { auto Psi = get_PsiK(nmo_per_kp, PsiT[nd], K, npol == 2); - assert(std::get<0>(Psi.sizes()) == na); + using std::get; + assert(get<0>(Psi.sizes()) == na); if (Q <= Qm) { Sp3Tensor_ref Likn(to_address(LQKikn[Q][K].origin()), {ni, nk, nchol}); @@ -562,6 +567,8 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b int nchol = nchol_per_kp[Q]; Sp3Tensor_ref Likn(to_address(LQKikn[Q][K].origin()), {ni, nk, nchol}); // NOTE: LQKbnl is indexed by the K index of 'b', L[Q][Kb] + using std::get; + if (type == COLLINEAR) { { // Alpha @@ -571,7 +578,7 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b } { // Beta auto PsiQK = get_PsiK>(nmo_per_kp, PsiT[2 * nd + 
1], QK); - assert(std::get<0>(PsiQK.sizes()) == nb); + assert(get<0>(PsiQK.sizes()) == nb); Sp3Tensor_ref Lbnl(to_address(LQKbnl[nq0 + number_of_symmetric_Q + Qmap[Q] - 1][QK].origin()), {nb, nchol, ni}); ma_rotate::getLank_from_Lkin(PsiQK, Likn, Lbnl, buff); @@ -580,7 +587,7 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_shared(b else { auto PsiQK = get_PsiK(nmo_per_kp, PsiT[nd], QK, npol == 2); - assert(std::get<0>(PsiQK.sizes()) == na); + assert(get<0>(PsiQK.sizes()) == na); Sp3Tensor_ref Lbnl(to_address(LQKbnl[nq0 + Qmap[Q] - 1][QK].origin()), {na, nchol, npol * ni}); ma_rotate::getLank_from_Lkin(PsiQK, Likn, Lbnl, buff, npol == 2); } @@ -889,13 +896,16 @@ HamiltonianOperations KPFactorizedHamiltonian::getHamiltonianOperations_batched( APP_ABORT(""); } E0 = E_[0] + E_[1]; + + using std::get; + if (nmo_per_kp.size() != nkpts || nchol_per_kp.size() != nkpts || kminus.size() != nkpts || - std::get<0>(QKtok2.sizes()) != nkpts || std::get<1>(QKtok2.sizes()) != nkpts) + get<0>(QKtok2.sizes()) != nkpts || get<1>(QKtok2.sizes()) != nkpts) { app_error() << " Error in KPFactorizedHamiltonian::getHamiltonianOperations():" << " Inconsistent dimension (NMOPerKP,NCholPerKP,QKtTok2): " << nkpts << " " << nmo_per_kp.size() - << " " << nchol_per_kp.size() << " " << kminus.size() << " " << std::get<0>(QKtok2.sizes()) << " " - << std::get<1>(QKtok2.sizes()) << std::endl; + << " " << nchol_per_kp.size() << " " << kminus.size() << " " << get<0>(QKtok2.sizes()) << " " + << get<1>(QKtok2.sizes()) << std::endl; APP_ABORT(""); } } diff --git a/src/AFQMC/Hamiltonians/RealDenseHamiltonian.cpp b/src/AFQMC/Hamiltonians/RealDenseHamiltonian.cpp index 39ad191af1..4929670334 100644 --- a/src/AFQMC/Hamiltonians/RealDenseHamiltonian.cpp +++ b/src/AFQMC/Hamiltonians/RealDenseHamiltonian.cpp @@ -61,14 +61,16 @@ HamiltonianOperations RealDenseHamiltonian::getHamiltonianOperations(bool pureSD using RMatrix_ref = boost::multi::array_ref; using Sp3Tensor_ref = 
boost::multi::array_ref; + using std::get; + if (type == COLLINEAR) assert(PsiT.size() % 2 == 0); int nspins = ((type != COLLINEAR) ? 1 : 2); int ndet = PsiT.size() / nspins; - int nup = std::get<0>(PsiT[0].sizes()); + int nup = get<0>(PsiT[0].sizes()); int ndown = 0; if (nspins == 2) - ndown = std::get<0>(PsiT[1].sizes()); + ndown = get<0>(PsiT[1].sizes()); int NEL = nup + ndown; // distribute work over equivalent nodes in TGprop.TG() across TG.Global() @@ -176,11 +178,14 @@ HamiltonianOperations RealDenseHamiltonian::getHamiltonianOperations(bool pureSD << " Problems reading /Hamiltonian/DenseFactorized/L. \n"; APP_ABORT(""); } - if (std::get<0>(Likn.sizes()) != NMO * NMO || std::get<1>(Likn.sizes()) != local_ncv) + + using std::get; + + if (get<0>(Likn.sizes()) != NMO * NMO || get<1>(Likn.sizes()) != local_ncv) { app_error() << " Error in RealDenseHamiltonian::getHamiltonianOperations():" << " Problems reading /Hamiltonian/DenseFactorized/L. \n" - << " Unexpected dimensions: " << std::get<0>(Likn.sizes()) << " " << std::get<1>(Likn.sizes()) << std::endl; + << " Unexpected dimensions: " << get<0>(Likn.sizes()) << " " << get<1>(Likn.sizes()) << std::endl; APP_ABORT(""); } dump.pop(); diff --git a/src/AFQMC/Hamiltonians/RealDenseHamiltonian_v2.cpp b/src/AFQMC/Hamiltonians/RealDenseHamiltonian_v2.cpp index 29b4422def..1daa9193af 100644 --- a/src/AFQMC/Hamiltonians/RealDenseHamiltonian_v2.cpp +++ b/src/AFQMC/Hamiltonians/RealDenseHamiltonian_v2.cpp @@ -175,11 +175,12 @@ HamiltonianOperations RealDenseHamiltonian_v2::getHamiltonianOperations(bool pur << " Problems reading /Hamiltonian/DenseFactorized/L. \n"; APP_ABORT(""); } - if (std::get<0>(Likn.sizes()) != NMO * NMO || std::get<1>(Likn.sizes()) != local_ncv) + using std::get; + if (get<0>(Likn.sizes()) != NMO * NMO || get<1>(Likn.sizes()) != local_ncv) { app_error() << " Error in RealDenseHamiltonian_v2::getHamiltonianOperations():" << " Problems reading /Hamiltonian/DenseFactorized/L. 
\n" - << " Unexpected dimensins: " << std::get<0>(Likn.sizes()) << " " << std::get<1>(Likn.sizes()) << std::endl; + << " Unexpected dimensins: " << get<0>(Likn.sizes()) << " " << get<1>(Likn.sizes()) << std::endl; APP_ABORT(""); } dump.pop(); diff --git a/src/AFQMC/Hamiltonians/THCHamiltonian.cpp b/src/AFQMC/Hamiltonians/THCHamiltonian.cpp index a72d6674fc..131e6c177a 100644 --- a/src/AFQMC/Hamiltonians/THCHamiltonian.cpp +++ b/src/AFQMC/Hamiltonians/THCHamiltonian.cpp @@ -234,7 +234,9 @@ HamiltonianOperations THCHamiltonian::getHamiltonianOperations(bool pureSD, auto itT = Tuv.origin(); for (size_t i = 0; i < Muv.num_elements(); ++i, ++itT, ++itM) *(itT) = ma::conj(*itT) * (*itM); - boost::multi::array T_({static_cast(std::get<1>(Tuv.sizes())), NMO}); + + using std::get; + boost::multi::array T_({static_cast(get<1>(Tuv.sizes())), NMO}); ma::product(T(Tuv), H(Piu__), T_); ma::product(SPValueType(-0.5), T(T_), T(Piu__({0, long(NMO)}, {long(c0), long(cN)})), SPValueType(0.0), v0_); @@ -285,7 +287,9 @@ HamiltonianOperations THCHamiltonian::getHamiltonianOperations(bool pureSD, auto itT = Tuv.origin(); for (size_t i = 0; i < Muv.num_elements(); ++i, ++itT, ++itM) *(itT) = ma::conj(*itT) * (*itM); - boost::multi::array T_({static_cast(std::get<1>(Tuv.sizes())), NMO}); + + using std::get; + boost::multi::array T_({static_cast(get<1>(Tuv.sizes())), NMO}); ma::product(T(Tuv), H(Piu), T_); ma::product(SPValueType(-0.5), T(T_), T(Piu({0, long(NMO)}, {long(c0), long(cN)})), SPValueType(0.0), v0_); diff --git a/src/AFQMC/Hamiltonians/rotateHamiltonian.hpp b/src/AFQMC/Hamiltonians/rotateHamiltonian.hpp index f2c8a7162e..906d3e468a 100644 --- a/src/AFQMC/Hamiltonians/rotateHamiltonian.hpp +++ b/src/AFQMC/Hamiltonians/rotateHamiltonian.hpp @@ -773,6 +773,7 @@ inline void rotateHijkl_single_node(std::string& type, std::fill_n(Rl.origin(), Rl.num_elements(), SPComplexType(0.0)); { + using std::get; // Q(k,a,n) = sum_i ma::conj(Amat(i,a)) * V2_fact(ik,n) // R(l,a,n) = sum_i 
ma::conj(Amat(i,a)) * ma::conj(V2_fact(li,n)) @@ -798,7 +799,7 @@ inline void rotateHijkl_single_node(std::string& type, { // Qk[norb*NAEA,nvec] // Rl[nvec,norb*NAEA] - int n0_, n1_, sz_ = std::get<0>(Qk.sizes()); + int n0_, n1_, sz_ = get<0>(Qk.sizes()); std::tie(n0_, n1_) = FairDivideBoundary(coreid, sz_, ncores); if (n1_ - n0_ > 0) ma::transpose(Qk.sliced(n0_, n1_), Rl(Rl.extension(0), {n0_, n1_})); diff --git a/src/AFQMC/Hamiltonians/rotateHamiltonian_Helper2.hpp b/src/AFQMC/Hamiltonians/rotateHamiltonian_Helper2.hpp index c1624fd245..e97c57a66a 100644 --- a/src/AFQMC/Hamiltonians/rotateHamiltonian_Helper2.hpp +++ b/src/AFQMC/Hamiltonians/rotateHamiltonian_Helper2.hpp @@ -50,10 +50,12 @@ inline void count_Qk_x_Rl(WALKER_TYPES walker_type, MatTa&& Ta, const SPRealType cut) { + using std::get; + using Type = typename std::decay::type::element; - assert(std::get<0>(Qk.sizes()) == std::get<0>(Ta.sizes())); - assert(std::get<1>(Qk.sizes()) == std::get<0>(Rl.sizes())); - assert(std::get<1>(Rl.sizes()) == std::get<1>(Rl.sizes())); + assert(get<0>(Qk.sizes()) == get<0>(Ta.sizes())); + assert(get<1>(Qk.sizes()) == get<0>(Rl.sizes())); + assert(get<1>(Rl.sizes()) == get<1>(Rl.sizes())); int ncores = TG.getTotalCores(), coreid = TG.getCoreID(); bool amIAlpha = true; @@ -61,13 +63,13 @@ inline void count_Qk_x_Rl(WALKER_TYPES walker_type, amIAlpha = false; int bl0 = -1, blN = -1; - int nwork = std::min(int(std::get<1>(Rl.sizes())), ncores); + int nwork = std::min(int(get<1>(Rl.sizes())), ncores); if (coreid < nwork) - std::tie(bl0, blN) = FairDivideBoundary(coreid, int(std::get<1>(Rl.sizes())), nwork); + std::tie(bl0, blN) = FairDivideBoundary(coreid, int(get<1>(Rl.sizes())), nwork); int ka0 = -1, kaN = -1; - nwork = std::min(int(std::get<0>(Qk.sizes())), ncores); + nwork = std::min(int(get<0>(Qk.sizes())), ncores); if (coreid < nwork) - std::tie(ka0, kaN) = FairDivideBoundary(coreid, int(std::get<0>(Qk.sizes())), nwork); + std::tie(ka0, kaN) = FairDivideBoundary(coreid, 
int(get<0>(Qk.sizes())), nwork); Type four(4.0); Type two(2.0); @@ -210,10 +212,12 @@ inline void Qk_x_Rl(WALKER_TYPES walker_type, Container& Vijkl, const SPRealType cut) { + using std::get; + using Type = typename std::decay::type::element; - assert(std::get<0>(Qk.sizes()) == std::get<0>(Ta.sizes())); - assert(std::get<1>(Qk.sizes()) == std::get<0>(Rl.sizes())); - assert(std::get<1>(Rl.sizes()) == std::get<1>(Rl.sizes())); + assert(get<0>(Qk.sizes()) == get<0>(Ta.sizes())); + assert(get<1>(Qk.sizes()) == get<0>(Rl.sizes())); + assert(get<1>(Rl.sizes()) == get<1>(Rl.sizes())); int ncores = TG.getTotalCores(), coreid = TG.getCoreID(); bool amIAlpha = true; @@ -222,12 +226,12 @@ inline void Qk_x_Rl(WALKER_TYPES walker_type, int bl0 = -1, blN = -1; int ka0 = -1, kaN = -1; - int nwork = std::min(int(std::get<1>(Rl.sizes())), ncores); + int nwork = std::min(int(get<1>(Rl.sizes())), ncores); if (coreid < nwork) - std::tie(bl0, blN) = FairDivideBoundary(coreid, int(std::get<1>(Rl.sizes())), nwork); - nwork = std::min(int(std::get<0>(Qk.sizes())), ncores); + std::tie(bl0, blN) = FairDivideBoundary(coreid, int(get<1>(Rl.sizes())), nwork); + nwork = std::min(int(get<0>(Qk.sizes())), ncores); if (coreid < nwork) - std::tie(ka0, kaN) = FairDivideBoundary(coreid, int(std::get<0>(Qk.sizes())), nwork); + std::tie(ka0, kaN) = FairDivideBoundary(coreid, int(get<0>(Qk.sizes())), nwork); Type four(4.0); Type two(2.0); diff --git a/src/AFQMC/Matrix/csr_matrix_construct.hpp b/src/AFQMC/Matrix/csr_matrix_construct.hpp index c1d1942906..3ac0f41663 100644 --- a/src/AFQMC/Matrix/csr_matrix_construct.hpp +++ b/src/AFQMC/Matrix/csr_matrix_construct.hpp @@ -56,12 +56,14 @@ CSR construct_csr_matrix_single_input(MultiArray2D&& M, double cutoff, char TA, std::vector counts; using int_type = typename CSR::index_type; int_type nr, nc; + + using std::get; if (comm.rank() == 0) { if (TA == 'N') { - nr = std::get<0>(M.sizes()); - nc = std::get<1>(M.sizes()); + nr = get<0>(M.sizes()); + nc = 
get<1>(M.sizes()); counts.resize(nr); for (int_type i = 0; i < nr; i++) for (int_type j = 0; j < nc; j++) @@ -70,11 +72,11 @@ CSR construct_csr_matrix_single_input(MultiArray2D&& M, double cutoff, char TA, } else { - nr = std::get<1>(M.sizes()); - nc = std::get<0>(M.sizes()); + nr = get<1>(M.sizes()); + nc = get<0>(M.sizes()); counts.resize(nr); - for (int_type i = 0; i < std::get<0>(M.sizes()); i++) - for (int_type j = 0; j < std::get<1>(M.sizes()); j++) + for (int_type i = 0; i < get<0>(M.sizes()); i++) + for (int_type j = 0; j < get<1>(M.sizes()); j++) if (std::abs(M[i][j]) > cutoff) ++counts[j]; } @@ -88,6 +90,7 @@ CSR construct_csr_matrix_single_input(MultiArray2D&& M, double cutoff, char TA, CSR csr_mat(std::tuple{nr, nc}, std::tuple{0, 0}, counts, qmcplusplus::afqmc::shared_allocator(comm)); + using std::get; if (comm.rank() == 0) { if (TA == 'N') @@ -99,15 +102,15 @@ CSR construct_csr_matrix_single_input(MultiArray2D&& M, double cutoff, char TA, } else if (TA == 'T') { - for (int_type i = 0; i < std::get<1>(M.sizes()); i++) - for (int_type j = 0; j < std::get<0>(M.sizes()); j++) + for (int_type i = 0; i < get<1>(M.sizes()); i++) + for (int_type j = 0; j < get<0>(M.sizes()); j++) if (std::abs(M[j][i]) > cutoff) csr_mat.emplace_back({i, j}, static_cast(M[j][i])); } else if (TA == 'H') { - for (int_type i = 0; i < std::get<1>(M.sizes()); i++) - for (int_type j = 0; j < std::get<0>(M.sizes()); j++) + for (int_type i = 0; i < get<1>(M.sizes()); i++) + for (int_type j = 0; j < get<0>(M.sizes()); j++) if (std::abs(M[j][i]) > cutoff) csr_mat.emplace_back({i, j}, static_cast(ma::conj(M[j][i]))); } diff --git a/src/AFQMC/Matrix/ma_hdf5_readers.hpp b/src/AFQMC/Matrix/ma_hdf5_readers.hpp index 91129b41bb..acc95952ba 100644 --- a/src/AFQMC/Matrix/ma_hdf5_readers.hpp +++ b/src/AFQMC/Matrix/ma_hdf5_readers.hpp @@ -64,6 +64,8 @@ inline void write_distributed_MA(MultiArray& A, // data distribution depends on whether we have devices or not! 
int TG_number(TG.getTGNumber()); int TG_local_rank(TG.TG_local().rank()); + + using std::get; // assumes that Global.root() lives in TG_number if (TG.Global().root()) { @@ -71,15 +73,15 @@ inline void write_distributed_MA(MultiArray& A, std::vector ndim(4 * nnodes_per_TG); ndim[0] = offset[0]; ndim[1] = offset[1]; - ndim[2] = std::get<0>(A.sizes()); - ndim[3] = std::get<1>(A.sizes()); + ndim[2] = get<0>(A.sizes()); + ndim[3] = get<1>(A.sizes()); TG.TG_Cores().all_reduce_in_place_n(ndim.begin(), ndim.size(), std::plus<>()); // write local piece { using Mat_ref = boost::multi::array_ref; Mat_ref A_(to_address(A.origin()), A.extensions()); - hyperslab_proxy slab(A_, gdim, std::array{size_t(std::get<0>(A.sizes())), size_t(std::get<1>(A.sizes()))}, offset); + hyperslab_proxy slab(A_, gdim, std::array{size_t(get<0>(A.sizes())), size_t(get<1>(A.sizes()))}, offset); dump.write(slab, name); } @@ -101,8 +103,8 @@ inline void write_distributed_MA(MultiArray& A, // all tasks on the TG have a section of the matrix ndim[4 * TG.TG_Cores().rank()] = offset[0]; ndim[4 * TG.TG_Cores().rank() + 1] = offset[1]; - ndim[4 * TG.TG_Cores().rank() + 2] = std::get<0>(A.sizes()); - ndim[4 * TG.TG_Cores().rank() + 3] = std::get<1>(A.sizes()); + ndim[4 * TG.TG_Cores().rank() + 2] = get<0>(A.sizes()); + ndim[4 * TG.TG_Cores().rank() + 3] = get<1>(A.sizes()); TG.TG_Cores().all_reduce_in_place_n(ndim.begin(), ndim.size(), std::plus<>()); TG.TG_Cores().send_n(to_address(A.origin()), A.num_elements(), 0, TG.TG_Cores().rank()); } diff --git a/src/AFQMC/Matrix/tests/matrix_helpers.h b/src/AFQMC/Matrix/tests/matrix_helpers.h index 6cf3b31db8..2c43f070bc 100644 --- a/src/AFQMC/Matrix/tests/matrix_helpers.h +++ b/src/AFQMC/Matrix/tests/matrix_helpers.h @@ -46,8 +46,10 @@ void verify_approx(M1 const& A, M2 const& B) // casting in case operator[] returns a fancy reference using element1 = typename std::decay::type::element; using element2 = typename std::decay::type::element; - 
REQUIRE(std::get<0>(A.sizes()) == std::get<0>(B.sizes())); - for (int i = 0; i < std::get<0>(A.sizes()); i++) + + using std::get; + REQUIRE(get<0>(A.sizes()) == get<0>(B.sizes())); + for (int i = 0; i < get<0>(A.sizes()); i++) myCHECK(element1(A[i]), element2(B[i])); } diff --git a/src/AFQMC/Numerics/csr_blas.hpp b/src/AFQMC/Numerics/csr_blas.hpp index a9ed507cca..1d5d8d7858 100644 --- a/src/AFQMC/Numerics/csr_blas.hpp +++ b/src/AFQMC/Numerics/csr_blas.hpp @@ -41,8 +41,10 @@ template::type::dimensionality == 1>::type> MultiArray1D axpy(char TA, T a, SparseArray1D&& x, MultiArray1D&& y) { + using std::get; + using ma::conj; - assert(std::get<0>(x.sizes()) == std::get<0>(y.sizes())); + assert(get<0>(x.sizes()) == get<0>(y.sizes())); auto vals = x.non_zero_values_data(); auto cols = x.non_zero_indices2_data(); if (TA == 'C') @@ -378,8 +380,11 @@ MultiArray2D transpose(csr_matrix&& A, MultiArray2D&& AT) { using integer = typename std::decay::type::index_type; using Type = typename std::decay::type::element; - assert(std::get<0>(A.sizes()) == std::get<1>(AT.sizes())); - assert(std::get<1>(A.sizes()) == std::get<0>(AT.sizes())); + + using std::get; + + assert(get<0>(A.sizes()) == get<1>(AT.sizes())); + assert(get<1>(A.sizes()) == get<0>(AT.sizes())); auto& comm = *A.getAlloc().commP_; integer r0, rN, nrows = integer(A.size(0)); integer rank = comm.rank(), size = comm.size(); diff --git a/src/AFQMC/Numerics/ma_blas.hpp b/src/AFQMC/Numerics/ma_blas.hpp index 2d055fd743..71b53ca85a 100644 --- a/src/AFQMC/Numerics/ma_blas.hpp +++ b/src/AFQMC/Numerics/ma_blas.hpp @@ -48,14 +48,16 @@ MultiArray2DY&& copy(MultiArray2DX&& x, MultiArray2DY&& y) assert(x.stride(1) == 1); assert(y.stride(1) == 1); assert(x.size() == y.size()); - assert(std::get<1>(x.sizes()) == std::get<1>(y.sizes())); - if ((x.stride() == std::get<1>(x.sizes())) && (y.stride() == std::get<1>(y.sizes()))) + + using std::get; + assert(get<1>(x.sizes()) == get<1>(y.sizes())); + if ((x.stride() == get<1>(x.sizes())) 
&& (y.stride() == get<1>(y.sizes()))) { copy(x.num_elements(), pointer_dispatch(x.origin()), 1, pointer_dispatch(y.origin()), 1); } else { - copy2D(x.size(), std::get<1>(x.sizes()), pointer_dispatch(x.origin()), x.stride(), pointer_dispatch(y.origin()), y.stride()); + copy2D(x.size(), get<1>(x.sizes()), pointer_dispatch(x.origin()), x.stride(), pointer_dispatch(y.origin()), y.stride()); } return std::forward(y); } @@ -68,18 +70,19 @@ template MultiArrayNDY&& copy(MultiArrayNDX&& x, MultiArrayNDY&& y) { + using std::get; #ifndef NDEBUG // only on contiguous arrays // long sz(x.size()); // for (int i = 1; i < int(std::decay::type::dimensionality); ++i) // sz *= x.size(i); // assert(x.num_elements() == sz); - assert(std::get::type::dimensionality - 1>(x.strides()) == 1); + assert(get::type::dimensionality - 1>(x.strides()) == 1); // sz = y.size(); // for (int i = 1; i < int(std::decay::type::dimensionality); ++i) // sz *= y.size(i); // assert(y.num_elements() == sz); - assert(std::get::type::dimensionality - 1>(y.strides()) == 1); + assert(get::type::dimensionality - 1>(y.strides()) == 1); assert(x.num_elements() == y.num_elements()); #endif copy(x.num_elements(), pointer_dispatch(x.origin()), 1, pointer_dispatch(y.origin()), 1); @@ -104,9 +107,10 @@ template typename std::decay::type::element dot(MultiArray2Dx&& x, MultiArray2Dy&& y) { - assert(x.stride() == std::get<1>(x.sizes())); // only on contiguous arrays + using std::get; + assert(x.stride() == get<1>(x.sizes())); // only on contiguous arrays assert(x.stride(1) == 1); // only on contiguous arrays - assert(y.stride() == std::get<1>(y.sizes())); // only on contiguous arrays + assert(y.stride() == get<1>(y.sizes())); // only on contiguous arrays assert(y.stride(1) == 1); // only on contiguous arrays assert(x.num_elements() == y.num_elements()); return dot(x.num_elements(), pointer_dispatch(x.origin()), 1, pointer_dispatch(y.origin()), 1); @@ -133,7 +137,8 @@ MultiArrayND&& scal(T a, MultiArrayND&& x) // for (int 
i = 1; i < int(std::decay::type::dimensionality); ++i) // sz *= x.size(i); // assert(x.num_elements() == sz); - assert(std::get::type::dimensionality - 1>(x.strides()) == 1); // only on contiguous arrays + using std::get; + assert(get::type::dimensionality - 1>(x.strides()) == 1); // only on contiguous arrays #endif scal(x.num_elements(), a, pointer_dispatch(x.origin()), 1); return std::forward(x); @@ -180,10 +185,11 @@ template MultiArray2DB&& axpy(T x, MultiArray2DA const& a, MultiArray2DB&& b) { + using std::get; assert(a.num_elements() == b.num_elements()); - assert(a.stride() == std::get<1>(a.sizes())); // only on contiguous arrays + assert(a.stride() == get<1>(a.sizes())); // only on contiguous arrays assert(a.stride(1) == 1); // only on contiguous arrays - assert(b.stride() == std::get<1>(b.sizes())); // only on contiguous arrays + assert(b.stride() == get<1>(b.sizes())); // only on contiguous arrays assert(b.stride(1) == 1); // only on contiguous arrays axpy(a.num_elements(), x, pointer_dispatch(a.origin()), 1, pointer_dispatch(b.origin()), 1); return std::forward(b); @@ -199,13 +205,14 @@ template< std::decay::type::dimensionality == 1>::type> MultiArray1DY&& gemv(T alpha, MultiArray2DA const& A, MultiArray1DX const& x, T beta, MultiArray1DY&& y) { + using std::get; assert((IN == 'N') || (IN == 'T') || (IN == 'C')); if (IN == 'T' or IN == 'C') - assert(x.size() == std::get<1>(A.sizes()) and y.size() == A.size()); + assert(x.size() == get<1>(A.sizes()) and y.size() == A.size()); else if (IN == 'N') - assert(x.size() == A.size() and y.size() == std::get<1>(A.sizes())); + assert(x.size() == A.size() and y.size() == get<1>(A.sizes())); assert(A.stride(1) == 1); // gemv is not implemented for arrays with non-leading stride != 1 - int M = std::get<1>(A.sizes()); + int M = get<1>(A.sizes()); int N = A.size(); gemv(IN, M, N, alpha, pointer_dispatch(A.origin()), A.stride(), pointer_dispatch(x.origin()), x.stride(), beta, pointer_dispatch(y.origin()), y.stride()); 
@@ -218,10 +225,10 @@ MultiArray1DY&& gemv(MultiArray2DA const& A, MultiArray1DX const& x, MultiArray1 return gemv(1., A, x, 0., std::forward(y)); } //y := alpha*A*x -// gemm<'T', 'T'>(1., A, B, 0., C); // C = T(A*B) = T(B)*T(A) or T(C) = A*B -// gemm<'N', 'N'>(1., A, B, 0., C); // C = B*A = T(T(A)*T(B)) or T(C) = T(A)*T(B) -// gemm<'T', 'N'>(1., A, B, 0., C); // C = T(A*T(B)) = B*T(A) or T(C) = A*T(B) -// gemm<'N', 'T'>(1., A, B, 0., C); // C = T(T(A)*B) = T(B)*A or T(C) = T(A)*B +// gemm<'T', 'T'>(1., A, B, 0., C); // C = T(A*B) = T(B)*T(A) or T(C) = A*B +// gemm<'N', 'N'>(1., A, B, 0., C); // C = B*A = T(T(A)*T(B)) or T(C) = T(A)*T(B) +// gemm<'T', 'N'>(1., A, B, 0., C); // C = T(A*T(B)) = B*T(A) or T(C) = A*T(B) +// gemm<'N', 'T'>(1., A, B, 0., C); // C = T(T(A)*B) = T(B)*A or T(C) = T(A)*B template< char TA, @@ -242,33 +249,34 @@ MultiArray2DC&& gemm(T alpha, MultiArray2DA const& a, MultiArray2DB const& b, T int M = -1; int N = -1; int K = -1; + using std::get; if (TA == 'N' and TB == 'N') { - M = std::get<1>(a.sizes()); + M = get<1>(a.sizes()); N = b.size(); K = a.size(); - assert(a.size() == std::get<1>(b.sizes()) and c.size() == b.size() and std::get<1>(c.sizes()) == std::get<1>(a.sizes())); + assert(a.size() == get<1>(b.sizes()) and c.size() == b.size() and get<1>(c.sizes()) == get<1>(a.sizes())); } if ((TA == 'T' or TA == 'C') and (TB == 'T' or TB == 'C')) { M = a.size(); - N = std::get<1>(b.sizes()); - K = std::get<1>(a.sizes()); - assert(std::get<1>(a.sizes()) == b.size() and c.size() == std::get<1>(b.sizes()) and std::get<1>(c.sizes()) == a.size()); + N = get<1>(b.sizes()); + K = get<1>(a.sizes()); + assert(get<1>(a.sizes()) == b.size() and c.size() == get<1>(b.sizes()) and get<1>(c.sizes()) == a.size()); } if ((TA == 'T' or TA == 'C') and TB == 'N') { M = a.size(); N = b.size(); - K = std::get<1>(a.sizes()); - assert(std::get<1>(a.sizes()) == std::get<1>(b.sizes()) and c.size() == b.size() and std::get<1>(c.sizes()) == a.size()); + K = 
get<1>(a.sizes()); + assert(get<1>(a.sizes()) == get<1>(b.sizes()) and c.size() == b.size() and get<1>(c.sizes()) == a.size()); } if (TA == 'N' and (TB == 'T' or TB == 'C')) { - M = std::get<1>(a.sizes()); - N = std::get<1>(b.sizes()); + M = get<1>(a.sizes()); + N = get<1>(b.sizes()); K = a.size(); - assert(a.size() == b.size() and c.size() == std::get<1>(b.sizes()) and std::get<1>(c.sizes()) == std::get<1>(a.sizes())); + assert(a.size() == b.size() and c.size() == get<1>(b.sizes()) and get<1>(c.sizes()) == get<1>(a.sizes())); } gemm(TA, TB, M, N, K, alpha, pointer_dispatch(a.origin()), a.stride(), pointer_dispatch(b.origin()), b.stride(), beta, pointer_dispatch(c.origin()), c.stride()); @@ -297,33 +305,35 @@ MultiArray3DC&& gemmStridedBatched(T alpha, MultiArray3DA const& a, MultiArray3D int M = -1; int N = -1; int K = -1; + + using std::get; if (TA == 'N' and TB == 'N') { - M = std::get<2>(a.sizes()); - N = std::get<1>(b.sizes()); - K = std::get<1>(a.sizes()); - assert(std::get<1>(a.sizes()) == std::get<2>(b.sizes()) and std::get<1>(c.sizes()) == std::get<1>(b.sizes()) and std::get<2>(c.sizes()) == std::get<2>(a.sizes())); + M = get<2>(a.sizes()); + N = get<1>(b.sizes()); + K = get<1>(a.sizes()); + assert(get<1>(a.sizes()) == get<2>(b.sizes()) and get<1>(c.sizes()) == get<1>(b.sizes()) and get<2>(c.sizes()) == get<2>(a.sizes())); } if ((TA == 'T' or TA == 'C') and (TB == 'T' or TB == 'C')) { - M = std::get<1>(a.sizes()); - N = std::get<2>(b.sizes()); - K = std::get<2>(a.sizes()); - assert(std::get<2>(a.sizes()) == std::get<1>(b.sizes()) and std::get<1>(c.sizes()) == std::get<2>(b.sizes()) and std::get<2>(c.sizes()) == std::get<1>(a.sizes())); + M = get<1>(a.sizes()); + N = get<2>(b.sizes()); + K = get<2>(a.sizes()); + assert(get<2>(a.sizes()) == get<1>(b.sizes()) and get<1>(c.sizes()) == get<2>(b.sizes()) and get<2>(c.sizes()) == get<1>(a.sizes())); } if ((TA == 'T' or TA == 'C') and TB == 'N') { - M = std::get<1>(a.sizes()); - N = std::get<1>(b.sizes()); - K = 
std::get<2>(a.sizes()); - assert(std::get<2>(a.sizes()) == std::get<2>(b.sizes()) and std::get<1>(c.sizes()) == std::get<1>(b.sizes()) and std::get<2>(c.sizes()) == std::get<1>(a.sizes())); + M = get<1>(a.sizes()); + N = get<1>(b.sizes()); + K = get<2>(a.sizes()); + assert(get<2>(a.sizes()) == get<2>(b.sizes()) and get<1>(c.sizes()) == get<1>(b.sizes()) and get<2>(c.sizes()) == get<1>(a.sizes())); } if (TA == 'N' and (TB == 'T' or TB == 'C')) { - M = std::get<2>(a.sizes()); - N = std::get<2>(b.sizes()); - K = std::get<1>(a.sizes()); - assert(std::get<1>(a.sizes()) == std::get<1>(b.sizes()) and std::get<1>(c.sizes()) == std::get<2>(b.sizes()) and std::get<2>(c.sizes()) == std::get<2>(a.sizes())); + M = get<2>(a.sizes()); + N = get<2>(b.sizes()); + K = get<1>(a.sizes()); + assert(get<1>(a.sizes()) == get<1>(b.sizes()) and get<1>(c.sizes()) == get<2>(b.sizes()) and get<2>(c.sizes()) == get<2>(a.sizes())); } gemmStridedBatched(TA, TB, M, N, K, alpha, pointer_dispatch(a.origin()), a.stride(1), a.stride(), pointer_dispatch(b.origin()), b.stride(1), b.stride(), beta, pointer_dispatch(c.origin()), @@ -353,27 +363,29 @@ MultiArray2DC&& geam(T alpha, MultiArray2DA const& a, T beta, MultiArray2DB cons assert(c.stride(1) == 1); assert((TA == 'N') || (TA == 'T') || (TA == 'C')); assert((TB == 'N') || (TB == 'T') || (TB == 'C')); + + using std::get; if (TA == 'N' and TB == 'N') { - assert(a.size() == c.size() and std::get<1>(a.sizes()) == std::get<1>(c.sizes())); - assert(b.size() == c.size() and std::get<1>(b.sizes()) == std::get<1>(c.sizes())); + assert(a.size() == c.size() and get<1>(a.sizes()) == get<1>(c.sizes())); + assert(b.size() == c.size() and get<1>(b.sizes()) == get<1>(c.sizes())); } if ((TA == 'T' or TA == 'C') and (TB == 'T' or TB == 'C')) { - assert(std::get<1>(a.sizes()) == c.size() and a.size() == std::get<1>(c.sizes())); - assert(std::get<1>(b.sizes()) == c.size() and b.size() == std::get<1>(c.sizes())); + assert(get<1>(a.sizes()) == c.size() and a.size() == 
get<1>(c.sizes())); + assert(get<1>(b.sizes()) == c.size() and b.size() == get<1>(c.sizes())); } if ((TA == 'T' or TA == 'C') and TB == 'N') { - assert(std::get<1>(a.sizes()) == c.size() and a.size() == std::get<1>(c.sizes())); - assert(b.size() == c.size() and std::get<1>(b.sizes()) == std::get<1>(c.sizes())); + assert(get<1>(a.sizes()) == c.size() and a.size() == get<1>(c.sizes())); + assert(b.size() == c.size() and get<1>(b.sizes()) == get<1>(c.sizes())); } if (TA == 'N' and (TB == 'T' or TB == 'C')) { - assert(a.size() == c.size() and std::get<1>(a.sizes()) == std::get<1>(c.sizes())); - assert(std::get<1>(b.sizes()) == c.size() and b.size() == std::get<1>(c.sizes())); + assert(a.size() == c.size() and get<1>(a.sizes()) == get<1>(c.sizes())); + assert(get<1>(b.sizes()) == c.size() and b.size() == get<1>(c.sizes())); } - geam(TA, TB, std::get<1>(c.sizes()), c.size(), alpha, pointer_dispatch(a.origin()), a.stride(), beta, + geam(TA, TB, get<1>(c.sizes()), c.size(), alpha, pointer_dispatch(a.origin()), a.stride(), beta, pointer_dispatch(b.origin()), b.stride(), pointer_dispatch(c.origin()), c.stride()); return std::forward(c); } @@ -389,15 +401,18 @@ MultiArray2DC&& geam(T alpha, MultiArray2DA const& a, MultiArray2DC&& c) assert(a.stride(1) == 1); assert(c.stride(1) == 1); assert((TA == 'N') || (TA == 'T') || (TA == 'C')); + + using std::get; if (TA == 'N') { - assert(a.size() == c.size() and std::get<1>(a.sizes()) == std::get<1>(c.sizes())); + assert(a.size() == c.size() and get<1>(a.sizes()) == get<1>(c.sizes())); } if ((TA == 'T' or TA == 'C')) { - assert(std::get<1>(a.sizes()) == c.size() and a.size() == std::get<1>(c.sizes())); + assert(get<1>(a.sizes()) == c.size() and a.size() == get<1>(c.sizes())); } - geam(TA, TA, std::get<1>(c.sizes()), c.size(), alpha, pointer_dispatch(a.origin()), a.stride(), T(0), + using std::get; + geam(TA, TA, get<1>(c.sizes()), c.size(), alpha, pointer_dispatch(a.origin()), a.stride(), T(0), pointer_dispatch(a.origin()), 
a.stride(), pointer_dispatch(c.origin()), c.stride()); return std::forward(c); } diff --git a/src/AFQMC/Numerics/ma_blas_extensions.hpp b/src/AFQMC/Numerics/ma_blas_extensions.hpp index 83675608aa..b88c00d87c 100644 --- a/src/AFQMC/Numerics/ma_blas_extensions.hpp +++ b/src/AFQMC/Numerics/ma_blas_extensions.hpp @@ -51,10 +51,11 @@ template::type::dimensionality == 1>::type> void adotpby(T const alpha, MultiArray2Dx const& x, MultiArray2Dy const& y, Q const beta, MultiArray1D res) { - if (std::get<0>(x.sizes()) != std::get<0>(y.sizes()) || std::get<0>(x.sizes()) != std::get<0>(res.sizes()) || std::get<1>(x.sizes()) != std::get<1>(y.sizes()) || x.stride(1) != 1 || + using std::get; + if (get<0>(x.sizes()) != get<0>(y.sizes()) || get<0>(x.sizes()) != get<0>(res.sizes()) || get<1>(x.sizes()) != get<1>(y.sizes()) || x.stride(1) != 1 || y.stride(1) != 1) throw std::runtime_error(" Error: Inconsistent matrix dimensions in adotpby(2D).\n"); - strided_adotpby(std::get<0>(x.sizes()), std::get<1>(x.sizes()), alpha, pointer_dispatch(x.origin()), x.stride(0), pointer_dispatch(y.origin()), + strided_adotpby(get<0>(x.sizes()), get<1>(x.sizes()), alpha, pointer_dispatch(x.origin()), x.stride(0), pointer_dispatch(y.origin()), y.stride(0), beta, to_address(res.origin()), res.stride(0)); } @@ -78,11 +79,12 @@ template MultiArray2DB&& axty(T const alpha, MultiArray2DA const& A, MultiArray2DB&& B) { + using std::get; assert(A.num_elements() == B.num_elements()); assert(A.stride(1) == 1); - assert(A.stride(0) == std::get<1>(A.sizes())); + assert(A.stride(0) == get<1>(A.sizes())); assert(B.stride(1) == 1); - assert(B.stride(0) == std::get<1>(B.sizes())); + assert(B.stride(0) == get<1>(B.sizes())); axty(A.num_elements(), alpha, pointer_dispatch(A.origin()), 1, pointer_dispatch(B.origin()), 1); return B; } @@ -98,11 +100,12 @@ template::type::dimensionality == 2)>> MultiArray2DB&& acAxpbB(T const alpha, MultiArray2DA const& A, MultiArray1D const& x, T const beta, MultiArray2DB&& B) { + 
using std::get; assert(A.num_elements() == B.num_elements()); - assert(std::get<0>(A.sizes()) == std::get<0>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<1>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<0>(x.sizes())); - acAxpbB(std::get<1>(A.sizes()), std::get<0>(A.sizes()), alpha, pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(x.origin()), + assert(get<0>(A.sizes()) == get<0>(B.sizes())); + assert(get<1>(A.sizes()) == get<1>(B.sizes())); + assert(get<1>(A.sizes()) == get<0>(x.sizes())); + acAxpbB(get<1>(A.sizes()), get<0>(A.sizes()), alpha, pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(x.origin()), x.stride(0), beta, pointer_dispatch(B.origin()), B.stride(0)); return B; } @@ -114,8 +117,9 @@ template::type::dimensionality == 1>::type> MultiArray1Dy&& adiagApy(T const alpha, MultiArray2DA const& A, MultiArray1Dy&& y) { - assert(std::get<0>(A.sizes()) == std::get<1>(A.sizes())); - assert(std::get<0>(A.sizes()) == y.size()); + using std::get; + assert(get<0>(A.sizes()) == get<1>(A.sizes())); + assert(get<0>(A.sizes()) == y.size()); adiagApy(y.size(), alpha, pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(y.origin()), y.stride(0)); return y; } @@ -133,8 +137,9 @@ template(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0)); + return sum(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0)); } template auto sum(MultiArray3D const& A) { + using std::get; // only arrays and array_refs for now - assert(A.stride(0) == std::get<1>(A.sizes()) * std::get<2>(A.sizes())); - assert(A.stride(1) == std::get<2>(A.sizes())); + assert(A.stride(0) == get<1>(A.sizes()) * get<2>(A.sizes())); + assert(A.stride(1) == get<2>(A.sizes())); assert(A.stride(2) == 1); return sum(A.num_elements(), pointer_dispatch(A.origin()), 1); } @@ -157,10 +163,11 @@ template auto sum(MultiArray4D const& A) { + using std::get; // only arrays and array_refs for now - assert(A.stride(0) == 
std::get<1>(A.sizes()) * std::get<2>(A.sizes()) * std::get<3>(A.sizes())); - assert(A.stride(1) == std::get<2>(A.sizes()) * std::get<3>(A.sizes())); - assert(A.stride(2) == std::get<3>(A.sizes())); + assert(A.stride(0) == get<1>(A.sizes()) * get<2>(A.sizes()) * get<3>(A.sizes())); + assert(A.stride(1) == get<2>(A.sizes()) * get<3>(A.sizes())); + assert(A.stride(2) == get<3>(A.sizes())); assert(A.stride(3) == 1); return sum(A.num_elements(), pointer_dispatch(A.origin()), 1); } @@ -170,7 +177,8 @@ template::type::dimensionality == 1>> MultiArray1D&& setVector(T alpha, MultiArray1D&& a) { - set1D(std::get<0>(a.sizes()), alpha, pointer_dispatch(a.origin()), a.stride(0)); + using std::get; + set1D(get<0>(a.sizes()), alpha, pointer_dispatch(a.origin()), a.stride(0)); return std::forward(a); } @@ -183,7 +191,8 @@ void zero_complex_part(MultiArray1D&& a) template::type::dimensionality == 2>> MultiArray2D&& set_identity(MultiArray2D&& m) { - set_identity(std::get<1>(m.sizes()), std::get<0>(m.sizes()), pointer_dispatch(m.origin()), m.stride(0)); + using std::get; + set_identity(get<1>(m.sizes()), get<0>(m.sizes()), pointer_dispatch(m.origin()), m.stride(0)); return std::forward(m); } @@ -192,7 +201,8 @@ template MultiArray3D&& set_identity(MultiArray3D&& m) { - set_identity_strided(std::get<0>(m.sizes()), m.stride(0), std::get<2>(m.sizes()), std::get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(1)); + using std::get; + set_identity_strided(get<0>(m.sizes()), m.stride(0), get<2>(m.sizes()), get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(1)); return std::forward(m); } @@ -202,7 +212,8 @@ template(m.sizes()), std::get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(0), value); + using std::get; + fill2D(get<0>(m.sizes()), get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(0), value); return std::forward(m); } @@ -213,10 +224,11 @@ template::type::dimensionality == 2>> void get_diagonal_strided(MultiArray3D const& B, MultiArray2D&& A) { - if 
(std::get<0>(A.sizes()) != std::get<0>(B.sizes()) || std::get<1>(A.sizes()) != std::get<1>(B.sizes()) || std::get<1>(A.sizes()) != std::get<2>(B.sizes()) || A.stride(1) != 1 || + using std::get; + if (get<0>(A.sizes()) != get<0>(B.sizes()) || get<1>(A.sizes()) != get<1>(B.sizes()) || get<1>(A.sizes()) != get<2>(B.sizes()) || A.stride(1) != 1 || B.stride(2) != 1) throw std::runtime_error(" Error: Inconsistent matrix dimensions in get_diagonal_strided.\n"); - get_diagonal_strided(std::get<0>(A.sizes()), std::get<1>(A.sizes()), pointer_dispatch(B.origin()), B.stride(1), B.stride(0), + get_diagonal_strided(get<0>(A.sizes()), get<1>(A.sizes()), pointer_dispatch(B.origin()), B.stride(1), B.stride(0), pointer_dispatch(A.origin()), A.stride(0)); } @@ -226,18 +238,21 @@ template> void Matrix2MA(char TA, CSR const& A, MultiArray2D& M) { + using std::get; using Type = typename MultiArray2D::element; using int_type = typename CSR::int_type; + + using std::get; assert(TA == 'N' || TA == 'H' || TA == 'T' || TA == 'Z'); if (TA == 'N' || TA == 'Z') { - if (std::get<0>(M.sizes()) != std::get<0>(A.sizes()) or std::get<1>(M.sizes()) != std::get<1>(A.sizes())) + if (get<0>(M.sizes()) != get<0>(A.sizes()) or get<1>(M.sizes()) != get<1>(A.sizes())) M.reextent({static_cast(A.size(0)), static_cast(A.size(1))}); } else if (TA == 'T' || TA == 'H') { - if (std::get<0>(M.sizes()) != std::get<1>(A.sizes()) or std::get<1>(M.sizes()) != std::get<0>(A.sizes())) - M.reextent({static_cast(std::get<1>(A.sizes())), static_cast(std::get<0>(A.sizes()))}); + if (get<0>(M.sizes()) != get<1>(A.sizes()) or get<1>(M.sizes()) != get<0>(A.sizes())) + M.reextent({static_cast(get<1>(A.sizes())), static_cast(get<0>(A.sizes()))}); } else { @@ -287,12 +302,13 @@ template> void Matrix2MAREF(char TA, CSR const& A, MultiArray2D& M) { + using std::get; using Type = typename MultiArray2D::element; using int_type = typename CSR::int_type; assert(TA == 'N' || TA == 'H' || TA == 'T' || TA == 'Z'); - if ((TA == 'N' || TA 
== 'Z') && ((std::get<0>(M.sizes()) != std::get<0>(A.sizes())) || (std::get<1>(M.sizes()) != std::get<1>(A.sizes())))) + if ((TA == 'N' || TA == 'Z') && ((get<0>(M.sizes()) != get<0>(A.sizes())) || (get<1>(M.sizes()) != get<1>(A.sizes())))) throw std::runtime_error(" Error: Wrong dimensions in Matrix2MAREF.\n"); - else if ((TA == 'T' || TA == 'H') && ((std::get<0>(M.sizes()) != std::get<1>(A.sizes())) || (std::get<1>(M.sizes()) != std::get<0>(A.sizes())))) + else if ((TA == 'T' || TA == 'H') && ((get<0>(M.sizes()) != get<1>(A.sizes())) || (get<1>(M.sizes()) != get<0>(A.sizes())))) throw std::runtime_error(" Error: Wrong dimensions in Matrix2MAREF.\n"); using std::fill_n; fill_n(M.origin(), M.num_elements(), Type(0)); @@ -340,21 +356,25 @@ template> void Matrix2MA(char TA, CSR const& A, MultiArray2D& M, Vector const& occups) { + using std::get; + using Type = typename MultiArray2D::element; if (occups.size() == 0) throw std::runtime_error(" Error: Empty occupation array in Matrix2MA.\n"); assert(occups.size() <= A.size(0)); int nrows = occups.size(); + + using std::get; assert(TA == 'N' || TA == 'H' || TA == 'T' || TA == 'Z'); if (TA == 'N' || TA == 'Z') { - if (std::get<0>(M.sizes()) != nrows || std::get<1>(M.sizes()) != std::get<1>(A.sizes())) - M.reextent({nrows, static_cast(std::get<1>(A.sizes()))}); + if (get<0>(M.sizes()) != nrows || get<1>(M.sizes()) != get<1>(A.sizes())) + M.reextent({nrows, static_cast(get<1>(A.sizes()))}); } else if (TA == 'T' || TA == 'H') { - if (std::get<1>(M.sizes()) != nrows || std::get<0>(M.sizes()) != std::get<1>(A.sizes())) - M.reextent({static_cast(std::get<1>(A.sizes())), nrows}); + if (get<1>(M.sizes()) != nrows || get<0>(M.sizes()) != get<1>(A.sizes())) + M.reextent({static_cast(get<1>(A.sizes())), nrows}); } else throw std::runtime_error(" Error: Unknown operation in Matrix2MA.\n"); @@ -414,18 +434,20 @@ template void Matrix2MA(char TA, MA const& A, MultiArray2D& M) { + using std::get; + using Type1 = typename 
std::decay::type::element; using Type2 = typename MultiArray2D::element; assert(TA == 'N' || TA == 'H' || TA == 'T' || TA == 'Z'); if (TA == 'N' || TA == 'Z') { - if (std::get<0>(M.sizes()) != std::get<0>(A.sizes()) or std::get<1>(M.sizes()) != std::get<1>(A.sizes())) - M.reextent({std::get<0>(A.sizes()), std::get<1>(A.sizes())}); + if (get<0>(M.sizes()) != get<0>(A.sizes()) or get<1>(M.sizes()) != get<1>(A.sizes())) + M.reextent({get<0>(A.sizes()), get<1>(A.sizes())}); } else if (TA == 'T' || TA == 'H') { - if (std::get<0>(M.sizes()) != std::get<1>(A.sizes()) or std::get<1>(M.sizes()) != std::get<0>(A.sizes())) - M.reextent({std::get<1>(A.sizes()), std::get<0>(A.sizes())}); + if (get<0>(M.sizes()) != get<1>(A.sizes()) or get<1>(M.sizes()) != get<0>(A.sizes())) + M.reextent({get<1>(A.sizes()), get<0>(A.sizes())}); } else { @@ -439,34 +461,34 @@ void Matrix2MA(char TA, MA const& A, MultiArray2D& M) TA = 'C'; if (TA == 'Z') { - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = ma::conj(A[i][j]); } else if (not std::is_same::value) { if (TA == 'N') { - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = A[i][j]; } else if (TA == 'T') { - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = A[j][i]; } else if (TA == 'C') { - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = ma::conj(A[j][i]); } } else { - geam(TA, TA, std::get<1>(M.sizes()), std::get<0>(M.sizes()), Type2(1.0), 
pointer_dispatch(A.origin()), A.stride(0), Type2(0.0), + geam(TA, TA, get<1>(M.sizes()), get<0>(M.sizes()), Type2(1.0), pointer_dispatch(A.origin()), A.stride(0), Type2(0.0), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(M.origin()), M.stride(0)); } } @@ -481,14 +503,16 @@ void Matrix2MAREF(char TA, MA const& A, MultiArray2D& M) using Type1 = typename std::decay::type::element; using Type2 = typename MultiArray2D::element; assert(TA == 'N' || TA == 'H' || TA == 'T' || TA == 'Z'); + + using std::get; if (TA == 'N' || TA == 'Z') { - if (std::get<0>(M.sizes()) != std::get<0>(A.sizes()) or std::get<1>(M.sizes()) != std::get<1>(A.sizes())) + if (get<0>(M.sizes()) != get<0>(A.sizes()) or get<1>(M.sizes()) != get<1>(A.sizes())) throw std::runtime_error(" Error: Wrong dimensions in Matrix2MAREF.\n"); } else if (TA == 'T' || TA == 'H') { - if (std::get<0>(M.sizes()) != std::get<1>(A.sizes()) or std::get<1>(M.sizes()) != std::get<0>(A.sizes())) + if (get<0>(M.sizes()) != get<1>(A.sizes()) or get<1>(M.sizes()) != get<0>(A.sizes())) throw std::runtime_error(" Error: Wrong dimensions in Matrix2MAREF.\n"); } else @@ -504,34 +528,34 @@ void Matrix2MAREF(char TA, MA const& A, MultiArray2D& M) if (TA == 'Z') { // bad i gpu's - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = ma::conj(A[i][j]); } else if (not std::is_same::value) { if (TA == 'N') { - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = A[i][j]; } else if (TA == 'T') { - for (int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = A[j][i]; } else if (TA == 'C') { - for 
(int i = 0; i < std::get<0>(M.sizes()); i++) - for (int j = 0; j < std::get<1>(M.sizes()); j++) + for (int i = 0; i < get<0>(M.sizes()); i++) + for (int j = 0; j < get<1>(M.sizes()); j++) M[i][j] = ma::conj(A[j][i]); } } else { - geam(TA, TA, std::get<1>(M.sizes()), std::get<0>(M.sizes()), Type2(1.0), pointer_dispatch(A.origin()), A.stride(0), Type2(0.0), + geam(TA, TA, get<1>(M.sizes()), get<0>(M.sizes()), Type2(1.0), pointer_dispatch(A.origin()), A.stride(0), Type2(0.0), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(M.origin()), M.stride(0)); } } diff --git a/src/AFQMC/Numerics/ma_lapack.hpp b/src/AFQMC/Numerics/ma_lapack.hpp index fe93adc32b..8ee74043bd 100644 --- a/src/AFQMC/Numerics/ma_lapack.hpp +++ b/src/AFQMC/Numerics/ma_lapack.hpp @@ -31,22 +31,24 @@ int getrf_optimal_workspace_size(MultiArray2D&& A) assert(A.stride(0) > 0); assert(A.stride(1) == 1); + using std::get; int res; - getrf_bufferSize(std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), res); + getrf_bufferSize(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), res); return res; } template MultiArray2D&& getrf(MultiArray2D&& m, Array1D& pivot, Buffer&& WORK) { - assert(m.stride(0) >= std::max(std::size_t(1), std::size_t(std::get<1>(m.sizes())))); + using std::get; + assert(m.stride(0) >= std::max(std::size_t(1), std::size_t(get<1>(m.sizes())))); assert(m.stride(1) == 1); - assert(pivot.size() >= std::min(std::get<1>(m.sizes()), std::get<0>(m.sizes()) + 1)); + assert(pivot.size() >= std::min(get<1>(m.sizes()), get<0>(m.sizes()) + 1)); int status = -1; - getrf(std::get<1>(m.sizes()), std::get<0>(m.sizes()), pointer_dispatch(m.origin()), m.stride(0), pointer_dispatch(pivot.data()), status, + getrf(get<1>(m.sizes()), get<0>(m.sizes()), pointer_dispatch(m.origin()), m.stride(0), pointer_dispatch(pivot.data()), status, pointer_dispatch(WORK.data())); - //assert(status==0); + // assert(status==0); return std::forward(m); 
} @@ -54,7 +56,9 @@ template int getri_optimal_workspace_size(MultiArray2D&& A) { assert(A.stride(1) == 1); - assert(std::get<0>(A.sizes()) ==std::get<1>(A.sizes())); + + using std::get; + assert(get<0>(A.sizes()) == get<1>(A.sizes())); int lwork = -1; getri_bufferSize(A.size(), pointer_dispatch(A.origin()), A.stride(), lwork); return lwork; @@ -63,7 +67,7 @@ int getri_optimal_workspace_size(MultiArray2D&& A) template MultiArray2D&& getri(MultiArray2D&& A, MultiArray1D const& IPIV, Buffer&& WORK) { - // assert(A.stride(0) > std::max(std::size_t(1), A.size(1))); + // assert(A.stride(0) > std::max(std::size_t(1), A.size(1))); assert(A.stride(1) == 1); assert(IPIV.size() >= size_t(A.size())); assert(WORK.size() >= std::max(std::size_t(1), size_t(A.size()))); @@ -81,23 +85,25 @@ int geqrf_optimal_workspace_size(MultiArray2D&& A) assert(A.stride(0) > 0); assert(A.stride(1) == 1); + using std::get; int res; - geqrf_bufferSize(std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), res); + geqrf_bufferSize(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), res); return res; } template MultiArray2D&& geqrf(MultiArray2D&& A, Array1D&& TAU, Buffer&& WORK) { + using std::get; // why was this here??? 
//assert(A.stride(0) > std::max(std::size_t(1), A.size(0))); assert(A.stride(1) == 1); assert(TAU.stride(0) == 1); - assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes()))))); + assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes()))))); assert(WORK.size() >= std::max(std::size_t(1), size_t(A.size()))); int status = -1; - geqrf(std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(TAU.origin()), + geqrf(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(TAU.origin()), pointer_dispatch(WORK.data()), WORK.size(), status); assert(status == 0); return std::forward(A); @@ -109,22 +115,25 @@ int gelqf_optimal_workspace_size(MultiArray2D&& A) assert(A.stride(0) > 0); assert(A.stride(1) == 1); + using std::get; int res; - gelqf_bufferSize(std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), res); + gelqf_bufferSize(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), res); return res; } template MultiArray2D&& gelqf(MultiArray2D&& A, Array1D&& TAU, Buffer&& WORK) { + using std::get; assert(A.stride(1) > 0); assert(A.stride(1) == 1); assert(TAU.stride(0) == 1); - assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes()))))); - assert(WORK.size() >= std::max(std::size_t(1), size_t(std::get<1>(A.sizes())))); + assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes()))))); + assert(WORK.size() >= std::max(std::size_t(1), size_t(get<1>(A.sizes())))); + using std::get; int status = -1; - gelqf(std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(TAU.data()), + gelqf(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), 
pointer_dispatch(TAU.data()), pointer_dispatch(WORK.data()), WORK.size(), status); assert(status == 0); return std::forward(A); @@ -137,8 +146,9 @@ int gqr_optimal_workspace_size(MultiArray2D&& A) assert(A.stride(0) > 0); assert(A.stride(1) == 1); + using std::get; int res; - gqr_bufferSize(std::get<1>(A.sizes()), std::get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes())))), + gqr_bufferSize(get<1>(A.sizes()), get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes())))), pointer_dispatch(A.origin()), A.stride(0), res); return res; } @@ -146,13 +156,14 @@ int gqr_optimal_workspace_size(MultiArray2D&& A) template MultiArray2D&& gqr(MultiArray2D&& A, Array1D&& TAU, Buffer&& WORK) { + using std::get; assert(A.stride(1) == 1); assert(TAU.stride(0) == 1); - assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes()))))); + assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes()))))); assert(WORK.size() >= std::max(std::size_t(1), size_t(A.size()))); int status = -1; - gqr(std::get<1>(A.sizes()), std::get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes())))), + gqr(get<1>(A.sizes()), get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes())))), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(TAU.origin()), pointer_dispatch(WORK.data()), WORK.size(), status); assert(status == 0); @@ -165,8 +176,9 @@ int glq_optimal_workspace_size(MultiArray2D&& A) assert(A.stride(0) > 0); assert(A.stride(1) == 1); + using std::get; int res; - glq_bufferSize(std::get<1>(A.sizes()), std::get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes())))), + glq_bufferSize(get<1>(A.sizes()), get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), 
get<1>(A.sizes())))), pointer_dispatch(A.origin()), A.stride(0), res); return res; } @@ -174,13 +186,15 @@ int glq_optimal_workspace_size(MultiArray2D&& A) template MultiArray2D&& glq(MultiArray2D&& A, Array1D&& TAU, Buffer&& WORK) { + using std::get; + assert(A.stride(1) == 1); assert(TAU.stride(0) == 1); - assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes()))))); - assert(WORK.size() >= std::max(std::size_t(1), size_t(std::get<1>(A.sizes())))); + assert(TAU.size() >= std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes()))))); + assert(WORK.size() >= std::max(std::size_t(1), size_t(get<1>(A.sizes())))); int status = -1; - glq(std::get<1>(A.sizes()), std::get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(std::get<0>(A.sizes()), std::get<1>(A.sizes())))), + glq(get<1>(A.sizes()), get<0>(A.sizes()), std::max(std::size_t(1), size_t(std::min(get<0>(A.sizes()), get<1>(A.sizes())))), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(TAU.data()), pointer_dispatch(WORK.data()), WORK.size(), status); assert(status == 0); @@ -203,8 +217,9 @@ int gesvd_optimal_workspace_size(MultiArray2D&& A) assert(A.stride(0) > 0); assert(A.stride(1) == 1); + using std::get; int res; - gesvd_bufferSize(std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), res); + gesvd_bufferSize(get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), res); return res; } @@ -221,11 +236,13 @@ MultiArray2D&& gesvd(char jobU, assert(A.stride(1) > 0); assert(A.stride(1) == 1); + using std::get; + // in C: A = U * S * VT // in F: At = (U * S * VT)t = VTt * S * Ut // so I need to switch U <--> VT when calling fortran interface int status = -1; - gesvd(jobVT, jobU, std::get<1>(A.sizes()), std::get<0>(A.sizes()), pointer_dispatch(A.origin()), A.stride(0), pointer_dispatch(S.origin()), + gesvd(jobVT, jobU, get<1>(A.sizes()), get<0>(A.sizes()), pointer_dispatch(A.origin()), 
A.stride(0), pointer_dispatch(S.origin()), pointer_dispatch(VT.origin()), VT.stride(0), // !!! pointer_dispatch(U.origin()), U.stride(0), // !!! pointer_dispatch(WORK.data()), WORK.size(), pointer_dispatch(RWORK.origin()), status); @@ -243,7 +260,9 @@ std::pair symEig(MultiArray2D const& A) using Type = typename MultiArray2D::element; using RealType = typename qmcplusplus::afqmc::remove_complex::value_type; using extensions = typename boost::multi::layout_t<1u>::extensions_type; - assert(A.size() == std::get<1>(A.sizes())); + + using std::get; + assert(A.size() == get<1>(A.sizes())); assert(A.stride(1) == 1); assert(A.size() > 0); int N = A.size(); @@ -316,10 +335,12 @@ std::pair symEigSelect(MultiArray2DA& A, int neig) static_assert(std::is_same::value, "Wrong types."); using RealType = typename qmcplusplus::afqmc::remove_complex::value_type; using extensions = typename boost::multi::layout_t<1u>::extensions_type; - assert(std::get<0>(A.sizes()) == std::get<1>(A.sizes())); + + using std::get; + assert(get<0>(A.sizes()) == get<1>(A.sizes())); assert(A.stride(1) == 1); - assert(std::get<0>(A.sizes()) > 0); - int N = std::get<0>(A.sizes()); + assert(get<0>(A.sizes()) > 0); + int N = get<0>(A.sizes()); int LDA = A.stride(0); MultiArray1D eigVal(extensions{neig}); @@ -394,14 +415,16 @@ std::pair genEigSelect(MultiArray2DA& A, MultiArray2 static_assert(std::is_same::value, "Wrong types."); using RealType = typename qmcplusplus::afqmc::remove_complex::value_type; using extensions = typename boost::multi::layout_t<1u>::extensions_type; - assert(std::get<0>(A.sizes()) == std::get<1>(A.sizes())); - assert(std::get<0>(A.sizes()) == std::get<0>(S.sizes())); - assert(std::get<0>(S.sizes()) == std::get<1>(S.sizes())); + + using std::get; + assert(get<0>(A.sizes()) == get<1>(A.sizes())); + assert(get<0>(A.sizes()) == get<0>(S.sizes())); + assert(get<0>(S.sizes()) == get<1>(S.sizes())); assert(A.stride(1) == 1); - assert(std::get<0>(A.sizes()) > 0); + assert(get<0>(A.sizes()) > 
0); assert(S.stride(1) == 1); - assert(std::get<0>(S.sizes()) > 0); - int N = std::get<0>(A.sizes()); + assert(get<0>(S.sizes()) > 0); + int N = get<0>(A.sizes()); int LDA = A.stride(0); int LDS = S.stride(0); diff --git a/src/AFQMC/Numerics/ma_operations.hpp b/src/AFQMC/Numerics/ma_operations.hpp index 5e4c8c47f6..a0f2a83a30 100644 --- a/src/AFQMC/Numerics/ma_operations.hpp +++ b/src/AFQMC/Numerics/ma_operations.hpp @@ -42,11 +42,13 @@ using qmcplusplus::afqmc::to_address; template 1)>::type> bool is_hermitian(MultiArray2D const& A) { + using std::get; + using ma::conj; - if (A.size() != std::get<1>(A.sizes())) + if (A.size() != get<1>(A.sizes())) return false; - for (int i = 0; i != std::get<0>(A.sizes()); ++i) - for (int j = i + 1; j != std::get<1>(A.sizes()); ++j) + for (int i = 0; i != get<0>(A.sizes()); ++i) + for (int j = i + 1; j != get<1>(A.sizes()); ++j) if (std::abs(A[i][j] - ma::conj(A[j][i])) > 1e-12) return false; return true; @@ -256,20 +258,23 @@ MultiArray2DC&& product(T alpha, SparseMatrixA const& A, MultiArray2DB const& B, assert(op_tag::value == 'N'); assert(arg(B).stride(1) == 1); assert(std::forward(C).stride(1) == 1); + + using std::get; if (op_tag::value == 'N') { assert(arg(A).size() == std::forward(C).size()); assert( arg(A).size(1) == arg(B).size() ); - assert( std::get<1>(arg(B).sizes()) == std::get<1>(std::forward(C).sizes()) ); + + assert( get<1>(arg(B).sizes()) == get<1>(std::forward(C).sizes()) ); } else { assert(arg(A).size() == arg(B).size()); - assert(std::get<1>(arg(A).sizes()) == std::forward(C).size()); - assert(std::get<1>(arg(B).sizes()) == std::get<1>(std::forward(C).sizes())); + assert(get<1>(arg(A).sizes()) == std::forward(C).size()); + assert(get<1>(arg(B).sizes()) == get<1>(std::forward(C).sizes())); } - csrmm(op_tag::value, arg(A).size(), std::get<1>(arg(B).sizes()), std::get<1>(arg(A).sizes()), elementA(alpha), "GxxCxx", + csrmm(op_tag::value, arg(A).size(), get<1>(arg(B).sizes()), get<1>(arg(A).sizes()), 
elementA(alpha), "GxxCxx", pointer_dispatch(arg(A).non_zero_values_data()), pointer_dispatch(arg(A).non_zero_indices2_data()), pointer_dispatch(arg(A).pointers_begin()), pointer_dispatch(arg(A).pointers_end()), pointer_dispatch(arg(B).origin()), arg(B).stride(), elementA(beta), pointer_dispatch(C.origin()), C.stride()); @@ -336,13 +341,14 @@ void BatchedProduct(char TA, using pointerC = decltype(pointer_dispatch((*C[0]).origin())); using element = typename pointedType::element; - int M = std::get<1>((*C[0]).sizes()); + using std::get; + int M = get<1>((*C[0]).sizes()); int N = (*C[0]).size(); int K; if (TB == 'N') K = (*B[0]).size(); else - K = std::get<1>((*B[0]).sizes()); + K = get<1>((*B[0]).sizes()); int lda = (*A[0]).stride(); int ldb = (*B[0]).stride(); int ldc = (*C[0]).stride(); @@ -357,27 +363,27 @@ void BatchedProduct(char TA, assert(lda == (*A[i]).stride()); assert(ldb == (*B[i]).stride()); assert(ldc == (*C[i]).stride()); - assert(M == std::get<1>((*C[i]).sizes())); + assert(M == get<1>((*C[i]).sizes())); assert(N == (*C[i]).size()); if (TB == 'N') { assert(K == (*B[i]).size()); - assert(M == std::get<1>((*B[i]).sizes())); + assert(M == get<1>((*B[i]).sizes())); } else { - assert(K == std::get<1>((*B[i]).sizes())); + assert(K == get<1>((*B[i]).sizes())); assert(M == (*B[i]).size()); } if (TA == 'N') { - assert(K == std::get<1>((*A[i]).sizes())); + assert(K == get<1>((*A[i]).sizes())); assert(N == (*A[i]).size()); } else { assert(K == (*A[i]).size()); - assert(N == std::get<1>((*A[i]).sizes())); + assert(N == get<1>((*A[i]).sizes())); } Ai.emplace_back(pointer_dispatch((*A[i]).origin())); Bi.emplace_back(pointer_dispatch((*B[i]).origin())); @@ -426,9 +432,11 @@ void BatchedProduct(char TA, } */ + using std::get; + for (int i = 0; i < nbatch; i++) { - csrmm(TA, (*A[i]).size(), std::get<1>((*B[i]).sizes()), std::get<1>((*A[i]).sizes()), elementA(alpha), "GxxCxx", + csrmm(TA, (*A[i]).size(), get<1>((*B[i]).sizes()), get<1>((*A[i]).sizes()), elementA(alpha), 
"GxxCxx", pointer_dispatch((*A[i]).non_zero_values_data()), pointer_dispatch((*A[i]).non_zero_indices2_data()), pointer_dispatch((*A[i]).pointers_begin()), pointer_dispatch((*A[i]).pointers_end()), pointer_dispatch((*B[i]).origin()), (*B[i]).stride(), elementA(beta), pointer_dispatch((*C[i]).origin()), @@ -568,7 +576,7 @@ auto herm(MA2D&& arg) -> decltype(hermitian(std::forward(arg))) } //template auto norm(MA2D&& arg) //->decltype(normal(std::forward(arg))){ -// return normal(std::forward(arg)); +// return normal(std::forward(arg)); //} template @@ -592,8 +600,10 @@ T invert(MultiArray2D&& m, T LogOverlapFactor) getrf(std::forward(m), pivot, WORK); T detvalue = determinant_from_getrf(m.size(), pointer_dispatch(m.origin()), m.stride(), pointer_dispatch(pivot.data()), LogOverlapFactor); + using std::get; + if (std::abs(detvalue) == 0.0) - fill2D(m.size(), std::get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(), element(0.0)); + fill2D(m.size(), get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(), element(0.0)); else getri(std::forward(m), pivot, WORK); return detvalue; @@ -602,7 +612,8 @@ T invert(MultiArray2D&& m, T LogOverlapFactor) template T invert(MultiArray2D&& m, MultiArray1D&& pivot, Buffer&& WORK, T LogOverlapFactor) { - assert(m.size() == std::get<1>(m.sizes())); + using std::get; + assert(m.size() == get<1>(m.sizes())); assert(pivot.size() >= m.size() + 1); using element = typename std::decay::type::element; using qmcplusplus::afqmc::fill2D; @@ -610,8 +621,10 @@ T invert(MultiArray2D&& m, MultiArray1D&& pivot, Buffer&& WORK, T LogOverlapFact getrf(std::forward(m), pivot, WORK); T detvalue = determinant_from_getrf(m.size(), pointer_dispatch(m.origin()), m.stride(), pointer_dispatch(pivot.data()), LogOverlapFactor); + + using std::get; if (std::abs(detvalue) == 0.0) - fill2D(m.size(), std::get<1>(m.sizes()), pointer_dispatch(m.origin()), m.stride(), element(0.0)); + fill2D(m.size(), get<1>(m.sizes()), pointer_dispatch(m.origin()), 
m.stride(), element(0.0)); else getri(std::forward(m), pivot, WORK); return detvalue; @@ -646,7 +659,8 @@ void invert_withSVD(MultiArray2D&& m, MultiArray1DS&& S, MultiArray2DU&& U, Mult template T determinant(MultiArray2D&& m, MultiArray1D&& pivot, Buffer&& WORK, T LogOverlapFactor) { - assert(m.size() == std::get<1>(m.sizes())); + using std::get; + assert(m.size() == get<1>(m.sizes())); assert(pivot.size() >= m.size()); getrf(std::forward(m), std::forward(pivot), WORK); @@ -662,7 +676,9 @@ MultiArray2D exp(MultiArray2D const& A, bool printeV = false) using TVec = boost::multi::array; using TMat = boost::multi::array; using eigSys = std::pair; - assert(A.size() == std::get<1>(A.sizes())); + + using std::get; + assert(A.size() == get<1>(A.sizes())); typename MultiArray2D::size_type N = A.size(); MultiArray2D ExpA({N, N}); @@ -745,7 +761,7 @@ int main() std::vector m = { 9., 24., 30., 4., 10., 12., 14., 16., 36. //, - // 9., 6., 1. + // 9., 6., 1. }; boost::multi::array_ref M(m.data(), {3, 3}); assert(M.num_elements() == m.size()); diff --git a/src/AFQMC/Numerics/tests/test_batched_operations.cpp b/src/AFQMC/Numerics/tests/test_batched_operations.cpp index 875330bfe2..8811a0f19f 100644 --- a/src/AFQMC/Numerics/tests/test_batched_operations.cpp +++ b/src/AFQMC/Numerics/tests/test_batched_operations.cpp @@ -277,7 +277,9 @@ TEST_CASE("Awiu_Biu_Cuw", "[Numerics][batched_operations]") ComplexType alpha = 0.5; // C = alpha * numpy.einsum('wnu,nu->uw', A, B) using ma::Awiu_Biu_Cuw; - Awiu_Biu_Cuw(nu, nw, nn, alpha, A.origin(), B.origin(), std::get<1>(B.sizes()), C.origin(), std::get<1>(C.sizes())); + + using std::get; + Awiu_Biu_Cuw(nu, nw, nn, alpha, A.origin(), B.origin(), get<1>(B.sizes()), C.origin(), get<1>(C.sizes())); Tensor2D ref({nu, nw}, 4.0, alloc); ref[1][0] = 3.0; ref[1][1] = 3.0; @@ -296,7 +298,8 @@ TEST_CASE("Aijk_Bkj_Cik", "[Numerics][batched_operations]") Tensor2D C({ni, nk}, 0.0, alloc); // C = alpha * numpy.einsum('wnu,nu->uw', A, B) using 
ma::Aijk_Bkj_Cik; - Aijk_Bkj_Cik(ni, nj, nk, A.origin(), std::get<1>(A.sizes()), A.stride(0), B.origin(), B.stride(0), C.origin(), C.stride(0)); + using std::get; + Aijk_Bkj_Cik(ni, nj, nk, A.origin(), get<1>(A.sizes()), A.stride(0), B.origin(), B.stride(0), C.origin(), C.stride(0)); Tensor2D ref({ni, nk}, 4.0, alloc); ref[0][0] = 2.0; ref[1][0] = 2.0; @@ -331,8 +334,9 @@ TEST_CASE("element_wise_Aij_Bjk_Ckij", "[Numerics][batched_operations]") Tensor2D A({ni, nj}, 3.0, alloc); Tensor2D B({nj, nk}, 2.0, alloc); Tensor3D C({nk, ni, nj}, 0.0, alloc); - element_wise_Aij_Bjk_Ckij('N', ni, nj, nk, A.origin(), A.stride(0), B.origin(), B.stride(0), C.origin(), std::get<1>(C.sizes()), - std::get<2>(C.sizes())); + using std::get; + element_wise_Aij_Bjk_Ckij('N', ni, nj, nk, A.origin(), A.stride(0), B.origin(), B.stride(0), C.origin(), get<1>(C.sizes()), + get<2>(C.sizes())); Tensor3D ref({nk, ni, nj}, 6.0, alloc); verify_approx(C, ref); } @@ -340,8 +344,10 @@ TEST_CASE("element_wise_Aij_Bjk_Ckij", "[Numerics][batched_operations]") Tensor2D A({ni, nj}, ComplexType(0.0, -3.0), alloc); Tensor2D B({nj, nk}, ComplexType(1.0, 2.0), alloc); Tensor3D C({nk, ni, nj}, 0.0, alloc); - element_wise_Aij_Bjk_Ckij('C', ni, nj, nk, A.origin(), A.stride(0), B.origin(), B.stride(0), C.origin(), std::get<1>(C.sizes()), - std::get<2>(C.sizes())); + + using std::get; + element_wise_Aij_Bjk_Ckij('C', ni, nj, nk, A.origin(), A.stride(0), B.origin(), B.stride(0), C.origin(), get<1>(C.sizes()), + get<2>(C.sizes())); Tensor3D ref({nk, ni, nj}, ComplexType(-6.0, 3.0), alloc); verify_approx(C, ref); } @@ -359,7 +365,9 @@ void test_Aij_Bjk_Ckji() Tensor2D A({ni, nj}, -3.0, alloc_a); Tensor2D B({nj, nk}, T2(1.0, 2.0), alloc_b); Tensor3D C({nk, nj, ni}, 0.0, alloc_b); - element_wise_Aij_Bjk_Ckji(ni, nj, nk, A.origin(), A.stride(0), B.origin(), B.stride(0), C.origin(), std::get<2>(C.sizes()), + + using std::get; + element_wise_Aij_Bjk_Ckji(ni, nj, nk, A.origin(), A.stride(0), B.origin(), B.stride(0), 
C.origin(), get<2>(C.sizes()), C.stride(0)); Tensor3D ref({nk, nj, ni}, T2(-3.0, -6.0), alloc_b); verify_approx(C, ref); @@ -383,7 +391,8 @@ TEST_CASE("inplace_product", "[Numerics][batched_operations]") Tensor3D A({nb, ni, nj}, ComplexType(1.0, -3.0), alloc); Tensor2D B({ni, nj}, 2.0, dalloc); using ma::inplace_product; - inplace_product(nb, ni, nj, B.origin(), std::get<1>(B.sizes()), A.origin(), std::get<2>(A.sizes())); + using std::get; + inplace_product(nb, ni, nj, B.origin(), get<1>(B.sizes()), A.origin(), get<2>(A.sizes())); Tensor3D ref({nb, ni, nj}, ComplexType(2.0, -6.0), alloc); verify_approx(A, ref); } diff --git a/src/AFQMC/Numerics/tests/test_dense_numerics.cpp b/src/AFQMC/Numerics/tests/test_dense_numerics.cpp index 9d5bf2c717..2b14d6b10c 100644 --- a/src/AFQMC/Numerics/tests/test_dense_numerics.cpp +++ b/src/AFQMC/Numerics/tests/test_dense_numerics.cpp @@ -321,11 +321,12 @@ void test_dense_matrix_mult() array, 2> A({3, 3}); array, 2> B({3, 3}); - for (int i = 0, k = 0; i < std::get<0>(A.sizes()); i++) - for (int j = 0; j < std::get<1>(A.sizes()); j++, k++) + using std::get; + for (int i = 0, k = 0; i < get<0>(A.sizes()); i++) + for (int j = 0; j < get<1>(A.sizes()); j++, k++) A[i][j] = m_a[k]; - for (int i = 0, k = 0; i < std::get<0>(A.sizes()); i++) - for (int j = 0; j < std::get<1>(A.sizes()); j++, k++) + for (int i = 0, k = 0; i < get<0>(A.sizes()); i++) + for (int j = 0; j < get<1>(A.sizes()); j++, k++) B[i][j] = m_b[k]; array, 2> C = ma::exp(A); diff --git a/src/AFQMC/Numerics/tests/test_determinant.cpp b/src/AFQMC/Numerics/tests/test_determinant.cpp index 0b1a452b46..1f6006ce1a 100644 --- a/src/AFQMC/Numerics/tests/test_determinant.cpp +++ b/src/AFQMC/Numerics/tests/test_determinant.cpp @@ -79,7 +79,8 @@ TEST_CASE("determinant_from_getrf", "[Numerics][determinant]") double log_factor = 0.0; double detx = 0.06317052169675352; using ma::determinant_from_getrf; - double ovlp = determinant_from_getrf(std::get<0>(x.sizes()), lu.origin(), 
std::get<1>(lu.sizes()), pivot.origin(), log_factor); + using std::get; + double ovlp = determinant_from_getrf(get<0>(x.sizes()), lu.origin(), get<1>(lu.sizes()), pivot.origin(), log_factor); CHECK(ovlp == Approx(detx)); } @@ -104,8 +105,9 @@ TEST_CASE("strided_determinant_from_getrf", "[Numerics][determinant]") double log_factor = 0.0; double detx = 0.06317052169675352; using ma::strided_determinant_from_getrf; - strided_determinant_from_getrf(std::get<0>(x.sizes()), lus.origin(), std::get<1>(lu.sizes()), lu.num_elements(), pivot.origin(), std::get<1>(pivot.sizes()), - log_factor, to_address(ovlps.origin()), std::get<0>(lus.sizes())); + using std::get; + strided_determinant_from_getrf(get<0>(x.sizes()), lus.origin(), get<1>(lu.sizes()), lu.num_elements(), pivot.origin(), get<1>(pivot.sizes()), + log_factor, to_address(ovlps.origin()), get<0>(lus.sizes())); CHECK(ovlps[0] == Approx(detx)); CHECK(ovlps[1] == Approx(detx)); CHECK(ovlps[2] == Approx(detx)); @@ -132,7 +134,8 @@ TEST_CASE("batched_determinant_from_getrf", "[Numerics][determinant]") double log_factor = 0.0; double detx = 0.06317052169675352; using ma::batched_determinant_from_getrf; - batched_determinant_from_getrf(std::get<0>(x.sizes()), lu_array.data(), std::get<1>(lu.sizes()), pivot.origin(), std::get<1>(pivot.sizes()), log_factor, + using std::get; + batched_determinant_from_getrf(get<0>(x.sizes()), lu_array.data(), get<1>(lu.sizes()), pivot.origin(), get<1>(pivot.sizes()), log_factor, to_address(ovlps.origin()), lu_array.size()); CHECK(ovlps[0] == Approx(detx)); CHECK(ovlps[1] == Approx(detx)); @@ -160,7 +163,8 @@ TEST_CASE("batched_determinant_from_getrf_complex", "[Numerics][determinant]") std::complex log_factor = 0.0; std::complex detx = 0.06317052169675352; using ma::batched_determinant_from_getrf; - batched_determinant_from_getrf(std::get<0>(x.sizes()), lu_array.data(), std::get<1>(lu.sizes()), pivot.origin(), std::get<1>(pivot.sizes()), log_factor, + using std::get; + 
batched_determinant_from_getrf(get<0>(x.sizes()), lu_array.data(), get<1>(lu.sizes()), pivot.origin(), get<1>(pivot.sizes()), log_factor, to_address(ovlps.origin()), lu_array.size()); CHECK(ovlps[0] == ComplexApprox(detx)); CHECK(ovlps[1] == ComplexApprox(detx)); diff --git a/src/AFQMC/Numerics/tests/test_misc_kernels.cpp b/src/AFQMC/Numerics/tests/test_misc_kernels.cpp index 33566c7795..f0e11fcce5 100644 --- a/src/AFQMC/Numerics/tests/test_misc_kernels.cpp +++ b/src/AFQMC/Numerics/tests/test_misc_kernels.cpp @@ -76,13 +76,14 @@ TEST_CASE("axpyBatched", "[Numerics][misc_kernels]") Tensor2D> x({3, 4}, 1.0, alloc); Tensor1D> a(iextensions<1u>{3}, 2.0, alloc); std::vector>> x_batched, y_batched; - for (int i = 0; i < std::get<0>(x.sizes()); i++) + using std::get; + for (int i = 0; i < get<0>(x.sizes()); i++) { x_batched.emplace_back(x[i].origin()); y_batched.emplace_back(y[i].origin()); } using ma::axpyBatched; - axpyBatched(std::get<1>(x.sizes()), to_address(a.origin()), x_batched.data(), 1, y_batched.data(), 1, x_batched.size()); + axpyBatched(get<1>(x.sizes()), to_address(a.origin()), x_batched.data(), 1, y_batched.data(), 1, x_batched.size()); // 1 + 2 = 3. 
Tensor2D> ref({3, 4}, 3.0, alloc); verify_approx(y, ref); diff --git a/src/AFQMC/Propagators/AFQMCBasePropagator.icc b/src/AFQMC/Propagators/AFQMCBasePropagator.icc index 59c94ae606..b53f0e829a 100644 --- a/src/AFQMC/Propagators/AFQMCBasePropagator.icc +++ b/src/AFQMC/Propagators/AFQMCBasePropagator.icc @@ -56,13 +56,14 @@ void AFQMCBasePropagator::step(int nsteps_, WlkSet& wset, RealType Eshift, RealT if (transposed_G_) G_ext = iextensions<2u>{nwalk, Gsize}; - if (std::get<0>(MFfactor.sizes()) != nsteps || std::get<1>(MFfactor.sizes()) != nwalk) + using std::get; + if (get<0>(MFfactor.sizes()) != nsteps || get<1>(MFfactor.sizes()) != nwalk) MFfactor = CMatrix({long(nsteps), long(nwalk)}); - if (std::get<0>(hybrid_weight.sizes()) != nsteps || std::get<1>(hybrid_weight.sizes()) != nwalk) + if (get<0>(hybrid_weight.sizes()) != nsteps || get<1>(hybrid_weight.sizes()) != nwalk) hybrid_weight = CMatrix({long(nsteps), long(nwalk)}); - if (std::get<0>(new_overlaps.sizes()) != nwalk) + if (get<0>(new_overlaps.sizes()) != nwalk) new_overlaps = CVector(iextensions<1u>{nwalk}); - if (std::get<0>(new_energies.sizes()) != nwalk || std::get<1>(new_energies.sizes()) != 3) + if (get<0>(new_energies.sizes()) != nwalk || get<1>(new_energies.sizes()) != 3) new_energies = CMatrix({long(nwalk), 3}); @@ -279,16 +280,20 @@ void AFQMCBasePropagator::BackPropagate(int nbpsteps, int nStabalize, WlkSet& ws buffer_manager.get_generator().template get_allocator()); C3Tensor_ref vHS3D(make_device_ptr(vHS.origin()), vhs3d_ext); + using std::get; + auto&& Fields(*wset.getFields()); - assert(std::get<0>(Fields.sizes()) >= nbpsteps); - assert(std::get<1>(Fields.sizes()) == globalnCV); - assert(std::get<2>(Fields.sizes()) == nwalk); + assert(get<0>(Fields.sizes()) >= nbpsteps); + assert(get<1>(Fields.sizes()) == globalnCV); + assert(get<2>(Fields.sizes()) == nwalk); int nrow(NMO * npol); int ncol(NAEA + ((walker_type == CLOSED) ? 
0 : NAEB)); assert(Refs.size() == nwalk); - int nrefs = std::get<1>(Refs.sizes()); - assert(std::get<2>(Refs.sizes()) == nrow * ncol); + + using std::get; + int nrefs = get<1>(Refs.sizes()); + assert(get<2>(Refs.sizes()) == nrow * ncol); int cv0, cvN; std::tie(cv0, cvN) = FairDivideBoundary(TG.getLocalTGRank(), globalnCV, TG.getNCoresPerTG()); @@ -299,8 +304,8 @@ void AFQMCBasePropagator::BackPropagate(int nbpsteps, int nStabalize, WlkSet& ws if (walker_type == COLLINEAR) nx = 2; - assert(std::get<0>(detR.sizes()) == nwalk); - assert(std::get<1>(detR.sizes()) == nrefs * nx); + assert(get<0>(detR.sizes()) == nwalk); + assert(get<1>(detR.sizes()) == nrefs * nx); std::fill_n(detR.origin(), detR.num_elements(), ComplexType(1.0, 0.0)); // from now on, individual work on each walker/step @@ -467,7 +472,8 @@ void AFQMCBasePropagator::apply_propagators(char TA, } else { - if (std::get<0>(local_vHS.sizes()) != NMO || std::get<1>(local_vHS.sizes()) != NMO) + using std::get; + if (get<0>(local_vHS.sizes()) != NMO || get<1>(local_vHS.sizes()) != NMO) local_vHS = CMatrix({NMO, NMO}); // vHS3D[M][M][nstep*nwalk]: need temporary buffer in this case if (walker_type == COLLINEAR) @@ -561,11 +567,12 @@ void AFQMCBasePropagator::apply_propagators_batched(char TA, WSet& wset, int ni, } else { - if (std::get<0>(local_vHS.sizes()) != nbatch || std::get<1>(local_vHS.sizes()) != NMO * NMO) + using std::get; + if (get<0>(local_vHS.sizes()) != nbatch || get<1>(local_vHS.sizes()) != NMO * NMO) local_vHS = CMatrix({nbatch, NMO * NMO}); // vHS3D[M][M][nstep*nwalk]: need temporary buffer in this case - int N2 = std::get<0>(vHS3D.sizes()) * std::get<1>(vHS3D.sizes()); - CMatrix_ref vHS2D(vHS3D.origin(), {N2, std::get<2>(vHS3D.sizes())}); + int N2 = get<0>(vHS3D.sizes()) * get<1>(vHS3D.sizes()); + CMatrix_ref vHS2D(vHS3D.origin(), {N2, get<2>(vHS3D.sizes())}); C3Tensor_ref local3D(local_vHS.origin(), {nbatch, NMO, NMO}); int nt = ni * nwalk; for (int iw = 0; iw < nwalk; iw += nbatch, nt += 
nbatch) diff --git a/src/AFQMC/Propagators/AFQMCDistributedPropagator.icc b/src/AFQMC/Propagators/AFQMCDistributedPropagator.icc index 3b63c4ba05..340708397a 100644 --- a/src/AFQMC/Propagators/AFQMCDistributedPropagator.icc +++ b/src/AFQMC/Propagators/AFQMCDistributedPropagator.icc @@ -60,14 +60,15 @@ void AFQMCDistributedPropagator::step(int nsteps_, WlkSet& wset, RealType Eshift if (transposed_G_) G_ext = iextensions<2u>{nwalk, Gsize}; - if (std::get<1>(MFfactor.sizes()) != nsteps || std::get<2>(MFfactor.sizes()) != nwalk) + using std::get; + if (get<1>(MFfactor.sizes()) != nsteps || get<2>(MFfactor.sizes()) != nwalk) MFfactor = C3Tensor({2, nsteps, nwalk}); - if (std::get<1>(hybrid_weight.sizes()) != nsteps || std::get<2>(hybrid_weight.sizes()) != nwalk) + if (get<1>(hybrid_weight.sizes()) != nsteps || get<2>(hybrid_weight.sizes()) != nwalk) hybrid_weight = C3Tensor({2, nsteps, nwalk}); - if (std::get<0>(new_overlaps.sizes()) != nwalk) + if (get<0>(new_overlaps.sizes()) != nwalk) new_overlaps = CVector(iextensions<1u>{nwalk}); - if (std::get<0>(new_energies.sizes()) != nwalk || std::get<1>(new_energies.sizes()) != 3) + if (get<0>(new_energies.sizes()) != nwalk || get<1>(new_energies.sizes()) != 3) new_energies = CMatrix({nwalk, 3}); // Summary of temporary memory usage: diff --git a/src/AFQMC/Propagators/AFQMCDistributedPropagatorDistCV.icc b/src/AFQMC/Propagators/AFQMCDistributedPropagatorDistCV.icc index 63ae868436..edf9e99717 100644 --- a/src/AFQMC/Propagators/AFQMCDistributedPropagatorDistCV.icc +++ b/src/AFQMC/Propagators/AFQMCDistributedPropagatorDistCV.icc @@ -66,13 +66,14 @@ void AFQMCDistributedPropagatorDistCV::step(int nsteps_, WlkSet& wset, RealType if (transposed_G_) G_ext = iextensions<2u>{nwalk, Gsize}; - if (std::get<0>(MFfactor.sizes()) != nsteps || std::get<1>(MFfactor.sizes()) != nwalk) + using std::get; + if (get<0>(MFfactor.sizes()) != nsteps || get<1>(MFfactor.sizes()) != nwalk) MFfactor = CMatrix({long(nsteps), long(nwalk)}); - if 
(std::get<0>(hybrid_weight.sizes()) != nsteps || std::get<1>(hybrid_weight.sizes()) != nwalk) + if (get<0>(hybrid_weight.sizes()) != nsteps || get<1>(hybrid_weight.sizes()) != nwalk) hybrid_weight = CMatrix({long(nsteps), long(nwalk)}); - if (std::get<0>(new_overlaps.sizes()) != nwalk) + if (get<0>(new_overlaps.sizes()) != nwalk) new_overlaps = CVector(iextensions<1u>{nwalk}); - if (std::get<0>(new_energies.sizes()) != nwalk || std::get<1>(new_energies.sizes()) != 3) + if (get<0>(new_energies.sizes()) != nwalk || get<1>(new_energies.sizes()) != 3) new_energies = CMatrix({long(nwalk), 3}); // Temporary memory usage summary: @@ -435,13 +436,14 @@ void AFQMCDistributedPropagatorDistCV::step_collective(int nsteps_, WlkSet& wset if (transposed_G_) G_ext = iextensions<2u>{nwalk, Gsize}; - if (std::get<0>(MFfactor.sizes()) != nsteps || std::get<1>(MFfactor.sizes()) != nwalk) + using std::get; + if (get<0>(MFfactor.sizes()) != nsteps || get<1>(MFfactor.sizes()) != nwalk) MFfactor = CMatrix({long(nsteps), long(nwalk)}); - if (std::get<0>(hybrid_weight.sizes()) != nsteps || std::get<1>(hybrid_weight.sizes()) != nwalk) + if (get<0>(hybrid_weight.sizes()) != nsteps || get<1>(hybrid_weight.sizes()) != nwalk) hybrid_weight = CMatrix({long(nsteps), long(nwalk)}); - if (std::get<0>(new_overlaps.sizes()) != nwalk) + if (get<0>(new_overlaps.sizes()) != nwalk) new_overlaps = CVector(iextensions<1u>{nwalk}); - if (std::get<0>(new_energies.sizes()) != nwalk || std::get<1>(new_energies.sizes()) != 3) + if (get<0>(new_energies.sizes()) != nwalk || get<1>(new_energies.sizes()) != 3) new_energies = CMatrix({long(nwalk), 3}); // Temporary memory usage summary: @@ -831,15 +833,19 @@ void AFQMCDistributedPropagatorDistCV::BackPropagate(int nbpsteps, TG.local_barrier(); auto&& Fields(*wset.getFields()); - assert(std::get<0>(Fields.sizes()) >= nbpsteps); - assert(std::get<1>(Fields.sizes()) == globalnCV); - assert(std::get<2>(Fields.sizes()) == nwalk); + + using std::get; + 
assert(get<0>(Fields.sizes()) >= nbpsteps); + assert(get<1>(Fields.sizes()) == globalnCV); + assert(get<2>(Fields.sizes()) == nwalk); int nrow(NMO * ((walker_type == NONCOLLINEAR) ? 2 : 1)); int ncol(NAEA + ((walker_type == CLOSED) ? 0 : NAEB)); - assert(std::get<0>(Refs.sizes()) == nwalk); - int nrefs = std::get<1>(Refs.sizes()); - assert(std::get<2>(Refs.sizes()) == nrow * ncol); + + using std::get; + assert(get<0>(Refs.sizes()) == nwalk); + int nrefs = get<1>(Refs.sizes()); + assert(get<2>(Refs.sizes()) == nrow * ncol); int cv0, cvN; std::tie(cv0, cvN) = FairDivideBoundary(TG.getLocalTGRank(), localnCV, TG.getNCoresPerTG()); @@ -852,8 +858,8 @@ void AFQMCDistributedPropagatorDistCV::BackPropagate(int nbpsteps, if (walker_type == COLLINEAR) nx = 2; - assert(std::get<0>(detR.sizes()) == nwalk); - assert(std::get<1>(detR.sizes()) == nrefs * nx); + assert(get<0>(detR.sizes()) == nwalk); + assert(get<1>(detR.sizes()) == nrefs * nx); std::fill_n(detR.origin(), detR.num_elements(), SPComplexType(1.0, 0.0)); // from now on, individual work on each walker/step diff --git a/src/AFQMC/Propagators/WalkerSetUpdate.hpp b/src/AFQMC/Propagators/WalkerSetUpdate.hpp index cfff10ceb4..6bc17936a7 100644 --- a/src/AFQMC/Propagators/WalkerSetUpdate.hpp +++ b/src/AFQMC/Propagators/WalkerSetUpdate.hpp @@ -37,10 +37,12 @@ void free_projection_walker_update(Wlk&& w, Mat&& hybrid_weight, WMat& work) { + using std::get; + int nwalk = w.size(); // constexpr if can be used to avoid the memory copy, by comparing the pointer types // between WMat and Mat/OMat - if (std::get<0>(work.sizes()) < 7 || std::get<1>(work.sizes()) < nwalk) + if (get<0>(work.sizes()) < 7 || get<1>(work.sizes()) < nwalk) work.reextent({7, nwalk}); w.getProperty(WEIGHT, work[0]); @@ -83,10 +85,12 @@ void hybrid_walker_update(Wlk&& w, Mat&& hybrid_weight, WMat& work) { + using std::get; + int nwalk = w.size(); // constexpr if can be used to avoid the memory copy, by comparing the pointer types // between WMat and Mat/OMat 
- if (std::get<0>(work.sizes()) < 7 || std::get<1>(work.sizes()) < nwalk) + if (get<0>(work.sizes()) < 7 || get<1>(work.sizes()) < nwalk) work.reextent({7, nwalk}); bool BackProp = (w.getBPPos() >= 0 && w.getBPPos() < w.NumBackProp()); @@ -177,10 +181,12 @@ void local_energy_walker_update(Wlk&& w, Mat&& hybrid_weight, WMat& work) { + using std::get; + int nwalk = w.size(); // constexpr if can be used to avoid the memory copy, by comparing the pointer types // between WMat and Mat/OMat - if (std::get<0>(work.sizes()) < 12 || std::get<1>(work.sizes()) < nwalk) + if (get<0>(work.sizes()) < 12 || get<1>(work.sizes()) < nwalk) work.reextent({12, nwalk}); bool BackProp = (w.getBPPos() >= 0 && w.getBPPos() < w.NumBackProp()); diff --git a/src/AFQMC/Propagators/generate1BodyPropagator.hpp b/src/AFQMC/Propagators/generate1BodyPropagator.hpp index 184ea70164..ec0035f79f 100644 --- a/src/AFQMC/Propagators/generate1BodyPropagator.hpp +++ b/src/AFQMC/Propagators/generate1BodyPropagator.hpp @@ -45,8 +45,9 @@ P_Type generate1BodyPropagator(TaskGroup_& TG, MultiArray2D const& H1, bool printP1eV = false) { + using std::get; assert(H1.dimensionality == 2); - assert(std::get<0>(H1.sizes()) == std::get<1>(H1.sizes())); + assert(get<0>(H1.sizes()) == get<1>(H1.sizes())); assert(H1.stride(1) == 1); int NMO = H1.size(); if (TG.TG_local().root()) @@ -83,13 +84,14 @@ P_Type generate1BodyPropagator(TaskGroup_& TG, MultiArray2DB const& H1ext, bool printP1eV = false) { + using std::get; assert(H1.dimensionality == 2); - assert(std::get<0>(H1.sizes()) == std::get<1>(H1.sizes())); + assert(get<0>(H1.sizes()) == get<1>(H1.sizes())); assert(H1.stride(1) == 1); assert(H1ext.dimensionality == 2); - assert(std::get<0>(H1ext.sizes()) == std::get<1>(H1ext.sizes())); + assert(get<0>(H1ext.sizes()) == get<1>(H1ext.sizes())); assert(H1ext.stride(1) == 1); - assert(std::get<0>(H1.sizes()) == std::get<1>(H1ext.sizes())); + assert(get<0>(H1.sizes()) == get<1>(H1ext.sizes())); int NMO = H1.size(); if 
(TG.TG_local().root()) { diff --git a/src/AFQMC/Propagators/tests/test_propagator_factory.cpp b/src/AFQMC/Propagators/tests/test_propagator_factory.cpp index be4974ec11..a2cc791649 100644 --- a/src/AFQMC/Propagators/tests/test_propagator_factory.cpp +++ b/src/AFQMC/Propagators/tests/test_propagator_factory.cpp @@ -143,9 +143,11 @@ void propg_fac_shared(boost::mpi3::communicator& world) WalkerSet wset(TG, doc3.getRoot(), InfoMap["info0"], rng); auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NPOL * NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NPOL * NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); wset.resize(nwalk, initial_guess[0], initial_guess[0]); // initial_guess[1](XXX.extension(0),{0,NAEB})); @@ -315,9 +317,11 @@ void propg_fac_distributed(boost::mpi3::communicator& world, int ngrp) WalkerSet wset(TG, doc3.getRoot(), InfoMap["info0"], rng); auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NPOL * NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NPOL * NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); wset.resize(nwalk, initial_guess[0], initial_guess[0]); const char* propg_xml_block0 = R"()"; diff --git a/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_base.hpp b/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_base.hpp index 66c09f8e4a..64d15513a8 100644 --- a/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_base.hpp +++ b/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_base.hpp @@ -82,8 +82,9 @@ class SlaterDetOperations_base template T 
MixedDensityMatrix(const MatA& hermA, const MatB& B, MatC&& C, T LogOverlapFactor, bool compact, bool herm = true) { - int NMO = (herm ? std::get<1>(hermA.sizes()) : std::get<0>(hermA.sizes())); - int NAEA = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + int NMO = (herm ? get<1>(hermA.sizes()) : get<0>(hermA.sizes())); + int NAEA = (herm ? get<0>(hermA.sizes()) : get<1>(hermA.sizes())); TMatrix TNN({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix TNM({NAEA, NMO}, buffer_manager.get_generator().template get_allocator()); TVector WORK(iextensions<1u>{work_size}, buffer_manager.get_generator().template get_allocator()); @@ -95,8 +96,10 @@ class SlaterDetOperations_base template T MixedDensityMatrix(const MatA& A, MatC&& C, T LogOverlapFactor, bool compact = false) { - int NMO = std::get<0>(A.sizes()); - int NAEA = std::get<1>(A.sizes()); + using std::get; + + int NMO = get<0>(A.sizes()); + int NAEA = get<1>(A.sizes()); TMatrix TNN({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix TNM({NAEA, NMO}, buffer_manager.get_generator().template get_allocator()); TVector WORK(iextensions<1u>{work_size}, buffer_manager.get_generator().template get_allocator()); @@ -113,8 +116,9 @@ class SlaterDetOperations_base bool compact = false, bool useSVD = false) { - int NMO = std::get<0>(A.sizes()); - int NAEA = std::get<1>(A.sizes()); + using std::get; + int NMO = get<0>(A.sizes()); + int NAEA = get<1>(A.sizes()); if (useSVD) { TMatrix TNN1({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); @@ -149,12 +153,13 @@ class SlaterDetOperations_base MatQ&& QQ0, bool compact = false) { - int Nact = std::get<0>(hermA.sizes()); - int NEL = std::get<1>(B.sizes()); - int NMO = std::get<0>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(QQ0.sizes()) == Nact); - assert(std::get<1>(QQ0.sizes()) == NEL); + using std::get; + int Nact = 
get<0>(hermA.sizes()); + int NEL = get<1>(B.sizes()); + int NMO = get<0>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(QQ0.sizes()) == Nact); + assert(get<1>(QQ0.sizes()) == NEL); TMatrix TNN({NEL, NEL}, buffer_manager.get_generator().template get_allocator()); TMatrix TAB({Nact, NEL}, buffer_manager.get_generator().template get_allocator()); TMatrix TNM({NEL, NMO}, buffer_manager.get_generator().template get_allocator()); @@ -174,10 +179,11 @@ class SlaterDetOperations_base integer* ref, bool compact = false) { - int Nact = std::get<0>(hermA.sizes()); - int NEL = std::get<1>(B.sizes()); - int NMO = std::get<0>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); + using std::get; + int Nact = get<0>(hermA.sizes()); + int NEL = get<1>(B.sizes()); + int NMO = get<0>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); TMatrix TNN({NEL, NEL}, buffer_manager.get_generator().template get_allocator()); TMatrix TAB({Nact, NEL}, buffer_manager.get_generator().template get_allocator()); TMatrix TNM({NEL, NMO}, buffer_manager.get_generator().template get_allocator()); @@ -191,7 +197,8 @@ class SlaterDetOperations_base template T Overlap(const MatA& A, T LogOverlapFactor) { - int NAEA = std::get<1>(A.sizes()); + using std::get; + int NAEA = get<1>(A.sizes()); TMatrix TNN({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix TNN2({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); IVector IWORK(iextensions<1u>{NAEA + 1}, buffer_manager.get_generator().template get_allocator()); @@ -201,7 +208,8 @@ class SlaterDetOperations_base template T Overlap(const MatA& hermA, const MatB& B, T LogOverlapFactor, bool herm = true) { - int NAEA = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + int NAEA = (herm ? 
get<0>(hermA.sizes()) : get<1>(hermA.sizes())); TMatrix TNN({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix TNN2({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); IVector IWORK(iextensions<1u>{NAEA + 1}, buffer_manager.get_generator().template get_allocator()); @@ -211,7 +219,8 @@ class SlaterDetOperations_base template T Overlap_noHerm(const MatA& A, const MatB& B, T LogOverlapFactor) { - int NAEA = std::get<1>(A.sizes()); + using std::get; + int NAEA = get<1>(A.sizes()); TMatrix TNN({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix TNN2({NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); IVector IWORK(iextensions<1u>{NAEA + 1}, buffer_manager.get_generator().template get_allocator()); @@ -222,11 +231,12 @@ class SlaterDetOperations_base template T OverlapForWoodbury(const MatA& hermA, const MatB& B, T LogOverlapFactor, integer* ref, MatC&& QQ0) { - int Nact = std::get<0>(hermA.sizes()); - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(QQ0.sizes()) == Nact); - assert(std::get<1>(QQ0.sizes()) == NEL); + using std::get; + int Nact = get<0>(hermA.sizes()); + int NEL = get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(QQ0.sizes()) == Nact); + assert(get<1>(QQ0.sizes()) == NEL); TMatrix TNN({NEL, NEL}, buffer_manager.get_generator().template get_allocator()); TMatrix TMN({Nact, NEL}, buffer_manager.get_generator().template get_allocator()); TVector WORK(iextensions<1u>{work_size}, buffer_manager.get_generator().template get_allocator()); @@ -239,14 +249,18 @@ class SlaterDetOperations_base void Propagate(Mat&& A, const MatP1& P1, const MatV& V, int order = 6, char TA = 'N', bool noncollinear = false) { int npol = noncollinear ? 
2 : 1; - int NMO = std::get<0>(A.sizes()); - int NAEA = std::get<1>(A.sizes()); + + using std::get; + int NMO = get<0>(A.sizes()); + int NAEA = get<1>(A.sizes()); int M = NMO / npol; assert(NMO % npol == 0); - assert(std::get<0>(P1.sizes()) == NMO); - assert(std::get<1>(P1.sizes()) == NMO); - assert(std::get<0>(V.sizes()) == M); - assert(std::get<1>(V.sizes()) == M); + + using std::get; + assert(get<0>(P1.sizes()) == NMO); + assert(get<1>(P1.sizes()) == NMO); + assert(get<0>(V.sizes()) == M); + assert(get<1>(V.sizes()) == M); TMatrix TMN({NMO, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix T1({M, NAEA}, buffer_manager.get_generator().template get_allocator()); TMatrix T2({M, NAEA}, buffer_manager.get_generator().template get_allocator()); @@ -279,10 +293,11 @@ class SlaterDetOperations_base template T Orthogonalize(Mat&& A, T LogOverlapFactor) { + using std::get; #if defined(ENABLE_CUDA) || defined(ENABLE_HIP) // QR on the transpose - int NMO = std::get<0>(A.sizes()); - int NAEA = std::get<1>(A.sizes()); + int NMO = get<0>(A.sizes()); + int NAEA = get<1>(A.sizes()); TMatrix AT({NAEA, NMO}, buffer_manager.get_generator().template get_allocator()); TVector scl(iextensions<1u>{NMO}, buffer_manager.get_generator().template get_allocator()); TVector TAU(iextensions<1u>{NMO}, buffer_manager.get_generator().template get_allocator()); @@ -292,10 +307,10 @@ class SlaterDetOperations_base ma::geqrf(AT, TAU, WORK); using ma::determinant_from_geqrf; using ma::scale_columns; - T res = determinant_from_geqrf(std::get<0>(AT.sizes()), AT.origin(), AT.stride(0), scl.origin(), LogOverlapFactor); + T res = determinant_from_geqrf(get<0>(AT.sizes()), AT.origin(), AT.stride(0), scl.origin(), LogOverlapFactor); ma::gqr(AT, TAU, WORK); ma::transpose(AT, A); - scale_columns(std::get<0>(A.sizes()), std::get<1>(A.sizes()), A.origin(), A.stride(0), scl.origin()); + scale_columns(get<0>(A.sizes()), get<1>(A.sizes()), A.origin(), A.stride(0), scl.origin()); #else int 
NMO = A.size(); TVector TAU(iextensions<1u>{NMO}, buffer_manager.get_generator().template get_allocator()); @@ -303,7 +318,9 @@ class SlaterDetOperations_base IVector IWORK(iextensions<1u>{NMO + 1}, buffer_manager.get_generator().template get_allocator()); ma::gelqf(std::forward(A), TAU, WORK); T res(0.0); - for (int i = 0; i < std::get<1>(A.sizes()); i++) + + using std::get; + for (int i = 0; i < get<1>(A.sizes()); i++) { if (real(A[i][i]) < 0) IWORK[i] = -1; @@ -313,8 +330,8 @@ class SlaterDetOperations_base } res = std::exp(res - LogOverlapFactor); ma::glq(std::forward(A), TAU, WORK); - for (int i = 0; i < std::get<0>(A.sizes()); ++i) - for (int j = 0; j < std::get<1>(A.sizes()); ++j) + for (int i = 0; i < get<0>(A.sizes()); ++i) + for (int j = 0; j < get<1>(A.sizes()); ++j) A[i][j] *= T(IWORK[j]); #endif return res; diff --git a/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_serial.hpp b/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_serial.hpp index 03f656e641..b2c82a0eee 100644 --- a/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_serial.hpp +++ b/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_serial.hpp @@ -149,21 +149,25 @@ class SlaterDetOperations_serial : public SlaterDetOperations_base::dimensionality == 2, " dimenionality == 2"); static_assert(std::decay::type::dimensionality == 3, " dimenionality == 3"); if (Ai.size() == 0) return; - assert(Ai.size() == std::get<0>(V.sizes())); + assert(Ai.size() == get<0>(V.sizes())); int nbatch = Ai.size(); int npol = noncollinear ? 
2 : 1; - int NMO = std::get<0>((*Ai[0]).sizes()); - int NAEA = std::get<1>((*Ai[0]).sizes()); + int NMO = get<0>((*Ai[0]).sizes()); + int NAEA = get<1>((*Ai[0]).sizes()); int M = NMO / npol; assert(NMO % npol == 0); - assert(std::get<0>(P1.sizes()) == NMO); - assert(std::get<1>(P1.sizes()) == NMO); - assert(std::get<1>(V.sizes()) == M); - assert(std::get<2>(V.sizes()) == M); + + using std::get; + assert(get<0>(P1.sizes()) == NMO); + assert(get<1>(P1.sizes()) == NMO); + assert(get<1>(V.sizes()) == M); + assert(get<2>(V.sizes()) == M); TTensor TMN({nbatch, NMO, NAEA}, buffer_manager.get_generator().template get_allocator()); TTensor T1({nbatch, NMO, NAEA}, buffer_manager.get_generator().template get_allocator()); TTensor T2({nbatch, NMO, NAEA}, buffer_manager.get_generator().template get_allocator()); @@ -233,8 +237,10 @@ class SlaterDetOperations_serial : public SlaterDetOperations_base::dimensionality == 2, "Wrong dimensionality"); static_assert(std::decay::type::dimensionality == 3, "Wrong dimensionality"); static_assert(std::decay::type::dimensionality == 1, "Wrong dimensionality"); - int NMO = (herm ? std::get<1>((*hermA[0]).sizes()) : std::get<0>((*hermA[0]).sizes())); - int NAEA = (herm ? std::get<0>((*hermA[0]).sizes()) : std::get<1>((*hermA[0]).sizes())); + + using std::get; + int NMO = (herm ? get<1>((*hermA[0]).sizes()) : get<0>((*hermA[0]).sizes())); + int NAEA = (herm ? get<0>((*hermA[0]).sizes()) : get<1>((*hermA[0]).sizes())); int nbatch = Bi.size(); assert(C.size() == nbatch); assert(ovlp.size() == nbatch); @@ -269,8 +275,10 @@ class SlaterDetOperations_serial : public SlaterDetOperations_base::dimensionality == 2, "Wrong dimensionality"); static_assert(pointedType::dimensionality == 2, "Wrong dimensionality"); static_assert(std::decay::type::dimensionality == 1, "Wrong dimensionality"); - int NMO = (herm ? std::get<1>((*Left[0]).sizes()) : std::get<0>((*Left[0]).sizes())); - int NAEA = (herm ? 
std::get<0>((*Left[0]).sizes()) : std::get<1>((*Left[0]).sizes())); + + using std::get; + int NMO = (herm ? get<1>((*Left[0]).sizes()) : get<0>((*Left[0]).sizes())); + int NAEA = (herm ? get<0>((*Left[0]).sizes()) : get<1>((*Left[0]).sizes())); int nbatch = Left.size(); assert(Right.size() == nbatch); assert(G.size() == nbatch); @@ -301,8 +309,10 @@ class SlaterDetOperations_serial : public SlaterDetOperations_base 0); static_assert(std::decay::type::dimensionality == 1, "Wrong dimensionality"); - int NMO = (herm ? std::get<1>((*hermA[0]).sizes()) : std::get<0>((*hermA[0]).sizes())); - int NAEA = (herm ? std::get<0>((*hermA[0]).sizes()) : std::get<1>((*hermA[0]).sizes())); + + using std::get; + int NMO = (herm ? get<1>((*hermA[0]).sizes()) : get<0>((*hermA[0]).sizes())); + int NAEA = (herm ? get<0>((*hermA[0]).sizes()) : get<1>((*hermA[0]).sizes())); int nbatch = Bi.size(); assert(ovlp.size() == nbatch); TTensor TNN3D({nbatch, NAEA, NAEA}, buffer_manager.get_generator().template get_allocator()); @@ -315,12 +325,14 @@ class SlaterDetOperations_serial : public SlaterDetOperations_base& Ai, T LogOverlapFactor, PTR detR) { static_assert(pointedType::dimensionality == 2, "Wrong dimensionality"); + + using std::get; #if defined(ENABLE_CUDA) || defined(ENABLE_HIP) // QR on the transpose if (Ai.size() == 0) return; - int NMO = std::get<0>((*Ai[0]).sizes()); - int NAEA = std::get<1>((*Ai[0]).sizes()); + int NMO = get<0>((*Ai[0]).sizes()); + int NAEA = get<1>((*Ai[0]).sizes()); int nbatch = Ai.size(); TTensor AT({nbatch, NAEA, NMO}, buffer_manager.get_generator().template get_allocator()); TMatrix T_({nbatch, NMO}, buffer_manager.get_generator().template get_allocator()); @@ -353,12 +365,13 @@ class SlaterDetOperations_serial : public SlaterDetOperations_base void BatchedOrthogonalize(std::vector& Ai, T LogOverlapFactor) { + using std::get; #if defined(ENABLE_CUDA) || defined(ENABLE_HIP) // QR on the transpose if (Ai.size() == 0) return; - int NMO = 
std::get<0>((*Ai[0]).sizes()); - int NAEA = std::get<1>((*Ai[0]).sizes()); + int NMO = get<0>((*Ai[0]).sizes()); + int NAEA = get<1>((*Ai[0]).sizes()); int nbatch = Ai.size(); TTensor AT({nbatch, NAEA, NMO}, buffer_manager.get_generator().template get_allocator()); TMatrix T_({nbatch, NMO}, buffer_manager.get_generator().template get_allocator()); diff --git a/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_shared.hpp b/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_shared.hpp index e4cf5b96f6..77ef5cc597 100644 --- a/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_shared.hpp +++ b/src/AFQMC/SlaterDeterminantOperations/SlaterDetOperations_shared.hpp @@ -74,8 +74,10 @@ class SlaterDetOperations_shared : public SlaterDetOperations_base(hermA.sizes()) : std::get<0>(hermA.sizes())); - int NAEA = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + + int NMO = (herm ? get<1>(hermA.sizes()) : get<0>(hermA.sizes())); + int NAEA = (herm ? 
get<0>(hermA.sizes()) : get<1>(hermA.sizes())); set_shm_buffer(comm, NAEA * (NAEA + NMO)); assert(SM_TMats->num_elements() >= NAEA * (NAEA + NMO)); boost::multi::array_ref TNN(to_address(SM_TMats->origin()), {NAEA, NAEA}); @@ -96,12 +98,14 @@ class SlaterDetOperations_shared : public SlaterDetOperations_base(hermA.sizes()); - int NEL = std::get<1>(B.sizes()); - int NMO = std::get<0>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(QQ0.sizes()) == Nact); - assert(std::get<1>(QQ0.sizes()) == NEL); + using std::get; + + int Nact = get<0>(hermA.sizes()); + int NEL = get<1>(B.sizes()); + int NMO = get<0>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(QQ0.sizes()) == Nact); + assert(get<1>(QQ0.sizes()) == NEL); set_shm_buffer(comm, NEL * (NEL + Nact + NMO)); assert(SM_TMats->num_elements() >= NEL * (NEL + Nact + NMO)); @@ -122,7 +126,8 @@ class SlaterDetOperations_shared : public SlaterDetOperations_base T Overlap(const MatA& hermA, const MatB& B, T LogOverlapFactor, communicator& comm, bool herm = true) { - int NAEA = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + int NAEA = (herm ? 
get<0>(hermA.sizes()) : get<1>(hermA.sizes())); set_shm_buffer(comm, 2 * NAEA * NAEA); assert(SM_TMats->num_elements() >= 2 * NAEA * NAEA); boost::multi::array_ref TNN(to_address(SM_TMats->origin()), {NAEA, NAEA}); @@ -139,11 +144,13 @@ class SlaterDetOperations_shared : public SlaterDetOperations_base(hermA.sizes()); - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(QQ0.sizes()) == Nact); - assert(std::get<1>(QQ0.sizes()) == NEL); + using std::get; + + int Nact = get<0>(hermA.sizes()); + int NEL = get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(QQ0.sizes()) == Nact); + assert(get<1>(QQ0.sizes()) == NEL); set_shm_buffer(comm, NEL * (Nact + NEL)); assert(SM_TMats->num_elements() >= NEL * (Nact + NEL)); boost::multi::array_ref TNN(to_address(SM_TMats->origin()), {NEL, NEL}); @@ -164,14 +171,16 @@ class SlaterDetOperations_shared : public SlaterDetOperations_base(A.sizes()); - int NAEA = std::get<1>(A.sizes()); + + using std::get; + int NMO = get<0>(A.sizes()); + int NAEA = get<1>(A.sizes()); int M = NMO / npol; assert(NMO % npol == 0); - assert(std::get<0>(P1.sizes()) == NMO); - assert(std::get<1>(P1.sizes()) == NMO); - assert(std::get<0>(V.sizes()) == M); - assert(std::get<1>(V.sizes()) == M); + assert(get<0>(P1.sizes()) == NMO); + assert(get<1>(P1.sizes()) == NMO); + assert(get<0>(V.sizes()) == M); + assert(get<1>(V.sizes()) == M); set_shm_buffer(comm, NAEA * (NMO + 2 * M)); assert(SM_TMats->num_elements() >= NAEA * (NMO + 2 * M)); boost::multi::array_ref T0(to_address(SM_TMats->origin()), {NMO, NAEA}); diff --git a/src/AFQMC/SlaterDeterminantOperations/apply_expM.hpp b/src/AFQMC/SlaterDeterminantOperations/apply_expM.hpp index 9d47d465d8..2ea72ca3bb 100644 --- a/src/AFQMC/SlaterDeterminantOperations/apply_expM.hpp +++ b/src/AFQMC/SlaterDeterminantOperations/apply_expM.hpp @@ -36,12 +36,13 @@ namespace base template inline void apply_expM(const MatA& V, 
MatB&& S, MatC& T1, MatC& T2, int order = 6, char TA = 'N') { - assert(std::get<0>(V.sizes()) == std::get<1>(V.sizes())); - assert(std::get<1>(V.sizes()) == std::get<0>(S.sizes())); - assert(std::get<0>(S.sizes()) == std::get<0>(T1.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<0>(S.sizes()) == std::get<0>(T2.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T2.sizes())); + using std::get; + assert(get<0>(V.sizes()) == get<1>(V.sizes())); + assert(get<1>(V.sizes()) == get<0>(S.sizes())); + assert(get<0>(S.sizes()) == get<0>(T1.sizes())); + assert(get<1>(S.sizes()) == get<1>(T1.sizes())); + assert(get<0>(S.sizes()) == get<0>(T2.sizes())); + assert(get<1>(S.sizes()) == get<1>(T2.sizes())); using ma::H; using ma::T; @@ -56,7 +57,7 @@ inline void apply_expM(const MatA& V, MatB&& S, MatC& T1, MatC& T2, int order = // getting around issue in multi, fix later //T1 = S; - T1.sliced(0, std::get<0>(T1.sizes())) = S; + T1.sliced(0, get<0>(T1.sizes())) = S; for (int n = 1; n <= order; n++) { ComplexType fact = im * static_cast(1.0 / static_cast(n)); @@ -82,12 +83,13 @@ namespace shm template inline void apply_expM(const MatA& V, MatB&& S, MatC& T1, MatC& T2, communicator& comm, int order = 6, char TA = 'N') { - assert(std::get<0>(V.sizes()) == std::get<0>(S.sizes())); - assert(std::get<1>(V.sizes()) == std::get<0>(S.sizes())); - assert(std::get<0>(S.sizes()) == std::get<0>(T1.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<0>(S.sizes()) == std::get<0>(T2.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T2.sizes())); + using std::get; + assert(get<0>(V.sizes()) == get<0>(S.sizes())); + assert(get<1>(V.sizes()) == get<0>(S.sizes())); + assert(get<0>(S.sizes()) == get<0>(T1.sizes())); + assert(get<1>(S.sizes()) == get<1>(T1.sizes())); + assert(get<0>(S.sizes()) == get<0>(T2.sizes())); + assert(get<1>(S.sizes()) == get<1>(T2.sizes())); using ComplexType = typename 
std::decay::type::element; @@ -118,7 +120,7 @@ inline void apply_expM(const MatA& V, MatB&& S, MatC& T1, MatC& T2, communicator ma::product(fact, V.sliced(M0, Mn), *pT1, zero, (*pT2).sliced(M0, Mn)); // overload += ??? for (int i = M0; i < Mn; i++) - for (int j = 0, je = std::get<1>(S.sizes()); j < je; j++) + for (int j = 0, je = get<1>(S.sizes()); j < je; j++) S[i][j] += (*pT2)[i][j]; comm.barrier(); std::swap(pT1, pT2); @@ -139,22 +141,24 @@ inline void apply_expM(const MatA& V, MatB&& S, MatC& T1, MatC& T2, int order = static_assert(std::decay::type::dimensionality == 3, " batched::apply_expM::dimenionality == 3"); static_assert(std::decay::type::dimensionality == 3, " batched::apply_expM::dimenionality == 3"); static_assert(std::decay::type::dimensionality == 3, " batched::apply_expM::dimenionality == 3"); - assert(std::get<0>(V.sizes()) == std::get<0>(S.sizes())); - assert(std::get<0>(V.sizes()) == std::get<0>(T1.sizes())); - assert(std::get<0>(V.sizes()) == std::get<0>(T2.sizes())); - assert(std::get<1>(V.sizes()) == std::get<2>(V.sizes())); - assert(std::get<2>(V.sizes()) == std::get<1>(S.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<2>(S.sizes()) == std::get<2>(T1.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T2.sizes())); - assert(std::get<2>(S.sizes()) == std::get<2>(T2.sizes())); + + using std::get; + assert(get<0>(V.sizes()) == get<0>(S.sizes())); + assert(get<0>(V.sizes()) == get<0>(T1.sizes())); + assert(get<0>(V.sizes()) == get<0>(T2.sizes())); + assert(get<1>(V.sizes()) == get<2>(V.sizes())); + assert(get<2>(V.sizes()) == get<1>(S.sizes())); + assert(get<1>(S.sizes()) == get<1>(T1.sizes())); + assert(get<2>(S.sizes()) == get<2>(T1.sizes())); + assert(get<1>(S.sizes()) == get<1>(T2.sizes())); + assert(get<2>(S.sizes()) == get<2>(T2.sizes())); // for now limit to continuous - assert(S.stride(0) == std::get<1>(S.sizes()) * std::get<2>(S.sizes())); - assert(T1.stride(0) == std::get<1>(T1.sizes()) * 
std::get<2>(T1.sizes())); - assert(T2.stride(0) == std::get<1>(T2.sizes()) * std::get<2>(T2.sizes())); - assert(S.stride(1) == std::get<2>(S.sizes())); - assert(T1.stride(1) == std::get<2>(T1.sizes())); - assert(T2.stride(1) == std::get<2>(T2.sizes())); + assert(S.stride(0) == get<1>(S.sizes()) * get<2>(S.sizes())); + assert(T1.stride(0) == get<1>(T1.sizes()) * get<2>(T1.sizes())); + assert(T2.stride(0) == get<1>(T2.sizes()) * get<2>(T2.sizes())); + assert(S.stride(1) == get<2>(S.sizes())); + assert(T1.stride(1) == get<2>(T1.sizes())); + assert(T2.stride(1) == get<2>(T2.sizes())); assert(S.stride(2) == 1); assert(T1.stride(2) == 1); assert(T2.stride(2) == 1); @@ -197,22 +201,24 @@ inline void apply_expM_noncollinear(const MatA& V, MatB&& S, MatC& T1, MatC& T2, static_assert(std::decay::type::dimensionality == 3, " batched::apply_expM::dimenionality == 3"); static_assert(std::decay::type::dimensionality == 3, " batched::apply_expM::dimenionality == 3"); static_assert(std::decay::type::dimensionality == 3, " batched::apply_expM::dimenionality == 3"); - assert(std::get<0>(V.sizes()) * 2 == std::get<0>(S.sizes())); - assert(std::get<0>(V.sizes()) * 2 == std::get<0>(T1.sizes())); - assert(std::get<0>(V.sizes()) * 2 == std::get<0>(T2.sizes())); - assert(std::get<1>(V.sizes()) == std::get<2>(V.sizes())); - assert(std::get<2>(V.sizes()) == std::get<1>(S.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<2>(S.sizes()) == std::get<2>(T1.sizes())); - assert(std::get<1>(S.sizes()) == std::get<1>(T2.sizes())); - assert(std::get<2>(S.sizes()) == std::get<2>(T2.sizes())); + + using std::get; + assert(get<0>(V.sizes()) * 2 == get<0>(S.sizes())); + assert(get<0>(V.sizes()) * 2 == get<0>(T1.sizes())); + assert(get<0>(V.sizes()) * 2 == get<0>(T2.sizes())); + assert(get<1>(V.sizes()) == get<2>(V.sizes())); + assert(get<2>(V.sizes()) == get<1>(S.sizes())); + assert(get<1>(S.sizes()) == get<1>(T1.sizes())); + assert(get<2>(S.sizes()) == 
get<2>(T1.sizes())); + assert(get<1>(S.sizes()) == get<1>(T2.sizes())); + assert(get<2>(S.sizes()) == get<2>(T2.sizes())); // for now limit to continuous - assert(S.stride(0) == std::get<1>(S.sizes()) * std::get<2>(S.sizes())); - assert(T1.stride(0) == std::get<1>(T1.sizes()) * std::get<2>(T1.sizes())); - assert(T2.stride(0) == std::get<1>(T2.sizes()) * std::get<2>(T2.sizes())); - assert(S.stride(1) == std::get<2>(S.sizes())); - assert(T1.stride(1) == std::get<2>(T1.sizes())); - assert(T2.stride(1) == std::get<2>(T2.sizes())); + assert(S.stride(0) == get<1>(S.sizes()) * get<2>(S.sizes())); + assert(T1.stride(0) == get<1>(T1.sizes()) * get<2>(T1.sizes())); + assert(T2.stride(0) == get<1>(T2.sizes()) * get<2>(T2.sizes())); + assert(S.stride(1) == get<2>(S.sizes())); + assert(T1.stride(1) == get<2>(T1.sizes())); + assert(T2.stride(1) == get<2>(T2.sizes())); assert(S.stride(2) == 1); assert(T1.stride(2) == 1); assert(T2.stride(2) == 1); @@ -230,9 +236,9 @@ inline void apply_expM_noncollinear(const MatA& V, MatB&& S, MatC& T1, MatC& T2, int nbatch = S.size(); int ldv = V.stride(1); - int M = std::get<2>(T2.sizes()); - int N = std::get<1>(T2.sizes()); - int K = std::get<1>(T1.sizes()); + int M = get<2>(T2.sizes()); + int N = get<1>(T2.sizes()); + int K = get<1>(T1.sizes()); std::vector Vi; std::vector T1i; diff --git a/src/AFQMC/SlaterDeterminantOperations/mixed_density_matrix.hpp b/src/AFQMC/SlaterDeterminantOperations/mixed_density_matrix.hpp index 3fea256bb5..d987951d33 100644 --- a/src/AFQMC/SlaterDeterminantOperations/mixed_density_matrix.hpp +++ b/src/AFQMC/SlaterDeterminantOperations/mixed_density_matrix.hpp @@ -62,24 +62,26 @@ Tp MixedDensityMatrix(const MatA& hermA, bool compact = true, bool herm = true) { + using std::get; // check dimensions are consistent - int NMO = (herm ? std::get<1>(hermA.sizes()) : std::get<0>(hermA.sizes())); - int NEL = (herm ? 
std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); - assert(NMO == std::get<0>(B.sizes())); - assert(NEL == std::get<1>(B.sizes())); + int NMO = (herm ? get<1>(hermA.sizes()) : get<0>(hermA.sizes())); + int NEL = (herm ? get<0>(hermA.sizes()) : get<1>(hermA.sizes())); + + assert(NMO == get<0>(B.sizes())); + assert(NEL == get<1>(B.sizes())); assert(NEL == T1.size()); - assert(std::get<1>(B.sizes()) == std::get<1>(T1.sizes())); + assert(get<1>(B.sizes()) == get<1>(T1.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(T1.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert(std::get<1>(T2.sizes()) == B.size()); - assert(T2.size() == std::get<1>(T1.sizes())); + assert(get<1>(T2.sizes()) == B.size()); + assert(T2.size() == get<1>(T1.sizes())); assert(C.size() == NMO); - assert(std::get<1>(C.sizes()) == std::get<1>(T2.sizes())); + assert(get<1>(C.sizes()) == get<1>(T2.sizes())); } using ma::H; @@ -152,26 +154,28 @@ Tp MixedDensityMatrixForWoodbury(const MatA& hermA, TBuffer& WORK, bool compact = true) { + using std::get; + // check dimensions are consistent - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(TAB.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TAB.sizes())); - assert(std::get<1>(B.sizes()) == std::get<0>(TNN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(QQ0.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(QQ0.sizes())); + int NEL = get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(TAB.sizes())); + assert(get<1>(B.sizes()) == get<1>(TAB.sizes())); + assert(get<1>(B.sizes()) == get<0>(TNN.sizes())); + assert(get<1>(B.sizes()) == 
get<1>(TNN.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(QQ0.sizes())); + assert(get<1>(B.sizes()) == get<1>(QQ0.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(TNN.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert(std::get<1>(TNM.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(TNM.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(C.sizes()) == std::get<1>(hermA.sizes())); - assert(std::get<1>(C.sizes()) == std::get<1>(TNM.sizes())); + assert(get<1>(TNM.sizes()) == get<0>(B.sizes())); + assert(get<0>(TNM.sizes()) == get<1>(TNN.sizes())); + assert(get<0>(C.sizes()) == get<1>(hermA.sizes())); + assert(get<1>(C.sizes()) == get<1>(TNM.sizes())); } using ma::T; @@ -233,24 +237,26 @@ Tp MixedDensityMatrixFromConfiguration(const MatA& hermA, TBuffer& WORK, bool compact = true) { + using std::get; + // check dimensions are consistent - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(TAB.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TAB.sizes())); - assert(std::get<1>(B.sizes()) == std::get<0>(TNN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TNN.sizes())); + int NEL = get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(TAB.sizes())); + assert(get<1>(B.sizes()) == get<1>(TAB.sizes())); + assert(get<1>(B.sizes()) == get<0>(TNN.sizes())); + assert(get<1>(B.sizes()) == get<1>(TNN.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(TNN.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert(std::get<1>(TNM.sizes()) == std::get<0>(B.sizes())); - 
assert(std::get<0>(TNM.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(C.sizes()) == std::get<1>(hermA.sizes())); - assert(std::get<1>(C.sizes()) == std::get<1>(TNM.sizes())); + assert(get<1>(TNM.sizes()) == get<0>(B.sizes())); + assert(get<0>(TNM.sizes()) == get<1>(TNN.sizes())); + assert(get<0>(C.sizes()) == get<1>(hermA.sizes())); + assert(get<1>(C.sizes()) == get<1>(TNM.sizes())); } using ma::T; @@ -316,22 +322,24 @@ Tp MixedDensityMatrix_noHerm(const MatA& A, TBuffer& WORK, bool compact = true) { + using std::get; + // check dimensions are consistent - assert(std::get<0>(A.sizes()) == std::get<0>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<1>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<0>(T1.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(T1.sizes())); + assert(get<0>(A.sizes()) == get<0>(B.sizes())); + assert(get<1>(A.sizes()) == get<1>(B.sizes())); + assert(get<1>(A.sizes()) == get<0>(T1.sizes())); + assert(get<1>(B.sizes()) == get<1>(T1.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(T1.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert(std::get<1>(T2.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(T2.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<0>(C.sizes()) == std::get<0>(A.sizes())); - assert(std::get<1>(C.sizes()) == std::get<1>(T2.sizes())); + assert(get<1>(T2.sizes()) == get<0>(B.sizes())); + assert(get<0>(T2.sizes()) == get<1>(T1.sizes())); + assert(get<0>(C.sizes()) == get<0>(A.sizes())); + assert(get<1>(C.sizes()) == get<1>(T2.sizes())); } using ma::H; @@ -380,27 +388,29 @@ Tp MixedDensityMatrix_noHerm_wSVD(const MatA& A, TBuffer& WORK, bool compact = true) { + using std::get; + // check dimensions are consistent - assert(std::get<0>(A.sizes()) == std::get<0>(B.sizes())); - assert(std::get<1>(A.sizes()) == 
std::get<1>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<0>(U.sizes())); // [U] = [NxN] - assert(std::get<1>(A.sizes()) == std::get<1>(U.sizes())); - assert(std::get<1>(A.sizes()) == std::get<0>(VT.sizes())); // [V] = [NxN] - assert(std::get<1>(A.sizes()) == std::get<1>(VT.sizes())); - assert(std::get<1>(A.sizes()) <= (6 * S.size() + 1)); // [S] = [N+1] - assert(std::get<1>(A.sizes()) == std::get<0>(UA.sizes())); // [UA] = [NxM] - assert(std::get<0>(A.sizes()) == std::get<1>(UA.sizes())); + assert(get<0>(A.sizes()) == get<0>(B.sizes())); + assert(get<1>(A.sizes()) == get<1>(B.sizes())); + assert(get<1>(A.sizes()) == get<0>(U.sizes())); // [U] = [NxN] + assert(get<1>(A.sizes()) == get<1>(U.sizes())); + assert(get<1>(A.sizes()) == get<0>(VT.sizes())); // [V] = [NxN] + assert(get<1>(A.sizes()) == get<1>(VT.sizes())); + assert(get<1>(A.sizes()) <= (6 * S.size() + 1)); // [S] = [N+1] + assert(get<1>(A.sizes()) == get<0>(UA.sizes())); // [UA] = [NxM] + assert(get<0>(A.sizes()) == get<1>(UA.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(B.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(B.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert( std::get<0>(A.sizes()) == std::get<0>(BV.sizes()) ); // [BV] = [MxN] - assert( std::get<1>(A.sizes()) == std::get<1>(BV.sizes()) ); - assert( std::get<0>(C.sizes()) == std::get<0>(A.sizes()) ); - assert( std::get<1>(C.sizes()) == std::get<0>(A.sizes()) ); + assert( get<0>(A.sizes()) == get<0>(BV.sizes()) ); // [BV] = [MxN] + assert( get<1>(A.sizes()) == get<1>(BV.sizes()) ); + assert( get<0>(C.sizes()) == get<0>(A.sizes()) ); + assert( get<1>(C.sizes()) == get<0>(A.sizes()) ); } using std::real; @@ -453,7 +463,7 @@ Tp MixedDensityMatrix_noHerm_wSVD(const MatA& A, // VT = VT * inv(S), which works since S is diagonal and real - term_by_term_matrix_vector(ma::TOp_DIV, 0, std::get<0>(VT.sizes()), 
std::get<1>(VT.sizes()), ma::pointer_dispatch(VT.origin()), VT.stride(0), + term_by_term_matrix_vector(ma::TOp_DIV, 0, get<0>(VT.sizes()), get<1>(VT.sizes()), ma::pointer_dispatch(VT.origin()), VT.stride(0), ma::pointer_dispatch(S.origin()), 1); // BV = H(VT) * H(U) @@ -473,7 +483,7 @@ Tp MixedDensityMatrix_noHerm_wSVD(const MatA& A, ma::product(B, H(VT), BV); // BV = BV * inv(S), which works since S is diagonal and real - term_by_term_matrix_vector(ma::TOp_DIV, 1, std::get<0>(BV.sizes()), std::get<1>(BV.sizes()), ma::pointer_dispatch(BV.origin()), BV.stride(0), + term_by_term_matrix_vector(ma::TOp_DIV, 1, get<0>(BV.sizes()), get<1>(BV.sizes()), ma::pointer_dispatch(BV.origin()), BV.stride(0), ma::pointer_dispatch(S.origin()), 1); // UA = H(U) * H(A) @@ -505,13 +515,14 @@ Tp Overlap(const MatA& hermA, Buffer&& WORK, bool herm = true) { - int NMO = (herm ? std::get<1>(hermA.sizes()) : std::get<0>(hermA.sizes())); - int NEL = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + int NMO = (herm ? get<1>(hermA.sizes()) : get<0>(hermA.sizes())); + int NEL = (herm ? 
get<0>(hermA.sizes()) : get<1>(hermA.sizes())); // check dimensions are consistent - assert(NMO == std::get<0>(B.sizes())); - assert(NEL == std::get<1>(B.sizes())); - assert(NEL == std::get<0>(T1.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(T1.sizes())); + assert(NMO == get<0>(B.sizes())); + assert(NEL == get<1>(B.sizes())); + assert(NEL == get<0>(T1.sizes())); + assert(get<1>(B.sizes()) == get<1>(T1.sizes())); using ma::H; using ma::T; @@ -544,15 +555,17 @@ Tp OverlapForWoodbury(const MatA& hermA, IBuffer& IWORK, TBuffer& WORK) { + using std::get; + // check dimensions are consistent - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(TMN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TMN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<0>(TNN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(QQ0.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(QQ0.sizes())); + int NEL = get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(TMN.sizes())); + assert(get<1>(B.sizes()) == get<1>(TMN.sizes())); + assert(get<1>(B.sizes()) == get<0>(TNN.sizes())); + assert(get<1>(B.sizes()) == get<1>(TNN.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(QQ0.sizes())); + assert(get<1>(B.sizes()) == get<1>(QQ0.sizes())); using ma::T; @@ -592,11 +605,12 @@ Tp OverlapForWoodbury(const MatA& hermA, template Tp Overlap_noHerm(const MatA& A, const MatB& B, Tp LogOverlapFactor, Mat&& T1, IBuffer& IWORK, Buffer& WORK) { + using std::get; // check dimensions are consistent - assert(std::get<0>(A.sizes()) == std::get<0>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<1>(B.sizes())); - assert(std::get<1>(A.sizes()) == std::get<0>(T1.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(T1.sizes())); + 
assert(get<0>(A.sizes()) == get<0>(B.sizes())); + assert(get<1>(A.sizes()) == get<1>(B.sizes())); + assert(get<1>(A.sizes()) == get<0>(T1.sizes())); + assert(get<1>(B.sizes()) == get<1>(T1.sizes())); using ma::H; using ma::T; @@ -642,30 +656,32 @@ Tp MixedDensityMatrix(const MatA& hermA, bool compact = true, bool herm = true) { - int NMO = (herm ? std::get<1>(hermA.sizes()) : std::get<0>(hermA.sizes())); - int NEL = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + int NMO = (herm ? get<1>(hermA.sizes()) : get<0>(hermA.sizes())); + int NEL = (herm ? get<0>(hermA.sizes()) : get<1>(hermA.sizes())); // check dimensions are consistent - assert(NMO == std::get<0>(B.sizes())); - assert(NEL == std::get<1>(B.sizes())); - assert(NEL == std::get<0>(T1.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(T1.sizes())); + using std::get; + assert(NMO == get<0>(B.sizes())); + assert(NEL == get<1>(B.sizes())); + assert(NEL == get<0>(T1.sizes())); + assert(get<1>(B.sizes()) == get<1>(T1.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(T1.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert(std::get<1>(T2.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(T2.sizes()) == std::get<1>(T1.sizes())); - assert(std::get<0>(C.sizes()) == NMO); - assert(std::get<1>(C.sizes()) == std::get<1>(T2.sizes())); + assert(get<1>(T2.sizes()) == get<0>(B.sizes())); + assert(get<0>(T2.sizes()) == get<1>(T1.sizes())); + assert(get<0>(C.sizes()) == NMO); + assert(get<1>(C.sizes()) == get<1>(T2.sizes())); } using ma::H; using ma::T; - int N0, Nn, sz = std::get<1>(B.sizes()); + int N0, Nn, sz = get<1>(B.sizes()); std::tie(N0, Nn) = FairDivideBoundary(comm.rank(), sz, comm.size()); // T(B)*conj(A) @@ -718,7 +734,7 @@ Tp MixedDensityMatrix(const MatA& hermA, comm.barrier(); - sz = 
std::get<1>(T2.sizes()); + sz = get<1>(T2.sizes()); std::tie(N0, Nn) = FairDivideBoundary(comm.rank(), sz, comm.size()); // C = conj(A) * T2 @@ -734,7 +750,7 @@ Tp MixedDensityMatrix(const MatA& hermA, comm.barrier(); - sz = std::get<1>(T2.sizes()); + sz = get<1>(T2.sizes()); std::tie(N0, Nn) = FairDivideBoundary(comm.rank(), sz, comm.size()); // C = T( B * T2) = T(T2) * T(B) @@ -768,18 +784,20 @@ Tp Overlap(const MatA& hermA, communicator& comm, bool herm = true) { - int NMO = (herm ? std::get<1>(hermA.sizes()) : std::get<0>(hermA.sizes())); - int NEL = (herm ? std::get<0>(hermA.sizes()) : std::get<1>(hermA.sizes())); + using std::get; + + int NMO = (herm ? get<1>(hermA.sizes()) : get<0>(hermA.sizes())); + int NEL = (herm ? get<0>(hermA.sizes()) : get<1>(hermA.sizes())); // check dimensions are consistent - assert(NMO == std::get<0>(B.sizes())); - assert(NEL == std::get<1>(B.sizes())); - assert(NEL == std::get<0>(T1.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(T1.sizes())); + assert(NMO == get<0>(B.sizes())); + assert(NEL == get<1>(B.sizes())); + assert(NEL == get<0>(T1.sizes())); + assert(get<1>(B.sizes()) == get<1>(T1.sizes())); using ma::H; using ma::T; - int N0, Nn, sz = std::get<1>(B.sizes()); + int N0, Nn, sz = get<1>(B.sizes()); std::tie(N0, Nn) = FairDivideBoundary(comm.rank(), sz, comm.size()); // T(B)*conj(A) @@ -822,19 +840,20 @@ Tp OverlapForWoodbury(const MatA& hermA, TBuffer& WORK, communicator& comm) { + using std::get; // check dimensions are consistent - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(TMN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TMN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<0>(TNN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(QQ0.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(QQ0.sizes())); + int NEL = 
get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(TMN.sizes())); + assert(get<1>(B.sizes()) == get<1>(TMN.sizes())); + assert(get<1>(B.sizes()) == get<0>(TNN.sizes())); + assert(get<1>(B.sizes()) == get<1>(TNN.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(QQ0.sizes())); + assert(get<1>(B.sizes()) == get<1>(QQ0.sizes())); using ma::T; - int N0, Nn, sz = std::get<1>(B.sizes()); + int N0, Nn, sz = get<1>(B.sizes()); std::tie(N0, Nn) = FairDivideBoundary(comm.rank(), sz, comm.size()); Tp ovlp; @@ -887,26 +906,28 @@ Tp MixedDensityMatrixForWoodbury(const MatA& hermA, communicator& comm, bool compact = true) { - // check dimensions are consistent - int NEL = std::get<1>(B.sizes()); - assert(std::get<1>(hermA.sizes()) == std::get<0>(B.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(TAB.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TAB.sizes())); - assert(std::get<1>(B.sizes()) == std::get<0>(TNN.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(hermA.sizes()) == std::get<0>(QQ0.sizes())); - assert(std::get<1>(B.sizes()) == std::get<1>(QQ0.sizes())); + using std::get; + + // check dimensions are consistent + int NEL = get<1>(B.sizes()); + assert(get<1>(hermA.sizes()) == get<0>(B.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(TAB.sizes())); + assert(get<1>(B.sizes()) == get<1>(TAB.sizes())); + assert(get<1>(B.sizes()) == get<0>(TNN.sizes())); + assert(get<1>(B.sizes()) == get<1>(TNN.sizes())); + assert(get<0>(hermA.sizes()) == get<0>(QQ0.sizes())); + assert(get<1>(B.sizes()) == get<1>(QQ0.sizes())); if (compact) { - assert(std::get<0>(C.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<1>(C.sizes()) == std::get<0>(B.sizes())); + assert(get<0>(C.sizes()) == get<1>(TNN.sizes())); + assert(get<1>(C.sizes()) == get<0>(B.sizes())); } else { - assert(std::get<1>(TNM.sizes()) == std::get<0>(B.sizes())); - 
assert(std::get<0>(TNM.sizes()) == std::get<1>(TNN.sizes())); - assert(std::get<0>(C.sizes()) == std::get<1>(hermA.sizes())); - assert(std::get<1>(C.sizes()) == std::get<1>(TNM.sizes())); + assert(get<1>(TNM.sizes()) == get<0>(B.sizes())); + assert(get<0>(TNM.sizes()) == get<1>(TNN.sizes())); + assert(get<0>(C.sizes()) == get<1>(hermA.sizes())); + assert(get<1>(C.sizes()) == get<1>(TNM.sizes())); } using ma::T; @@ -933,7 +954,7 @@ Tp MixedDensityMatrixForWoodbury(const MatA& hermA, comm.broadcast_n(&ovlp, 1, 0); int P0, Pn; - std::tie(P0, Pn) = FairDivideBoundary(comm.rank(), int(std::get<0>(TAB.sizes())), comm.size()); + std::tie(P0, Pn) = FairDivideBoundary(comm.rank(), int(get<0>(TAB.sizes())), comm.size()); // QQ0 = TAB * inv(TNN) if (P0 != Pn) @@ -951,7 +972,7 @@ Tp MixedDensityMatrixForWoodbury(const MatA& hermA, if (N0 != Nn) ma::product(T(TNN(TNN.extension(0), {N0, Nn})), T(B), TNM.sliced(N0, Nn)); - int sz = std::get<1>(TNM.sizes()); + int sz = get<1>(TNM.sizes()); std::tie(N0, Nn) = FairDivideBoundary(comm.rank(), sz, comm.size()); comm.barrier(); @@ -992,26 +1013,27 @@ void MixedDensityMatrix(std::vector& hermA, using ma::H; using ma::T; + using std::get; int nbatch = Bi.size(); - int NMO = (herm ? std::get<1>((*hermA[0]).sizes()) : std::get<0>((*hermA[0]).sizes())); - int NEL = (herm ? std::get<0>((*hermA[0]).sizes()) : std::get<1>((*hermA[0]).sizes())); + int NMO = (herm ? get<1>((*hermA[0]).sizes()) : get<0>((*hermA[0]).sizes())); + int NEL = (herm ? 
get<0>((*hermA[0]).sizes()) : get<1>((*hermA[0]).sizes())); - assert(std::get<0>((*Bi[0]).sizes()) == NMO); - assert(std::get<1>((*Bi[0]).sizes()) == NEL); + assert(get<0>((*Bi[0]).sizes()) == NMO); + assert(get<1>((*Bi[0]).sizes()) == NEL); assert(C.size() == nbatch); - assert(std::get<2>(C.sizes()) == NMO); + assert(get<2>(C.sizes()) == NMO); if (compact) - assert(std::get<1>(C.sizes()) == NEL); + assert(get<1>(C.sizes()) == NEL); else - assert(std::get<1>(C.sizes()) == NMO); + assert(get<1>(C.sizes()) == NMO); assert(ovlp.size() == nbatch); - assert(std::get<1>(TNN3D.sizes()) == NEL); - assert(std::get<2>(TNN3D.sizes()) == NEL); + assert(get<1>(TNN3D.sizes()) == NEL); + assert(get<2>(TNN3D.sizes()) == NEL); if (not compact) { - assert(std::get<0>(TNM3D.sizes()) == nbatch); - assert(std::get<1>(TNM3D.sizes()) == NEL); - assert(std::get<2>(TNM3D.sizes()) == NMO); + assert(get<0>(TNM3D.sizes()) == nbatch); + assert(get<1>(TNM3D.sizes()) == NEL); + assert(get<2>(TNM3D.sizes()) == NMO); } assert(IWORK.num_elements() >= nbatch * (NEL + 1)); assert(TNN3D.stride(1) == NEL); // needed by getriBatched @@ -1148,26 +1170,27 @@ void DensityMatrices(std::vector const& Left, using ma::H; using ma::T; + using std::get; int nbatch = Right.size(); - int NMO = (herm ? std::get<1>((*Left[0]).sizes()) : std::get<0>((*Left[0]).sizes())); - int NEL = (herm ? std::get<0>((*Left[0]).sizes()) : std::get<1>((*Left[0]).sizes())); + int NMO = (herm ? get<1>((*Left[0]).sizes()) : get<0>((*Left[0]).sizes())); + int NEL = (herm ? 
get<0>((*Left[0]).sizes()) : get<1>((*Left[0]).sizes())); - assert(std::get<0>((*Right[0]).sizes()) == NMO); - assert(std::get<1>((*Right[0]).sizes()) == NEL); + assert(get<0>((*Right[0]).sizes()) == NMO); + assert(get<1>((*Right[0]).sizes()) == NEL); assert(G.size() == nbatch); - assert(std::get<1>((*G[0]).sizes()) == NMO); + assert(get<1>((*G[0]).sizes()) == NMO); if (compact) assert((*G[0]).size() == NEL); else assert((*G[0]).size() == NMO); assert(ovlp.size() == nbatch); - assert(std::get<1>(TNN3D.sizes()) == NEL); - assert(std::get<2>(TNN3D.sizes()) == NEL); + assert(get<1>(TNN3D.sizes()) == NEL); + assert(get<2>(TNN3D.sizes()) == NEL); if (not compact) { - assert(std::get<0>(TNM3D.sizes()) == nbatch); - assert(std::get<1>(TNM3D.sizes()) == NEL); - assert(std::get<2>(TNM3D.sizes()) == NMO); + assert(get<0>(TNM3D.sizes()) == nbatch); + assert(get<1>(TNM3D.sizes()) == NEL); + assert(get<2>(TNM3D.sizes()) == NMO); } assert(IWORK.num_elements() >= nbatch * (NEL + 1)); @@ -1273,16 +1296,18 @@ void Overlap(std::vector& hermA, using ma::H; using ma::T; + using std::get; + int nbatch = Bi.size(); assert(hermA.size() >= nbatch); - int NMO = (herm ? std::get<1>((*hermA[0]).sizes()) : std::get<0>((*hermA[0]).sizes())); - int NEL = (herm ? std::get<0>((*hermA[0]).sizes()) : std::get<1>((*hermA[0]).sizes())); + int NMO = (herm ? get<1>((*hermA[0]).sizes()) : get<0>((*hermA[0]).sizes())); + int NEL = (herm ? 
get<0>((*hermA[0]).sizes()) : get<1>((*hermA[0]).sizes())); - assert(std::get<0>((*Bi[0]).sizes()) == NMO); - assert(std::get<1>((*Bi[0]).sizes()) == NEL); + assert(get<0>((*Bi[0]).sizes()) == NMO); + assert(get<1>((*Bi[0]).sizes()) == NEL); assert(ovlp.size() == nbatch); - assert(std::get<1>(TNN3D.sizes()) == NEL); - assert(std::get<2>(TNN3D.sizes()) == NEL); + assert(get<1>(TNN3D.sizes()) == NEL); + assert(get<2>(TNN3D.sizes()) == NEL); assert(IWORK.num_elements() >= nbatch * (NEL + 1)); using pointer = typename std::decay::type::element_ptr; diff --git a/src/AFQMC/SlaterDeterminantOperations/rotate.hpp b/src/AFQMC/SlaterDeterminantOperations/rotate.hpp index 0d32328ee8..3e6fa9c90e 100644 --- a/src/AFQMC/SlaterDeterminantOperations/rotate.hpp +++ b/src/AFQMC/SlaterDeterminantOperations/rotate.hpp @@ -73,15 +73,16 @@ void halfRotateCholeskyMatrix(WALKER_TYPES type, double cutoff = 1e-6, bool reserve_to_fit_ = true) { + using std::get; // for C++17 compatibility int NAEA = Alpha->size(0); int NAEB = Alpha->size(0); int NMO = Alpha->size(1); if (type == COLLINEAR) NAEB = Beta->size(0); - int nvec = std::get<1>(CholMat.sizes()); + int nvec = get<1>(CholMat.sizes()); int ncores = TG.getTotalCores(), coreid = TG.getCoreID(); - assert(std::get<0>(CholMat.sizes()) == NMO * NMO); + assert(get<0>(CholMat.sizes()) == NMO * NMO); assert(kN > k0); if (type == CLOSED && kN > NMO) APP_ABORT(" Error: kN > NMO in halfRotateCholeskyMatrix. 
\n"); @@ -296,18 +297,20 @@ SpCType_shm_csr_matrix halfRotateCholeskyMatrixForBias(WALKER_TYPES type, SpVType_shm_csr_matrix const& CholMat, double cutoff = 1e-6) { + using std::get; + int NAEA = Alpha->size(0); int NAEB = Alpha->size(0); int NMO = Alpha->size(1); if (type != CLOSED) NAEB = Beta->size(0); - int nvec = std::get<1>(CholMat.sizes()); + int nvec = get<1>(CholMat.sizes()); int ncores = TG.getTotalCores(), coreid = TG.getCoreID(); // to speed up, generate new communicator for eqv_nodes and split full work among all // cores in this comm. Then build from distributed container? - assert(std::get<0>(CholMat.sizes()) == NMO * NMO); + assert(get<0>(CholMat.sizes()) == NMO * NMO); std::size_t Qdim = NAEA * NMO; if (type == COLLINEAR) @@ -471,15 +474,17 @@ void halfRotateCholeskyMatrix(WALKER_TYPES type, bool conjV = false, double cutoff = 1e-6) { + using std::get; + int NAEA = Alpha->size(0); int NAEB = 0; int NMO = Alpha->size(1); if (type == COLLINEAR) NAEB = Beta->size(0); - int nvec = std::get<1>(CholMat.sizes()); + int nvec = get<1>(CholMat.sizes()); int ncores = TG.getTotalCores(), coreid = TG.getCoreID(); - assert(std::get<0>(CholMat.sizes()) == NMO * NMO); + assert(get<0>(CholMat.sizes()) == NMO * NMO); if (type == CLOSED && kN > NMO) APP_ABORT(" Error: kN > NMO in halfRotateCholeskyMatrix. 
\n"); @@ -500,13 +505,13 @@ void halfRotateCholeskyMatrix(WALKER_TYPES type, int Qdim = NAEA * (kN_alpha - k0_alpha) + NAEB * (kN_beta - k0_beta); if (transpose) { - assert(std::get<0>(Q.sizes()) == nvec); - assert(std::get<1>(Q.sizes()) == Qdim); + assert(get<0>(Q.sizes()) == nvec); + assert(get<1>(Q.sizes()) == Qdim); } else { - assert(std::get<0>(Q.sizes()) == Qdim); - assert(std::get<1>(Q.sizes()) == nvec); + assert(get<0>(Q.sizes()) == Qdim); + assert(get<1>(Q.sizes()) == nvec); } std::tie(ak0, ak1) = FairDivideBoundary(coreid, Qdim, ncores); @@ -623,21 +628,23 @@ void getLank(MultiArray2DA&& Aai, MultiArray2D&& buff, bool noncollinear = false) { + using std::get; + int npol = noncollinear ? 2 : 1; - int na = std::get<0>(Aai.sizes()); + int na = get<0>(Aai.sizes()); if (na == 0) return; - int ni = std::get<1>(Aai.sizes()) / npol; - int nk = std::get<1>(Likn.sizes()); - int nchol = std::get<2>(Likn.sizes()); - assert(std::get<0>(Likn.sizes()) == ni); - assert(std::get<0>(Lank.sizes()) == na); - assert(std::get<1>(Lank.sizes()) == nchol); - assert(std::get<2>(Lank.sizes()) == nk * npol); - assert(std::get<0>(buff.sizes()) >= npol * nk); - assert(std::get<1>(buff.sizes()) >= nchol); + int ni = get<1>(Aai.sizes()) / npol; + int nk = get<1>(Likn.sizes()); + int nchol = get<2>(Likn.sizes()); + assert(get<0>(Likn.sizes()) == ni); + assert(get<0>(Lank.sizes()) == na); + assert(get<1>(Lank.sizes()) == nchol); + assert(get<2>(Lank.sizes()) == nk * npol); + assert(get<0>(buff.sizes()) >= npol * nk); + assert(get<1>(buff.sizes()) >= nchol); if (noncollinear) - assert(Aai.stride(0) == std::get<1>(Aai.sizes())); // make sure it is contiguous + assert(Aai.stride(0) == get<1>(Aai.sizes())); // make sure it is contiguous using elementA = typename std::decay::type::element; using element = typename std::decay::type::element; @@ -670,20 +677,22 @@ void getLank_from_Lkin(MultiArray2DA&& Aai, MultiArray2D&& buff, bool noncollinear = false) { + using std::get; // for C++17 
compatibility + int npol = noncollinear ? 2 : 1; - int na = std::get<0>(Aai.sizes()); + int na = get<0>(Aai.sizes()); if (na == 0) return; - int ni = std::get<1>(Aai.sizes()) / npol; - int nk = std::get<0>(Lkin.sizes()); - int nchol = std::get<2>(Lkin.sizes()); - assert(std::get<1>(Lkin.sizes()) == ni); - assert(std::get<0>(Lank.sizes()) == na); - assert(std::get<1>(Lank.sizes()) == nchol); - assert(std::get<2>(Lank.sizes()) == nk * npol); + int ni = get<1>(Aai.sizes()) / npol; + int nk = get<0>(Lkin.sizes()); + int nchol = get<2>(Lkin.sizes()); + assert(get<1>(Lkin.sizes()) == ni); + assert(get<0>(Lank.sizes()) == na); + assert(get<1>(Lank.sizes()) == nchol); + assert(get<2>(Lank.sizes()) == nk * npol); assert(buff.num_elements() >= na * npol * nchol); if (noncollinear) - assert(Aai.stride(0) == std::get<1>(Aai.sizes())); // make sure it is contiguous + assert(Aai.stride(0) == get<1>(Aai.sizes())); // make sure it is contiguous using Type = typename std::decay::type::element; using elementA = typename std::decay::type::element; @@ -713,26 +722,28 @@ void getLakn_Lank(MultiArray2DA&& Aai, MultiArray3DC&& Lank, bool noncollinear = false) { + using std::get; // for C++17 compatibility + int npol = noncollinear ? 
2 : 1; - int na = std::get<0>(Aai.sizes()); + int na = get<0>(Aai.sizes()); if (na == 0) return; - int ni = std::get<1>(Aai.sizes()) / npol; + int ni = get<1>(Aai.sizes()) / npol; - int nmo = std::get<0>(Likn.sizes()); - int nchol = std::get<2>(Likn.sizes()); - assert(std::get<1>(Likn.sizes()) == nmo); + int nmo = get<0>(Likn.sizes()); + int nchol = get<2>(Likn.sizes()); + assert(get<1>(Likn.sizes()) == nmo); - assert(std::get<1>(Lakn.sizes()) == npol * nmo); - assert(std::get<2>(Lakn.sizes()) == nchol); + assert(get<1>(Lakn.sizes()) == npol * nmo); + assert(get<2>(Lakn.sizes()) == nchol); - assert(std::get<0>(Lakn.sizes()) >= na); - assert(std::get<0>(Lakn.sizes()) == std::get<0>(Lank.sizes())); - assert(std::get<1>(Lank.sizes()) == nchol); - assert(std::get<2>(Lank.sizes()) == npol * nmo); + assert(get<0>(Lakn.sizes()) >= na); + assert(get<0>(Lakn.sizes()) == get<0>(Lank.sizes())); + assert(get<1>(Lank.sizes()) == nchol); + assert(get<2>(Lank.sizes()) == npol * nmo); if (noncollinear) - assert(Aai.stride(0) == std::get<1>(Aai.sizes())); // make sure it is contiguous + assert(Aai.stride(0) == get<1>(Aai.sizes())); // make sure it is contiguous using elmA = typename std::decay::type::element; using elmB = typename std::decay::type::element; @@ -755,26 +766,28 @@ void getLakn_Lank_from_Lkin(MultiArray2DA&& Aai, MultiArray2D&& buff, bool noncollinear = false) { + using std::get; // for C++17 compatibility + int npol = noncollinear ? 
2 : 1; - int na = std::get<0>(Aai.sizes()); + int na = get<0>(Aai.sizes()); if (na == 0) return; - int ni = std::get<1>(Aai.sizes()) / npol; + int ni = get<1>(Aai.sizes()) / npol; - int nmo = std::get<0>(Lkin.sizes()); - int nchol = std::get<2>(Lkin.sizes()); - assert(std::get<1>(Lkin.sizes()) == nmo); + int nmo = get<0>(Lkin.sizes()); + int nchol = get<2>(Lkin.sizes()); + assert(get<1>(Lkin.sizes()) == nmo); - assert(std::get<1>(Lakn.sizes()) == npol * nmo); - assert(std::get<2>(Lakn.sizes()) == nchol); + assert(get<1>(Lakn.sizes()) == npol * nmo); + assert(get<2>(Lakn.sizes()) == nchol); - assert(std::get<0>(Lakn.sizes()) >= na); - assert(std::get<0>(Lakn.sizes()) == std::get<0>(Lank.sizes())); - assert(std::get<1>(Lank.sizes()) == nchol); - assert(std::get<2>(Lank.sizes()) == npol * nmo); + assert(get<0>(Lakn.sizes()) >= na); + assert(get<0>(Lakn.sizes()) == get<0>(Lank.sizes())); + assert(get<1>(Lank.sizes()) == nchol); + assert(get<2>(Lank.sizes()) == npol * nmo); if (noncollinear) - assert(Aai.stride(0) == std::get<1>(Aai.sizes())); // make sure it is contiguous + assert(Aai.stride(0) == get<1>(Aai.sizes())); // make sure it is contiguous assert(buff.num_elements() >= na * npol * nchol); diff --git a/src/AFQMC/SlaterDeterminantOperations/tests/test_sdet_ops.cpp b/src/AFQMC/SlaterDeterminantOperations/tests/test_sdet_ops.cpp index 7ea884d817..aa16da73f0 100644 --- a/src/AFQMC/SlaterDeterminantOperations/tests/test_sdet_ops.cpp +++ b/src/AFQMC/SlaterDeterminantOperations/tests/test_sdet_ops.cpp @@ -62,10 +62,11 @@ void myCHECK(const std::complex& a, const std::complex& b) template void check(M1&& A, M2& B) { - REQUIRE(std::get<0>(A.sizes()) == std::get<0>(B.sizes())); - REQUIRE(std::get<1>(A.sizes()) == std::get<1>(B.sizes())); - for (int i = 0; i < std::get<0>(A.sizes()); i++) - for (int j = 0; j < std::get<1>(A.sizes()); j++) + using std::get; + REQUIRE(get<0>(A.sizes()) == get<0>(B.sizes())); + REQUIRE(get<1>(A.sizes()) == get<1>(B.sizes())); + for (int i = 
0; i < get<0>(A.sizes()); i++) + for (int j = 0; j < get<1>(A.sizes()); j++) myCHECK(A[i][j], B[i][j]); } @@ -663,12 +664,13 @@ TEST_CASE("SDetOps_complex_mpi3", "[sdet_ops]") array A({NEL, NMO}); array B({NMO, NEL}); - for (int i = 0, k = 0; i < std::get<0>(A.sizes()); i++) - for (int j = 0; j < std::get<1>(A.sizes()); j++, k++) + using std::get; + for (int i = 0, k = 0; i < get<0>(A.sizes()); i++) + for (int j = 0; j < get<1>(A.sizes()); j++, k++) A[i][j] = m_a[k]; - for (int i = 0, k = 0; i < std::get<0>(B.sizes()); i++) - for (int j = 0; j < std::get<1>(B.sizes()); j++, k++) + for (int i = 0, k = 0; i < get<0>(B.sizes()); i++) + for (int j = 0; j < get<1>(B.sizes()); j++, k++) B[i][j] = m_b[k]; array_ref Aref(m_a.data(), {NEL, NMO}); @@ -836,12 +838,13 @@ TEST_CASE("SDetOps_complex_csr", "[sdet_ops]") array A({NMO, NEL}); // Will be transposed when Acsr is built array B({NMO, NEL}); - for (int i = 0, k = 0; i < std::get<0>(A.sizes()); i++) - for (int j = 0; j < std::get<1>(A.sizes()); j++, k++) + using std::get; + for (int i = 0, k = 0; i < get<0>(A.sizes()); i++) + for (int j = 0; j < get<1>(A.sizes()); j++, k++) A[i][j] = m_a[k]; - for (int i = 0, k = 0; i < std::get<0>(B.sizes()); i++) - for (int j = 0; j < std::get<1>(B.sizes()); j++, k++) + for (int i = 0, k = 0; i < get<0>(B.sizes()); i++) + for (int j = 0; j < get<1>(B.sizes()); j++, k++) B[i][j] = m_b[k]; boost::multi::array_ref Bref(m_b.data(), {NMO, NEL}); diff --git a/src/AFQMC/Walkers/WalkerControl.hpp b/src/AFQMC/Walkers/WalkerControl.hpp index 143a6f7cbb..627b4ef363 100644 --- a/src/AFQMC/Walkers/WalkerControl.hpp +++ b/src/AFQMC/Walkers/WalkerControl.hpp @@ -45,12 +45,14 @@ inline int swapWalkersSimple(WlkBucket& wset, IVec& NewNumPerNode, communicator& comm) { + using std::get; + int wlk_size = wset.single_walker_size() + wset.single_walker_bp_size(); int NumContexts, MyContext; NumContexts = comm.size(); MyContext = comm.rank(); static_assert(std::decay::type::dimensionality == 2, "Wrong 
dimensionality"); - if (wlk_size != std::get<1>(Wexcess.sizes())) + if (wlk_size != get<1>(Wexcess.sizes())) throw std::runtime_error("Array dimension error in swapWalkersSimple()."); if (1 != Wexcess.stride(1)) throw std::runtime_error("Array shape error in swapWalkersSimple()."); @@ -78,7 +80,7 @@ inline int swapWalkersSimple(WlkBucket& wset, int nsend = 0; if (deltaN <= 0 && wset.size() != CurrNumPerNode[MyContext]) throw std::runtime_error("error in swapWalkersSimple()."); - if (deltaN > 0 && (wset.size() != NewNumPerNode[MyContext] || int(std::get<0>(Wexcess.sizes())) != deltaN)) + if (deltaN > 0 && (wset.size() != NewNumPerNode[MyContext] || int(get<0>(Wexcess.sizes())) != deltaN)) throw std::runtime_error("error in swapWalkersSimple()."); std::vector buff; if (deltaN < 0) @@ -116,10 +118,12 @@ inline int swapWalkersAsync(WlkBucket& wset, int NumContexts, MyContext; NumContexts = comm.size(); MyContext = comm.rank(); + + using std::get; static_assert(std::decay::type::dimensionality == 2, "Wrong dimensionality"); - if (wlk_size != std::get<1>(Wexcess.sizes())) + if (wlk_size != get<1>(Wexcess.sizes())) throw std::runtime_error("Array dimension error in swapWalkersAsync()."); - if (1 != Wexcess.stride(1) || (std::get<0>(Wexcess.sizes()) > 0 && std::get<1>(Wexcess.sizes()) != Wexcess.stride(0))) + if (1 != Wexcess.stride(1) || (get<0>(Wexcess.sizes()) > 0 && get<1>(Wexcess.sizes()) != Wexcess.stride(0))) throw std::runtime_error("Array shape error in swapWalkersAsync()."); if (CurrNumPerNode.size() < NumContexts || NewNumPerNode.size() < NumContexts) throw std::runtime_error("Array dimension error in swapWalkersAsync()."); @@ -146,7 +150,7 @@ inline int swapWalkersAsync(WlkBucket& wset, int countSend = 1; if (deltaN <= 0 && wset.size() != CurrNumPerNode[MyContext]) throw std::runtime_error("error(1) in swapWalkersAsync()."); - if (deltaN > 0 && (wset.size() != NewNumPerNode[MyContext] || int(std::get<0>(Wexcess.sizes())) != deltaN)) + if (deltaN > 0 && 
(wset.size() != NewNumPerNode[MyContext] || int(get<0>(Wexcess.sizes())) != deltaN)) throw std::runtime_error("error(2) in swapWalkersAsync()."); std::vector buffers; std::vector requests; @@ -161,7 +165,7 @@ inline int swapWalkersAsync(WlkBucket& wset, } else { - requests.emplace_back(comm.isend(Wexcess[nsend].origin(), Wexcess[nsend].origin() + countSend * std::get<1>(Wexcess.sizes()), + requests.emplace_back(comm.isend(Wexcess[nsend].origin(), Wexcess[nsend].origin() + countSend * get<1>(Wexcess.sizes()), minus[ic], plus[ic] + 1999)); nsend += countSend; countSend = 1; diff --git a/src/AFQMC/Walkers/WalkerIO.hpp b/src/AFQMC/Walkers/WalkerIO.hpp index 3b94a00a63..d36ba79fe2 100644 --- a/src/AFQMC/Walkers/WalkerIO.hpp +++ b/src/AFQMC/Walkers/WalkerIO.hpp @@ -373,13 +373,15 @@ bool dumpToHDF5(WalkerSet& wset, hdf_archive& dump) displ.reextent({TG.TG_heads().size()}); wlk_per_blk.reserve(nblks); + using std::get; + int NMO, NAEA, NAEB = 0; { // to limit the scope auto w = wset[0]; - NMO = std::get<0>((*w.SlaterMatrix(Alpha)).sizes()); - NAEA = std::get<1>((*w.SlaterMatrix(Alpha)).sizes()); + NMO = get<0>((*w.SlaterMatrix(Alpha)).sizes()); + NAEA = get<1>((*w.SlaterMatrix(Alpha)).sizes()); if (walker_type == COLLINEAR) - NAEB = std::get<1>((*w.SlaterMatrix(Beta)).sizes()); + NAEB = get<1>((*w.SlaterMatrix(Beta)).sizes()); if (walker_type == NONCOLLINEAR) NMO /= 2; } diff --git a/src/AFQMC/Walkers/WalkerSetBase.h b/src/AFQMC/Walkers/WalkerSetBase.h index 3b55143765..ff11ff1383 100644 --- a/src/AFQMC/Walkers/WalkerSetBase.h +++ b/src/AFQMC/Walkers/WalkerSetBase.h @@ -132,7 +132,7 @@ class WalkerSetBase : public AFQMCInfo /* * Returns the maximum number of walkers in the set that can be stored without reallocation. */ - int capacity() const { return int(std::get<0>(walker_buffer.sizes())); } + int capacity() const { using std::get; return int(get<0>(walker_buffer.sizes())); } /* * Returns the maximum number of fields in the set that can be stored without reallocation. 
@@ -167,7 +167,8 @@ class WalkerSetBase : public AFQMCInfo */ iterator begin() { - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + using std::get; + assert(get<1>(walker_buffer.sizes()) == walker_size); return iterator(0, boost::multi::static_array_cast(walker_buffer), data_displ, wlk_desc); } @@ -176,17 +177,18 @@ class WalkerSetBase : public AFQMCInfo */ const_iterator begin() const { - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + using std::get; + assert(get<1>(walker_buffer.sizes()) == walker_size); return const_iterator(0, boost::multi::static_array_cast(walker_buffer), data_displ, wlk_desc); } - /* * Returns iterator to the past-the-end walker in the set */ iterator end() { - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + using std::get; + assert(get<1>(walker_buffer.sizes()) == walker_size); return iterator(tot_num_walkers, boost::multi::static_array_cast(walker_buffer), data_displ, wlk_desc); } @@ -196,9 +198,10 @@ class WalkerSetBase : public AFQMCInfo */ reference operator[](int i) { + using std::get; if (i < 0 || i > tot_num_walkers) APP_ABORT("error: index out of bounds.\n"); - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + assert(get<1>(walker_buffer.sizes()) == walker_size); return reference(boost::multi::static_array_cast(walker_buffer)[i], data_displ, wlk_desc); } @@ -209,7 +212,8 @@ class WalkerSetBase : public AFQMCInfo { if (i < 0 || i > tot_num_walkers) APP_ABORT("error: index out of bounds.\n"); - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + using std::get; + assert(get<1>(walker_buffer.sizes()) == walker_size); return const_reference(boost::multi::static_array_cast(walker_buffer.const_array_cast())[i], data_displ, wlk_desc); } @@ -244,12 +248,13 @@ class WalkerSetBase : public AFQMCInfo template void resize(int n, MatA&& A, MatB&& B) { - assert(std::get<0>(A.sizes()) == wlk_desc[0]); - assert(std::get<1>(A.sizes()) == wlk_desc[1]); + using std::get; + 
assert(get<0>(A.sizes()) == wlk_desc[0]); + assert(get<1>(A.sizes()) == wlk_desc[1]); if (walkerType == COLLINEAR) { - assert(std::get<0>(B.sizes()) == wlk_desc[0]); - assert(std::get<1>(B.sizes()) == wlk_desc[2]); + assert(get<0>(B.sizes()) == wlk_desc[0]); + assert(get<1>(B.sizes()) == wlk_desc[2]); } reserve(n); if (n > tot_num_walkers) @@ -297,9 +302,11 @@ class WalkerSetBase : public AFQMCInfo void resize_bp(int nbp, int nCV, int nref) { - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + using std::get; + + assert(get<1>(walker_buffer.sizes()) == walker_size); assert(bp_buffer.size() == bp_walker_size); - assert(walker_buffer.size() == std::get<1>(bp_buffer.sizes())); + assert(walker_buffer.size() == get<1>(bp_buffer.sizes())); // wlk_descriptor: {nmo, naea, naeb, nback_prop, nCV, nRefs, nHist} wlk_desc[3] = nbp; wlk_desc[4] = nCV; @@ -333,12 +340,12 @@ class WalkerSetBase : public AFQMCInfo data_displ[WEIGHT_HISTORY] = cnt; cnt += wlk_desc[6]; bp_walker_size = cnt; - if (std::get<0>(bp_buffer.sizes()) != bp_walker_size) + if (get<0>(bp_buffer.sizes()) != bp_walker_size) { - bp_buffer.reextent({bp_walker_size, std::get<0>(walker_buffer.sizes())}); + bp_buffer.reextent({bp_walker_size, get<0>(walker_buffer.sizes())}); using std::fill_n; - fill_n(bp_buffer.origin() + data_displ[WEIGHT_FAC] * std::get<1>(bp_buffer.sizes()), - wlk_desc[6] * std::get<1>(bp_buffer.sizes()), bp_element(1.0)); + fill_n(bp_buffer.origin() + data_displ[WEIGHT_FAC] * get<1>(bp_buffer.sizes()), + wlk_desc[6] * get<1>(bp_buffer.sizes()), bp_element(1.0)); } if (nbp > 0 && (data_displ[SMN] < 0 || data_displ[SM_AUX] < 0)) { @@ -347,7 +354,7 @@ class WalkerSetBase : public AFQMCInfo walker_size += nrow * ncol; data_displ[SM_AUX] = walker_size; walker_size += nrow * ncol; - CMatrix wb({std::get<0>(walker_buffer.sizes()), walker_size}, walker_buffer.get_allocator()); + CMatrix wb({get<0>(walker_buffer.sizes()), walker_size}, walker_buffer.get_allocator()); ma::copy(walker_buffer, 
wb(wb.extension(0), {0, sz})); walker_buffer = std::move(wb); } @@ -363,8 +370,10 @@ class WalkerSetBase : public AFQMCInfo int GlobalPopulation() const { + using std::get; + int res = 0; - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + assert(get<1>(walker_buffer.sizes()) == walker_size); if (TG.TG_local().root()) res += tot_num_walkers; return (TG.Global() += res); @@ -372,8 +381,10 @@ class WalkerSetBase : public AFQMCInfo RealType GlobalWeight() const { + using std::get; + RealType res = 0; - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + assert(get<1>(walker_buffer.sizes()) == walker_size); if (TG.TG_local().root()) { boost::multi::array buff(iextensions<1u>{tot_num_walkers}); @@ -390,10 +401,12 @@ class WalkerSetBase : public AFQMCInfo template void push_walkers(Mat&& M) { + using std::get; + static_assert(std::decay::type::dimensionality == 2, "Wrong dimensionality"); if (tot_num_walkers + M.size() > capacity()) APP_ABORT("Insufficient capacity"); - if (single_walker_size() + single_walker_bp_size() != std::get<1>(M.sizes())) + if (single_walker_size() + single_walker_bp_size() != get<1>(M.sizes())) APP_ABORT("Incorrect dimensions."); if (M.stride(1) != 1) APP_ABORT("Incorrect strides."); @@ -416,17 +429,18 @@ class WalkerSetBase : public AFQMCInfo template void pop_walkers(Mat&& M) { + using std::get; static_assert(std::decay::type::dimensionality == 2, "Wrong dimensionality"); if (tot_num_walkers < int(M.size())) APP_ABORT("Insufficient walkers"); if (wlk_desc[3] > 0) { - if (walker_size + bp_walker_size != int(std::get<1>(M.sizes()))) + if (walker_size + bp_walker_size != int(get<1>(M.sizes()))) APP_ABORT("Incorrect dimensions."); } else { - if (walker_size != int(std::get<1>(M.sizes()))) + if (walker_size != int(get<1>(M.sizes()))) APP_ABORT("Incorrect dimensions."); } if (M.stride(1) != 1) @@ -454,6 +468,8 @@ class WalkerSetBase : public AFQMCInfo std::vector>::iterator itend, Mat& M) { + using std::get; + if 
(std::distance(itbegin, itend) != tot_num_walkers) APP_ABORT("Error in WalkerSetBase::branch(): ptr_range != # walkers. \n"); @@ -461,13 +477,13 @@ class WalkerSetBase : public AFQMCInfo int nW = 0; for (auto it = itbegin; it != itend; ++it) nW += it->second; - if (int(std::get<0>(M.sizes())) < std::max(0, nW - targetN_per_TG)) + if (int(get<0>(M.sizes())) < std::max(0, nW - targetN_per_TG)) { std::cout << " Error in WalkerSetBase::branch(): Not enough space in excess matrix. \n" - << std::get<0>(M.sizes()) << " " << nW << " " << targetN_per_TG << std::endl; + << get<0>(M.sizes()) << " " << nW << " " << targetN_per_TG << std::endl; APP_ABORT("Error in WalkerSetBase::branch(): Not enough space in excess matrix.\n"); } - if (int(std::get<1>(M.sizes())) < walker_size + ((wlk_desc[3] > 0) ? bp_walker_size : 0)) + if (int(get<1>(M.sizes())) < walker_size + ((wlk_desc[3] > 0) ? bp_walker_size : 0)) APP_ABORT("Error in WalkerSetBase::branch(): Wrong dimensions in excess matrix.\n"); // if all walkers are dead, don't bother with routine, reset tot_num_walkers and return @@ -575,9 +591,11 @@ class WalkerSetBase : public AFQMCInfo template void scaleWeight(const T& w0, bool scale_last_history = false) { + using std::get; + if (!TG.TG_local().root()) return; - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + assert(get<1>(walker_buffer.sizes()) == walker_size); auto W(boost::multi::static_array_cast(walker_buffer)); ma::scal(ComplexType(w0), W({0, tot_num_walkers}, data_displ[WEIGHT])); if (scale_last_history) @@ -631,9 +649,11 @@ class WalkerSetBase : public AFQMCInfo template void copyToIO(Vec&& x, int n) { + using std::get; + assert(n < tot_num_walkers); assert(x.size() >= walkerSizeIO()); - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + assert(get<1>(walker_buffer.sizes()) == walker_size); auto W(boost::multi::static_array_cast(walker_buffer)); using std::copy_n; copy_n(W[n].origin(), walkerSizeIO(), x.origin()); @@ -642,9 +662,11 @@ class 
WalkerSetBase : public AFQMCInfo template void copyFromIO(Vec&& x, int n) { + using std::get; + assert(n < tot_num_walkers); assert(x.size() >= walkerSizeIO()); - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + assert(get<1>(walker_buffer.sizes()) == walker_size); auto W(boost::multi::static_array_cast(walker_buffer)); using std::copy_n; copy_n(x.origin(), walkerSizeIO(), W[n].origin()); @@ -686,22 +708,26 @@ class WalkerSetBase : public AFQMCInfo { if (ip < 0 || ip > wlk_desc[3]) APP_ABORT(" Error: index out of bounds in getFields. \n"); - int skip = (data_displ[FIELDS] + ip * wlk_desc[4]) * std::get<1>(bp_buffer.sizes()); - return stdCMatrix_ptr(to_address(bp_buffer.origin()) + skip, {wlk_desc[4], std::get<1>(bp_buffer.sizes())}); + + using std::get; + int skip = (data_displ[FIELDS] + ip * wlk_desc[4]) * get<1>(bp_buffer.sizes()); + return stdCMatrix_ptr(to_address(bp_buffer.origin()) + skip, {wlk_desc[4], get<1>(bp_buffer.sizes())}); } stdCTensor_ptr getFields() { - return stdCTensor_ptr(to_address(bp_buffer.origin()) + data_displ[FIELDS] * std::get<1>(bp_buffer.sizes()), - {wlk_desc[3], wlk_desc[4], std::get<1>(bp_buffer.sizes())}); + using std::get; + return stdCTensor_ptr(to_address(bp_buffer.origin()) + data_displ[FIELDS] * get<1>(bp_buffer.sizes()), + {wlk_desc[3], wlk_desc[4], get<1>(bp_buffer.sizes())}); } template void storeFields(int ip, Mat&& V) { + using std::get; static_assert(std::decay::type::dimensionality == 2, "Wrong dimensionality"); auto&& F(*getFields(ip)); - if (V.stride(0) == std::get<1>(V.sizes())) + if (V.stride(0) == get<1>(V.sizes())) { using std::copy_n; copy_n(V.origin(), F.num_elements(), F.origin()); @@ -712,14 +738,16 @@ class WalkerSetBase : public AFQMCInfo stdCMatrix_ptr getWeightFactors() { - return stdCMatrix_ptr(to_address(bp_buffer.origin()) + data_displ[WEIGHT_FAC] * std::get<1>(bp_buffer.sizes()), - {wlk_desc[6], std::get<1>(bp_buffer.sizes())}); + using std::get; + return 
stdCMatrix_ptr(to_address(bp_buffer.origin()) + data_displ[WEIGHT_FAC] * get<1>(bp_buffer.sizes()), + {wlk_desc[6], get<1>(bp_buffer.sizes())}); } stdCMatrix_ptr getWeightHistory() { - return stdCMatrix_ptr(to_address(bp_buffer.origin()) + data_displ[WEIGHT_HISTORY] * std::get<1>(bp_buffer.sizes()), - {wlk_desc[6], std::get<1>(bp_buffer.sizes())}); + using std::get; + return stdCMatrix_ptr(to_address(bp_buffer.origin()) + data_displ[WEIGHT_HISTORY] * get<1>(bp_buffer.sizes()), + {wlk_desc[6], get<1>(bp_buffer.sizes())}); } double getLogOverlapFactor() const { return LogOverlapFactor; } @@ -730,7 +758,8 @@ class WalkerSetBase : public AFQMCInfo // LogOverlapFactor_new = LogOverlapFactor + f/nx void adjustLogOverlapFactor(const double f) { - assert(std::get<1>(walker_buffer.sizes()) == walker_size); + using std::get; + assert(get<1>(walker_buffer.sizes()) == walker_size); double nx = (walkerType == NONCOLLINEAR ? 1.0 : 2.0); if (TG.TG_local().root()) { diff --git a/src/AFQMC/Walkers/WalkerSetBase.icc b/src/AFQMC/Walkers/WalkerSetBase.icc index 757dac8b62..632b2dff2a 100644 --- a/src/AFQMC/Walkers/WalkerSetBase.icc +++ b/src/AFQMC/Walkers/WalkerSetBase.icc @@ -216,9 +216,11 @@ bool WalkerSetBase::clean() template void WalkerSetBase::reserve(int n) { - if (std::get<0>(walker_buffer.sizes()) < n || std::get<1>(walker_buffer.sizes()) != walker_size) + using std::get; + + if (get<0>(walker_buffer.sizes()) < n || get<1>(walker_buffer.sizes()) != walker_size) walker_buffer.reextent({n, walker_size}); - if (std::get<1>(bp_buffer.sizes()) < n || std::get<0>(bp_buffer.sizes()) != bp_walker_size) + if (get<1>(bp_buffer.sizes()) < n || get<0>(bp_buffer.sizes()) != bp_walker_size) { bp_buffer.reextent({bp_walker_size, n}); using std::fill_n; diff --git a/src/AFQMC/Wavefunctions/NOMSD.hpp b/src/AFQMC/Wavefunctions/NOMSD.hpp index 0ca56bf419..131cfae4a5 100644 --- a/src/AFQMC/Wavefunctions/NOMSD.hpp +++ b/src/AFQMC/Wavefunctions/NOMSD.hpp @@ -243,17 +243,18 @@ class NOMSD : public 
AFQMCInfo template void vbias(const MatG& G, MatA&& v, double a = 1.0) { + using std::get; if (transposed_G_for_vbias_) { - assert(std::get<0>(G.sizes()) == std::get<1>(v.sizes())); - assert(std::get<1>(G.sizes()) == size_of_G_for_vbias()); + assert(get<0>(G.sizes()) == get<1>(v.sizes())); + assert(get<1>(G.sizes()) == size_of_G_for_vbias()); } else { - assert(std::get<0>(G.sizes()) == size_of_G_for_vbias()); - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + assert(get<0>(G.sizes()) == size_of_G_for_vbias()); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); } - assert(std::get<0>(v.sizes()) == HamOp.local_number_of_cholesky_vectors()); + assert(get<0>(v.sizes()) == HamOp.local_number_of_cholesky_vectors()); if (ci.size() == 1) { // HamOp expects a compact Gc with alpha/beta components @@ -285,11 +286,12 @@ class NOMSD : public AFQMCInfo template void vHS(MatX&& X, MatA&& v, double a = 1.0) { - assert(std::get<0>(X.sizes()) == HamOp.local_number_of_cholesky_vectors()); + using std::get; + assert(get<0>(X.sizes()) == HamOp.local_number_of_cholesky_vectors()); if (transposed_vHS_) - assert(std::get<1>(X.sizes()) == std::get<0>(v.sizes())); + assert(get<1>(X.sizes()) == get<0>(v.sizes())); else - assert(std::get<1>(X.sizes()) == std::get<1>(v.sizes())); + assert(get<1>(X.sizes()) == get<1>(v.sizes())); HamOp.vHS(std::forward(X), std::forward(v), a); TG.local_barrier(); } @@ -538,13 +540,14 @@ class NOMSD : public AFQMCInfo } // TG.Node().root() TG.Node().barrier(); // for safety } - assert(std::get<0>(RefOrbMats.sizes()) == ndet); - assert(std::get<1>(RefOrbMats.sizes()) == std::get<1>(A.sizes())); + using std::get; + assert(get<0>(RefOrbMats.sizes()) == ndet); + assert(get<1>(RefOrbMats.sizes()) == get<1>(A.sizes())); auto&& RefOrbMats_(boost::multi::static_array_cast(RefOrbMats)); auto&& A_(boost::multi::static_array_cast(A)); using std::copy_n; int n0, n1; - std::tie(n0, n1) = FairDivideBoundary(TG.getLocalTGRank(), int(std::get<1>(A.sizes())), 
TG.getNCoresPerTG()); + std::tie(n0, n1) = FairDivideBoundary(TG.getLocalTGRank(), int(get<1>(A.sizes())), TG.getNCoresPerTG()); for (int i = 0; i < ndet; i++) copy_n(RefOrbMats_[i].origin() + n0, n1 - n0, A_[i].origin() + n0); TG.TG_local().barrier(); diff --git a/src/AFQMC/Wavefunctions/NOMSD.icc b/src/AFQMC/Wavefunctions/NOMSD.icc index 572f6d8024..9d51c1292f 100644 --- a/src/AFQMC/Wavefunctions/NOMSD.icc +++ b/src/AFQMC/Wavefunctions/NOMSD.icc @@ -44,13 +44,17 @@ template template void NOMSD::Energy_shared(const WlkSet& wset, Mat&& E, TVec&& Ov) { + using std::get; + size_t nt = wset.size() * (1 + dm_size(false)); assert(E.dimensionality == 2); assert(Ov.dimensionality == 1); assert(E.size() == wset.size()); - assert(E.stride() == std::get<1>(E.sizes())); + assert(E.stride() == get<1>(E.sizes())); assert(Ov.size() == wset.size()); - assert(std::get<1>(E.sizes()) == 3); + + using std::get; + assert(get<1>(E.sizes()) == 3); // temporary runtime check for incompatible memory spaces { @@ -133,7 +137,9 @@ void NOMSD::Energy_distributed_singleDet(const WlkSet& wset, Mat&& E, T assert(Ov.dimensionality == 1); assert(E.size() == wset.size()); assert(Ov.size() == wset.size()); - assert(std::get<1>(E.sizes()) == 3); + + using std::get; + assert(get<1>(E.sizes()) == 3); int nr = Gsize, nc = nwalk; if (transposed_G_for_E_) @@ -237,7 +243,9 @@ void NOMSD::Energy_distributed_multiDet(const WlkSet& wset, Mat&& E, TV assert(Ov.dimensionality == 1); assert(E.size() == wset.size()); assert(Ov.size() == wset.size()); - assert(std::get<1>(E.sizes()) == 3); + + using std::get; + assert(get<1>(E.sizes()) == 3); int nr = Gsize, nc = nwalk; if (transposed_G_for_E_) @@ -343,7 +351,8 @@ void NOMSD::MixedDensityMatrix_for_E_from_SM(const MatSM& SM, auto Gsize = dm_size(false); const int nw = SM.size(); - assert(std::get<1>(G.strides()) == 1); + using std::get; + assert(get<1>(G.strides()) == 1); assert(Ov.stride() == 1); if (transposed_G_for_E_) @@ -355,7 +364,9 @@ void 
NOMSD::MixedDensityMatrix_for_E_from_SM(const MatSM& SM, assert((G.extensions() == boost::multi::extensions_t<2>{static_cast(dm_size(false)), nw})); } assert(Ov.size() >= nw); - assert(std::get<1>(SM.sizes()) == Gsize); + + using std::get; + assert(get<1>(SM.sizes()) == Gsize); // to force synchronization before modifying structures in SHM TG.local_barrier(); fill_n(Ov.origin(), nw, 0); @@ -450,7 +461,8 @@ void NOMSD::DensityMatrix_shared(const WlkSet& wset, bool compact, bool transposed) { - assert(std::get<1>(G.strides()) == 1); + using std::get; + assert(get<1>(G.strides()) == 1); assert(Ov.stride() == 1); if (transposed) { @@ -472,17 +484,18 @@ void NOMSD::DensityMatrix_shared(const WlkSet& wset, double LogOverlapFactor(wset.getLogOverlapFactor()); auto Gsize = dm_size(not compact); + using std::get; if (walker_type != COLLINEAR) { if (herm) { - assert(std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); } else { - assert(std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); } auto Gdims = dm_dims(not compact, Alpha); @@ -511,23 +524,23 @@ void NOMSD::DensityMatrix_shared(const WlkSet& wset, { if (herm) { - assert(std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); } else { - assert(std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<0>(RefA.sizes()) == 
dm_dims(false, Alpha).second); } if (herm) { - assert(std::get<0>(RefB.sizes()) == dm_dims(false, Beta).first && - std::get<1>(RefB.sizes()) == dm_dims(false, Beta).second); + assert(get<0>(RefB.sizes()) == dm_dims(false, Beta).first && + get<1>(RefB.sizes()) == dm_dims(false, Beta).second); } else { - assert(std::get<1>(RefB.sizes()) == dm_dims(false, Beta).first && - std::get<0>(RefB.sizes()) == dm_dims(false, Beta).second); + assert(get<1>(RefB.sizes()) == dm_dims(false, Beta).first && + get<0>(RefB.sizes()) == dm_dims(false, Beta).second); } StaticVector ovlp2(iextensions<1u>{2 * nw}, buffer_manager.get_generator().template get_allocator()); fill_n(ovlp2.origin(), 2 * nw, ComplexType(0.0)); @@ -593,7 +606,8 @@ void NOMSD::DensityMatrix_batched(const WlkSet& wset, auto dev_ptr(make_device_ptr(G.origin())); } - assert(std::get<1>(G.strides()) == 1); + using std::get; + assert(get<1>(G.strides()) == 1); assert(Ov.stride() == 1); if (transposed) @@ -617,7 +631,9 @@ void NOMSD::DensityMatrix_batched(const WlkSet& wset, StaticVector ovlp2(iextensions<1u>{2 * nbatch__}, buffer_manager.get_generator().template get_allocator()); fill_n(ovlp2.origin(), ovlp2.num_elements(), ComplexType(0.0)); - if (std::get<1>(G.sizes()) != G.stride()) + + using std::get; + if (get<1>(G.sizes()) != G.stride()) { APP_ABORT(" Error: FIX FIX FIX need strided fill_n\n"); } @@ -633,17 +649,18 @@ void NOMSD::DensityMatrix_batched(const WlkSet& wset, std::vector Oib; Oib.reserve(nw); + using std::get; if (walker_type != COLLINEAR) { if (herm) { - assert(std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); } else { - assert(std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && + 
get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); } Static3Tensor G3D_({nbatch__, GAdims.first, GAdims.second}, buffer_manager.get_generator().template get_allocator()); @@ -682,23 +699,23 @@ void NOMSD::DensityMatrix_batched(const WlkSet& wset, { if (herm) { - assert(std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<1>(RefA.sizes()) == dm_dims(false, Alpha).second); } else { - assert(std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && - std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); + assert(get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && + get<0>(RefA.sizes()) == dm_dims(false, Alpha).second); } if (herm) { - assert(std::get<0>(RefB.sizes()) == dm_dims(false, Beta).first && - std::get<1>(RefB.sizes()) == dm_dims(false, Beta).second); + assert(get<0>(RefB.sizes()) == dm_dims(false, Beta).first && + get<1>(RefB.sizes()) == dm_dims(false, Beta).second); } else { - assert(std::get<1>(RefB.sizes()) == dm_dims(false, Beta).first && - std::get<0>(RefB.sizes()) == dm_dims(false, Beta).second); + assert(get<1>(RefB.sizes()) == dm_dims(false, Beta).first && + get<0>(RefB.sizes()) == dm_dims(false, Beta).second); } auto GBdims = dm_dims(not compact, Beta); @@ -856,7 +873,8 @@ void NOMSD::MixedDensityMatrix_shared(const WlkSet& wset, MatG&& G, TVe auto dev_ptr(make_device_ptr(G.origin())); } - assert(std::get<1>(G.strides()) == 1); + using std::get; + assert(get<1>(G.strides()) == 1); assert(Ov.stride() == 1); if (transpose) { @@ -874,7 +892,7 @@ void NOMSD::MixedDensityMatrix_shared(const WlkSet& wset, MatG&& G, TVe assert(Ov.size() >= nw); fill_n(Ov.origin(), nw, 0); // need strided fill_n???? 
- if (std::get<1>(G.sizes()) != G.stride()) + if (get<1>(G.sizes()) != G.stride()) { APP_ABORT(" Error: FIX FIX FIX need strided fill_n\n"); } @@ -1100,10 +1118,11 @@ void NOMSD::MixedDensityMatrix_shared(const WlkSet& wset, MatG&& G, TVe } else { + using std::get; int ik0, ikN; std::tie(ik0, ikN) = FairDivideBoundary(TG.TG_local().rank(), int(G.size()), TG.TG_local().size()); using ma::term_by_term_matrix_vector; - term_by_term_matrix_vector(ma::TOp_DIV, 1, ikN - ik0, std::get<1>(G.sizes()), make_device_ptr(G[ik0].origin()), + term_by_term_matrix_vector(ma::TOp_DIV, 1, ikN - ik0, get<1>(G.sizes()), make_device_ptr(G[ik0].origin()), G.stride(), make_device_ptr(Ov.origin()), Ov.stride()); } TG.local_barrier(); @@ -1124,7 +1143,8 @@ void NOMSD::MixedDensityMatrix_batched(const WlkSet& wset, MatG&& G, TV auto dev_ptr(make_device_ptr(G.origin())); } - assert(std::get<1>(G.strides()) == 1); + using std::get; + assert(get<1>(G.strides()) == 1); assert(Ov.stride() == 1); if (transpose) { @@ -1147,7 +1167,9 @@ void NOMSD::MixedDensityMatrix_batched(const WlkSet& wset, MatG&& G, TV buffer_manager.get_generator().template get_allocator()); fill_n(ovlp2.origin(), ovlp2.num_elements(), ComplexType(0.0)); // need strided fill_n???? 
- if (std::get<1>(G.sizes()) != G.stride()) + + using std::get; + if (get<1>(G.sizes()) != G.stride()) { APP_ABORT(" Error: FIX FIX FIX need strided fill_n\n"); } @@ -1288,6 +1310,8 @@ void NOMSD::MixedDensityMatrix_batched(const WlkSet& wset, MatG&& G, TV } } copy_n(Ovl.origin(), nw, Ov.origin()); + + using std::get; // normalize G if (transpose) { @@ -1300,7 +1324,7 @@ void NOMSD::MixedDensityMatrix_batched(const WlkSet& wset, MatG&& G, TV else { using ma::term_by_term_matrix_vector; - term_by_term_matrix_vector(ma::TOp_DIV, 1, G.size(), std::get<1>(G.sizes()), make_device_ptr(G.origin()), + term_by_term_matrix_vector(ma::TOp_DIV, 1, G.size(), get<1>(G.sizes()), make_device_ptr(G.origin()), G.stride(), make_device_ptr(Ov.origin()), Ov.stride()); } TG.local_barrier(); @@ -1557,8 +1581,10 @@ void NOMSD::WalkerAveragedDensityMatrix_shared(const WlkSet& wset, assert(wgt.size() >= nwalk); double LogOverlapFactor(wset.getLogOverlapFactor()); auto wlk_dims = wset.walker_dims(); + + using std::get; // Transposed temporaries for back propagation. 
- if (std::get<1>(G.sizes()) != G.stride()) + if (get<1>(G.sizes()) != G.stride()) { APP_ABORT(" Error: FIX FIX FIX need strided fill_n\n"); } @@ -1567,12 +1593,13 @@ void NOMSD::WalkerAveragedDensityMatrix_shared(const WlkSet& wset, TG.TG_local().barrier(); if (Refs != nullptr) { + using std::get; assert(detR != nullptr); assert(wset.size() <= (*Refs).size()); - assert(ci.size() <= std::get<1>((*Refs).sizes())); - assert(nrow * ncol == std::get<2>((*Refs).sizes())); + assert(ci.size() <= get<1>((*Refs).sizes())); + assert(nrow * ncol == get<2>((*Refs).sizes())); assert(wset.size() == (*detR).size()); - assert(OrbMats.size() == std::get<1>((*detR).sizes())); + assert(OrbMats.size() == get<1>((*detR).sizes())); } if (walker_type != COLLINEAR) { @@ -1748,8 +1775,10 @@ void NOMSD::WalkerAveragedDensityMatrix_shared_single_det(const WlkSet& assert(wgt.size() >= nwalk); auto wlk_dims = wset.walker_dims(); double LogOverlapFactor(wset.getLogOverlapFactor()); + + using std::get; // Transposed temporaries for back propagation. 
- if (std::get<1>(G.sizes()) != G.stride()) + if (get<1>(G.sizes()) != G.stride()) { APP_ABORT(" Error: FIX FIX FIX need strided fill_n\n"); } @@ -1757,12 +1786,13 @@ void NOMSD::WalkerAveragedDensityMatrix_shared_single_det(const WlkSet& TG.TG_local().barrier(); if (Refs != nullptr) { + using std::get; assert(detR != nullptr); assert(wset.size() <= (*Refs).size()); - assert(ci.size() <= std::get<1>((*Refs).sizes())); - assert(nrow * ncol == std::get<2>((*Refs).sizes())); + assert(ci.size() <= get<1>((*Refs).sizes())); + assert(nrow * ncol == get<2>((*Refs).sizes())); assert(wset.size() == (*detR).size()); - assert(OrbMats.size() == std::get<1>((*detR).sizes())); + assert(OrbMats.size() == get<1>((*detR).sizes())); } fill_n(Ovlp.origin(), Ovlp.num_elements(), ComplexType(0.0)); @@ -1947,25 +1977,26 @@ void NOMSD::WalkerAveragedDensityMatrix_batched(const WlkSet& wset, auto dev_ptr_(make_device_ptr(Ov.origin())); auto dev_ptr(make_device_ptr(G.origin())); } - assert(std::get<1>(G.strides())==1); + using std::get; + assert(get<1>(G.strides())==1); const int nw = wset.size(); int nbatch__ = std::min(nw,(nbatch<0?nw:nbatch)); const int ndet = ci.size(); int nrow = NMO*((walker_type==NONCOLLINEAR)?2:1); int ncol = NAEA+((walker_type==CLOSED)?0:NAEB); assert(wgt.size() >= nw); - assert(std::get<1>(wgt.sizes()) >= ndet); - if(std::get<1>(G.sizes()) != G.stride()) { + assert(get<1>(wgt.sizes()) >= ndet); + if(get<1>(G.sizes()) != G.stride()) { APP_ABORT(" Error: FIX FIX FIX need strided fill_n\n"); } fill_n(G.origin(),G.num_elements(),ComplexType(0.0)); if(Refs!=nullptr) { assert(detR!=nullptr); assert(wset.size() <= (*Refs).size()); - assert(ci.size() <= std::get<1>((*Refs).sizes()) ); - assert(nrow*ncol == std::get<2>((*Refs).sizes()) ); + assert(ci.size() <= get<1>((*Refs).sizes()) ); + assert(nrow*ncol == get<2>((*Refs).sizes()) ); assert(wset.size() == (*detR).size()); - assert(OrbMats.size() == std::get<1>((*detR).sizes()) ); + assert(OrbMats.size() == 
get<1>((*detR).sizes()) ); } stdCVector hvec(iextensions<1u>{2*nbatch__}); TG.local_barrier(); @@ -2220,11 +2251,13 @@ template template void NOMSD::OrthogonalizeExcited(Mat&& A, SpinTypes spin, double LogOverlapFactor) { + using std::get; + if (walker_type == NONCOLLINEAR) APP_ABORT(" Error: OrthogonalizeExcited not implemented with NONCOLLINEAR.\n"); if (spin == Alpha) { - if (extendedMatAlpha.size() != NMO || std::get<1>(extendedMatAlpha.sizes()) != maxOccupExtendedMat.first) + if (extendedMatAlpha.size() != NMO || get<1>(extendedMatAlpha.sizes()) != maxOccupExtendedMat.first) extendedMatAlpha.reextent({NMO, maxOccupExtendedMat.first}); extendedMatAlpha(extendedMatAlpha.extension(0), {0, NAEA}) = A; extendedMatAlpha(extendedMatAlpha.extension(0), {NAEA + 1, maxOccupExtendedMat.first}) = @@ -2246,7 +2279,7 @@ void NOMSD::OrthogonalizeExcited(Mat&& A, SpinTypes spin, double LogOve } else { - if (extendedMatBeta.size() != NMO || std::get<1>(extendedMatBeta.sizes()) != maxOccupExtendedMat.second) + if (extendedMatBeta.size() != NMO || get<1>(extendedMatBeta.sizes()) != maxOccupExtendedMat.second) extendedMatBeta.reextent({NMO, maxOccupExtendedMat.second}); extendedMatBeta(extendedMatBeta.extension(0), {0, NAEB}) = A; extendedMatBeta(extendedMatBeta.extension(0), {NAEB + 1, maxOccupExtendedMat.second}) = diff --git a/src/AFQMC/Wavefunctions/PHMSD.hpp b/src/AFQMC/Wavefunctions/PHMSD.hpp index 911258a5fb..aabf6f6e28 100644 --- a/src/AFQMC/Wavefunctions/PHMSD.hpp +++ b/src/AFQMC/Wavefunctions/PHMSD.hpp @@ -253,22 +253,23 @@ class PHMSD : public AFQMCInfo template void vbias(const MatG& G, MatA&& v, double a = 1.0) { - assert(std::get<0>(v.sizes()) == HamOp.local_number_of_cholesky_vectors()); + using std::get; + assert(get<0>(v.sizes()) == HamOp.local_number_of_cholesky_vectors()); double scl = (walker_type == COLLINEAR) ? 
0.5 : 1.0; if (transposed_G_for_vbias_) { - assert(std::get<0>(G.sizes()) == std::get<1>(v.sizes())); - assert(std::get<1>(G.sizes()) == size_of_G_for_vbias()); + assert(get<0>(G.sizes()) == get<1>(v.sizes())); + assert(get<1>(G.sizes()) == size_of_G_for_vbias()); HamOp.vbias(G(G.extension(), {0, long(OrbMats[0].size() * NMO)}), std::forward(v), scl * a, 0.0); if (walker_type == COLLINEAR) { APP_ABORT(" Error in PHMSD::vbias: transposed_G_for_vbias_ should be false. \n"); - HamOp.vbias(G(G.extension(), {long(OrbMats[0].size() * NMO), std::get<1>(G.sizes())}), std::forward(v), scl * a, 1.0); + HamOp.vbias(G(G.extension(), {long(OrbMats[0].size() * NMO), get<1>(G.sizes())}), std::forward(v), scl * a, 1.0); } } else { assert(G.size() == size_of_G_for_vbias()); - assert(std::get<1>(G.sizes()) == std::get<1>(v.sizes())); + assert(get<1>(G.sizes()) == get<1>(v.sizes())); HamOp.vbias(G.sliced(0, OrbMats[0].size() * NMO), std::forward(v), scl * a, 0.0); if (walker_type == COLLINEAR) HamOp.vbias(G.sliced(OrbMats[0].size() * NMO, G.size()), std::forward(v), scl * a, 1.0); @@ -284,11 +285,12 @@ class PHMSD : public AFQMCInfo template void vHS(MatX&& X, MatA&& v, double a = 1.0) { - assert(std::get<0>(X.sizes()) == HamOp.local_number_of_cholesky_vectors()); + using std::get; + assert(get<0>(X.sizes()) == HamOp.local_number_of_cholesky_vectors()); if (transposed_vHS_) - assert(std::get<1>(X.sizes()) == std::get<0>(v.sizes())); + assert(get<1>(X.sizes()) == get<0>(v.sizes())); else - assert(std::get<1>(X.sizes()) == std::get<1>(v.sizes())); + assert(get<1>(X.sizes()) == get<1>(v.sizes())); HamOp.vHS(std::forward(X), std::forward(v), a); TG.local_barrier(); } @@ -300,10 +302,11 @@ class PHMSD : public AFQMCInfo template void Energy(WlkSet& wset) { + using std::get; int nw = wset.size(); if (ovlp.num_elements() != nw) ovlp.reextent(iextensions<1u>{nw}); - if (std::get<0>(eloc.sizes()) != nw || std::get<1>(eloc.sizes()) != 3) + if (get<0>(eloc.sizes()) != nw || get<1>(eloc.sizes()) 
!= 3) eloc.reextent({nw, 3}); Energy(wset, eloc, ovlp); TG.local_barrier(); @@ -495,17 +498,18 @@ class PHMSD : public AFQMCInfo int ncol(NAEA + NAEB); //careful here, spins are stored contiguously RefOrbMats.reextent({ndet, nrow * ncol}); TG.Node().barrier(); // for safety + using std::get; if (TG.Node().root()) { boost::multi::array OA_({ - static_cast(std::get<1>(OrbMats[0].sizes())), - static_cast(std::get<0>(OrbMats[0].sizes())) + static_cast(get<1>(OrbMats[0].sizes())), + static_cast(get<0>(OrbMats[0].sizes())) }); boost::multi::array OB_({0, 0}); if (OrbMats.size() > 1) OB_.reextent({ - static_cast(std::get<1>(OrbMats[1].sizes())), - static_cast(std::get<0>(OrbMats[1].sizes())) + static_cast(get<1>(OrbMats[1].sizes())), + static_cast(get<0>(OrbMats[1].sizes())) }); ma::Matrix2MAREF('H', OrbMats[0], OA_); if (OrbMats.size() > 1) @@ -538,13 +542,15 @@ class PHMSD : public AFQMCInfo } // TG.Node().root() TG.Node().barrier(); // for safety } - assert(std::get<0>(RefOrbMats.sizes()) == ndet); - assert(std::get<1>(RefOrbMats.sizes()) == std::get<1>(A.sizes())); + + using std::get; + assert(get<0>(RefOrbMats.sizes()) == ndet); + assert(get<1>(RefOrbMats.sizes()) == get<1>(A.sizes())); auto&& RefOrbMats_(boost::multi::static_array_cast(RefOrbMats)); auto&& A_(boost::multi::static_array_cast(A)); using std::copy_n; int n0, n1; - std::tie(n0, n1) = FairDivideBoundary(TG.getLocalTGRank(), int(std::get<1>(A.sizes())), TG.getNCoresPerTG()); + std::tie(n0, n1) = FairDivideBoundary(TG.getLocalTGRank(), int(get<1>(A.sizes())), TG.getNCoresPerTG()); for (int i = 0; i < ndet; i++) copy_n(RefOrbMats_[i].origin() + n0, n1 - n0, A_[i].origin() + n0); TG.TG_local().barrier(); diff --git a/src/AFQMC/Wavefunctions/PHMSD.icc b/src/AFQMC/Wavefunctions/PHMSD.icc index 439e3db409..74ccde3276 100644 --- a/src/AFQMC/Wavefunctions/PHMSD.icc +++ b/src/AFQMC/Wavefunctions/PHMSD.icc @@ -44,8 +44,9 @@ void PHMSD::Energy_shared(const WlkSet& wset, Mat&& E, TVec&& Ov) assert(E.dimensionality == 
2); assert(Ov.dimensionality == 1); assert(E.size() == wset.size()); - assert(std::get<0>(Ov.sizes()) == wset.size()); - assert(std::get<1>(E.sizes()) == 3); + + assert(get<0>(Ov.sizes()) == wset.size()); + assert(get<1>(E.sizes()) == 3); ComplexType zero(0.0); auto Gsize = dm_size(false); @@ -61,13 +62,14 @@ void PHMSD::Energy_shared(const WlkSet& wset, Mat&& E, TVec&& Ov) GrefA.reextent({nwalk, dm_dims(false, Alpha).first, dm_dims(false, Alpha).second}); + using std::get; if (wgt.size() != nwalk) wgt.reextent(iextensions<1u>{nwalk}); if (opSpinEJ.size() != nwalk) opSpinEJ.reextent(iextensions<1u>{nwalk}); if (localGbuff.size() < 2 * Gsize) localGbuff.reextent(iextensions<1u>{2 * Gsize}); - if (std::get<0>(eloc2.sizes()) != nwalk || std::get<1>(eloc2.sizes()) != 3) + if (get<0>(eloc2.sizes()) != nwalk || get<1>(eloc2.sizes()) != 3) eloc2.reextent({nwalk, 3}); std::fill_n(Ov.origin(), nwalk, zero); @@ -423,18 +425,20 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com using ma::T; assert(G.stride(1) == 1); assert(Ov.stride(0) == 1); + + using std::get; if (transpose) - assert(std::get<0>(G.sizes()) == wset.size() && std::get<1>(G.sizes()) == size_t(dm_size(not compact))); + assert(get<0>(G.sizes()) == wset.size() && get<1>(G.sizes()) == size_t(dm_size(not compact))); else - assert(std::get<1>(G.sizes()) == wset.size() && std::get<0>(G.sizes()) == size_t(dm_size(not compact))); + assert(get<1>(G.sizes()) == wset.size() && get<0>(G.sizes()) == size_t(dm_size(not compact))); const int nw = wset.size(); auto refc = abij.reference_configuration(); double LogOverlapFactor(wset.getLogOverlapFactor()); assert(Ov.size() >= nw); std::fill_n(Ov.begin(), nw, 0); - for (int i = 0; i < std::get<0>(G.sizes()); i++) + for (int i = 0; i < get<0>(G.sizes()); i++) if (i % TG.TG_local().size() == TG.TG_local().rank()) - std::fill_n(G[i].origin(), std::get<1>(G.sizes()), ComplexType(0.0)); + std::fill_n(G[i].origin(), get<1>(G.sizes()), 
ComplexType(0.0)); TG.local_barrier(); auto Gsize = dm_size(not compact); if (compact) @@ -569,8 +573,9 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com if (compact) { ma::product(T(Rb), GB2D0_, GB2D_); - //G({GAdims.first*GAdims.second,std::get<0>(G.sizes())},iw) = GB1D_; - ma::copy(GB1D_, G({GAdims.first * GAdims.second, std::get<0>(G.sizes())}, iw)); + using std::get; + //G({GAdims.first*GAdims.second,get<0>(G.sizes())},iw) = GB1D_; + ma::copy(GB1D_, G({GAdims.first * GAdims.second, get<0>(G.sizes())}, iw)); } else { @@ -578,7 +583,7 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com ma::product(T(Rb), GB2D0_, GB2D_); ma::product(T(OrbMats.back()), GB2D_, Gfullb); //G({Gfulla.num_elements(),G.size(0)},iw) = G1D; - ma::copy(G1D, G({Gfulla.num_elements(), std::get<0>(G.sizes())}, iw)); + ma::copy(G1D, G({Gfulla.num_elements(), get<0>(G.sizes())}, iw)); } } } @@ -681,9 +686,10 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com ma::product(T(Ra), GA2D0_shm(GA2D0_shm.extension(0), {M0, Mn}), GA2D_(GA2D_.extension(0), {M0, Mn})); // can be local boost::multi::array_ref Gw(to_address(G.origin()), - {GAdims.first, GAdims.second, long(std::get<1>(G.sizes()))}); + {GAdims.first, GAdims.second, long(get<1>(G.sizes()))}); // copying by hand for now, implement strided copy in ma_blas - for (size_t k = 0; k < std::get<0>(GA2D_.sizes()); ++k) + using std::get; + for (size_t k = 0; k < get<0>(GA2D_.sizes()); ++k) for (size_t m = M0; m < Mn; ++m) Gw[k][m][iw] = GA2D_[k][m]; } @@ -693,10 +699,11 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com GA2D_(GA2D_.extension(0), {M0, Mn})); // can be local ma::product(T(OrbMats[0]), GA2D_(GA2D_.extension(0), {M0, Mn}), Gfulla(Gfulla.extension(0), {M0, Mn})); // can be local + using std::get; boost::multi::array_ref Gw(to_address(G.origin()), - {long(std::get<0>(Gfulla.sizes())), 
long(std::get<1>(Gfulla.sizes())), long(std::get<1>(G.sizes()))}); + {long(get<0>(Gfulla.sizes())), long(get<1>(Gfulla.sizes())), long(get<1>(G.sizes()))}); // copying by hand for now, implement strided copy in ma_blas - for (size_t k = 0; k < std::get<0>(Gfulla.sizes()); ++k) + for (size_t k = 0; k < get<0>(Gfulla.sizes()); ++k) for (size_t m = M0; m < Mn; ++m) Gw[k][m][iw] = Gfulla[k][m]; } @@ -731,10 +738,12 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com { ma::product(T(Rb), GB2D0_shm(GB2D0_shm.extension(0), {M0, Mn}), GB2D_(GB2D_.extension(0), {M0, Mn})); // can be local + using std::get; boost::multi::array_ref Gw(to_address(G[GAdims.first * GAdims.second].origin()), - {GBdims.first, GBdims.second, long(std::get<1>(G.sizes()))}); + {GBdims.first, GBdims.second, long(get<1>(G.sizes()))}); // copying by hand for now, implement strided copy in ma_blas - for (size_t k = 0; k < std::get<0>(GB2D_.sizes()); ++k) + using std::get; + for (size_t k = 0; k < get<0>(GB2D_.sizes()); ++k) for (size_t m = M0; m < Mn; ++m) Gw[k][m][iw] = GB2D_[k][m]; } @@ -744,10 +753,11 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com GB2D_(GB2D_.extension(0), {M0, Mn})); // can be local ma::product(T(OrbMats[0]), GB2D_(GB2D_.extension(0), {M0, Mn}), Gfullb(Gfullb.extension(0), {M0, Mn})); // can be local + using std::get; boost::multi::array_ref Gw(to_address(G[Gfulla.num_elements()].origin()), - {long(std::get<0>(Gfullb.sizes())), long(std::get<1>(Gfullb.sizes())), long(std::get<1>(G.sizes()))}); + {long(get<0>(Gfullb.sizes())), long(get<1>(Gfullb.sizes())), long(get<1>(G.sizes()))}); // copying by hand for now, implement strided copy in ma_blas - for (size_t k = 0; k < std::get<0>(Gfullb.sizes()); ++k) + for (size_t k = 0; k < get<0>(Gfullb.sizes()); ++k) for (size_t m = M0; m < Mn; ++m) Gw[k][m][iw] = Gfullb[k][m]; } @@ -769,8 +779,8 @@ void PHMSD::MixedDensityMatrix(const WlkSet& wset, MatG&& G, TVec&& Ov, bool com 
else { auto Ov_ = Ov.origin(); - const size_t nw_ = std::get<1>(G.sizes()); - for (int ik = 0; ik < std::get<0>(G.sizes()); ++ik) + const size_t nw_ = get<1>(G.sizes()); + for (int ik = 0; ik < get<0>(G.sizes()); ++ik) if (ik % TG.TG_local().size() == TG.TG_local().rank()) { auto Gik = to_address(G[ik].origin()); @@ -814,6 +824,7 @@ void PHMSD::DensityMatrix_shared(const WlkSet& wset, if (localGbuff.size() < Gsize) localGbuff.reextent(iextensions<1u>{Gsize}); + using std::get; if (walker_type != COLLINEAR) { if (herm) @@ -841,15 +852,15 @@ void PHMSD::DensityMatrix_shared(const WlkSet& wset, else { if (herm) - assert(std::get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && RefA.size(1) == dm_dims(false, Alpha).second); + assert(get<0>(RefA.sizes()) == dm_dims(false, Alpha).first && RefA.size(1) == dm_dims(false, Alpha).second); else - assert(std::get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && RefA.size(0) == dm_dims(false, Alpha).second); + assert(get<1>(RefA.sizes()) == dm_dims(false, Alpha).first && RefA.size(0) == dm_dims(false, Alpha).second); if (herm) - assert(std::get<0>(RefB.sizes()) == dm_dims(false, Beta).first && RefB.size(1) == dm_dims(false, Beta).second); + assert(get<0>(RefB.sizes()) == dm_dims(false, Beta).first && RefB.size(1) == dm_dims(false, Beta).second); else - assert(std::get<1>(RefB.sizes()) == dm_dims(false, Beta).first && RefB.size(0) == dm_dims(false, Beta).second); + assert(get<1>(RefB.sizes()) == dm_dims(false, Beta).first && RefB.size(0) == dm_dims(false, Beta).second); - if (std::get<0>(ovlp2.sizes()) < 2 * nw) + if (get<0>(ovlp2.sizes()) < 2 * nw) ovlp2.reextent(iextensions<1u>{2 * nw}); fill_n(ovlp2.origin(), 2 * nw, ComplexType(0.0)); auto GAdims = dm_dims(not compact, Alpha); @@ -1200,7 +1211,8 @@ void PHMSD::OrthogonalizeExcited(Mat&& A, SpinTypes spin, double LogOverlapFacto APP_ABORT(" Error: OrthogonalizeExcited not implemented with NONCOLLINEAR.\n"); if (spin == Alpha) { - if (std::get<0>(extendedMatAlpha.sizes()) 
!= NMO || std::get<1>(extendedMatAlpha.sizes()) != maxOccupExtendedMat.first) + using std::get; + if (get<0>(extendedMatAlpha.sizes()) != NMO || get<1>(extendedMatAlpha.sizes()) != maxOccupExtendedMat.first) extendedMatAlpha.reextent({NMO, maxOccupExtendedMat.first}); extendedMatAlpha(extendedMatAlpha.extension(0), {0, NAEA}) = A; extendedMatAlpha(extendedMatAlpha.extension(0), {NAEA + 1, maxOccupExtendedMat.first}) = @@ -1222,7 +1234,8 @@ void PHMSD::OrthogonalizeExcited(Mat&& A, SpinTypes spin, double LogOverlapFacto } else { - if (std::get<0>(extendedMatBeta.sizes()) != NMO || std::get<1>(extendedMatBeta.sizes()) != maxOccupExtendedMat.second) + using std::get; + if (get<0>(extendedMatBeta.sizes()) != NMO || get<1>(extendedMatBeta.sizes()) != maxOccupExtendedMat.second) extendedMatBeta.reextent({NMO, maxOccupExtendedMat.second}); extendedMatBeta(extendedMatBeta.extension(0), {0, NAEB}) = A; extendedMatBeta(extendedMatBeta.extension(0), {NAEB + 1, maxOccupExtendedMat.second}) = diff --git a/src/AFQMC/Wavefunctions/phmsd_helpers.hpp b/src/AFQMC/Wavefunctions/phmsd_helpers.hpp index 2c4f8d3df4..b82fdcef70 100644 --- a/src/AFQMC/Wavefunctions/phmsd_helpers.hpp +++ b/src/AFQMC/Wavefunctions/phmsd_helpers.hpp @@ -126,9 +126,11 @@ inline void calculate_R(int rank, std::vector WORK(abij.maximum_excitation_number()[spin] * abij.maximum_excitation_number()[spin]); auto confgs = abij.configurations_begin(); auto refc = abij.reference_configuration(spin); - for (int i = 0; i < std::get<0>(R.sizes()); i++) - std::fill_n(R[i].origin(), std::get<1>(R.sizes()), ComplexType(0)); - int NEL = std::get<1>(T.sizes()); + + using std::get; + for (int i = 0; i < get<0>(R.sizes()); i++) + std::fill_n(R[i].origin(), get<1>(R.sizes()), ComplexType(0)); + int NEL = get<1>(T.sizes()); std::vector orbs(NEL); ComplexType ov_a; // add reference contribution!!! 
diff --git a/src/AFQMC/Wavefunctions/tests/test_phmsd.cpp b/src/AFQMC/Wavefunctions/tests/test_phmsd.cpp index 68b7b42ac5..bf82dfbc41 100644 --- a/src/AFQMC/Wavefunctions/tests/test_phmsd.cpp +++ b/src/AFQMC/Wavefunctions/tests/test_phmsd.cpp @@ -233,9 +233,11 @@ void test_phmsd(boost::mpi3::communicator& world) RandomGenerator rng; WalkerSet wset(TG, doc3.getRoot(), InfoMap["info0"], rng); auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); wset.resize(nwalk, initial_guess[0], initial_guess[1](initial_guess.extension(1), {0, NAEB})); // 1. Test Overlap Explicitly diff --git a/src/AFQMC/Wavefunctions/tests/test_wfn_factory.cpp b/src/AFQMC/Wavefunctions/tests/test_wfn_factory.cpp index 4f4c02d651..1a06baf674 100644 --- a/src/AFQMC/Wavefunctions/tests/test_wfn_factory.cpp +++ b/src/AFQMC/Wavefunctions/tests/test_wfn_factory.cpp @@ -154,9 +154,11 @@ void wfn_fac(boost::mpi3::communicator& world) //nwalk=nw; WalkerSet wset(TG, doc3.getRoot(), InfoMap["info0"], rng); auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NPOL * NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NPOL * NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); if (type == COLLINEAR) wset.resize(nwalk, initial_guess[0], initial_guess[1](initial_guess.extension(1), {0, NAEB})); @@ -239,6 +241,8 @@ void wfn_fac(boost::mpi3::communicator& world) TG.local_barrier(); t1 = Time.elapsed(); ComplexType Vsum = 0; + + using std::get; if (std::abs(file_data.Vsum) 
> 1e-8) { for (int n = 0; n < nwalk; n++) @@ -246,12 +250,12 @@ void wfn_fac(boost::mpi3::communicator& world) Vsum = 0; if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[n][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][n]; } CHECK(Vsum == ComplexApprox(file_data.Vsum)); @@ -262,12 +266,12 @@ void wfn_fac(boost::mpi3::communicator& world) Vsum = 0; if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[0][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][0]; } app_log() << " Vsum: " << setprecision(12) << Vsum << " Time: " << t1 << std::endl; @@ -290,9 +294,10 @@ void wfn_fac(boost::mpi3::communicator& world) WalkerSet wset2(TG, doc3.getRoot(), InfoMap["info0"], rng); //auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NPOL * NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NPOL * NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); if (type == COLLINEAR) wset2.resize(nwalk, initial_guess[0], initial_guess[1](initial_guess.extension(1), {0, NAEB})); @@ -361,12 +366,12 @@ void wfn_fac(boost::mpi3::communicator& world) Vsum = 0; if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[n][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][n]; } CHECK(Vsum == ComplexApprox(file_data.Vsum)); @@ -377,12 +382,12 @@ void wfn_fac(boost::mpi3::communicator& 
world) Vsum = 0; if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[0][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][0]; } app_log() << " Vsum: " << setprecision(12) << Vsum << std::endl; @@ -486,9 +491,11 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) WalkerSet wset(TG, doc3.getRoot(), InfoMap["info0"], rng); auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NPOL * NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NPOL * NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); if (type == COLLINEAR) wset.resize(nwalk, initial_guess[0], initial_guess[1](initial_guess.extension(1), {0, NAEB})); @@ -584,6 +591,8 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) TG.local_barrier(); t1 = Time.elapsed(); ComplexType Vsum = 0; + + using std::get; if (std::abs(file_data.Vsum) > 1e-8) { for (int n = 0; n < nwalk; n++) @@ -593,12 +602,12 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) { if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[n][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][n]; } } @@ -613,12 +622,12 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) { if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[0][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); 
i++) Vsum += vHS[i][0]; } } @@ -643,9 +652,10 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) WalkerSet wset2(TG, doc3.getRoot(), InfoMap["info0"], rng); //auto initial_guess = WfnFac.getInitialGuess(wfn_name); - REQUIRE(std::get<0>(initial_guess.sizes()) == 2); - REQUIRE(std::get<1>(initial_guess.sizes()) == NPOL * NMO); - REQUIRE(std::get<2>(initial_guess.sizes()) == NAEA); + using std::get; + REQUIRE(get<0>(initial_guess.sizes()) == 2); + REQUIRE(get<1>(initial_guess.sizes()) == NPOL * NMO); + REQUIRE(get<2>(initial_guess.sizes()) == NAEA); if (type == COLLINEAR) wset2.resize(nwalk, initial_guess[0], initial_guess[1](initial_guess.extension(1), {0, NAEB})); @@ -722,6 +732,8 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) wfn2.vHS(X2, vHS, sqrtdt); TG.local_barrier(); Vsum = 0; + + using std::get; if (std::abs(file_data.Vsum) > 1e-8) { for (int n = 0; n < nwalk; n++) @@ -731,12 +743,12 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) { if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[n][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][n]; } } @@ -751,12 +763,12 @@ void wfn_fac_distributed(boost::mpi3::communicator& world, int ngroups) { if (wfn.transposed_vHS()) { - for (int i = 0; i < std::get<1>(vHS.sizes()); i++) + for (int i = 0; i < get<1>(vHS.sizes()); i++) Vsum += vHS[0][i]; } else { - for (int i = 0; i < std::get<0>(vHS.sizes()); i++) + for (int i = 0; i < get<0>(vHS.sizes()); i++) Vsum += vHS[i][0]; } } diff --git a/src/io/hdf/hdf_multi.h b/src/io/hdf/hdf_multi.h index 76ee11bee5..770b6f4cac 100644 --- a/src/io/hdf/hdf_multi.h +++ b/src/io/hdf/hdf_multi.h @@ -63,8 +63,9 @@ struct h5data_proxy> : public h5_space_type(a.sizes()); - dims[1] = std::get<1>(a.sizes()); + using std::get; + dims[0] = 
get<0>(a.sizes()); + dims[1] = get<1>(a.sizes()); } inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) @@ -92,12 +93,13 @@ struct h5data_proxy> : public h5_space_type(grp, aname, FileSpace::rank, dims)) { if (dims[0] > 0) { std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." << std::endl; - std::cerr << dims[0] << " " << std::get<0>(ref.sizes()) << std::endl; + std::cerr << dims[0] << " " << get<0>(ref.sizes()) << std::endl; } return false; } @@ -120,18 +122,21 @@ struct h5data_proxy> : public h5_space_type(a.sizes()); - dims[1] = std::get<1>(a.sizes()); + using std::get; + dims[0] = get<0>(a.sizes()); + dims[1] = get<1>(a.sizes()); } inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) { + using std::get; + if (!checkShapeConsistency(grp, aname, FileSpace::rank, dims)) { if (dims[0] * dims[1] > 0) { std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." << std::endl; - std::cerr << dims[0] << " " << dims[1] << " " << std::get<0>(ref.sizes()) << " " << std::get<1>(ref.sizes()) << std::endl; + std::cerr << dims[0] << " " << dims[1] << " " << get<0>(ref.sizes()) << " " << get<1>(ref.sizes()) << std::endl; } return false; } @@ -257,18 +262,20 @@ struct h5data_proxy>> : inline h5data_proxy(const data_type& a) { - dims[0] = std::get<0>(a.sizes()); - dims[1] = std::get<1>(a.sizes()); + using std::get; + dims[0] = get<0>(a.sizes()); + dims[1] = get<1>(a.sizes()); } inline bool read(data_type& ref, hid_t grp, const std::string& aname, hid_t xfer_plist = H5P_DEFAULT) { + using std::get; if (!checkShapeConsistency(grp, aname, FileSpace::rank, dims)) { if (dims[0] * dims[1] > 0) { std::cerr << " Error: multi::array_ref can't be resized in h5data_proxy<>::read." 
<< std::endl; - std::cerr << dims[0] << " " << dims[1] << " " << std::get<0>(ref.sizes()) << " " << std::get<1>(ref.sizes()) << std::endl; + std::cerr << dims[0] << " " << dims[1] << " " << get<0>(ref.sizes()) << " " << get<1>(ref.sizes()) << std::endl; } return false; }