From b4686616d884e11f4a624989d370b3c032d0c1c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Wed, 10 Aug 2022 18:37:14 +0200 Subject: [PATCH 0001/1346] Initialize particle runtime attributes inside AddNParticles (#3272) * Initialize particle runtime attributes before calling AddNParticles * Avoid Multiplication result converted to larger type warning * Directly initialize runtime attributes inside AddNParticles * Update doxygen comment --- .../analysis_rigid_injection_LabFrame.py | 15 +++ .../Modules/RigidInjection/inputs_2d_LabFrame | 4 + Python/pywarpx/_libwarpx.py | 7 +- .../benchmarks_json/RigidInjection_lab.json | 2 + Source/Particles/LaserParticleContainer.H | 11 ++ Source/Particles/LaserParticleContainer.cpp | 2 +- Source/Particles/PhysicalParticleContainer.H | 19 +++ .../Particles/PhysicalParticleContainer.cpp | 121 +++++++++++++++++- Source/Particles/WarpXParticleContainer.H | 40 +++++- Source/Particles/WarpXParticleContainer.cpp | 67 ++++++---- Source/Python/WarpXWrappers.H | 6 +- Source/Python/WarpXWrappers.cpp | 8 +- 12 files changed, 267 insertions(+), 35 deletions(-) diff --git a/Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py b/Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py index 624fd8f7cf0..ee88e32252d 100755 --- a/Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py +++ b/Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py @@ -18,6 +18,9 @@ As a help to the user, the script also compares beam width to the theory in case rigid injection is OFF (i.e., the beam starts expanding from -5 microns), in which case a warning is raised. + +Additionally, this script tests that runtime attributes are correctly initialized +with the gaussian_beam injection style. ''' import os @@ -85,5 +88,17 @@ def remove_rigid_lines(plotfile, nlines_if_rigid): assert( error_rel < tolerance_rel ) + +### Check that user runtime attributes are correctly initialized +filename_start = filename[:-5] + '00000' +ds_start = yt.load( filename_start ) +ad_start = ds_start.all_data() +x = ad_start['beam', 'particle_position_x'] +z = ad_start['beam', 'particle_position_y'] +orig_z = ad_start['beam', 'particle_orig_z'] +center = ad_start['beam', 'particle_center'] +assert(np.array_equal(z, orig_z)) +assert(np.array_equal(1*(np.abs(x) < 5.e-7), center)) + test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Modules/RigidInjection/inputs_2d_LabFrame b/Examples/Modules/RigidInjection/inputs_2d_LabFrame index 76c2fc0c937..e5fe21c73cc 100644 --- a/Examples/Modules/RigidInjection/inputs_2d_LabFrame +++ b/Examples/Modules/RigidInjection/inputs_2d_LabFrame @@ -43,6 +43,10 @@ beam.uy_th = 100. beam.uz_th = 0. 
beam.zinject_plane = 20.e-6 beam.rigid_advance = true +beam.addRealAttributes = orig_z +beam.addIntegerAttributes = center +beam.attribute.orig_z(x,y,z,ux,uy,uz,t) = z +beam.attribute.center(x,y,z,ux,uy,uz,t) = 1*(sqrt(x*x + y*y)<5.e-7) # Diagnostics diagnostics.diags_names = diag1 diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 65a4d3cd543..fcf7f45d637 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -268,6 +268,8 @@ class Particle(ctypes.Structure): _ndpointer(c_particlereal, flags="C_CONTIGUOUS"), ctypes.c_int, _ndpointer(c_particlereal, flags="C_CONTIGUOUS"), + ctypes.c_int, + _ndpointer(ctypes.c_int, flags="C_CONTIGUOUS"), ctypes.c_int) self.libwarpx_so.warpx_getProbLo.restype = c_real @@ -625,6 +627,9 @@ def add_particles(self, species_name, x=None, y=None, z=None, ux=None, uy=None, # --- The -3 is because components 1 to 3 are velocities attr[:,self.get_particle_comp_index(species_name, key)-3] = vals + nattr_int = 0 + attr_int = np.empty([0], ctypes.c_int) + # Iff x/y/z/ux/uy/uz are not numpy arrays of the correct dtype, new # array copies are made with the correct dtype x = x.astype(self._numpy_particlereal_dtype, copy=False) @@ -636,7 +641,7 @@ def add_particles(self, species_name, x=None, y=None, z=None, ux=None, uy=None, self.libwarpx_so.warpx_addNParticles( ctypes.c_char_p(species_name.encode('utf-8')), x.size, - x, y, z, ux, uy, uz, nattr, attr, unique_particles + x, y, z, ux, uy, uz, nattr, attr, nattr_int, attr_int, unique_particles ) def get_particle_count(self, species_name, local=False): diff --git a/Regression/Checksum/benchmarks_json/RigidInjection_lab.json b/Regression/Checksum/benchmarks_json/RigidInjection_lab.json index c1803e7e918..b1ed4f4ae0b 100644 --- a/Regression/Checksum/benchmarks_json/RigidInjection_lab.json +++ b/Regression/Checksum/benchmarks_json/RigidInjection_lab.json @@ -1,8 +1,10 @@ { "beam": { + "particle_center": 776.0, "particle_momentum_x": 4.368290868012154e-17, "particle_momentum_y": 4.438068704747877e-17, "particle_momentum_z": 5.461849061470348e-16, + "particle_orig_z": 0.010011399181766063, "particle_position_x": 0.003457868696205348, "particle_position_y": 0.07921343233374142, "particle_weight": 62415.090744607616 diff --git a/Source/Particles/LaserParticleContainer.H b/Source/Particles/LaserParticleContainer.H index 21c3e0de375..6332807d923 100644 --- a/Source/Particles/LaserParticleContainer.H +++ b/Source/Particles/LaserParticleContainer.H @@ -14,6 +14,7 @@ #include "WarpXParticleContainer.H" #include +#include #include #include #include @@ -45,6 +46,16 @@ public: virtual void InitData () final; + /** + * \brief Method to initialize runtime attributes. Does nothing for LaserParticleContainer. 
+ */ + virtual void DefaultInitializeRuntimeAttributes ( + amrex::ParticleTile& /*pinned_tile*/, + const int /*n_external_attr_real*/, + const int /*n_external_attr_int*/, + const amrex::RandomEngine& /*engine*/) override final {} + virtual void ReadHeader (std::istream& is) final; virtual void WriteHeader (std::ostream& os) const final; diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index dae759cee36..155bc49bcca 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -516,7 +516,7 @@ LaserParticleContainer::InitData (int lev) AddNParticles(0, np, particle_x.dataPtr(), particle_y.dataPtr(), particle_z.dataPtr(), particle_ux.dataPtr(), particle_uy.dataPtr(), particle_uz.dataPtr(), - 1, particle_w.dataPtr(), 1); + 1, particle_w.dataPtr(), 0, nullptr, 1); } void diff --git a/Source/Particles/PhysicalParticleContainer.H b/Source/Particles/PhysicalParticleContainer.H index aa077d3bcbb..264c7d9325a 100644 --- a/Source/Particles/PhysicalParticleContainer.H +++ b/Source/Particles/PhysicalParticleContainer.H @@ -221,6 +221,25 @@ public: amrex::Gpu::HostVector& particle_uz, amrex::Gpu::HostVector& particle_w); + /** + * \brief Default initialize runtime attributes in a tile. This routine does not initialize the + * first n_external_attr_real real attributes and the first n_external_attr_int integer + * attributes, which have been in principle externally set elsewhere. + * + * @param[inout] pinned_tile the tile in which attributes are initialized + * @param[in] n_external_attr_real The number of real attributes that have been externally set. + * These are NOT initialized by this function. + * @param[in] n_external_attr_int The number of integer attributes that have been externally set. + * These are NOT initialized by this function. 
+ * @param[in] engine the random engine, used in initialization of QED optical depths + */ + virtual void DefaultInitializeRuntimeAttributes ( + amrex::ParticleTile& pinned_tile, + const int n_external_attr_real, + const int n_external_attr_int, + const amrex::RandomEngine& engine) override final; + virtual void GetParticleSlice ( const int direction, const amrex::Real z_old, const amrex::Real z_new, const amrex::Real t_boost, diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index c075c6d2576..72543982bfe 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -517,7 +517,7 @@ PhysicalParticleContainer::AddGaussianBeam ( AddNParticles(0,np, particle_x.dataPtr(), particle_y.dataPtr(), particle_z.dataPtr(), particle_ux.dataPtr(), particle_uy.dataPtr(), particle_uz.dataPtr(), - 1, particle_w.dataPtr(),1); + 1, particle_w.dataPtr(), 0, nullptr, 1); } void @@ -625,7 +625,7 @@ PhysicalParticleContainer::AddPlasmaFromFile(ParticleReal q_tot, AddNParticles(0, np, particle_x.dataPtr(), particle_y.dataPtr(), particle_z.dataPtr(), particle_ux.dataPtr(), particle_uy.dataPtr(), particle_uz.dataPtr(), - 1, particle_w.dataPtr(),1); + 1, particle_w.dataPtr(), 0, nullptr, 1); #endif // WARPX_USE_OPENPMD ignore_unused(q_tot, z_shift); @@ -633,6 +633,118 @@ PhysicalParticleContainer::AddPlasmaFromFile(ParticleReal q_tot, return; } +void +PhysicalParticleContainer::DefaultInitializeRuntimeAttributes ( + amrex::ParticleTile& pinned_tile, + const int n_external_attr_real, + const int n_external_attr_int, + const amrex::RandomEngine& engine) +{ + using namespace amrex::literals; + + const int np = pinned_tile.numParticles(); + + // Preparing data needed for user defined attributes + const int n_user_real_attribs = m_user_real_attribs.size(); + const int n_user_int_attribs = m_user_int_attribs.size(); + const auto get_position = GetParticlePosition(pinned_tile); + const auto soa = pinned_tile.getParticleTileData(); + const amrex::ParticleReal* AMREX_RESTRICT ux = soa.m_rdata[PIdx::ux]; + const amrex::ParticleReal* AMREX_RESTRICT uy = soa.m_rdata[PIdx::uy]; + const amrex::ParticleReal* AMREX_RESTRICT uz = soa.m_rdata[PIdx::uz]; + constexpr int lev = 0; + const amrex::Real t = WarpX::GetInstance().gett_new(lev); + +#ifndef WARPX_QED + amrex::ignore_unused(engine); +#endif + + // Initialize the last NumRuntimeRealComps() - n_external_attr_real runtime real attributes + for (int j = PIdx::nattribs + n_external_attr_real; j < NumRealComps() ; ++j) + { + amrex::Vector attr_temp(np, 0.0_prt); +#ifdef WARPX_QED + // Current runtime comp is quantum synchrotron optical depth + if (particle_comps.find("opticalDepthQSR") != particle_comps.end() && + particle_comps["opticalDepthQSR"] == j) + { + const QuantumSynchrotronGetOpticalDepth quantum_sync_get_opt = + m_shr_p_qs_engine->build_optical_depth_functor();; + for (int i = 0; i < np; ++i) { + attr_temp[i] = quantum_sync_get_opt(engine); + } + } + + // Current runtime comp is Breit-Wheeler optical depth + if (particle_comps.find("opticalDepthBW") != particle_comps.end() && + particle_comps["opticalDepthBW"] == j) + { + const BreitWheelerGetOpticalDepth breit_wheeler_get_opt = + m_shr_p_bw_engine->build_optical_depth_functor();; + for (int i = 0; i < np; ++i) { + attr_temp[i] = breit_wheeler_get_opt(engine); + } + } +#endif + + for (int ia = 0; ia < n_user_real_attribs; ++ia) + { + // Current runtime comp is ia-th user defined attribute + if 
(particle_comps.find(m_user_real_attribs[ia]) != particle_comps.end() && + particle_comps[m_user_real_attribs[ia]] == j) + { + amrex::ParticleReal xp, yp, zp; + const amrex::ParserExecutor<7> user_real_attrib_parserexec = + m_user_real_attrib_parser[ia]->compile<7>(); + for (int i = 0; i < np; ++i) { + get_position(i, xp, yp, zp); + attr_temp[i] = user_real_attrib_parserexec(xp, yp, zp, + ux[i], uy[i], uz[i], t); + } + } + } + + pinned_tile.push_back_real(j, attr_temp.data(), attr_temp.data() + np); + } + + // Initialize the last NumRuntimeIntComps() - n_external_attr_int runtime int attributes + for (int j = n_external_attr_int; j < NumIntComps() ; ++j) + { + amrex::Vector attr_temp(np, 0); + + // Current runtime comp is ionization level + if (particle_icomps.find("ionizationLevel") != particle_icomps.end() && + particle_icomps["ionizationLevel"] == j) + { + for (int i = 0; i < np; ++i) { + attr_temp[i] = ionization_initial_level; + } + } + + for (int ia = 0; ia < n_user_int_attribs; ++ia) + { + // Current runtime comp is ia-th user defined attribute + if (particle_icomps.find(m_user_int_attribs[ia]) != particle_icomps.end() && + particle_icomps[m_user_int_attribs[ia]] == j) + { + amrex::ParticleReal xp, yp, zp; + const amrex::ParserExecutor<7> user_int_attrib_parserexec = + m_user_int_attrib_parser[ia]->compile<7>(); + for (int i = 0; i < np; ++i) { + get_position(i, xp, yp, zp); + attr_temp[i] = static_cast( + user_int_attrib_parserexec(xp, yp, zp, ux[i], uy[i], uz[i], t)); + } + } + } + + pinned_tile.push_back_int(j, attr_temp.data(), attr_temp.data() + np); + } + +} + + void PhysicalParticleContainer::CheckAndAddParticle ( ParticleReal x, ParticleReal y, ParticleReal z, @@ -679,7 +791,7 @@ PhysicalParticleContainer::AddParticles (int lev) &(plasma_injector->single_particle_vel[0]), &(plasma_injector->single_particle_vel[1]), &(plasma_injector->single_particle_vel[2]), - 1, &(plasma_injector->single_particle_weight), 0); + 1, &(plasma_injector->single_particle_weight), 0, nullptr, 0); return; } @@ -701,7 +813,7 @@ PhysicalParticleContainer::AddParticles (int lev) plasma_injector->multiple_particles_vel_x.dataPtr(), plasma_injector->multiple_particles_vel_y.dataPtr(), plasma_injector->multiple_particles_vel_z.dataPtr(), - 1, plasma_injector->multiple_particles_weight.dataPtr(), 0); + 1, plasma_injector->multiple_particles_weight.dataPtr(), 0, nullptr, 0); return; } @@ -2189,6 +2301,7 @@ PhysicalParticleContainer::SplitParticles (int lev) psplit_uz.dataPtr(), 1, psplit_w.dataPtr(), + 0, nullptr, 1, NoSplitParticleID); // Copy particles from tmp to current particle container addParticles(pctmp_split,1); diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index 1f1219935fe..d6154a2d564 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -143,6 +144,17 @@ public: void AllocData (); + /** + * \brief Virtual method to initialize runtime attributes. Must be overriden by each derived + * class. + */ + virtual void DefaultInitializeRuntimeAttributes ( + amrex::ParticleTile& pinned_tile, + const int n_external_attr_real, + const int n_external_attr_int, + const amrex::RandomEngine& engine) = 0; + /// /// This pushes the particle positions by one half time step. 
/// It is used to desynchronize the particles after initializaton @@ -246,10 +258,36 @@ public: amrex::ParticleReal maxParticleVelocity(bool local = false); + /** + * \brief Adds n particles to the simulation + * + * @param[in] lev refinement level (unused) + * @param[in] n the number of particles to add + * @param[in] x x component of the position of particles to be added + * @param[in] y y component of the position of particles to be added + * @param[in] z z component of the position of particles to be added + * @param[in] ux x component of the momentum of particles to be added + * @param[in] uy y component of the momentum of particles to be added + * @param[in] uz z component of the momentum of particles to be added + * @param[in] nattr_real number of runtime real attributes to initialize with the attr_real + * array. (particle weight is treated as a real attribute in this routine). The remaining + * runtime real attributes are initialized in the method DefaultInitializeRuntimeAttributes. + * @param[in] attr_real value of real attributes to initialize + * @param[in] nattr_int number of runtime int attributes to initialize with the attr_int array. + * The remaining runtime int attributes are initialized in the method + * DefaultInitializeRuntimeAttributes. + * @param[in] attr_int value of int attributes to initialize + * @param[in] uniqueparticles if true, each MPI rank calling this function creates n + * particles. Else, all MPI ranks work together to create n particles in total. + * @param[in] id if different than -1, this id will be assigned to the particles (used for + * particle tagging in some routines, e.g. SplitParticle) + */ void AddNParticles (int lev, int n, const amrex::ParticleReal* x, const amrex::ParticleReal* y, const amrex::ParticleReal* z, const amrex::ParticleReal* vx, const amrex::ParticleReal* vy, const amrex::ParticleReal* vz, - int nattr, const amrex::ParticleReal* attr, int uniqueparticles, amrex::Long id=-1); + const int nattr_real, const amrex::ParticleReal* attr_real, + const int nattr_int, const int* attr_int, + int uniqueparticles, amrex::Long id=-1); virtual void ReadHeader (std::istream& is) = 0; diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index d3060e8e399..3421fb9f587 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -140,17 +141,25 @@ WarpXParticleContainer::AllocData () void WarpXParticleContainer::AddNParticles (int /*lev*/, - int n, const ParticleReal* x, const ParticleReal* y, const ParticleReal* z, - const ParticleReal* vx, const ParticleReal* vy, const ParticleReal* vz, - int nattr, const ParticleReal* attr, int uniqueparticles, amrex::Long id) + int n, const amrex::ParticleReal* x, + const amrex::ParticleReal* y, + const amrex::ParticleReal* z, + const amrex::ParticleReal* vx, + const amrex::ParticleReal* vy, + const amrex::ParticleReal* vz, + const int nattr_real, const amrex::ParticleReal* attr_real, + const int nattr_int, const int* attr_int, + int uniqueparticles, amrex::Long id) { + using namespace amrex::literals; + int ibegin, iend; if (uniqueparticles) { ibegin = 0; iend = n; } else { - int myproc = ParallelDescriptor::MyProc(); - int nprocs = ParallelDescriptor::NProcs(); + int myproc = amrex::ParallelDescriptor::MyProc(); + int nprocs = amrex::ParallelDescriptor::NProcs(); int navg = n/nprocs; int nleft = n - navg * nprocs; if (myproc < nleft) { @@ 
-166,7 +175,7 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, // Redistribute() will move them to proper places. auto& particle_tile = DefineAndReturnParticleTile(0, 0, 0); - using PinnedTile = ParticleTile; PinnedTile pinned_tile; pinned_tile.define(NumRuntimeRealComps(), NumRuntimeIntComps()); @@ -174,10 +183,10 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, std::size_t np = iend-ibegin; // treat weight as a special attr since it will always be specified - Vector weight(np); + amrex::Vector weight(np); #ifdef WARPX_DIM_RZ - Vector theta(np); + amrex::Vector theta(np); #endif for (int i = ibegin; i < iend; ++i) @@ -189,7 +198,7 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, } else { p.id() = id; } - p.cpu() = ParallelDescriptor::MyProc(); + p.cpu() = amrex::ParallelDescriptor::MyProc(); #if defined(WARPX_DIM_3D) p.pos(0) = x[i]; p.pos(1) = y[i]; @@ -211,7 +220,7 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, pinned_tile.push_back(p); // grab weight from the attr array - weight[i-ibegin] = attr[i*nattr]; + weight[i-ibegin] = attr_real[i*nattr_real]; } if (np > 0) @@ -232,29 +241,41 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, pinned_tile.push_back_real(comp, theta.data(), theta.data() + np); } else { - pinned_tile.push_back_real(comp, np, 0.0); + pinned_tile.push_back_real(comp, np, 0.0_prt); } #else - pinned_tile.push_back_real(comp, np, 0.0); + pinned_tile.push_back_real(comp, np, 0.0_prt); #endif } - for (int j = PIdx::nattribs; j < NumRealComps(); ++j) + // Initialize nattr_real - 1 runtime real attributes from data in the attr_real array + for (int j = PIdx::nattribs; j < PIdx::nattribs + nattr_real - 1; ++j) { - if (j - PIdx::nattribs < nattr - 1) { - // get the next attribute from attr array - Vector attr_vals(np); - for (int i = ibegin; i < iend; ++i) - { - attr_vals[i-ibegin] = attr[j - PIdx::nattribs + 1 + i*nattr]; - } - pinned_tile.push_back_real(j, attr_vals.data(), attr_vals.data() + np); + // get the next attribute from attr_real array + amrex::Vector attr_vals(np); + for (int i = ibegin; i < iend; ++i) + { + attr_vals[i-ibegin] = attr_real[j - PIdx::nattribs + 1 + i*nattr_real]; } - else { - pinned_tile.push_back_real(j, np, 0.0); + pinned_tile.push_back_real(j, attr_vals.data(), attr_vals.data() + np); + } + + // Initialize nattr_int runtime integer attributes from data in the attr_int array + for (int j = 0; j < nattr_int; ++j) + { + // get the next attribute from attr_int array + amrex::Vector attr_vals(np); + for (int i = ibegin; i < iend; ++i) + { + attr_vals[i-ibegin] = attr_int[j + i*nattr_int]; } + pinned_tile.push_back_int(j, attr_vals.data(), attr_vals.data() + np); } + // Default initialize the other real and integer runtime attributes + DefaultInitializeRuntimeAttributes(pinned_tile, nattr_real - 1, nattr_int, + amrex::RandomEngine{}); + auto old_np = particle_tile.numParticles(); auto new_np = old_np + pinned_tile.numParticles(); particle_tile.resize(new_np); diff --git a/Source/Python/WarpXWrappers.H b/Source/Python/WarpXWrappers.H index 9d4f2475b93..7cded8a891b 100644 --- a/Source/Python/WarpXWrappers.H +++ b/Source/Python/WarpXWrappers.H @@ -62,8 +62,10 @@ extern "C" { amrex::ParticleReal const * vx, amrex::ParticleReal const * vy, amrex::ParticleReal const * vz, - int nattr, - amrex::ParticleReal const * attr, + const int nattr_real, + amrex::ParticleReal const * attr_real, + const int nattr_int, + int const * attr_int, int uniqueparticles); void warpx_ConvertLabParamsToBoost(); diff --git 
a/Source/Python/WarpXWrappers.cpp b/Source/Python/WarpXWrappers.cpp index 4cca0b3c1bb..f2855a145a4 100644 --- a/Source/Python/WarpXWrappers.cpp +++ b/Source/Python/WarpXWrappers.cpp @@ -198,14 +198,16 @@ namespace const char* char_species_name, int lenx, amrex::ParticleReal const * x, amrex::ParticleReal const * y, amrex::ParticleReal const * z, amrex::ParticleReal const * vx, amrex::ParticleReal const * vy, - amrex::ParticleReal const * vz, int nattr, - amrex::ParticleReal const * attr, int uniqueparticles) + amrex::ParticleReal const * vz, const int nattr_real, + amrex::ParticleReal const * attr_real, const int nattr_int, + int const * attr_int, int uniqueparticles) { auto & mypc = WarpX::GetInstance().GetPartContainer(); const std::string species_name(char_species_name); auto & myspc = mypc.GetParticleContainerFromName(species_name); const int lev = 0; - myspc.AddNParticles(lev, lenx, x, y, z, vx, vy, vz, nattr, attr, uniqueparticles); + myspc.AddNParticles(lev, lenx, x, y, z, vx, vy, vz, nattr_real, attr_real, + nattr_int, attr_int, uniqueparticles); } void warpx_ConvertLabParamsToBoost() From 64d89befdfd86fc44efe3bcd3a15c31d86181edc Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 11 Aug 2022 19:00:35 -0600 Subject: [PATCH 0002/1346] `isort`: AMReX, WarpX, etc. as First Party (#3307) * `isort`: AMReX, WarpX, etc. as First Party Mark the `amrex` and `pywarpx`, `picmistandard`, ... imports as first party, so that they do not change if run locally or remotely pre/post install. --- .editorconfig | 1 + .../embedded_boundary_python_API/PICMI_inputs_EB_API.py | 1 + .../capacitive_discharge/PICMI_inputs_1d.py | 3 ++- .../capacitive_discharge/PICMI_inputs_2d.py | 3 ++- .../Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py | 1 + Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py | 1 + Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py | 1 + Examples/Tests/PythonWrappers/PICMI_inputs_2d.py | 1 + Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py | 1 + .../Tests/restart/PICMI_inputs_runtime_component_analyze.py | 1 + Python/pywarpx/WarpInterface.py | 3 ++- Python/pywarpx/picmi.py | 1 + 12 files changed, 15 insertions(+), 3 deletions(-) diff --git a/.editorconfig b/.editorconfig index adc5a1c59cd..301392b54da 100644 --- a/.editorconfig +++ b/.editorconfig @@ -35,3 +35,4 @@ indent_size = unset [*.py] # isort config force_sort_within_sections = true +known_first_party = amrex,impactx,picmistandard,pywarpx,warpx diff --git a/Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py b/Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py index 4619defe1b0..c0907d67795 100755 --- a/Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py +++ b/Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import numpy as np + from pywarpx import fields, picmi max_steps = 1 diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py index 709fefd2218..5eac1e172ff 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py @@ -8,10 +8,11 @@ import sys import numpy as np -from pywarpx import callbacks, fields, picmi from scipy.sparse import csc_matrix from scipy.sparse import linalg as sla +from pywarpx import callbacks, fields, picmi + constants = picmi.constants diff --git 
a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py index 8b4a625ef36..d5a18071ad4 100755 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py @@ -6,10 +6,11 @@ # --- used for the field solve step. import numpy as np -from pywarpx import callbacks, fields, picmi from scipy.sparse import csc_matrix from scipy.sparse import linalg as sla +from pywarpx import callbacks, fields, picmi + constants = picmi.constants ########################## diff --git a/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py b/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py index ae94f1dd39d..a515797e3ff 100755 --- a/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py +++ b/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py @@ -9,6 +9,7 @@ matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np + from pywarpx import fields, picmi constants = picmi.constants diff --git a/Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py b/Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py index 40d0b49ade0..c93b84f1fd6 100755 --- a/Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py +++ b/Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py @@ -3,6 +3,7 @@ import sys import numpy as np + from pywarpx import callbacks, picmi # Create the parser and add the argument diff --git a/Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py b/Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py index 9cfe669cee0..0e988f0fe2b 100755 --- a/Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py +++ b/Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py @@ -3,6 +3,7 @@ # --- Input file to test the saving of old particle positions import numpy as np + from pywarpx import picmi constants = picmi.constants diff --git a/Examples/Tests/PythonWrappers/PICMI_inputs_2d.py b/Examples/Tests/PythonWrappers/PICMI_inputs_2d.py index 80574ff946d..2301a558b90 100755 --- a/Examples/Tests/PythonWrappers/PICMI_inputs_2d.py +++ b/Examples/Tests/PythonWrappers/PICMI_inputs_2d.py @@ -3,6 +3,7 @@ import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable import numpy as np + from pywarpx import picmi # Number of time steps diff --git a/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py b/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py index b97905ee071..bdca7ab08b8 100755 --- a/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py +++ b/Examples/Tests/pass_mpi_communicator/PICMI_inputs_2d.py @@ -6,6 +6,7 @@ # --- if the correct amount of processors are initialized in AMReX. 
from mpi4py import MPI + from pywarpx import picmi constants = picmi.constants diff --git a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py b/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py index bd618dfa103..8843b5dee42 100755 --- a/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py +++ b/Examples/Tests/restart/PICMI_inputs_runtime_component_analyze.py @@ -7,6 +7,7 @@ import sys import numpy as np + from pywarpx import callbacks, picmi ########################## diff --git a/Python/pywarpx/WarpInterface.py b/Python/pywarpx/WarpInterface.py index 0a18993891a..dc80e4bc900 100644 --- a/Python/pywarpx/WarpInterface.py +++ b/Python/pywarpx/WarpInterface.py @@ -14,9 +14,10 @@ # The class WarpX_EM3D inherits from Warp's EM3D class. It primarily provides # access to the field plotting routines. -from pywarpx import PGroup import warp +from pywarpx import PGroup + from . import fields # The particle weight is always the first pid diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index be227e18297..a9a8f80af04 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -13,6 +13,7 @@ import numpy as np import periodictable + import picmistandard import pywarpx From 9fd4fbba5f27cdb5d70ac87a2f428af8c6f4ecd3 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 12 Aug 2022 09:26:31 -0700 Subject: [PATCH 0003/1346] Docs: Add Input Parameter `psatd.use_default_v_galilean` (#3304) --- Docs/source/usage/parameters.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 035ef17bc42..e1d911b4a4b 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -1825,14 +1825,15 @@ Numerics and algorithms Note that the update with and without rho is also supported in RZ geometry. -* ``pstad.v_galilean`` (`3 floats`, in units of the speed of light; default `0. 0. 0.`) - Defines the galilean velocity. - Non-zero `v_galilean` activates Galilean algorithm, which suppresses the Numerical Cherenkov instability - in boosted-frame simulation. This requires the code to be compiled with `USE_PSATD=TRUE`. - (see the sub-section Numerical Stability and alternate formulation - in a Galilean frame in the :ref:`theory section `). - It also requires the use of the `direct` current deposition option - `algo.current_deposition = direct` (does not work with Esirkepov algorithm). +* ``psatd.v_galilean`` (`3 floats`, in units of the speed of light; default ``0. 0. 0.``) + Defines the Galilean velocity. + A non-zero velocity activates the Galilean algorithm, which suppresses numerical Cherenkov instabilities (NCI) in boosted-frame simulations (see the section :ref:`Numerical Stability and alternate formulation in a Galilean frame ` for more information). + This requires the code to be compiled with the spectral solver. + It also requires the use of the direct current deposition algorithm (by setting ``algo.current_deposition = direct``). + +* ``psatd.use_default_v_galilean`` (`0` or `1`; default: `0`) + This can be used in boosted-frame simulations only and sets the Galilean velocity along the :math:`z` direction automatically as :math:`v_{G} = -\sqrt{1-1/\gamma^2}`, where :math:`\gamma` is the Lorentz factor of the boosted frame (set by ``warpx.gamma_boost``). 
+ See the section :ref:`Numerical Stability and alternate formulation in a Galilean frame ` for more information on the Galilean algorithm for boosted-frame simulations. * ``psatd.v_comoving`` (3 floating-point values, in units of the speed of light; default ``0. 0. 0.``) Defines the comoving velocity in the comoving PSATD scheme. From 7545b3c28acfa9d5ea8d3b8c600020a5bf4f3008 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 12 Aug 2022 13:14:55 -0700 Subject: [PATCH 0004/1346] Enable Particle RZ BackTransformed Diagnostics (#3309) * Enable particle RZ BackTransformed diagnostics * Add warning + compute theta * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../ComputeDiagFunctors/BackTransformParticleFunctor.H | 4 ++++ Source/Diagnostics/MultiDiagnostics.cpp | 7 +++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H index 5d9b3f103b9..39c70553aae 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H @@ -143,6 +143,10 @@ struct LorentzTransformParticles dst.m_aos[i_dst].pos(0) = xp; dst.m_aos[i_dst].pos(1) = yp; dst.m_aos[i_dst].pos(2) = zp; +#elif defined (WARPX_DIM_RZ) + dst.m_aos[i_dst].pos(0) = std::sqrt(xp*xp + yp*yp); + dst.m_aos[i_dst].pos(1) = zp; + dst.m_rdata[PIdx::theta][i_dst] = std::atan2(yp, xp); #elif defined (WARPX_DIM_XZ) dst.m_aos[i_dst].pos(0) = xp; dst.m_aos[i_dst].pos(1) = zp; diff --git a/Source/Diagnostics/MultiDiagnostics.cpp b/Source/Diagnostics/MultiDiagnostics.cpp index 6644f7b6645..62218a17568 100644 --- a/Source/Diagnostics/MultiDiagnostics.cpp +++ b/Source/Diagnostics/MultiDiagnostics.cpp @@ -4,7 +4,7 @@ #include "Diagnostics/FullDiagnostics.H" #include "Diagnostics/BoundaryScrapingDiagnostics.H" #include "Utils/TextMsg.H" - +#include #include #include #include @@ -24,10 +24,9 @@ MultiDiagnostics::MultiDiagnostics () if ( diags_types[i] == DiagTypes::Full ){ alldiags[i] = std::make_unique(i, diags_names[i]); } else if ( diags_types[i] == DiagTypes::BackTransformed ){ -#ifdef WARPX_DIM_RZ - amrex::Abort(Utils::TextMsg::Err("BackTransformed diagnostics is currently not supported for RZ")); -#else alldiags[i] = std::make_unique(i, diags_names[i]); +#ifdef WARPX_DIM_RZ + ablastr::warn_manager::WMRecordWarning("MultiDiagnostics", "BackTransformed diagnostics for fields is not yet fully implemented in RZ. 
Field output might be incorrect."); #endif } else if ( diags_types[i] == DiagTypes::BoundaryScraping ){ alldiags[i] = std::make_unique(i, diags_names[i]); From 1ed58acbee55bfa93918e89d7dd618177fb314e8 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 12 Aug 2022 14:04:54 -0700 Subject: [PATCH 0005/1346] Fill Guard Cells of Fields and Currents Independently (#3302) * Fill Guard Cells of Fields and Currents Independently * Fill Guard Cells of Averaged Fields as Regular Fields --- Source/BoundaryConditions/PML.H | 8 ++- Source/BoundaryConditions/PML.cpp | 65 ++++++++++--------- .../SpectralAlgorithms/PsatdAlgorithm.H | 2 - .../SpectralAlgorithms/PsatdAlgorithm.cpp | 3 +- .../PsatdAlgorithmComoving.H | 1 - .../PsatdAlgorithmComoving.cpp | 3 +- .../PsatdAlgorithmJLinearInTime.H | 2 - .../PsatdAlgorithmJLinearInTime.cpp | 3 +- .../SpectralAlgorithms/PsatdAlgorithmPml.H | 1 - .../SpectralAlgorithms/PsatdAlgorithmPml.cpp | 4 +- .../SpectralBaseAlgorithm.H | 5 +- .../SpectralBaseAlgorithm.cpp | 9 +-- .../SpectralSolver/SpectralFieldData.H | 2 +- .../SpectralSolver/SpectralFieldData.cpp | 4 +- .../SpectralSolver/SpectralSolver.H | 3 +- .../SpectralSolver/SpectralSolver.cpp | 14 ++-- Source/FieldSolver/WarpXPushFieldsEM.cpp | 57 ++++++++++++---- Source/Initialization/WarpXInitData.cpp | 8 +++ Source/WarpX.H | 7 +- Source/WarpX.cpp | 8 +-- 20 files changed, 120 insertions(+), 89 deletions(-) diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index d0966b3129c..840f9d825bd 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -133,6 +133,8 @@ public: int do_moving_window, int pml_has_particles, int do_pml_in_domain, const bool do_multi_J, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, + const amrex::IntVect& fill_guards_fields, + const amrex::IntVect& fill_guards_current, int max_guard_EB, amrex::Real v_sigma_sb, const amrex::IntVect do_pml_Lo = amrex::IntVect::TheUnitVector(), const amrex::IntVect do_pml_Hi = amrex::IntVect::TheUnitVector()); @@ -208,6 +210,9 @@ private: bool m_dive_cleaning; bool m_divb_cleaning; + const amrex::IntVect m_fill_guards_fields; + const amrex::IntVect m_fill_guards_current; + const amrex::Geometry* m_geom; const amrex::Geometry* m_cgeom; @@ -280,7 +285,8 @@ void PushPMLPSATDSinglePatch( const int lev, std::array,3>& pml_E, std::array,3>& pml_B, std::unique_ptr& pml_F, - std::unique_ptr& pml_G); + std::unique_ptr& pml_G, + const amrex::IntVect& fill_guards); #endif #endif diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 507f596afd2..9c3c17b0bf7 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -550,10 +550,14 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri int do_moving_window, int /*pml_has_particles*/, int do_pml_in_domain, const bool do_multi_J, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, + const amrex::IntVect& fill_guards_fields, + const amrex::IntVect& fill_guards_current, int max_guard_EB, const amrex::Real v_sigma_sb, const amrex::IntVect do_pml_Lo, const amrex::IntVect do_pml_Hi) : m_dive_cleaning(do_pml_dive_cleaning), m_divb_cleaning(do_pml_divb_cleaning), + m_fill_guards_fields(fill_guards_fields), + m_fill_guards_current(fill_guards_current), m_geom(geom), m_cgeom(cgeom) { @@ -742,7 +746,6 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri "PML: PSATD solver selected 
but not built."); #else // Flags passed to the spectral solver constructor - const amrex::IntVect fill_guards = amrex::IntVect(0); const bool in_pml = true; const bool periodic_single_box = false; const bool update_with_rho = false; @@ -754,7 +757,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri amrex::Vector const v_comoving_zero = {0., 0., 0.}; realspace_ba.enclosedCells().grow(nge); // cell-centered + guard cells spectral_solver_fp = std::make_unique(lev, realspace_ba, dm, - nox_fft, noy_fft, noz_fft, do_nodal, fill_guards, v_galilean_zero, + nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, dx, dt, in_pml, periodic_single_box, update_with_rho, fft_do_time_averaging, do_multi_J, m_dive_cleaning, m_divb_cleaning); #endif @@ -862,7 +865,6 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri "PML: PSATD solver selected but not built."); #else // Flags passed to the spectral solver constructor - const amrex::IntVect fill_guards = amrex::IntVect(0); const bool in_pml = true; const bool periodic_single_box = false; const bool update_with_rho = false; @@ -874,7 +876,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri amrex::Vector const v_comoving_zero = {0., 0., 0.}; realspace_cba.enclosedCells().grow(nge); // cell-centered + guard cells spectral_solver_cp = std::make_unique(lev, realspace_cba, cdm, - nox_fft, noy_fft, noz_fft, do_nodal, fill_guards, v_galilean_zero, + nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, cdx, dt, in_pml, periodic_single_box, update_with_rho, fft_do_time_averaging, do_multi_J, m_dive_cleaning, m_divb_cleaning); #endif @@ -1406,9 +1408,9 @@ void PML::PushPSATD (const int lev) { // Update the fields on the fine and coarse patch - PushPMLPSATDSinglePatch(lev, *spectral_solver_fp, pml_E_fp, pml_B_fp, pml_F_fp, pml_G_fp); + PushPMLPSATDSinglePatch(lev, *spectral_solver_fp, pml_E_fp, pml_B_fp, pml_F_fp, pml_G_fp, m_fill_guards_fields); if (spectral_solver_cp) { - PushPMLPSATDSinglePatch(lev, *spectral_solver_cp, pml_E_cp, pml_B_cp, pml_F_cp, pml_G_cp); + PushPMLPSATDSinglePatch(lev, *spectral_solver_cp, pml_E_cp, pml_B_cp, pml_F_cp, pml_G_cp, m_fill_guards_fields); } } @@ -1419,7 +1421,8 @@ PushPMLPSATDSinglePatch ( std::array,3>& pml_E, std::array,3>& pml_B, std::unique_ptr& pml_F, - std::unique_ptr& pml_G) + std::unique_ptr& pml_G, + const amrex::IntVect& fill_guards) { const SpectralFieldIndex& Idx = solver.m_spectral_index; @@ -1463,39 +1466,39 @@ PushPMLPSATDSinglePatch ( solver.pushSpectralFields(); // Perform backward Fourier transforms - solver.BackwardTransform(lev, *pml_E[0], Idx.Exy, PMLComp::xy); - solver.BackwardTransform(lev, *pml_E[0], Idx.Exz, PMLComp::xz); - solver.BackwardTransform(lev, *pml_E[1], Idx.Eyx, PMLComp::yx); - solver.BackwardTransform(lev, *pml_E[1], Idx.Eyz, PMLComp::yz); - solver.BackwardTransform(lev, *pml_E[2], Idx.Ezx, PMLComp::zx); - solver.BackwardTransform(lev, *pml_E[2], Idx.Ezy, PMLComp::zy); - solver.BackwardTransform(lev, *pml_B[0], Idx.Bxy, PMLComp::xy); - solver.BackwardTransform(lev, *pml_B[0], Idx.Bxz, PMLComp::xz); - solver.BackwardTransform(lev, *pml_B[1], Idx.Byx, PMLComp::yx); - solver.BackwardTransform(lev, *pml_B[1], Idx.Byz, PMLComp::yz); - solver.BackwardTransform(lev, *pml_B[2], Idx.Bzx, PMLComp::zx); - solver.BackwardTransform(lev, *pml_B[2], Idx.Bzy, PMLComp::zy); + solver.BackwardTransform(lev, *pml_E[0], Idx.Exy, fill_guards, PMLComp::xy); + 
solver.BackwardTransform(lev, *pml_E[0], Idx.Exz, fill_guards, PMLComp::xz); + solver.BackwardTransform(lev, *pml_E[1], Idx.Eyx, fill_guards, PMLComp::yx); + solver.BackwardTransform(lev, *pml_E[1], Idx.Eyz, fill_guards, PMLComp::yz); + solver.BackwardTransform(lev, *pml_E[2], Idx.Ezx, fill_guards, PMLComp::zx); + solver.BackwardTransform(lev, *pml_E[2], Idx.Ezy, fill_guards, PMLComp::zy); + solver.BackwardTransform(lev, *pml_B[0], Idx.Bxy, fill_guards, PMLComp::xy); + solver.BackwardTransform(lev, *pml_B[0], Idx.Bxz, fill_guards, PMLComp::xz); + solver.BackwardTransform(lev, *pml_B[1], Idx.Byx, fill_guards, PMLComp::yx); + solver.BackwardTransform(lev, *pml_B[1], Idx.Byz, fill_guards, PMLComp::yz); + solver.BackwardTransform(lev, *pml_B[2], Idx.Bzx, fill_guards, PMLComp::zx); + solver.BackwardTransform(lev, *pml_B[2], Idx.Bzy, fill_guards, PMLComp::zy); // WarpX::do_pml_dive_cleaning = true if (pml_F) { - solver.BackwardTransform(lev, *pml_E[0], Idx.Exx, PMLComp::xx); - solver.BackwardTransform(lev, *pml_E[1], Idx.Eyy, PMLComp::yy); - solver.BackwardTransform(lev, *pml_E[2], Idx.Ezz, PMLComp::zz); - solver.BackwardTransform(lev, *pml_F, Idx.Fx, PMLComp::x); - solver.BackwardTransform(lev, *pml_F, Idx.Fy, PMLComp::y); - solver.BackwardTransform(lev, *pml_F, Idx.Fz, PMLComp::z); + solver.BackwardTransform(lev, *pml_E[0], Idx.Exx, fill_guards, PMLComp::xx); + solver.BackwardTransform(lev, *pml_E[1], Idx.Eyy, fill_guards, PMLComp::yy); + solver.BackwardTransform(lev, *pml_E[2], Idx.Ezz, fill_guards, PMLComp::zz); + solver.BackwardTransform(lev, *pml_F, Idx.Fx, fill_guards, PMLComp::x); + solver.BackwardTransform(lev, *pml_F, Idx.Fy, fill_guards, PMLComp::y); + solver.BackwardTransform(lev, *pml_F, Idx.Fz, fill_guards, PMLComp::z); } // WarpX::do_pml_divb_cleaning = true if (pml_G) { - solver.BackwardTransform(lev, *pml_B[0], Idx.Bxx, PMLComp::xx); - solver.BackwardTransform(lev, *pml_B[1], Idx.Byy, PMLComp::yy); - solver.BackwardTransform(lev, *pml_B[2], Idx.Bzz, PMLComp::zz); - solver.BackwardTransform(lev, *pml_G, Idx.Gx, PMLComp::x); - solver.BackwardTransform(lev, *pml_G, Idx.Gy, PMLComp::y); - solver.BackwardTransform(lev, *pml_G, Idx.Gz, PMLComp::z); + solver.BackwardTransform(lev, *pml_B[0], Idx.Bxx, fill_guards, PMLComp::xx); + solver.BackwardTransform(lev, *pml_B[1], Idx.Byy, fill_guards, PMLComp::yy); + solver.BackwardTransform(lev, *pml_B[2], Idx.Bzz, fill_guards, PMLComp::zz); + solver.BackwardTransform(lev, *pml_G, Idx.Gx, fill_guards, PMLComp::x); + solver.BackwardTransform(lev, *pml_G, Idx.Gy, fill_guards, PMLComp::y); + solver.BackwardTransform(lev, *pml_G, Idx.Gz, fill_guards, PMLComp::z); } } #endif diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H index 9139fa433c7..dd9c6a7fd37 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H @@ -38,7 +38,6 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm * \param[in] norder_y order of the spectral solver along y * \param[in] norder_z order of the spectral solver along z * \param[in] nodal whether the E and B fields are defined on a fully nodal grid or a Yee grid - * \param[in] fill_guards Update the guard cells (in addition to the valid cells) when pushing the fields in time * \param[in] v_galilean Galilean velocity (three-dimensional array) * \param[in] dt time step of the simulation * \param[in] update_with_rho whether 
the update equation for E uses rho or not @@ -54,7 +53,6 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Vector& v_galilean, const amrex::Real dt, const bool update_with_rho, diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp index 6d9d684f5d2..1cbc27f0b1e 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp @@ -35,7 +35,6 @@ PsatdAlgorithm::PsatdAlgorithm( const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Vector& v_galilean, const amrex::Real dt, const bool update_with_rho, @@ -43,7 +42,7 @@ PsatdAlgorithm::PsatdAlgorithm( const bool dive_cleaning, const bool divb_cleaning) // Initializer list - : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal, fill_guards), + : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal), m_spectral_index(spectral_index), // Initialize the centered finite-order modified k vectors: // these are computed always with the assumption of centered grids diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H index 6f8497f8f19..bc7dc76999f 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.H @@ -33,7 +33,6 @@ class PsatdAlgorithmComoving : public SpectralBaseAlgorithm const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Vector& v_comoving, const amrex::Real dt, const bool update_with_rho); diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp index 30a37345fba..1d6248f8d76 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp @@ -26,12 +26,11 @@ PsatdAlgorithmComoving::PsatdAlgorithmComoving (const SpectralKSpace& spectral_k const SpectralFieldIndex& spectral_index, const int norder_x, const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Vector& v_comoving, const amrex::Real dt, const bool update_with_rho) // Members initialization - : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal, fill_guards), + : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal), m_spectral_index(spectral_index), // Initialize the infinite-order k vectors (the argument n_order = -1 selects // the infinite order option, the argument nodal = false is then irrelevant) diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H index 0689a7e4934..e0eded5f59a 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H +++ 
b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H @@ -41,7 +41,6 @@ class PsatdAlgorithmJLinearInTime : public SpectralBaseAlgorithm * \param[in] norder_y order of the spectral solver along y * \param[in] norder_z order of the spectral solver along z * \param[in] nodal whether the E and B fields are defined on a fully nodal grid or a Yee grid - * \param[in] fill_guards Update the guard cells (in addition to the valid cells) when pushing the fields in time * \param[in] dt time step of the simulation * \param[in] time_averaging whether to use time averaging for large time steps * \param[in] dive_cleaning Update F as part of the field update, so that errors in divE=rho propagate away at the speed of light @@ -55,7 +54,6 @@ class PsatdAlgorithmJLinearInTime : public SpectralBaseAlgorithm const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Real dt, const bool time_averaging, const bool dive_cleaning, diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp index 30cc90549c0..bd9df977e0b 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp @@ -35,13 +35,12 @@ PsatdAlgorithmJLinearInTime::PsatdAlgorithmJLinearInTime( const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Real dt, const bool time_averaging, const bool dive_cleaning, const bool divb_cleaning) // Initializer list - : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal, fill_guards), + : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal), m_spectral_index(spectral_index), m_dt(dt), m_time_averaging(time_averaging), diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H index 630c2626063..0c1aeb88504 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.H @@ -32,7 +32,6 @@ class PsatdAlgorithmPml : public SpectralBaseAlgorithm const SpectralFieldIndex& spectral_index, const int norder_x, const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Real dt, const bool dive_cleaning, const bool divb_cleaning); diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp index 69d8dc401b1..45a4d15807a 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmPml.cpp @@ -35,10 +35,10 @@ PsatdAlgorithmPml::PsatdAlgorithmPml(const SpectralKSpace& spectral_kspace, const SpectralFieldIndex& spectral_index, const int norder_x, const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const Real dt, + const Real dt, const bool dive_cleaning, const bool divb_cleaning) // Initialize members of base class - : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal, fill_guards), + : SpectralBaseAlgorithm(spectral_kspace, dm, 
spectral_index, norder_x, norder_y, norder_z, nodal), m_spectral_index(spectral_index), m_dt(dt), m_dive_cleaning(dive_cleaning), diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H index b4f70d5f531..ef08c443242 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H @@ -71,8 +71,6 @@ class SpectralBaseAlgorithm protected: // Meant to be used in the subclasses - amrex::IntVect m_fill_guards; - using SpectralRealCoefficients = \ amrex::FabArray< amrex::BaseFab >; using SpectralComplexCoefficients = \ @@ -85,8 +83,7 @@ class SpectralBaseAlgorithm const amrex::DistributionMapping& dm, const SpectralFieldIndex& spectral_index, const int norder_x, const int norder_y, - const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards); + const int norder_z, const bool nodal); SpectralFieldIndex m_spectral_index; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp index 4ecbb9000fa..1a2334cc6f3 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp @@ -31,9 +31,7 @@ SpectralBaseAlgorithm::SpectralBaseAlgorithm(const SpectralKSpace& spectral_kspa const amrex::DistributionMapping& dm, const SpectralFieldIndex& spectral_index, const int norder_x, const int norder_y, - const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards): - m_fill_guards(fill_guards), + const int norder_z, const bool nodal): m_spectral_index(spectral_index), // Compute and assign the modified k vectors modified_kx_vec(spectral_kspace.getModifiedKComponent(dm,0,norder_x,nodal)), @@ -66,8 +64,6 @@ SpectralBaseAlgorithm::ComputeSpectralDivE ( field_data.ForwardTransform(lev, *Efield[1], Idx.Ey, 0 ); field_data.ForwardTransform(lev, *Efield[2], Idx.Ez, 0 ); - const amrex::IntVect& fill_guards = m_fill_guards; - // Loop over boxes for (MFIter mfi(field_data.fields); mfi.isValid(); ++mfi){ @@ -107,5 +103,6 @@ SpectralBaseAlgorithm::ComputeSpectralDivE ( } // Backward Fourier transform - field_data.BackwardTransform(lev, divE, Idx.divE, 0, fill_guards); + const amrex::IntVect& fill_guards = amrex::IntVect(0); + field_data.BackwardTransform(lev, divE, Idx.divE, fill_guards, 0); } diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index fe856c236a5..811fdd4d73c 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -127,7 +127,7 @@ class SpectralFieldData const int i_comp); void BackwardTransform (const int lev, amrex::MultiFab& mf, const int field_index, - const int i_comp, const amrex::IntVect& fill_guards); + const amrex::IntVect& fill_guards, const int i_comp); // `fields` stores fields in spectral space, as multicomponent FabArray SpectralField fields; diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp index 460e2cc3b9f..00346a2c772 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp @@ -310,8 +310,8 @@ void 
SpectralFieldData::BackwardTransform (const int lev, MultiFab& mf, const int field_index, - const int i_comp, - const amrex::IntVect& fill_guards) + const amrex::IntVect& fill_guards, + const int i_comp) { amrex::LayoutData* cost = WarpX::getCosts(lev); bool do_costs = WarpXUtilLoadBalance::doCosts(cost, mf.boxArray(), mf.DistributionMap()); diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index df9e9f0ff4f..684cf9586b8 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -47,7 +47,6 @@ class SpectralSolver * \param[in] norder_y spectral order along y * \param[in] norder_z spectral order along z * \param[in] nodal whether the spectral solver is applied to a nodal or staggered grid - * \param[in] fill_guards Update the guard cells (in addition to the valid cells) when pushing the fields in time * \param[in] v_galilean three-dimensional vector containing the components of the Galilean * velocity for the standard or averaged Galilean PSATD solvers * \param[in] v_comoving three-dimensional vector containing the components of the comoving @@ -72,7 +71,6 @@ class SpectralSolver const amrex::DistributionMapping& dm, const int norder_x, const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Vector& v_galilean, const amrex::Vector& v_comoving, const amrex::RealVect dx, @@ -106,6 +104,7 @@ class SpectralSolver void BackwardTransform( const int lev, amrex::MultiFab& mf, const int field_index, + const amrex::IntVect& fill_guards, const int i_comp=0 ); /** diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp index 81cba183c52..75c82319c11 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp @@ -24,7 +24,6 @@ SpectralSolver::SpectralSolver( const amrex::DistributionMapping& dm, const int norder_x, const int norder_y, const int norder_z, const bool nodal, - const amrex::IntVect& fill_guards, const amrex::Vector& v_galilean, const amrex::Vector& v_comoving, const amrex::RealVect dx, const amrex::Real dt, @@ -52,7 +51,7 @@ SpectralSolver::SpectralSolver( { algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, - fill_guards, dt, dive_cleaning, divb_cleaning); + dt, dive_cleaning, divb_cleaning); } else // PSATD equations in the regulard grids { @@ -61,7 +60,7 @@ SpectralSolver::SpectralSolver( { algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, - fill_guards, v_comoving, dt, update_with_rho); + v_comoving, dt, update_with_rho); } else // PSATD algorithms: standard, Galilean, averaged Galilean, multi-J { @@ -69,13 +68,13 @@ SpectralSolver::SpectralSolver( { algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, - fill_guards, dt, fft_do_time_averaging, dive_cleaning, divb_cleaning); + dt, fft_do_time_averaging, dive_cleaning, divb_cleaning); } else // standard, Galilean, averaged Galilean { algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, - fill_guards, v_galilean, dt, update_with_rho, fft_do_time_averaging, + v_galilean, dt, update_with_rho, fft_do_time_averaging, dive_cleaning, divb_cleaning); } } @@ -84,8 +83,6 @@ SpectralSolver::SpectralSolver( // - Initialize arrays for fields in spectral space + 
FFT plans field_data = SpectralFieldData(lev, realspace_ba, k_space, dm, m_spectral_index.n_fields, periodic_single_box); - - m_fill_guards = fill_guards; } void @@ -102,10 +99,11 @@ void SpectralSolver::BackwardTransform( const int lev, amrex::MultiFab& mf, const int field_index, + const amrex::IntVect& fill_guards, const int i_comp ) { WARPX_PROFILE("SpectralSolver::BackwardTransform"); - field_data.BackwardTransform(lev, mf, field_index, i_comp, m_fill_guards); + field_data.BackwardTransform(lev, mf, field_index, fill_guards, i_comp); } void diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 9082995cd15..6d604bc1564 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -84,15 +84,18 @@ namespace { SpectralSolver& solver, #endif const std::array,3>& vector_field, - const int compx, const int compy, const int compz) + const int compx, const int compy, const int compz, + const amrex::IntVect& fill_guards) { #ifdef WARPX_DIM_RZ + amrex::ignore_unused(fill_guards); solver.BackwardTransform(lev, *vector_field[0], compx, *vector_field[1], compy); + solver.BackwardTransform(lev, *vector_field[2], compz); #else - solver.BackwardTransform(lev, *vector_field[0], compx); - solver.BackwardTransform(lev, *vector_field[1], compy); + solver.BackwardTransform(lev, *vector_field[0], compx, fill_guards); + solver.BackwardTransform(lev, *vector_field[1], compy, fill_guards); + solver.BackwardTransform(lev, *vector_field[2], compz, fill_guards); #endif - solver.BackwardTransform(lev, *vector_field[2], compz); } } @@ -127,13 +130,17 @@ void WarpX::PSATDBackwardTransformEB ( for (int lev = 0; lev <= finest_level; ++lev) { - BackwardTransformVect(lev, *spectral_solver_fp[lev], E_fp[lev], Idx.Ex, Idx.Ey, Idx.Ez); - BackwardTransformVect(lev, *spectral_solver_fp[lev], B_fp[lev], Idx.Bx, Idx.By, Idx.Bz); + BackwardTransformVect(lev, *spectral_solver_fp[lev], E_fp[lev], + Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); + BackwardTransformVect(lev, *spectral_solver_fp[lev], B_fp[lev], + Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); if (spectral_solver_cp[lev]) { - BackwardTransformVect(lev, *spectral_solver_cp[lev], E_cp[lev], Idx.Ex, Idx.Ey, Idx.Ez); - BackwardTransformVect(lev, *spectral_solver_cp[lev], B_cp[lev], Idx.Bx, Idx.By, Idx.Bz); + BackwardTransformVect(lev, *spectral_solver_cp[lev], E_cp[lev], + Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); + BackwardTransformVect(lev, *spectral_solver_cp[lev], B_cp[lev], + Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); } } @@ -154,13 +161,17 @@ void WarpX::PSATDBackwardTransformEBavg ( for (int lev = 0; lev <= finest_level; ++lev) { - BackwardTransformVect(lev, *spectral_solver_fp[lev], E_avg_fp[lev], Idx.Ex_avg, Idx.Ey_avg, Idx.Ez_avg); - BackwardTransformVect(lev, *spectral_solver_fp[lev], B_avg_fp[lev], Idx.Bx_avg, Idx.By_avg, Idx.Bz_avg); + BackwardTransformVect(lev, *spectral_solver_fp[lev], E_avg_fp[lev], + Idx.Ex_avg, Idx.Ey_avg, Idx.Ez_avg, m_fill_guards_fields); + BackwardTransformVect(lev, *spectral_solver_fp[lev], B_avg_fp[lev], + Idx.Bx_avg, Idx.By_avg, Idx.Bz_avg, m_fill_guards_fields); if (spectral_solver_cp[lev]) { - BackwardTransformVect(lev, *spectral_solver_cp[lev], E_avg_cp[lev], Idx.Ex_avg, Idx.Ey_avg, Idx.Ez_avg); - BackwardTransformVect(lev, *spectral_solver_cp[lev], B_avg_cp[lev], Idx.Bx_avg, Idx.By_avg, Idx.Bz_avg); + BackwardTransformVect(lev, *spectral_solver_cp[lev], E_avg_cp[lev], + Idx.Ex_avg, Idx.Ey_avg, Idx.Ez_avg, m_fill_guards_fields); 
+ BackwardTransformVect(lev, *spectral_solver_cp[lev], B_avg_cp[lev], + Idx.Bx_avg, Idx.By_avg, Idx.Bz_avg, m_fill_guards_fields); } } } @@ -188,11 +199,19 @@ WarpX::PSATDBackwardTransformF () for (int lev = 0; lev <= finest_level; ++lev) { +#ifdef WARPX_DIM_RZ if (F_fp[lev]) spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], Idx.F); +#else + if (F_fp[lev]) spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], Idx.F, m_fill_guards_fields); +#endif if (spectral_solver_cp[lev]) { +#ifdef WARPX_DIM_RZ if (F_cp[lev]) spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], Idx.F); +#else + if (F_cp[lev]) spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], Idx.F, m_fill_guards_fields); +#endif } } @@ -226,11 +245,19 @@ WarpX::PSATDBackwardTransformG () for (int lev = 0; lev <= finest_level; ++lev) { +#ifdef WARPX_DIM_RZ if (G_fp[lev]) spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], Idx.G); +#else + if (G_fp[lev]) spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], Idx.G, m_fill_guards_fields); +#endif if (spectral_solver_cp[lev]) { +#ifdef WARPX_DIM_RZ if (G_cp[lev]) spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], Idx.G); +#else + if (G_cp[lev]) spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], Idx.G, m_fill_guards_fields); +#endif } } @@ -302,7 +329,8 @@ void WarpX::PSATDBackwardTransformJ ( idx_jy = static_cast(Idx.Jy); idx_jz = static_cast(Idx.Jz); - BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], idx_jx, idx_jy, idx_jz); + BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], + idx_jx, idx_jy, idx_jz, m_fill_guards_current); if (spectral_solver_cp[lev]) { @@ -312,7 +340,8 @@ void WarpX::PSATDBackwardTransformJ ( idx_jy = static_cast(Idx.Jy); idx_jz = static_cast(Idx.Jz); - BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], idx_jx, idx_jy, idx_jz); + BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], + idx_jx, idx_jy, idx_jz, m_fill_guards_current); } } } diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 66a91271567..90a1c36eb88 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -497,12 +497,16 @@ WarpX::InitPML () do_pml_Lo[0][0] = 0; // no PML at r=0, in cylindrical geometry pml_rz[0] = std::make_unique(0, boxArray(0), DistributionMap(0), &Geom(0), pml_ncell, do_pml_in_domain); #else + // Note: fill_guards_fields and fill_guards_current are both set to + // zero (amrex::IntVect(0)) (what we do with damping BCs does not apply + // to the PML, for example in the presence of mesh refinement patches) pml[0] = std::make_unique(0, boxArray(0), DistributionMap(0), &Geom(0), nullptr, pml_ncell, pml_delta, amrex::IntVect::TheZeroVector(), dt[0], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, do_multi_J, do_pml_dive_cleaning, do_pml_divb_cleaning, + amrex::IntVect(0), amrex::IntVect(0), guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[0], do_pml_Hi[0]); @@ -529,12 +533,16 @@ WarpX::InitPML () do_pml_Lo[lev][0] = 0; } #endif + // Note: fill_guards_fields and fill_guards_current are both set to + // zero (amrex::IntVect(0)) (what we do with damping BCs does not apply + // to the PML, for example in the presence of mesh refinement patches) pml[lev] = std::make_unique(lev, boxArray(lev), DistributionMap(lev), &Geom(lev), &Geom(lev-1), pml_ncell, pml_delta, refRatio(lev-1), dt[lev], nox_fft, noy_fft, noz_fft, do_nodal, 
do_moving_window, pml_has_particles, do_pml_in_domain, do_multi_J, do_pml_dive_cleaning, do_pml_divb_cleaning, + amrex::IntVect(0), amrex::IntVect(0), guard_cells.ng_FieldSolver.max(), v_particle_pml, do_pml_Lo[lev], do_pml_Hi[lev]); diff --git a/Source/WarpX.H b/Source/WarpX.H index 0bfe3a9df4a..c16f30408d0 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -203,8 +203,11 @@ public: //! perform field communications in single precision static bool do_single_precision_comms; - //! Whether to fill the guard cells when computing inverse FFTs, based on the boundary conditions - static amrex::IntVect fill_guards; + //! Whether to fill guard cells when computing inverse FFTs of fields + static amrex::IntVect m_fill_guards_fields; + + //! Whether to fill guard cells when computing inverse FFTs of currents + static amrex::IntVect m_fill_guards_current; //! Solve additional Maxwell equation for F in order to control errors in Gauss' law //! (useful when using current deposition algorithms that are not charge-conserving) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 77993acc43b..da26ee90b4e 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -106,7 +106,8 @@ Real WarpX::moving_window_v = std::numeric_limits::max(); bool WarpX::fft_do_time_averaging = false; -amrex::IntVect WarpX::fill_guards = amrex::IntVect(0); +amrex::IntVect WarpX::m_fill_guards_fields = amrex::IntVect(0); +amrex::IntVect WarpX::m_fill_guards_current = amrex::IntVect(0); Real WarpX::quantum_xi_c2 = PhysConst::xi_c2; Real WarpX::gamma_boost = 1._rt; @@ -1323,14 +1324,14 @@ WarpX::ReadParameters () if (WarpX::field_boundary_lo[dir] == FieldBoundaryType::Damped || WarpX::field_boundary_hi[dir] == FieldBoundaryType::Damped) { - WarpX::fill_guards[dir] = 1; + WarpX::m_fill_guards_fields[dir] = 1; } } // Fill guard cells with backward FFTs if Vay current deposition is used if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { - WarpX::fill_guards = amrex::IntVect(1); + WarpX::m_fill_guards_current = amrex::IntVect(1); } } @@ -2241,7 +2242,6 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector Date: Mon, 15 Aug 2022 10:00:20 -0700 Subject: [PATCH 0006/1346] AMReX: Weekly Update (#3315) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index d62bc41e4b0..3a5917ac2a0 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -106,7 +106,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 103db6ebe2b570910ac4dbd7d6611e59d80f1a0b && cd - + cd amrex && git checkout --detach 4f639294606d47185d31eaee4af66fc6b590e5a2 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 1b90009aeee..35854c55470 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 103db6ebe2b570910ac4dbd7d6611e59d80f1a0b +branch = 4f639294606d47185d31eaee4af66fc6b590e5a2 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 475b48cf8b4..d21ce053ea3 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 103db6ebe2b570910ac4dbd7d6611e59d80f1a0b +branch = 4f639294606d47185d31eaee4af66fc6b590e5a2 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index eef5729330b..76b0a238862 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "103db6ebe2b570910ac4dbd7d6611e59d80f1a0b" +set(WarpX_amrex_branch "4f639294606d47185d31eaee4af66fc6b590e5a2" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index e4a4d437a5d..9b152be9129 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 103db6ebe2b570910ac4dbd7d6611e59d80f1a0b && cd - +cd amrex && git checkout --detach 4f639294606d47185d31eaee4af66fc6b590e5a2 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 3bb148ebbc1c8eae47d1f00ab60e68a041289cb6 Mon Sep 17 00:00:00 2001 From: Phil Miller Date: Mon, 15 Aug 2022 13:43:38 -0700 Subject: [PATCH 0007/1346] Move handling of do_not_deposit to physics callers (#3225) * Rearrange in preparation for lifting do_not_deposit higher in the call stack * Make do_not_deposit public, so callers can test on it * Make all DepositCharge call sites respect WarpXParticleContainer::do_not_deposit * Drop check for do_not_deposit in DepositCharge * Fix comment to accurately describe np_to_depose * Fix logic to account for other logic in AddSpaceChargeField * Match zero charge density array to filled array in RZ PSATD case * Structure control flow per @dpgrote's preference * Reword comment for clarity --- Source/FieldSolver/ElectrostaticSolver.cpp | 15 ++- Source/Particles/LaserParticleContainer.cpp | 4 +- Source/Particles/MultiParticleContainer.cpp | 42 ++++---- .../Particles/PhysicalParticleContainer.cpp | 4 +- Source/Particles/WarpXParticleContainer.H | 2 +- Source/Particles/WarpXParticleContainer.cpp | 96 +++++++++---------- 
Source/Python/WarpXWrappers.cpp | 1 + 7 files changed, 86 insertions(+), 78 deletions(-) diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 164b24d0037..e50098e7d22 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -165,15 +165,18 @@ WarpX::AddSpaceChargeField (WarpXParticleContainer& pc) BoxArray nba = boxArray(lev); nba.surroundingNodes(); rho[lev] = std::make_unique(nba, DistributionMap(lev), 1, ng); + rho[lev]->setVal(0.); phi[lev] = std::make_unique(nba, DistributionMap(lev), 1, 1); phi[lev]->setVal(0.); } // Deposit particle charge density (source of Poisson solver) bool const local = false; - bool const reset = true; + bool const reset = false; bool const do_rz_volume_scaling = true; - pc.DepositCharge(rho, local, reset, do_rz_volume_scaling); + if ( !pc.do_not_deposit) { + pc.DepositCharge(rho, local, reset, do_rz_volume_scaling); + } // Get the particle beta vector bool const local_average = false; // Average across all MPI ranks @@ -218,9 +221,11 @@ WarpX::AddSpaceChargeFieldLabFrame () bool const do_rz_volume_scaling = false; for (int ispecies=0; ispeciesnSpecies(); ispecies++){ WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); - species.DepositCharge( - rho_fp, local, reset, do_rz_volume_scaling, interpolate_across_levels - ); + if (!species.do_not_deposit) { + species.DepositCharge( rho_fp, + local, reset, do_rz_volume_scaling, interpolate_across_levels + ); + } } #ifdef WARPX_DIM_RZ for (int lev = 0; lev <= max_level; lev++) { diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index 155bc49bcca..565ac3ee1b8 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -585,7 +585,7 @@ LaserParticleContainer::Evolve (int lev, plane_Yp.resize(np); amplitude_E.resize(np); - if (rho && ! skip_deposition) { + if (rho && ! skip_deposition && ! do_not_deposit) { int* AMREX_RESTRICT ion_lev = nullptr; DepositCharge(pti, wp, ion_lev, rho, 0, 0, np_current, thread_num, lev, lev); @@ -639,7 +639,7 @@ LaserParticleContainer::Evolve (int lev, } - if (rho && ! skip_deposition) { + if (rho && ! skip_deposition && ! 
do_not_deposit) { int* AMREX_RESTRICT ion_lev = nullptr; DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current, thread_num, lev, lev); diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 34dd488c13d..a0b80877d77 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -486,12 +486,19 @@ MultiParticleContainer::GetZeroChargeDensity (const int lev) { WarpX& warpx = WarpX::GetInstance(); - BoxArray ba = warpx.boxArray(lev); + BoxArray nba = warpx.boxArray(lev); DistributionMapping dmap = warpx.DistributionMap(lev); const int ng_rho = warpx.get_ng_depos_rho().max(); - auto zero_rho = std::make_unique(amrex::convert(ba,IntVect::TheNodeVector()), - dmap,WarpX::ncomps,ng_rho); + bool is_PSATD_RZ = false; +#ifdef WARPX_DIM_RZ + if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + is_PSATD_RZ = true; +#endif + if( !is_PSATD_RZ ) + nba.surroundingNodes(); + + auto zero_rho = std::make_unique(nba, dmap, WarpX::ncomps, ng_rho); zero_rho->setVal(amrex::Real(0.0)); return zero_rho; } @@ -540,6 +547,8 @@ MultiParticleContainer::DepositCharge ( // Call the deposition kernel for each species for (auto& pc : allcontainers) { + if (pc->do_not_deposit) continue; + bool const local = true; bool const reset = false; bool const do_rz_volume_scaling = false; @@ -562,24 +571,19 @@ MultiParticleContainer::DepositCharge ( std::unique_ptr MultiParticleContainer::GetChargeDensity (int lev, bool local) { - if (allcontainers.empty()) - { - std::unique_ptr rho = GetZeroChargeDensity(lev); - return rho; + std::unique_ptr rho = GetZeroChargeDensity(lev); + + for (unsigned i = 0, n = allcontainers.size(); i < n; ++i) { + if (allcontainers[i]->do_not_deposit) continue; + std::unique_ptr rhoi = allcontainers[i]->GetChargeDensity(lev, true); + MultiFab::Add(*rho, *rhoi, 0, 0, rho->nComp(), rho->nGrowVect()); } - else - { - std::unique_ptr rho = allcontainers[0]->GetChargeDensity(lev, true); - for (unsigned i = 1, n = allcontainers.size(); i < n; ++i) { - std::unique_ptr rhoi = allcontainers[i]->GetChargeDensity(lev, true); - MultiFab::Add(*rho, *rhoi, 0, 0, rho->nComp(), rho->nGrowVect()); - } - if (!local) { - const Geometry& gm = allcontainers[0]->Geom(lev); - ablastr::utils::communication::SumBoundary(*rho, WarpX::do_single_precision_comms, gm.periodicity()); - } - return rho; + if (!local) { + const Geometry& gm = allcontainers[0]->Geom(lev); + ablastr::utils::communication::SumBoundary(*rho, WarpX::do_single_precision_comms, gm.periodicity()); } + + return rho; } void diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 72543982bfe..5b0ea8ae547 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1910,7 +1910,7 @@ PhysicalParticleContainer::Evolve (int lev, const long np_current = (cjx) ? nfine_current : np; - if (rho && ! skip_deposition) { + if (rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge before particle push, in component 0 of MultiFab rho. int* AMREX_RESTRICT ion_lev; if (do_field_ionization){ @@ -2006,7 +2006,7 @@ PhysicalParticleContainer::Evolve (int lev, } // end of "if do_electrostatic == ElectrostaticSolverAlgo::None" } // end of "if do_not_push" - if (rho && ! skip_deposition) { + if (rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge after particle push, in component 1 of MultiFab rho. 
// (Skipped for electrostatic solver, as this may lead to out-of-bounds) if (WarpX::do_electrostatic == ElectrostaticSolverAlgo::None) { diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index d6154a2d564..7f9e4e866ec 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -307,6 +307,7 @@ public: void ApplyBoundaryConditions (); bool do_splitting = false; + int do_not_deposit = 0; bool initialize_self_fields = false; amrex::Real self_fields_required_precision = amrex::Real(1.e-11); amrex::Real self_fields_absolute_tolerance = amrex::Real(0.0); @@ -388,7 +389,6 @@ protected: bool m_gather_from_main_grid = false; int do_not_push = 0; - int do_not_deposit = 0; int do_not_gather = 0; // Whether to allow particles outside of the simulation domain to be diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 3421fb9f587..3f0e0bcbae3 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -575,7 +575,7 @@ WarpXParticleContainer::DepositCurrent ( 1: new value (after particle push). * \param offset : Index of first particle for which charge is deposited * \param np_to_depose: Number of particles for which charge is deposited. - Particles [offset,offset+np_tp_depose] deposit charge + Particles [offset,offset+np_tp_depose) deposit charge * \param thread_num : Thread number (if tiling) * \param lev : Level of box that contains particles * \param depos_lev : Level on which particles deposit (if buffers are used) @@ -587,57 +587,55 @@ WarpXParticleContainer::DepositCharge (WarpXParIter& pti, RealVector const& wp, const long offset, const long np_to_depose, int thread_num, int lev, int depos_lev) { - if (!do_not_deposit) { - WarpX& warpx = WarpX::GetInstance(); + WarpX& warpx = WarpX::GetInstance(); - // deposition guards - // note: this is smaller than rho->nGrowVect() for PSATD - const amrex::IntVect& ng_rho = warpx.get_ng_depos_rho(); + // deposition guards + // note: this is smaller than rho->nGrowVect() for PSATD + const amrex::IntVect& ng_rho = warpx.get_ng_depos_rho(); - const std::array& dx = WarpX::CellSize(std::max(depos_lev,0)); - amrex::IntVect ref_ratio; - if (lev == depos_lev) { - ref_ratio = IntVect(AMREX_D_DECL(1, 1, 1 )); - } else { - ref_ratio = WarpX::RefRatio(depos_lev); - } - const int nc = WarpX::ncomps; - - // Get tile box where charge is deposited. - // The tile box is different when depositing in the buffers (depos_lev& xyzmin = WarpX::LowerCorner(tilebox, depos_lev, time_shift_delta); - - // pointer to costs data - amrex::LayoutData* costs = WarpX::getCosts(lev); - amrex::Real* cost = costs ? &((*costs)[pti.index()]) : nullptr; - - AMREX_ALWAYS_ASSERT(WarpX::nox == WarpX::noy); - AMREX_ALWAYS_ASSERT(WarpX::nox == WarpX::noz); - - ablastr::particles::deposit_charge( - pti, wp, this->charge, ion_lev, - rho, local_rho[thread_num], - WarpX::noz, dx, xyzmin, WarpX::n_rz_azimuthal_modes, - ng_rho, depos_lev, ref_ratio, - offset, np_to_depose, - icomp, nc, - cost, WarpX::load_balance_costs_update_algo, WarpX::do_device_synchronize - ); + const std::array& dx = WarpX::CellSize(std::max(depos_lev,0)); + amrex::IntVect ref_ratio; + if (lev == depos_lev) { + ref_ratio = IntVect(AMREX_D_DECL(1, 1, 1 )); + } else { + ref_ratio = WarpX::RefRatio(depos_lev); } + const int nc = WarpX::ncomps; + + // Get tile box where charge is deposited. 
+ // The tile box is different when depositing in the buffers (depos_lev& xyzmin = WarpX::LowerCorner(tilebox, depos_lev, time_shift_delta); + + // pointer to costs data + amrex::LayoutData* costs = WarpX::getCosts(lev); + amrex::Real* cost = costs ? &((*costs)[pti.index()]) : nullptr; + + AMREX_ALWAYS_ASSERT(WarpX::nox == WarpX::noy); + AMREX_ALWAYS_ASSERT(WarpX::nox == WarpX::noz); + + ablastr::particles::deposit_charge( + pti, wp, this->charge, ion_lev, + rho, local_rho[thread_num], + WarpX::noz, dx, xyzmin, WarpX::n_rz_azimuthal_modes, + ng_rho, depos_lev, ref_ratio, + offset, np_to_depose, + icomp, nc, + cost, WarpX::load_balance_costs_update_algo, WarpX::do_device_synchronize + ); } void diff --git a/Source/Python/WarpXWrappers.cpp b/Source/Python/WarpXWrappers.cpp index f2855a145a4..2df989bba81 100644 --- a/Source/Python/WarpXWrappers.cpp +++ b/Source/Python/WarpXWrappers.cpp @@ -626,6 +626,7 @@ namespace { const long np = pti.numParticles(); auto& wp = pti.GetAttribs(PIdx::w); + // Do this unconditionally, ignoring myspc.do_not_deposit, to support diagnostic uses myspc.DepositCharge(pti, wp, nullptr, rho_fp, 0, 0, np, 0, lev, lev); } #ifdef WARPX_DIM_RZ From c2f6b66626f25bb0ed4580d6d0a31530fe10a0cf Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 15 Aug 2022 16:17:49 -0700 Subject: [PATCH 0008/1346] Windows CI: Abort on First Error (#3312) Our Windows CI does not abort on the first failing command. This could, in the future, lead to false-positive tests. --- .github/workflows/windows.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 671e0c81c4c..aafc3a0f1e8 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -34,12 +34,17 @@ jobs: -DWarpX_OPENPMD=ON ` -DWarpX_MPI=OFF ` -DWarpX_LIB=ON + if(!$?) { Exit $LASTEXITCODE } cmake --build build --config Debug --parallel 2 + if(!$?) { Exit $LASTEXITCODE } python3 -m pip install --upgrade pip setuptools wheel + if(!$?) { Exit $LASTEXITCODE } python3 -m pip install --upgrade cmake + if(!$?) { Exit $LASTEXITCODE } $env:PYWARPX_LIB_DIR="$(Get-Location | Foreach-Object { $_.Path })\build\lib\Debug\" python3 -m pip install . -vv --no-build-isolation + if(!$?) { Exit $LASTEXITCODE } python3 Examples\Modules\gaussian_beam\PICMI_inputs_gaussian_beam.py # JSON writes are currently very slow (50min) with MSVC @@ -79,11 +84,17 @@ jobs: -DWarpX_LIB=ON ^ -DWarpX_MPI=OFF ^ -DWarpX_OPENPMD=ON + if errorlevel 1 exit 1 cmake --build build --config Release --parallel 2 + if errorlevel 1 exit 1 python3 -m pip install --upgrade pip setuptools wheel + if errorlevel 1 exit 1 python3 -m pip install --upgrade cmake + if errorlevel 1 exit 1 set "PYWARPX_LIB_DIR=%cd%\build\lib\" python3 -m pip install . -vv --no-build-isolation + if errorlevel 1 exit 1 python3 Examples\Modules\gaussian_beam\PICMI_inputs_gaussian_beam.py --diagformat=openpmd + if errorlevel 1 exit 1 From 7c2d2b2ad5529408784a2c14419f0244854978a9 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 15 Aug 2022 16:18:19 -0700 Subject: [PATCH 0009/1346] ABLASTR: Fix Missing Include (#3313) See if this missing include, which would import dllexport, fixes symbol errors on Windows in ImpactX. 
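For context on the mechanism this patch relies on, here is a generic sketch of how a missing include of the header that supplies the export decoration can silently drop a symbol from a Windows DLL. This is not the actual AMReX/ABLASTR header layout; the names mylib_config.h, MYLIB_EXPORT, MYLIB_BUILDING_DLL and fill_boundary are illustrative placeholders only.

// mylib_config.h : defines the export decoration used by the public headers
#if defined(_WIN32) && defined(MYLIB_BUILDING_DLL)
  #define MYLIB_EXPORT __declspec(dllexport)
#else
  #define MYLIB_EXPORT
#endif

// communication.h : public API header. It must include mylib_config.h itself,
// rather than relying on some other header to have defined MYLIB_EXPORT first.
#include "mylib_config.h"
MYLIB_EXPORT void fill_boundary();

// communication.cpp : with MSVC, the definition is only exported if this
// translation unit sees the dllexport-decorated declaration above. If the
// include chain carrying that decoration is incomplete, the symbol is built
// without dllexport, it never appears in the DLL export table, and downstream
// consumers (such as another code linking against the library) fail at link
// time with unresolved-symbol errors.
#include "communication.h"
void fill_boundary() { /* exchange guard cells */ }
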
--- Source/ablastr/utils/Communication.H | 2 ++ Source/ablastr/utils/Communication.cpp | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Source/ablastr/utils/Communication.H b/Source/ablastr/utils/Communication.H index 5105fcc54ee..9f58096b902 100644 --- a/Source/ablastr/utils/Communication.H +++ b/Source/ablastr/utils/Communication.H @@ -9,7 +9,9 @@ #include #include +#include #include +#include #include #include #include diff --git a/Source/ablastr/utils/Communication.cpp b/Source/ablastr/utils/Communication.cpp index b78abb33c69..e674323cc10 100644 --- a/Source/ablastr/utils/Communication.cpp +++ b/Source/ablastr/utils/Communication.cpp @@ -8,7 +8,6 @@ #include #include -#include #include #include #include From ee837089bf2264feb412e6351c22658dfe7599d2 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 15 Aug 2022 16:18:48 -0700 Subject: [PATCH 0010/1346] Docs: Add NAPAC'22 Paper (#3314) Add our latest NAPAC'22 paper to the documented references. Will replace with a JACOW reference once proceedings are out. --- Docs/source/acknowledge_us.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Docs/source/acknowledge_us.rst b/Docs/source/acknowledge_us.rst index 111604a90f0..3101d81637e 100644 --- a/Docs/source/acknowledge_us.rst +++ b/Docs/source/acknowledge_us.rst @@ -52,6 +52,11 @@ Prior WarpX references If your project uses the specific algorithms, please consider citing the respective publications in addition. +- Huebl A, Lehe R, Mitchell C E, Qiang J, Ryne R D, Sandberg R T, Vay JL. + **Next Generation Computational Tools for the Modeling and Design of Particle Accelerators at Exascale**. + *NAPAC'22*, 2022. + `arXiv:2208.02382 `__ + - Fedeli L, Zaim N, Sainte-Marie A, Thevenet M, Huebl A, Myers A, Vay JL, Vincenti H. **PICSAR-QED: a Monte Carlo module to simulate Strong-Field Quantum Electrodynamics in Particle-In-Cell codes for exascale architectures**. *New Journal of Physics* **24** 025009, 2022. From 594e65fa11e370b08155b27469fdfa3ac4e20fa0 Mon Sep 17 00:00:00 2001 From: David Grote Date: Wed, 17 Aug 2022 15:02:40 -0700 Subject: [PATCH 0011/1346] Allow filtering in z with RZ FDTD (#3281) * Allow filtering in z with RZ FDTD * Update the error message about filtering in RZ * Turned z filtering on in laser_acceleration/inputs_rz * Update LaserAccelerationRZ benchmark, including z filtering --- .../laser_acceleration/inputs_rz | 3 +- .../benchmarks_json/LaserAccelerationRZ.json | 106 +++++++++--------- Source/WarpX.cpp | 6 +- 3 files changed, 58 insertions(+), 57 deletions(-) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_rz b/Examples/Physics_applications/laser_acceleration/inputs_rz index 270684b1cae..d844f890efa 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_rz +++ b/Examples/Physics_applications/laser_acceleration/inputs_rz @@ -20,7 +20,8 @@ boundary.field_hi = pec pec ################################# warpx.verbose = 1 warpx.do_dive_cleaning = 0 -warpx.use_filter = 0 +warpx.use_filter = 1 +warpx.filter_npass_each_dir = 0 1 warpx.cfl = 1. 
# if 1., the time step is set to its CFL limit warpx.do_moving_window = 1 warpx.moving_window_dir = z # Only z is supported for the moment diff --git a/Regression/Checksum/benchmarks_json/LaserAccelerationRZ.json b/Regression/Checksum/benchmarks_json/LaserAccelerationRZ.json index dcb2e3e7981..e543b9adabd 100644 --- a/Regression/Checksum/benchmarks_json/LaserAccelerationRZ.json +++ b/Regression/Checksum/benchmarks_json/LaserAccelerationRZ.json @@ -1,64 +1,64 @@ { "beam": { - "particle_momentum_x": 3.880109055649298e-20, - "particle_momentum_y": 5.0781930103830196e-20, - "particle_momentum_z": 1.3503608494680855e-17, - "particle_position_x": 6.242131236443886e-05, - "particle_position_y": 0.0026764363296979446, - "particle_theta": 151.4079870868123, + "particle_momentum_x": 3.880115499392648e-20, + "particle_momentum_y": 5.07820548292446e-20, + "particle_momentum_z": 1.3503614921828295e-17, + "particle_position_x": 6.242131246165356e-05, + "particle_position_y": 0.0026764363296957524, + "particle_theta": 151.40798444325364, "particle_weight": 6241509.074460764 }, "electrons": { - "particle_momentum_x": 1.787201778017949e-24, - "particle_momentum_y": 3.9234822345143987e-22, - "particle_momentum_z": 1.0100062552925791e-23, + "particle_momentum_x": 1.886388623495423e-24, + "particle_momentum_y": 3.989021920324841e-22, + "particle_momentum_z": 1.2499427438675522e-23, "particle_orig_x": 0.026508328457558912, "particle_orig_z": 0.04789125000000001, - "particle_position_x": 0.041602500069929174, - "particle_position_y": 0.047891250477036906, - "particle_theta": 7325.1193688944695, + "particle_position_x": 0.041602500066712574, + "particle_position_y": 0.047891250456211995, + "particle_theta": 7325.121562757762, "particle_weight": 813672305.532158 }, "lev=0": { - "Br_0_real": 0.36356649135193925, - "Br_1_imag": 115.41886748920795, - "Br_1_real": 142258.01965536995, - "Btheta_0_real": 1299.8816721733124, - "Btheta_1_imag": 143318.04456658955, - "Btheta_1_real": 155.37774833024366, - "Bx": 142258.01319076555, - "By": 1301.5695263567557, - "Bz": 5993.640969075834, - "Bz_0_real": 0.4737412745527051, - "Bz_1_imag": 1.1409956493384723, - "Bz_1_real": 5993.528898267216, - "Er_0_real": 276179575540.0639, - "Er_1_imag": 47911367858371.875, - "Er_1_real": 46900598536.03668, - "Etheta_0_real": 135868121.7945822, - "Etheta_1_imag": 36802874200.20133, - "Etheta_1_real": 47328835452079.97, - "Ex": 278531658643.55005, - "Ey": 47328876227481.125, - "Ez": 514006664374.9789, - "Ez_0_real": 499008075334.7451, - "Ez_1_imag": 1565161989236.6174, - "Ez_1_real": 28898922272.75169, - "Jr_0_real": 1459118139844.9536, - "Jr_1_imag": 2.3356630589200717e+17, - "Jr_1_real": 2726204346551.2925, - "Jtheta_0_real": 499384029970.2145, - "Jtheta_1_imag": 1179215927404.2832, - "Jtheta_1_real": 2.17663715880068e+17, - "Jz_0_real": 1832470462501306.8, - "Jz_1_imag": 621924149855721.0, - "Jz_1_real": 660909646259030.1, - "jx": 2109207014985.5261, - "jy": 2.1766370884715638e+17, - "jz": 1954236712029783.0, - "rho": 39480730.556067616, - "rho_0_real": 39055926.50167212, - "rho_1_imag": 21660770.34248945, - "rho_1_real": 2131498.060778751 + "Br_0_real": 0.27473108145012964, + "Br_1_imag": 104.10424416504374, + "Br_1_real": 104965.62622212195, + "Btheta_0_real": 1297.3299824026299, + "Btheta_1_imag": 105725.25637121125, + "Btheta_1_real": 141.25524413452112, + "Bx": 104965.6138283532, + "By": 1296.2727613183374, + "Bz": 5076.743764997268, + "Bz_0_real": 0.46038147543152824, + "Bz_1_imag": 1.007452397747621, + "Bz_1_real": 
5076.631353934757, + "Er_0_real": 271974182110.8858, + "Er_1_imag": 39530787290253.98, + "Er_1_real": 42616765306.284, + "Etheta_0_real": 112249661.08828562, + "Etheta_1_imag": 33602739100.133934, + "Etheta_1_real": 39016517445019.95, + "Ex": 273570655229.63535, + "Ey": 39016542452492.57, + "Ez": 511653044133.8529, + "Ez_0_real": 496845145101.94775, + "Ez_1_imag": 1245709559726.1033, + "Ez_1_real": 24849961919.57713, + "Jr_0_real": 1264766566288.467, + "Jr_1_imag": 2.335663089152921e+17, + "Jr_1_real": 2273346021690.177, + "Jtheta_0_real": 475301266327.4687, + "Jtheta_1_imag": 1028946515774.2778, + "Jtheta_1_real": 2.176637922654668e+17, + "Jz_0_real": 1832469154489130.8, + "Jz_1_imag": 556484676782123.3, + "Jz_1_real": 602703174081284.9, + "jx": 1750423139263.8418, + "jy": 2.176637920803457e+17, + "jz": 1954078914516329.2, + "rho": 39314213.383921936, + "rho_0_real": 38889619.16177814, + "rho_1_imag": 21546507.958223887, + "rho_1_real": 2012888.0198535046 } -} \ No newline at end of file +} diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index da26ee90b4e..2d12f6e578e 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -731,10 +731,10 @@ WarpX::ReadParameters () } else // FDTD { - // Filter currently not working with FDTD solver in RZ geometry + // Filter currently not working with FDTD solver in RZ geometry along R // (see https://github.com/ECP-WarpX/WarpX/issues/1943) - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter, - "Filter currently not working with FDTD solver in RZ geometry"); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0, + "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir"); } #endif From 642f6c0f4ea5a524e6f73f2123aebbdef70c94fd Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 18 Aug 2022 06:41:54 -0700 Subject: [PATCH 0012/1346] Docs: Lassen diable MPI-I/O file lock (#3303) Workaround a OpenMPI bug that uses file locking on Lassen (OLCF). Performance is still not going up... --- Tools/machines/lassen-llnl/lassen.bsub | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Tools/machines/lassen-llnl/lassen.bsub b/Tools/machines/lassen-llnl/lassen.bsub index 38519e3aa21..5196acfb49b 100644 --- a/Tools/machines/lassen-llnl/lassen.bsub +++ b/Tools/machines/lassen-llnl/lassen.bsub @@ -40,6 +40,13 @@ cat > romio-hints << EOL cb_nodes ${NUM_HOSTS} EOL +# OpenMPI file locks are slow and not needed +# https://github.com/open-mpi/ompi/issues/10053 +export OMPI_MCA_sharedfp=^lockedfile,individual + +# HDF5: disable slow locks (promise not to open half-written files) +export HDF5_USE_FILE_LOCKING=FALSE + # OpenMP: 1 thread per MPI rank export OMP_NUM_THREADS=1 From fcc465894e8fb01a152bf799e590e0f995329f18 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 18 Aug 2022 17:41:05 -0700 Subject: [PATCH 0013/1346] Wheel: Separate Directory (#3320) * Wheel: Separate Directory The `pip_install` target did not yet work on Windows, because it has no universal wildcard `*` support for our pip install logic. Fix this by creating the wheel in a sub-directory and installing it by "finding all wheels for `pywarpx` in a given prefix". - Avoid wildcards by using a separate directory. - Avoid downloading deps during `wheel` build again. 
* Fix: Manifest follow-up to #3265 --- .github/workflows/windows.yml | 10 ++-------- CMakeLists.txt | 8 ++++---- MANIFEST.in | 2 +- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index aafc3a0f1e8..ee995ad54ac 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -40,10 +40,7 @@ jobs: python3 -m pip install --upgrade pip setuptools wheel if(!$?) { Exit $LASTEXITCODE } - python3 -m pip install --upgrade cmake - if(!$?) { Exit $LASTEXITCODE } - $env:PYWARPX_LIB_DIR="$(Get-Location | Foreach-Object { $_.Path })\build\lib\Debug\" - python3 -m pip install . -vv --no-build-isolation + cmake --build build --config Debug --target pip_install if(!$?) { Exit $LASTEXITCODE } python3 Examples\Modules\gaussian_beam\PICMI_inputs_gaussian_beam.py @@ -90,10 +87,7 @@ jobs: python3 -m pip install --upgrade pip setuptools wheel if errorlevel 1 exit 1 - python3 -m pip install --upgrade cmake - if errorlevel 1 exit 1 - set "PYWARPX_LIB_DIR=%cd%\build\lib\" - python3 -m pip install . -vv --no-build-isolation + cmake --build build --config Release --target pip_install if errorlevel 1 exit 1 python3 Examples\Modules\gaussian_beam\PICMI_inputs_gaussian_beam.py --diagformat=openpmd diff --git a/CMakeLists.txt b/CMakeLists.txt index ecf5c8b0b83..8756e6646e7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -436,10 +436,10 @@ if(WarpX_LIB) # build the wheel by re-using the shared library we build add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_wheel - ${CMAKE_COMMAND} -E rm -f ${WarpX_BINARY_DIR}/pywarpx*whl + ${CMAKE_COMMAND} -E rm -f -r warpx-whl COMMAND - ${CMAKE_COMMAND} -E env PYWARPX_LIB_DIR=${CMAKE_LIBRARY_OUTPUT_DIRECTORY} - python3 -m pip wheel -v --no-build-isolation ${WarpX_SOURCE_DIR} + ${CMAKE_COMMAND} -E env PYWARPX_LIB_DIR=$ + python3 -m pip wheel -v --no-build-isolation --no-deps --wheel-dir=warpx-whl ${WarpX_SOURCE_DIR} WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS @@ -463,7 +463,7 @@ if(WarpX_LIB) # because otherwise pip would also force reinstall all dependencies. 
add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install ${CMAKE_COMMAND} -E env WARPX_MPI=${WarpX_MPI} - python3 -m pip install --force-reinstall --no-deps ${PYINSTALLOPTIONS} ${WarpX_BINARY_DIR}/pywarpx*whl + python3 -m pip install --force-reinstall --no-index --no-deps ${PYINSTALLOPTIONS} --find-links=warpx-whl pywarpx WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS diff --git a/MANIFEST.in b/MANIFEST.in index 13a92654fa8..fede8d8efc1 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ include README.md LEGAL.txt LICENSE.txt include pyproject.toml -include requirements.txt +include requirements.txt requirements_mpi.txt global-include CMakeLists.txt *.cmake *.in recursive-include Source * recursive-include Python * From 23393d8b8bfd0b520468afd0344684a22d8645f8 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 18 Aug 2022 17:45:28 -0700 Subject: [PATCH 0014/1346] Fix Docs for `psatd.update_with_rho` (#3325) --- Docs/source/usage/parameters.rst | 4 ++-- Source/WarpX.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index e1d911b4a4b..b81b088c6b8 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -1759,8 +1759,6 @@ Numerics and algorithms If false, instead, the update equation for the electric field is expressed in terms of the current density :math:`\widehat{\boldsymbol{J}}^{\,n+1/2}` only. If charge is expected to be conserved (by setting, for example, ``psatd.current_correction=1``), then the two formulations are expected to be equivalent. - This option is currently implemented only for the standard PSATD and Galilean PSATD schemes, while it is not yet available for the averaged Galilean PSATD scheme (activated by the input parameter ``psatd.do_time_averaging``). - If ``psatd.v_galilean`` is zero, the spectral solver used is the standard PSATD scheme described in (`Vay et al, JCP 243, 2013 `_): 1. if ``psatd.update_with_rho=0``, the update equation for the electric field reads @@ -1822,6 +1820,8 @@ Numerics and algorithms The coefficients :math:`C`, :math:`S`, :math:`\theta`, :math:`\nu`, :math:`\chi_1`, :math:`\chi_2`, and :math:`\chi_3` are defined in (`Lehe et al, PRE 94, 2016 `_). The default value for ``psatd.update_with_rho`` is ``1`` if ``psatd.v_galilean`` is non-zero and ``0`` otherwise. + The option ``psatd.update_with_rho=0`` is not implemented with the following algorithms: + comoving PSATD (``psatd.v_comoving``), time averaging (``psatd.do_time_averaging=1``), div(E) cleaning (``warpx.do_dive_cleaning=1``), and multi-J (``warpx.do_multi_J=1``). Note that the update with and without rho is also supported in RZ geometry. diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 2d12f6e578e..12747b7b90b 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1271,7 +1271,7 @@ WarpX::ReadParameters () ); # ifdef WARPX_DIM_RZ - update_with_rho = true; // Must be true for RZ PSATD + update_with_rho = true; # else if (m_v_galilean[0] == 0. && m_v_galilean[1] == 0. && m_v_galilean[2] == 0. && m_v_comoving[0] == 0. && m_v_comoving[1] == 0. && m_v_comoving[2] == 0.) { From ab94900feb60cfdc50430cb7f5059587d6dba2e5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 19 Aug 2022 09:25:39 -0700 Subject: [PATCH 0015/1346] Docs: Add NAPAC'22 Paper (#3314) (#3316) Add our latest NAPAC'22 paper to the documented references. Will replace with a JACOW reference once proceedings are out. 
--- Source/ablastr/utils/Communication.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Source/ablastr/utils/Communication.cpp b/Source/ablastr/utils/Communication.cpp index e674323cc10..c7bf00c8e48 100644 --- a/Source/ablastr/utils/Communication.cpp +++ b/Source/ablastr/utils/Communication.cpp @@ -8,6 +8,8 @@ #include #include +#include +#include #include #include #include @@ -120,6 +122,7 @@ void FillBoundary (amrex::iMultiFab& imf, const amrex::Periodicity& period) { BL_PROFILE("ablastr::utils::communication::FillBoundary"); + imf.FillBoundary(ng, period); } @@ -215,6 +218,8 @@ void OverrideSync (amrex::MultiFab &mf, bool do_single_precision_comms, const amrex::Periodicity &period) { + BL_PROFILE("ablastr::utils::communication::OverrideSync"); + if (mf.ixType().cellCentered()) return; if (do_single_precision_comms) From 1e854b331ee0687bea93d8448938d36faafdfac6 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Sat, 20 Aug 2022 08:17:45 -0700 Subject: [PATCH 0016/1346] shift BTD lab frame bounds by half a cell to prevent round off errors (#3330) * shift BTD lab frame bounds by half a cell to prevent round off errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Source/Diagnostics/BTDiagnostics.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index b00b88e3167..741ab6e6b6f 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -298,7 +298,6 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev) / ( (1.0_rt + m_beta_boost) * m_gamma_boost); - // Initialize buffer counter and z-positions of the i^th snapshot in // boosted-frame and lab-frame m_buffer_flush_counter[i_buffer] = 0; @@ -367,6 +366,11 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev) zmin_buffer_lab + warpx.moving_window_v * m_t_lab[i_buffer]); m_snapshot_domain_lab[i_buffer].setHi(m_moving_window_dir, zmax_buffer_lab + warpx.moving_window_v * m_t_lab[i_buffer]); + // To prevent round off errors, moving the snapshot domain by half a cell so that all the slices + // lie close to the cell-centers in the lab-frame grid instead of on the edge of cell. + amrex::Real new_hi = m_snapshot_domain_lab[i_buffer].hi(m_moving_window_dir) + + 0.5_rt * dz_lab(warpx.getdt(lev), ref_ratio[m_moving_window_dir]); + m_snapshot_domain_lab[i_buffer].setHi(m_moving_window_dir,new_hi); amrex::Real new_lo = m_snapshot_domain_lab[i_buffer].hi(m_moving_window_dir) - num_z_cells_in_snapshot * dz_lab(warpx.getdt(lev), ref_ratio[m_moving_window_dir]); @@ -762,7 +766,6 @@ BTDiagnostics::Flush (int i_buffer) m_particles_buffer[i_buffer][isp]->SetParGDB(vgeom[0], vdmap[0], buffer_ba); } } - RedistributeParticleBuffer(i_buffer); // Reset buffer box and particle box array From abc921068c6a556cababf2a931d7d2d996321137 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 22 Aug 2022 09:36:38 -0700 Subject: [PATCH 0017/1346] Updated Docs/README.md (#3328) --- Docs/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Docs/README.md b/Docs/README.md index fbc98b5bb6d..d6a0f62d6c1 100644 --- a/Docs/README.md +++ b/Docs/README.md @@ -1,6 +1,7 @@ # Overview This explains how to generate the documentation for Warpx, and contribute to it. 
+More information can be found in Docs/source/developers/documentation.rst. ## Generating the documentation @@ -8,7 +9,7 @@ This explains how to generate the documentation for Warpx, and contribute to it. Install the Python requirements for compiling the documentation: ``` -python3 -m pip install sphinx sphinx_rtd_theme +python3 -m pip install -r Docs/requirements.txt ``` ### Compiling the documentation From 2fc121e5c8791f87b5ac56b7fdc663a76bf56f7f Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Mon, 22 Aug 2022 19:44:23 +0200 Subject: [PATCH 0018/1346] Remove outdated comments from example input files (#3332) Update comments in example input files: moving window now available along x, y, z. --- Examples/Physics_applications/laser_acceleration/inputs_1d | 2 +- Examples/Physics_applications/laser_acceleration/inputs_2d | 2 +- Examples/Physics_applications/laser_acceleration/inputs_3d | 2 +- Examples/Physics_applications/laser_acceleration/inputs_rz | 2 +- Regression/TestFillBoundary/inputs.2d | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_1d b/Examples/Physics_applications/laser_acceleration/inputs_1d index e143b5b16b8..8d92bfa356c 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_1d +++ b/Examples/Physics_applications/laser_acceleration/inputs_1d @@ -24,7 +24,7 @@ warpx.do_dive_cleaning = 0 warpx.use_filter = 1 warpx.cfl = 0.9 # if 1., the time step is set to its CFL limit warpx.do_moving_window = 1 -warpx.moving_window_dir = z # Only z is supported for the moment +warpx.moving_window_dir = z warpx.moving_window_v = 1.0 # units of speed of light warpx.do_dynamic_scheduling = 0 warpx.serialize_initial_conditions = 1 diff --git a/Examples/Physics_applications/laser_acceleration/inputs_2d b/Examples/Physics_applications/laser_acceleration/inputs_2d index e5c3790437f..422e33ddf57 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_2d +++ b/Examples/Physics_applications/laser_acceleration/inputs_2d @@ -26,7 +26,7 @@ warpx.do_dive_cleaning = 0 warpx.use_filter = 1 warpx.cfl = 1. # if 1., the time step is set to its CFL limit warpx.do_moving_window = 1 -warpx.moving_window_dir = z # Only z is supported for the moment +warpx.moving_window_dir = z warpx.moving_window_v = 1.0 # units of speed of light warpx.serialize_initial_conditions = 1 # for production, set this to 0 (default) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_3d b/Examples/Physics_applications/laser_acceleration/inputs_3d index cc0aa6e0cb1..27ecc00117b 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_3d +++ b/Examples/Physics_applications/laser_acceleration/inputs_3d @@ -26,7 +26,7 @@ warpx.do_dive_cleaning = 0 warpx.use_filter = 1 warpx.cfl = 1. 
# if 1., the time step is set to its CFL limit warpx.do_moving_window = 1 -warpx.moving_window_dir = z # Only z is supported for the moment +warpx.moving_window_dir = z warpx.moving_window_v = 1.0 # units of speed of light warpx.do_dynamic_scheduling = 0 # for production, set this to 1 (default) warpx.serialize_initial_conditions = 1 # for production, set this to 0 (default) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_rz b/Examples/Physics_applications/laser_acceleration/inputs_rz index d844f890efa..971f1b538cd 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_rz +++ b/Examples/Physics_applications/laser_acceleration/inputs_rz @@ -24,7 +24,7 @@ warpx.use_filter = 1 warpx.filter_npass_each_dir = 0 1 warpx.cfl = 1. # if 1., the time step is set to its CFL limit warpx.do_moving_window = 1 -warpx.moving_window_dir = z # Only z is supported for the moment +warpx.moving_window_dir = z warpx.moving_window_v = 1.0 # units of speed of light # Order of particle shape factors diff --git a/Regression/TestFillBoundary/inputs.2d b/Regression/TestFillBoundary/inputs.2d index 4a4e4b835ed..5e929a5ce79 100644 --- a/Regression/TestFillBoundary/inputs.2d +++ b/Regression/TestFillBoundary/inputs.2d @@ -23,7 +23,7 @@ particles.use_fdtd_nci_corr = 0 warpx.use_filter=0 warpx.filter_npass_each_dir=1 4 warpx.do_moving_window = 0 -warpx.moving_window_dir = z # Only z is supported for the moment +warpx.moving_window_dir = z warpx.moving_window_v = 1.0 # units of speed of light algo.particle_shape = 1 algo.maxwell_solver = yee From f0875b427876719ad0bc09c0260e7e51c1603a5e Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 22 Aug 2022 11:29:24 -0700 Subject: [PATCH 0019/1346] CI: Clean up Analysis of Galilean Tests (#3322) * CI: Clean up Analysis of Galilean Tests * Update Reference Energy Values * Update Reference Energy Tolerances * Improve Inline Comment --- Examples/Tests/galilean/analysis.py | 99 ++++++++++++++++++++++++++ Examples/Tests/galilean/analysis_2d.py | 89 ----------------------- Examples/Tests/galilean/analysis_3d.py | 75 ------------------- Regression/WarpX-tests.ini | 20 +++--- 4 files changed, 109 insertions(+), 174 deletions(-) create mode 100755 Examples/Tests/galilean/analysis.py delete mode 100755 Examples/Tests/galilean/analysis_2d.py delete mode 100755 Examples/Tests/galilean/analysis_3d.py diff --git a/Examples/Tests/galilean/analysis.py b/Examples/Tests/galilean/analysis.py new file mode 100755 index 00000000000..898ac1435d7 --- /dev/null +++ b/Examples/Tests/galilean/analysis.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +This script is used to test the results of the Galilean PSATD and +averaged Galilean PSATD methods in WarpX. + +It compares the energy of the electric field with a given reference energy. + +The reference energy is computed by running the same test with: +(i) psatd.v_galilean=(0,0,0) for Galilean tests, or +(ii) psatd.do_time_averaging=0 for averaged Galilean tests. + +In both cases, the reference energy corresponds to unstable results due to NCI +(suppressed by the Galilean PSATD method, without or with averaging, respectively). 
+""" +import os +import re +import sys + +import numpy as np +import scipy.constants as scc + +import yt ; yt.funcs.mylog.setLevel(0) +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +filename = sys.argv[1] + +# Parse some input arguments from output file 'warpx_used_inputs' +current_correction = False +time_averaging = False +warpx_used_inputs = open('./warpx_used_inputs', 'r').read() +if re.search('geometry.dims\s*=\s*2', warpx_used_inputs): + dims = '2D' +elif re.search('geometry.dims\s*=\s*RZ', warpx_used_inputs): + dims = 'RZ' +elif re.search('geometry.dims\s*=\s*3', warpx_used_inputs): + dims = '3D' +if re.search('psatd.current_correction\s*=\s*1', warpx_used_inputs): + current_correction = True +if re.search('psatd.do_time_averaging\s*=\s*1', warpx_used_inputs): + time_averaging = True + +ds = yt.load(filename) + +# yt 4.0+ has rounding issues with our domain data: +# RuntimeError: yt attempted to read outside the boundaries +# of a non-periodic domain along dimension 0. +if 'force_periodicity' in dir(ds): ds.force_periodicity() + +all_data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +Ex = all_data['boxlib', 'Ex'].squeeze().v +Ey = all_data['boxlib', 'Ey'].squeeze().v +Ez = all_data['boxlib', 'Ez'].squeeze().v + +# Set reference energy values, and tolerances for numerical stability and charge conservation +tol_energy = 1e-8 +tol_charge = 1e-9 +if dims == '2D': + if not current_correction: + energy_ref = 35657.41657683263 + if current_correction: + energy_ref = 35024.0275199999 + if time_averaging: + energy_ref = 26208.04843478073 + tol_energy = 1e-6 +elif dims == 'RZ': + if not current_correction: + energy_ref = 191002.6526271543 + if current_correction: + energy_ref = 472779.70801323955 +elif dims == '3D': + if not current_correction: + energy_ref = 661285.098907683 + if current_correction: + energy_ref = 856783.3007547935 + if time_averaging: + energy_ref = 14.564631643496 + tol_energy = 1e-4 + +# Check numerical stability by comparing electric field energy to reference energy +energy = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) +err_energy = energy / energy_ref +print('\nCheck numerical stability:') +print(f'err_energy = {err_energy}') +print(f'tol_energy = {tol_energy}') +assert(err_energy < tol_energy) + +# Check charge conservation (relative L-infinity norm of error) with current correction +if current_correction: + divE = all_data['boxlib', 'divE'].squeeze().v + rho = all_data['boxlib', 'rho' ].squeeze().v / scc.epsilon_0 + err_charge = np.amax(np.abs(divE - rho)) / max(np.amax(divE), np.amax(rho)) + print('\nCheck charge conservation:') + print(f'err_charge = {err_charge}') + print(f'tol_charge = {tol_charge}') + assert(err_charge < tol_charge) + +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/galilean/analysis_2d.py b/Examples/Tests/galilean/analysis_2d.py deleted file mode 100755 index 820dcc6675b..00000000000 --- a/Examples/Tests/galilean/analysis_2d.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -""" -This script is used to test the results of the Galilean PSATD method and -averaged Galilean PSATD method in WarpX. -It compares the energy of the electric field with precalculated reference energy. 
- 1) Galilean PSATD test: reference energy was calculated with - standard PSATD (v_galilean = (0.,0.,0.)): - * if 'v_galilean == 0': simulation is unstable because of the arosen NCI; - * if 'v_galilean != 0 : NCI is suppressed => simulation is stable. - 2) Averaged Galilean PSATD with large timestep dz/dx = 4. and c*dt = dz: - reference energy was calculated with Galilean PSATD (v_galilean = (0.,0.,0.99498743710662): - * if standard Galilean PSATD is used (psatd.do_time_averaging == 0'): - simulation is unstable because of the arosen NCI. - * if averaged Galilean PSATD is used ('psatd.do_time_averaging == 1) : - NCI is suppressed => simulation is stable. -""" -import os -import re -import sys - -import numpy as np -import scipy.constants as scc - -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') -import checksumAPI - -filename = sys.argv[1] - -# Parse test name -averaged = True if re.search( 'averaged', filename ) else False -current_correction = True if re.search( 'current_correction', filename ) else False -dims_RZ = True if re.search('rz', filename) else False - -ds = yt.load( filename ) - -# yt 4.0+ has rounding issues with our domain data: -# RuntimeError: yt attempted to read outside the boundaries -# of a non-periodic domain along dimension 0. -if 'force_periodicity' in dir(ds): ds.force_periodicity() - -all_data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) -Ex = all_data['boxlib', 'Ex'].squeeze().v -Ey = all_data['boxlib', 'Ey'].squeeze().v -Ez = all_data['boxlib', 'Ez'].squeeze().v - -if (averaged): - # energyE_ref was calculated with Galilean PSATD method (v_galilean = (0,0,0.99498743710662)) - energyE_ref = 32532.00882239954 - tolerance_rel = 1e-6 -elif (not dims_RZ and not current_correction): - # energyE_ref was calculated with standard PSATD method (v_galilean = (0.,0.,0.)) - energyE_ref = 35657.99361677053 - tolerance_rel = 1e-8 -elif (not dims_RZ and current_correction): - # energyE_ref was calculated with standard PSATD method (v_galilean = (0.,0.,0.)): - energyE_ref = 35024.02751955393 - tolerance_rel = 2e-8 -elif (dims_RZ and not current_correction): - # energyE_ref was calculated with standard PSATD method (v_galilean = (0.,0.,0.)) - energyE_ref = 239019.10670780553 - tolerance_rel = 1e-8 -elif (dims_RZ and current_correction): - # energyE_ref was calculated with standard PSATD method (v_galilean = (0.,0.,0.)) - energyE_ref = 471730.0524143545 - tolerance_rel = 1e-9 - -energyE = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) - -error_rel = energyE / energyE_ref - -print("error_rel : " + str(error_rel)) -print("tolerance_rel: " + str(tolerance_rel)) - -assert( error_rel < tolerance_rel ) - -# Check charge conservation (relative L-infinity norm of error) with current correction -if current_correction: - divE = all_data['boxlib', 'divE'].squeeze().v - rho = all_data['boxlib', 'rho' ].squeeze().v / scc.epsilon_0 - error_rel = np.amax(np.abs(divE - rho)) / max(np.amax(divE), np.amax(rho)) - tolerance = 1e-9 - print("Check charge conservation:") - print("error_rel = {}".format(error_rel)) - print("tolerance = {}".format(tolerance)) - assert( error_rel < tolerance ) - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/galilean/analysis_3d.py b/Examples/Tests/galilean/analysis_3d.py deleted file mode 100755 index d224e2b9a16..00000000000 --- a/Examples/Tests/galilean/analysis_3d.py +++ /dev/null @@ -1,75 +0,0 @@ 
-#!/usr/bin/env python3 -""" -This script is used to test the results of the Galilean PSATD method and -averaged Galilean PSATD method in WarpX. -It compares the energy of the electric field with precalculated reference energy. - 1) Galilean PSATD test: reference energy was calculated with - standard PSATD (v_galilean = (0.,0.,0.)): - * if 'v_galilean == 0': simulation is unstable because of the arosen NCI; - * if 'v_galilean != 0 : NCI is suppressed => simulation is stable. - 2) Averaged Galilean PSATD with large timestep dz/dx = 3. and c*dt = dz: - reference energy was calculated with Galilean PSATD (v_galilean = (0.,0.,0.99498743710662): - * if standard Galilean PSATD is used (psatd.do_time_averaging == 0'): - simulation is unstable because of the arosen NCI. - * if averaged Galilean PSATD is used ('psatd.do_time_averaging == 1) : - NCI is suppressed => simulation is stable. -""" -import os -import re -import sys - -import numpy as np -import scipy.constants as scc - -import yt ; yt.funcs.mylog.setLevel(0) -sys.path.insert(1, '../../../../warpx/Regression/Checksum/') -import checksumAPI - -filename = sys.argv[1] - -# Parse test name -averaged = True if re.search( 'averaged', filename ) else False -current_correction = True if re.search( 'current_correction', filename ) else False - -ds = yt.load( filename ) - -all_data = ds.covering_grid(level = 0, left_edge = ds.domain_left_edge, dims = ds.domain_dimensions) -Ex = all_data['boxlib', 'Ex'].squeeze().v -Ey = all_data['boxlib', 'Ey'].squeeze().v -Ez = all_data['boxlib', 'Ez'].squeeze().v - -if (averaged): - # energyE_ref was calculated with Galilean PSATD method (v_galilean = (0,0,0.99498743710662)) - energyE_ref = 6.816182771544472 - tolerance_rel = 1e-4 -elif (current_correction): - # energyE_ref was calculated with standard PSATD method (v_galilean = (0.,0.,0.)): - energyE_ref = 75333.81851879464 - tolerance_rel = 5e-8; -else: - # energyE_ref was calculated with standard PSATD method (v_galilean = (0.,0.,0.)) - energyE_ref = 8218.678808709019 - tolerance_rel = 1e-6; - -energyE = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) - -error_rel = energyE / energyE_ref - -print("error_rel : " + str(error_rel)) -print("tolerance_rel: " + str(tolerance_rel)) - -assert( error_rel < tolerance_rel ) - -# Check charge conservation (relative L-infinity norm of error) with current correction -if current_correction: - rho = all_data['boxlib', 'rho' ].squeeze().v - divE = all_data['boxlib', 'divE'].squeeze().v - error_rel = np.amax( np.abs( divE - rho/scc.epsilon_0 ) ) / np.amax( np.abs( rho/scc.epsilon_0 ) ) - tolerance = 1e-9 - print("Check charge conservation:") - print("error_rel = {}".format(error_rel)) - print("tolerance = {}".format(tolerance)) - assert( error_rel < tolerance ) - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index d21ce053ea3..f197ab60d0a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2370,7 +2370,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_2d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [galilean_2d_psatd_current_correction] buildDir = . 
@@ -2388,7 +2388,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_2d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [galilean_2d_psatd_hybrid] buildDir = . @@ -2442,7 +2442,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_2d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [galilean_rz_psatd_current_correction] buildDir = . @@ -2460,7 +2460,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_2d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [galilean_3d_psatd] buildDir = . @@ -2478,7 +2478,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_3d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [galilean_3d_psatd_current_correction] buildDir = . @@ -2496,7 +2496,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_3d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [averaged_galilean_2d_psatd] buildDir = . @@ -2514,7 +2514,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_2d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [averaged_galilean_2d_psatd_hybrid] buildDir = . @@ -2532,7 +2532,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_2d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [averaged_galilean_3d_psatd] buildDir = . @@ -2550,7 +2550,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_3d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [averaged_galilean_3d_psatd_hybrid] buildDir = . @@ -2568,7 +2568,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis_3d.py +analysisRoutine = Examples/Tests/galilean/analysis.py [multi_J_2d_psatd] buildDir = . From 6e9934d48dbfd9416e20ba6ab710fc544da97667 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 22 Aug 2022 12:42:42 -0700 Subject: [PATCH 0020/1346] AMReX: Weekly Update (#3333) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 3a5917ac2a0..c73b7ef0717 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -106,7 +106,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 4f639294606d47185d31eaee4af66fc6b590e5a2 && cd - + cd amrex && git checkout --detach 8294c3afbcbbc503f77e493196d380fbe1666d02 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 35854c55470..91b5a11153b 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 4f639294606d47185d31eaee4af66fc6b590e5a2 +branch = 8294c3afbcbbc503f77e493196d380fbe1666d02 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index f197ab60d0a..d2340a01a26 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 4f639294606d47185d31eaee4af66fc6b590e5a2 +branch = 8294c3afbcbbc503f77e493196d380fbe1666d02 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 76b0a238862..be65ecf7ad4 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "4f639294606d47185d31eaee4af66fc6b590e5a2" +set(WarpX_amrex_branch "8294c3afbcbbc503f77e493196d380fbe1666d02" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 9b152be9129..d6ba50fd66e 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 4f639294606d47185d31eaee4af66fc6b590e5a2 && cd - +cd amrex && git checkout --detach 8294c3afbcbbc503f77e493196d380fbe1666d02 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 81f0bbc571e6bdf1833022b4db8559a4fefa665f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Aug 2022 22:53:48 +0000 Subject: [PATCH 0021/1346] [pre-commit.ci] pre-commit autoupdate (#3334) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/Lucas-C/pre-commit-hooks: v1.3.0 → v1.3.1](https://github.com/Lucas-C/pre-commit-hooks/compare/v1.3.0...v1.3.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 25986a65b89..668c0a31b34 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -48,7 +48,7 @@ repos: # Changes tabs to spaces - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.3.0 + rev: v1.3.1 hooks: - id: remove-tabs exclude: 'Make.WarpX|Make.package|Makefile|GNUmake' From 855349fe53774318a0172febf43710f6b74fd46d Mon Sep 17 
00:00:00 2001 From: Axel Huebl Date: Mon, 22 Aug 2022 16:11:02 -0700 Subject: [PATCH 0022/1346] Docs: Spack Desktop Environment (#3326) Add Spack environment files for developer desktop/laptop computers. --- Docs/source/install/dependencies.rst | 114 +++++++++-------- .../machines/desktop/spack-macos-openmp.yaml | 120 ++++++++++++++++++ Tools/machines/desktop/spack-ubuntu-cuda.yaml | 101 +++++++++++++++ .../machines/desktop/spack-ubuntu-openmp.yaml | 98 ++++++++++++++ Tools/machines/desktop/spack-ubuntu-rocm.yaml | 102 +++++++++++++++ 5 files changed, 483 insertions(+), 52 deletions(-) create mode 100644 Tools/machines/desktop/spack-macos-openmp.yaml create mode 100644 Tools/machines/desktop/spack-ubuntu-cuda.yaml create mode 100644 Tools/machines/desktop/spack-ubuntu-openmp.yaml create mode 100644 Tools/machines/desktop/spack-ubuntu-rocm.yaml diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index f696a1fa86d..95eb832fc43 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -41,44 +41,82 @@ Install Pick *one* of the installation methods below to install all dependencies for WarpX development in a consistent manner. +Conda (Linux/macOS/Windows) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +With MPI (only Linux/macOS): + +.. code-block:: bash + + conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp "openpmd-api=*=mpi_mpich*" python numpy pandas scipy yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba ninja mpich pip virtualenv + source activate warpx-dev + +Without MPI: + +.. code-block:: bash + + conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp openpmd-api python numpy pandas scipy yt fftw pkg-config matplotlib mamba ninja pip virtualenv + source activate warpx-dev + + # compile WarpX with -DWarpX_MPI=OFF + +For legacy ``GNUmake`` builds, after each ``source activate warpx-dev``, you also need to set: + +.. code-block:: bash + + export FFTW_HOME=${CONDA_PREFIX} + export BLASPP_HOME=${CONDA_PREFIX} + export LAPACKPP_HOME=${CONDA_PREFIX} + +.. note:: + + A general option to deactivate that conda self-activates its base environment. + This `avoids interference with the system and other package managers `__. + + .. code-block:: bash + + conda config --set auto_activate_base false + + Spack (macOS/Linux) ^^^^^^^^^^^^^^^^^^^ -.. code-block:: bash +First, download a `Spack desktop development environment `__ of your choice. +For most desktop development, pick the OpenMP environment for CPUs unless you have a supported GPU. - spack env create warpx-dev - spack env activate warpx-dev +* **Ubuntu** Linux: - spack add adios2 # for openPMD - spack add blaspp # for PSATD in RZ - spack add ccache - spack add cmake - spack add fftw # for PSATD - spack add hdf5 # for openPMD - spack add lapackpp # for PSATD in RZ - spack add mpi - spack add openpmd-api # for openPMD - spack add pkgconfig # for fftw + * OpenMP: ``system=ubuntu; compute=openmp`` (CPUs) + * CUDA: ``system=ubuntu; compute=cuda`` (Nvidia GPUs) + * ROCm: ``system=ubuntu; compute=rocm`` (AMD GPUs) + * SYCL: *todo* (Intel GPUs) +* **macOS**: first, prepare with ``brew install gpg2; brew install gcc`` - # OpenMP support on macOS - [[ $OSTYPE == 'darwin'* ]] && spack add llvm-openmp + * OpenMP: ``system=macos; compute=openmp`` - # optional: - # spack add python - # spack add py-pip - # spack add cuda +.. 
code-block:: bash - spack install + # download environment file + curl -sLo https://raw.githubusercontent.com/ECP-WarpX/WarpX/development/Tools/machines/desktop/spack-${system}-${compute}.sh -In new terminal sessions, re-activate the environment with ``spack env activate warpx-dev`` again. + # create new development environment + spack env create warpx-${compute}-dev spack-${system}-${compute}.yaml + spack env activate warpx-${compute}-dev + + # installation + spack install + python3 -m pip install jupyter matplotlib numpy openpmd-api openpmd-viewer pandas scipy virtualenv yt -If you also want to run runtime tests and added Python (``spack add python`` and ``spack add py-pip``) above, install also these additional Python packages in the active Spack environment: +In new terminal sessions, re-activate the environment with .. code-block:: bash - python3 -m pip install matplotlib yt scipy pandas numpy openpmd-api virtualenv + spack env activate warpx-openmp-dev -If you want to run the ``./run_test.sh`` :ref:`test script `, which uses our legacy GNUmake build system, you need to set the following environment hints after ``spack env activate warpx-dev`` for dependent software: +again. +Replace ``openmp`` with the equivalent you chose. + +For legacy ``GNUmake`` builds, after each ``source activate warpx-openmp-dev``, you also need to set: .. code-block:: bash @@ -120,34 +158,6 @@ If you also want to compile with PSATD in RZ, you need to manually install BLAS+ -Duse_cmake_find_lapack=ON -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON -Conda (Linux/macOS/Windows) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Without MPI: - -.. code-block:: bash - - conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp openpmd-api python numpy pandas scipy yt fftw pkg-config matplotlib mamba ninja pip virtualenv - source activate warpx-dev - - # compile WarpX with -DWarpX_MPI=OFF - -With MPI (only Linux/macOS): - -.. code-block:: bash - - conda create -n warpx-dev -c conda-forge blaspp ccache cmake compilers git lapackpp "openpmd-api=*=mpi_openmpi*" python numpy pandas scipy yt "fftw=*=mpi_openmpi*" pkg-config matplotlib mamba ninja openmpi pip virtualenv - source activate warpx-dev - -For legacy ``GNUmake`` builds, after each ``source activate warpx-dev``, you also need to set: - -.. code-block:: bash - - export FFTW_HOME=${CONDA_PREFIX} - export BLASPP_HOME=${CONDA_PREFIX} - export LAPACKPP_HOME=${CONDA_PREFIX} - - Apt (Debian/Ubuntu) ^^^^^^^^^^^^^^^^^^^ diff --git a/Tools/machines/desktop/spack-macos-openmp.yaml b/Tools/machines/desktop/spack-macos-openmp.yaml new file mode 100644 index 00000000000..e91ca586e6c --- /dev/null +++ b/Tools/machines/desktop/spack-macos-openmp.yaml @@ -0,0 +1,120 @@ +# This is a Spack environment file. +# +# This environment can be used to install all dependencies to build the manual +# locally. +# +# Activating and installing this environment will provide all dependencies +# that are needed for full-feature development. 
+# https://spack.readthedocs.io/en/latest/environments.html +# +# Inside the directory of this file +# spack env create warpx-openmp-dev spack-macos-openmp.yaml +# spack env activate warpx-openmp-dev +# spack install # only needed the first time +# +spack: + specs: + - adios2 ~fortran + - ascent +adios2 +python ~fortran + - blaspp + - boost + - ccache + - cmake + - conduit ~fortran + - fftw + - hdf5 ~fortran + - lapackpp + - mpi + - llvm-openmp + - pkgconfig + - python + - py-cython + - py-h5py + - py-libensemble +nlopt + - py-mpi4py + - py-numpy + - py-pip + - py-setuptools + - py-wheel + - sensei +ascent ~catalyst +python +# not yet ready for macOS prime time +# https://github.com/spack/spack/issues/32283 +# https://github.com/spack/spack/pull/32285 +# https://github.com/spack/spack/pull/32284 +# - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei +# skipped to save time: 3D post-processing +# - paraview +adios2 +python3 +qt +# skipped to save time, because they are faster installed via pip afterwards +# python3 -m pip install jupyter matplotlib numpy openpmd-api openpmd-viewer pandas scipy yt +# - py-jupyter +# - py-matplotlib +animation +fonts +latex +movies backend=macosx +# - openpmd-api +python +# - py-openpmd-viewer +numba +jupyter +# - py-pandas +# - py-pyqt5 +# - py-scipy +# - py-yt + + packages: + all: + variants: +mpi ~fortran + # BLAS/LAPACK: the default (accelerate) pulls veclibfort@0.4.2 for + # py-numpy, which fails to build on M1 + # MPI: the default (openmpi) triggers annoying firewall warnings when + # running executables + providers: + blas: [openblas] + lapack: [openblas] + mpi: [mpich] + # default blocks at HDF5 1.8, resulting in unmergable solution + conduit: + variants: ~hdf5_compat ~fortran + # otherwise concretization error between ccache and all other variants + zstd: + variants: +programs + + compilers: + # macOS + # preparation: you first need to install xcode (app store) and gcc (homebrew) and gpg2 (homebrew) + - compiler: + spec: apple-clang@13.1.6 + paths: + cc: /usr/bin/clang + cxx: /usr/bin/clang++ + f77: /opt/homebrew/bin/gfortran + fc: /opt/homebrew/bin/gfortran + flags: {} + operating_system: monterey + target: aarch64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: apple-clang@13.1.6 + paths: + cc: /usr/bin/clang + cxx: /usr/bin/clang++ + f77: /opt/homebrew/bin/gfortran + fc: /opt/homebrew/bin/gfortran + flags: {} + operating_system: monterey + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + + # binary caches + mirrors: + E4S: https://cache.e4s.io + LLNL: https://mirror.spack.io +# needs boto3 +# E4Smac: s3://spack-binaries/develop/e4s-mac + + # do not try to reuse existing packages, which can confuse the concretizer + concretizer: + reuse: false + unify: true + + # limit the build parallelism (default: call virtual cores) +# config: +# build_jobs: 6 diff --git a/Tools/machines/desktop/spack-ubuntu-cuda.yaml b/Tools/machines/desktop/spack-ubuntu-cuda.yaml new file mode 100644 index 00000000000..12ce071a6df --- /dev/null +++ b/Tools/machines/desktop/spack-ubuntu-cuda.yaml @@ -0,0 +1,101 @@ +# This is a Spack environment file. +# +# This environment can be used to install all dependencies to build the manual +# locally. +# +# Activating and installing this environment will provide all dependencies +# that are needed for full-feature development. 
+# https://spack.readthedocs.io/en/latest/environments.html +# +# Inside the directory of this file +# spack env create warpx-cuda-dev spack-ubuntu-cuda.yaml +# spack env activate warpx-cuda-dev +# spack install # only needed the first time +# +spack: + specs: + - adios2 + - blaspp + - boost + - ccache + - cmake + - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei + - cuda + - fftw + - hdf5 + - lapackpp + - mpi + - pkgconfig + - python + - py-cython + - py-h5py + - py-libensemble +nlopt + - py-mpi4py + - py-numpy + - py-pip + - py-setuptools + - py-wheel +# skipped to save time: 3D post-processing +# - paraview +adios2 +python3 +qt +# skipped to save time, because they are faster installed via pip afterwards +# python3 -m pip install jupyter matplotlib numpy openpmd-api openpmd-viewer pandas scipy yt +# - py-jupyter +# - py-matplotlib +animation +fonts +latex +movies +# - openpmd-api +python +# - py-openpmd-viewer +numba +jupyter +# - py-pandas +# - py-pyqt5 +# - py-scipy +# - py-yt + + packages: + all: + # note: add +cuda cuda_arch=70 + # or respective CUDA capability instead of 70 to variants below + variants: +mpi ~fortran +cuda cuda_arch=70 + # default blocks at HDF5 1.8, resulting in unmergable solution + conduit: + variants: ~hdf5_compat + + compilers: + # Ubuntu + - compiler: + spec: gcc@11.2.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: ubuntu22.04 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@9.4.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: ubuntu20.04 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + + # binary caches + mirrors: + E4S: https://cache.e4s.io + LLNL: https://mirror.spack.io + + # do not try to reuse existing packages, which can confuse the concretizer + concretizer: + reuse: false + unify: true + + # limit the build parallelism (default: call virtual cores) +# config: +# build_jobs: 6 diff --git a/Tools/machines/desktop/spack-ubuntu-openmp.yaml b/Tools/machines/desktop/spack-ubuntu-openmp.yaml new file mode 100644 index 00000000000..c66c2f5c4cd --- /dev/null +++ b/Tools/machines/desktop/spack-ubuntu-openmp.yaml @@ -0,0 +1,98 @@ +# This is a Spack environment file. +# +# This environment can be used to install all dependencies to build the manual +# locally. +# +# Activating and installing this environment will provide all dependencies +# that are needed for full-feature development. 
+# https://spack.readthedocs.io/en/latest/environments.html +# +# Inside the directory of this file +# spack env create warpx-openmp-dev spack-ubuntu-openmp.yaml +# spack env activate warpx-openmp-dev +# spack install # only needed the first time +# +spack: + specs: + - adios2 + - blaspp + - boost + - ccache + - cmake + - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei + - fftw + - hdf5 + - lapackpp + - mpi + - pkgconfig + - python + - py-cython + - py-h5py + - py-libensemble +nlopt + - py-mpi4py + - py-numpy + - py-pip + - py-setuptools + - py-wheel +# skipped to save time: 3D post-processing +# - paraview +adios2 +python3 +qt +# skipped to save time, because they are faster installed via pip afterwards +# python3 -m pip install jupyter matplotlib numpy openpmd-api openpmd-viewer pandas scipy yt +# - py-jupyter +# - py-matplotlib +animation +fonts +latex +movies +# - openpmd-api +python +# - py-openpmd-viewer +numba +jupyter +# - py-pandas +# - py-pyqt5 +# - py-scipy +# - py-yt + + packages: + all: + variants: +mpi ~fortran + # default blocks at HDF5 1.8, resulting in unmergable solution + conduit: + variants: ~hdf5_compat + + compilers: + # Ubuntu + - compiler: + spec: gcc@11.2.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: ubuntu22.04 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@9.4.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: ubuntu20.04 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + + # binary caches + mirrors: + E4S: https://cache.e4s.io + LLNL: https://mirror.spack.io + + # do not try to reuse existing packages, which can confuse the concretizer + concretizer: + reuse: false + unify: true + + # limit the build parallelism (default: call virtual cores) +# config: +# build_jobs: 6 diff --git a/Tools/machines/desktop/spack-ubuntu-rocm.yaml b/Tools/machines/desktop/spack-ubuntu-rocm.yaml new file mode 100644 index 00000000000..2af0da1ebec --- /dev/null +++ b/Tools/machines/desktop/spack-ubuntu-rocm.yaml @@ -0,0 +1,102 @@ +# This is a Spack environment file. +# +# This environment can be used to install all dependencies to build the manual +# locally. +# +# Activating and installing this environment will provide all dependencies +# that are needed for full-feature development. 
+# https://spack.readthedocs.io/en/latest/environments.html +# +# Inside the directory of this file +# spack env create warpx-rocm-dev spack-ubuntu-rocm.yaml +# spack env activate warpx-rocm-dev +# spack install # only needed the first time +# +spack: + specs: + - adios2 + - blaspp + - boost + - ccache + - cmake + - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei + - hdf5 + - hip + - lapackpp + - llvm-amdgpu + - mpi + - pkgconfig + - python + - py-cython + - py-h5py + - py-libensemble +nlopt + - py-mpi4py + - py-numpy + - py-pip + - py-setuptools + - py-wheel + - rocfft + - rocprim + - rocrand +# skipped to save time: 3D post-processing +# - paraview +adios2 +python3 +qt +# skipped to save time, because they are faster installed via pip afterwards +# python3 -m pip install jupyter matplotlib numpy openpmd-api openpmd-viewer pandas scipy yt +# - py-jupyter +# - py-matplotlib +animation +fonts +latex +movies +# - openpmd-api +python +# - py-openpmd-viewer +numba +jupyter +# - py-pandas +# - py-pyqt5 +# - py-scipy +# - py-yt + + packages: + all: + variants: +mpi ~fortran +rocm + # default blocks at HDF5 1.8, resulting in unmergable solution + conduit: + variants: ~hdf5_compat + + compilers: + # Ubuntu + - compiler: + spec: gcc@11.2.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: ubuntu22.04 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@9.4.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: ubuntu20.04 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + + # binary caches + mirrors: + E4S: https://cache.e4s.io + LLNL: https://mirror.spack.io + + # do not try to reuse existing packages, which can confuse the concretizer + concretizer: + reuse: false + unify: true + + # limit the build parallelism (default: call virtual cores) +# config: +# build_jobs: 6 From 15579dbdb1c69babd0b4c5d7216a927443346741 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 22 Aug 2022 16:29:58 -0700 Subject: [PATCH 0023/1346] Performance Hint: Too Many Boxes / GPU (#3269) Print a performance hint if too many boxes are used per GPU. --- Source/Initialization/WarpXInitData.cpp | 37 ++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 90a1c36eb88..d0d9465791d 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1094,27 +1094,56 @@ WarpX::PerformanceHints () for (int ilev = 0; ilev <= finestLevel(); ++ilev) { total_nboxes += boxArray(ilev).size(); } - if (ParallelDescriptor::NProcs() > total_nboxes){ + auto const nprocs = ParallelDescriptor::NProcs(); + + // Check: are there more MPI ranks than Boxes? + if (nprocs > total_nboxes) { std::stringstream warnMsg; warnMsg << "Too many resources / too little work!\n" << " It looks like you requested more compute resources than " << "there are total number of boxes of cells available (" << total_nboxes << "). " - << "You started with (" << ParallelDescriptor::NProcs() - << ") MPI ranks, so (" << ParallelDescriptor::NProcs() - total_nboxes + << "You started with (" << nprocs + << ") MPI ranks, so (" << nprocs - total_nboxes << ") rank(s) will have no work.\n" #ifdef AMREX_USE_GPU << " On GPUs, consider using 1-8 boxes per GPU that together fill " << "each GPU's memory sufficiently. 
If you do not rely on dynamic " << "load-balancing, then one large box per GPU is ideal.\n" #endif + << "Consider decreasing the amr.blocking_factor and" + << "amr.max_grid_size parameters and/or using less MPI ranks.\n" << " More information:\n" - << " https://warpx.readthedocs.io/en/latest/running_cpp/parallelization.html\n"; + << " https://warpx.readthedocs.io/en/latest/usage/workflows/parallelization.html\n"; ablastr::warn_manager::WMRecordWarning( "Performance", warnMsg.str(), ablastr::warn_manager::WarnPriority::high); } +#ifdef AMREX_USE_GPU + // Check: Are there more than 12 boxes per GPU? + if (total_nboxes > nprocs * 12) { + std::stringstream warnMsg; + warnMsg << "Too many boxes per GPU!\n" + << " It looks like you split your simulation domain " + << "in too many boxes (" << total_nboxes << "), which " + << "results in an average number of (" + << amrex::Long(total_nboxes/nprocs) << ") per GPU. " + << "This causes severe overhead in the communication of " + << "border/guard regions.\n" + << " On GPUs, consider using 1-8 boxes per GPU that together fill " + << "each GPU's memory sufficiently. If you do not rely on dynamic " + << "load-balancing, then one large box per GPU is ideal.\n" + << "Consider increasing the amr.blocking_factor and" + << "amr.max_grid_size parameters and/or using more MPI ranks.\n" + << " More information:\n" + << " https://warpx.readthedocs.io/en/latest/usage/workflows/parallelization.html\n"; + + ablastr::warn_manager::WMRecordWarning( + "Performance", warnMsg.str(), ablastr::warn_manager::WarnPriority::high); + } +#endif + // TODO: warn if some ranks have disproportionally more work than all others // tricky: it can be ok to assign "vacuum" boxes to some ranks w/o slowing down // all other ranks; we need to measure this with our load-balancing From a8f985ac5f43a89bfd359118e2996b3cb21f93cb Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 22 Aug 2022 16:30:44 -0700 Subject: [PATCH 0024/1346] MultiParticleContainer: Prevent Bugs in Some `setVal` Calls (#3266) - Call `setVal` with default arguments (number of components and ghost cells) - Affects only functions used with multi-J time stepping: - `MultiParticleContainer::DepositCurrent` - `MultiParticleContainer::DepositCharge` --- Source/Particles/MultiParticleContainer.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index a0b80877d77..c6cb9032e1d 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -511,9 +511,9 @@ MultiParticleContainer::DepositCurrent ( // Reset the J arrays for (int lev = 0; lev < J.size(); ++lev) { - J[lev][0]->setVal(0.0, J[lev][0]->nGrowVect()); - J[lev][1]->setVal(0.0, J[lev][1]->nGrowVect()); - J[lev][2]->setVal(0.0, J[lev][2]->nGrowVect()); + J[lev][0]->setVal(0.0_rt); + J[lev][1]->setVal(0.0_rt); + J[lev][2]->setVal(0.0_rt); } // Call the deposition kernel for each species @@ -538,7 +538,7 @@ MultiParticleContainer::DepositCharge ( // Reset the rho array for (int lev = 0; lev < rho.size(); ++lev) { - rho[lev]->setVal(0.0, 0, WarpX::ncomps, rho[lev]->nGrowVect()); + rho[lev]->setVal(0.0_rt); } // Push the particles in time, if needed From d595f80ee96aaf218b1be1d899510ef3f246181c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 22 Aug 2022 16:48:58 -0700 Subject: [PATCH 0025/1346] Spack: Fix Typo in curl (previous commit) --- 
Docs/source/install/dependencies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 95eb832fc43..910ed1d2b35 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -97,7 +97,7 @@ For most desktop development, pick the OpenMP environment for CPUs unless you ha .. code-block:: bash # download environment file - curl -sLo https://raw.githubusercontent.com/ECP-WarpX/WarpX/development/Tools/machines/desktop/spack-${system}-${compute}.sh + curl -sLO https://raw.githubusercontent.com/ECP-WarpX/WarpX/development/Tools/machines/desktop/spack-${system}-${compute}.yaml # create new development environment spack env create warpx-${compute}-dev spack-${system}-${compute}.yaml From a4c75b9bd391b611e7ffe6fd86c9d8bb336d2778 Mon Sep 17 00:00:00 2001 From: thierry <70574092+Thierry992@users.noreply.github.com> Date: Mon, 22 Aug 2022 19:52:25 -0700 Subject: [PATCH 0026/1346] Docs: Improve Nsight Systems Section (#3102) * avoid details bugs with nsight system analysis I added the header of the sbatch file for nsight system analysis, a little description of the useful output files for the analysis and a warning for the execution of the sbatch file. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update Docs/source/developers/profiling.rst Co-authored-by: Axel Huebl * Update Docs/source/developers/profiling.rst Co-authored-by: Axel Huebl * Update profiling.rst * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: remove ill-placed anchor * Fix ill-removed section * Fix Formatting * Update Wording Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl --- Docs/source/developers/profiling.rst | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/Docs/source/developers/profiling.rst b/Docs/source/developers/profiling.rst index c3b9852e2d1..cfc0d2ad4ea 100644 --- a/Docs/source/developers/profiling.rst +++ b/Docs/source/developers/profiling.rst @@ -116,7 +116,8 @@ Nvidia Nsight-Systems Perlmutter Example """""""""""""""""" -Example on how to create traces on a multi-GPU system that uses the Slurm scheduler (e.g., NERSC's Perlmutter system): +Example on how to create traces on a multi-GPU system that uses the Slurm scheduler (e.g., NERSC's Perlmutter system). +You can either run this on an interactive node or use the Slurm batch script header :ref:`documented here `. .. code-block:: bash @@ -151,6 +152,15 @@ Example on how to create traces on a multi-GPU system that uses the Slurm schedu This version does not record all trace information. You need to use the one directly shipped with the NVHPC base system, version 2021.4.1, located in ``/opt/nvidia/hpc_sdk/Linux_x86_64/21.11/compilers/bin/nsys``. +.. note:: + + If everything went well, you will obtain as many output files named ``profiling_.nsys-rep`` as active MPI ranks. + Each MPI rank's performance trace can be analyzed with the Nsight System graphical user interface (GUI). + In WarpX, every MPI rank is associated with one GPU, which each creates one trace file. + +.. warning:: + + The last line of the sbatch file has to match the data of your input files. 
Summit Example """""""""""""" From 4876446dff60411cbdd4892a044643f5fcc9dd00 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 23 Aug 2022 17:39:22 -0700 Subject: [PATCH 0027/1346] Use proper units for momenta, in BoundaryScrapingDiagnostic (#3240) * Use proper units for momenta, in BoundaryScrapingDiagnostic * Fix fetch of attribs in particlesConvertUnits Co-authored-by: Dave Grote --- .../Diagnostics/FlushFormats/FlushFormatPlotfile.cpp | 9 +++++---- Source/Diagnostics/ParticleIO.cpp | 12 ------------ Source/Diagnostics/WarpXOpenPMD.cpp | 10 +++++----- Source/Particles/ParticleIO.H | 8 ++++++-- Source/Particles/PhysicalParticleContainer.H | 8 -------- Source/Particles/WarpXParticleContainer.H | 2 -- Source/Particles/WarpXParticleContainer_fwd.H | 2 -- 7 files changed, 16 insertions(+), 35 deletions(-) diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 5efc6df8ec5..c85c8cab25d 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -1,5 +1,6 @@ #include "FlushFormatPlotfile.H" +#include "Particles/ParticleIO.H" #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "Particles/Filter/FilterFunctors.H" #include "Particles/WarpXParticleContainer.H" @@ -341,8 +342,7 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, // plot by default int_flags.resize(pc->NumIntComps(), 1); - pc->ConvertUnits(ConvertDirection::WarpX_to_SI); - + const auto mass = pc->AmIA() ? PhysConst::m_e : pc->getMass(); RandomFilter const random_filter(particle_diags[i].m_do_random_filter, particle_diags[i].m_random_fraction); UniformFilter const uniform_filter(particle_diags[i].m_do_uniform_filter, @@ -356,6 +356,7 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, particle_diags[i].m_diag_domain); if (!isBTD) { + particlesConvertUnits(ConvertDirection::WarpX_to_SI, pc, mass); using SrcData = WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; tmp.copyParticles(*pc, [=] AMREX_GPU_HOST_DEVICE (const SrcData& src, int ip, const amrex::RandomEngine& engine) @@ -364,9 +365,11 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, return random_filter(p, engine) * uniform_filter(p, engine) * parser_filter(p, engine) * geometry_filter(p, engine); }, true); + particlesConvertUnits(ConvertDirection::SI_to_WarpX, pc, mass); } else { PinnedMemoryParticleContainer* pinned_pc = particle_diags[i].getPinnedParticleContainer(); tmp.copyParticles(*pinned_pc, true); + particlesConvertUnits(ConvertDirection::WarpX_to_SI, &tmp, mass); } // real_names contains a list of all particle attributes. // real_flags & int_flags are 1 or 0, whether quantity is dumped or not. 
@@ -374,8 +377,6 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, dir, particle_diags[i].getSpeciesName(), real_flags, int_flags, real_names, int_names); - - pc->ConvertUnits(ConvertDirection::SI_to_WarpX); } } diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index b29a951c7df..13a406acd2f 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -234,15 +234,3 @@ MultiParticleContainer::WriteHeader (std::ostream& os) const allcontainers.at(i)->WriteHeader(os); } } - -void -PhysicalParticleContainer::ConvertUnits (ConvertDirection convert_direction) -{ - WARPX_PROFILE("PhysicalParticleContainer::ConvertUnits()"); - - // Account for the special case of photons - const auto t_mass = - this->AmIA() ? PhysConst::m_e : this->getMass(); - - particlesConvertUnits(convert_direction, this, t_mass); -} diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 318dcf5c78d..1432a982011 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -6,6 +6,7 @@ */ #include "WarpXOpenPMD.H" +#include "Particles/ParticleIO.H" #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "FieldIO.H" #include "Particles/Filter/FilterFunctors.H" @@ -604,8 +605,7 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part // plot by default int_flags.resize(tmp.NumIntComps(), 1); - - pc->ConvertUnits(ConvertDirection::WarpX_to_SI); + const auto mass = pc->AmIA() ? PhysConst::m_e : pc->getMass(); RandomFilter const random_filter(particle_diags[i].m_do_random_filter, particle_diags[i].m_random_fraction); UniformFilter const uniform_filter(particle_diags[i].m_do_uniform_filter, @@ -620,7 +620,9 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part if (isBTD || use_pinned_pc) { tmp.copyParticles(*pinned_pc, true); + particlesConvertUnits(ConvertDirection::WarpX_to_SI, &tmp, mass); } else { + particlesConvertUnits(ConvertDirection::WarpX_to_SI, pc, mass); using SrcData = WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType; tmp.copyParticles(*pc, [=] AMREX_GPU_HOST_DEVICE (const SrcData& src, int ip, const amrex::RandomEngine& engine) @@ -629,6 +631,7 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part return random_filter(p, engine) * uniform_filter(p, engine) * parser_filter(p, engine) * geometry_filter(p, engine); }, true); + particlesConvertUnits(ConvertDirection::SI_to_WarpX, pc, mass); } // real_names contains a list of all real particle attributes. 
@@ -659,9 +662,6 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part ); } } - - // Convert momentum back to WarpX units - pc->ConvertUnits(ConvertDirection::SI_to_WarpX); } } diff --git a/Source/Particles/ParticleIO.H b/Source/Particles/ParticleIO.H index ee8070d9105..50ea72d3f07 100644 --- a/Source/Particles/ParticleIO.H +++ b/Source/Particles/ParticleIO.H @@ -13,6 +13,7 @@ #include #include +enum struct ConvertDirection{WarpX_to_SI, SI_to_WarpX}; /** Convert particle momentum to/from SI * @@ -49,10 +50,13 @@ particlesConvertUnits (ConvertDirection convert_direction, T_ParticleContainer* #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif - for (WarpXParIter pti(*pc, lev); pti.isValid(); ++pti) + for (ParIter pti(*pc, lev); pti.isValid(); ++pti) { // - momenta are stored as a struct of array, in `attribs` - auto& attribs = pti.GetAttribs(); + // The GetStructOfArrays is called directly since the convenience routine GetAttribs + // is only available in WarpXParIter. ParIter is used here since the pc passed in + // will sometimes be a PinnedMemoryParticleContainer (not derived from a WarpXParticleContainer). + auto& attribs = pti.GetStructOfArrays().GetRealData(); ParticleReal* AMREX_RESTRICT ux = attribs[PIdx::ux].dataPtr(); ParticleReal* AMREX_RESTRICT uy = attribs[PIdx::uy].dataPtr(); ParticleReal* AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr(); diff --git a/Source/Particles/PhysicalParticleContainer.H b/Source/Particles/PhysicalParticleContainer.H index 264c7d9325a..16365810274 100644 --- a/Source/Particles/PhysicalParticleContainer.H +++ b/Source/Particles/PhysicalParticleContainer.H @@ -246,14 +246,6 @@ public: const amrex::Real t_lab, const amrex::Real dt, DiagnosticParticles& diagnostic_particles) final; - /** Convert particle momentum to/from SI - * - * @see particlesConvertUnits - * - * @param convert_dir convert to or from SI - */ - virtual void ConvertUnits (ConvertDirection convert_dir) override; - /** * \brief Apply NCI Godfrey filter to all components of E and B before gather * \param lev MR level diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index 7f9e4e866ec..6f0aaf8e827 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -293,8 +293,6 @@ public: virtual void WriteHeader (std::ostream& os) const = 0; - virtual void ConvertUnits (ConvertDirection /*convert_dir*/){} - static void ReadParameters (); static void BackwardCompatibility (); diff --git a/Source/Particles/WarpXParticleContainer_fwd.H b/Source/Particles/WarpXParticleContainer_fwd.H index e4b47387c28..91957015faa 100644 --- a/Source/Particles/WarpXParticleContainer_fwd.H +++ b/Source/Particles/WarpXParticleContainer_fwd.H @@ -19,8 +19,6 @@ class WarpXParIter; class WarpXParticleContainer; -enum struct ConvertDirection{WarpX_to_SI, SI_to_WarpX}; - struct DiagIdx { enum { From c91c1b2f747f44fd7be6c7ffc8c648851422e440 Mon Sep 17 00:00:00 2001 From: Ryan Sandberg Date: Thu, 25 Aug 2022 15:27:27 -0700 Subject: [PATCH 0028/1346] Order-independent `rho_` with RZ and openPMD (#3338) * order-independent rho_ * add test to check rho_ is order-agnostic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make analysis naming more consistent Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../Tests/openpmd_rz/analysis_openpmd_rz.py | 25 ++++++++++++++++++- Regression/WarpX-tests.ini 
| 2 +- Source/Diagnostics/FullDiagnostics.cpp | 2 +- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py b/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py index 4cd48a2c668..247c4ac61a0 100755 --- a/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py +++ b/Examples/Tests/openpmd_rz/analysis_openpmd_rz.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import numpy as np import openpmd_api as io series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5", io.Access.read_only) @@ -8,7 +9,7 @@ ii = series.iterations[20] -assert len(ii.meshes) == 7, 'improper number of meshes' +assert len(ii.meshes) == 8, 'improper number of meshes' # select j_t jt = ii.meshes['j']['t'] @@ -22,3 +23,25 @@ assert ii.meshes['part_per_grid'][io.Mesh_Record_Component.SCALAR].shape == [512,64], 'problem with part_per_grid' assert ii.meshes['rho_electrons'][io.Mesh_Record_Component.SCALAR].shape == [3, 512, 64], 'problem with rho_electrons' + + +### test that openpmd+RZ +### 1. creates rho per species correctly +### 2. orders these appropriately +rhoe_mesh = ii.meshes['rho_electrons'] +rhob_mesh = ii.meshes['rho_beam'] +dz, dr = rhoe_mesh.grid_spacing +zmin, rmin = rhoe_mesh.grid_global_offset + +rhoe = rhoe_mesh[io.Mesh_Record_Component.SCALAR][:] +rhob = rhob_mesh[io.Mesh_Record_Component.SCALAR][:] +series.flush() +nm, nz, nr = rhoe.shape +zlist = zmin + dz * np.arange(nz) +rhoe0 = rhoe[0] # 0 mode +rhob0 = rhob[0] # 0 mode + +electron_meanz = np.sum(np.dot(zlist, rhoe0))/ np.sum(rhoe0) +beam_meanz = np.sum(np.dot(zlist, rhob0))/ np.sum(rhob0) + +assert ((electron_meanz > 0) and (beam_meanz < 0)), 'problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?' diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index d2340a01a26..047d5082446 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -1938,7 +1938,7 @@ analysisRoutine = Examples/analysis_default_regression.py [LaserAccelerationRZ_opmd] buildDir = . 
inputFile = Examples/Physics_applications/laser_acceleration/inputs_rz -runtime_params = diag1.format=openpmd diag1.openpmd_backend=h5 max_step=20 diag1.fields_to_plot=Er Bt Bz jr jt jz rho part_per_cell part_per_grid rho_electrons +runtime_params = diag1.format=openpmd diag1.openpmd_backend=h5 max_step=20 diag1.fields_to_plot=Er Bt Bz jr jt jz rho part_per_cell part_per_grid rho_beam rho_electrons dim = 2 addToCompileString = USE_RZ=TRUE USE_OPENPMD=TRUE cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_OPENPMD=ON diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index b9c8269edd6..7bdc2f171c4 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -262,7 +262,7 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, m_rho_per_species_index[i], false, ncomp); if (update_varnames) { - AddRZModesToOutputNames(std::string("rho_") + m_all_species_names[i], ncomp); + AddRZModesToOutputNames(std::string("rho_") + m_all_species_names[m_rho_per_species_index[i]], ncomp); } i++; } else if ( m_varnames_fields[comp] == "F" ){ From 9c78dfee26130045581e8ab8d5f0daa2a9c106d6 Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 26 Aug 2022 07:51:26 -0700 Subject: [PATCH 0029/1346] Fixed plasma lens residence correction to allow a short lens (#3318) * Fixed plasma lens residence correction to allow a short lens * Added CI test with a short plasma lens --- Examples/Tests/plasma_lens/analysis.py | 17 ++++-- Examples/Tests/plasma_lens/inputs_short_3d | 55 +++++++++++++++++++ .../benchmarks_json/Plasma_lens_short.json | 21 +++++++ Regression/WarpX-tests.ini | 18 ++++++ Source/Particles/Gather/GetExternalFields.H | 11 ++-- 5 files changed, 111 insertions(+), 11 deletions(-) create mode 100644 Examples/Tests/plasma_lens/inputs_short_3d create mode 100644 Regression/Checksum/benchmarks_json/Plasma_lens_short.json diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 6a0af60ad2d..1d2313ba140 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -120,10 +120,19 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): print(f'Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002') print(f'Error in y velocity is {abs(np.abs((uy - uy_sim)/uy))}, which should be < 0.002') -assert abs(np.abs((xx - xx_sim)/xx)) < 0.02, Exception('error in x particle position') -assert abs(np.abs((yy - yy_sim)/yy)) < 0.02, Exception('error in y particle position') -assert abs(np.abs((ux - ux_sim)/ux)) < 0.002, Exception('error in x particle velocity') -assert abs(np.abs((uy - uy_sim)/uy)) < 0.002, Exception('error in y particle velocity') +if plasma_lens_lengths[0] < 0.01: + # The shorter lens requires a larger tolerance since + # the calculation becomes less accurate + position_tolerance = 0.023 + velocity_tolerance = 0.003 +else: + position_tolerance = 0.02 + velocity_tolerance = 0.002 + +assert abs(np.abs((xx - xx_sim)/xx)) < position_tolerance, Exception('error in x particle position') +assert abs(np.abs((yy - yy_sim)/yy)) < position_tolerance, Exception('error in y particle position') +assert abs(np.abs((ux - ux_sim)/ux)) < velocity_tolerance, Exception('error in x particle velocity') +assert abs(np.abs((uy - uy_sim)/uy)) < velocity_tolerance, Exception('error in y particle velocity') test_name = os.path.split(os.getcwd())[1] 
checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/plasma_lens/inputs_short_3d b/Examples/Tests/plasma_lens/inputs_short_3d new file mode 100644 index 00000000000..4beb65ef9b0 --- /dev/null +++ b/Examples/Tests/plasma_lens/inputs_short_3d @@ -0,0 +1,55 @@ +# This case has very short plasma lenses to test the residence +# correction when the particles step over the lenses without +# landing in them. + +# Maximum number of time steps +max_step = 84 + +# number of grid points +amr.n_cell = 16 16 16 + +amr.max_level = 0 + +# Geometry +geometry.dims = 3 +geometry.prob_lo = -1.0 -1.0 0.0 # physical domain +geometry.prob_hi = 1.0 1.0 2.0 + +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec +boundary.particle_lo = absorbing absorbing absorbing +boundary.particle_hi = absorbing absorbing absorbing + +# Algorithms +algo.particle_shape = 1 +warpx.cfl = 0.7 + +my_constants.vel_z = 0.5*clight + +# particles +particles.species_names = electrons + +electrons.charge = -q_e +electrons.mass = m_e +electrons.injection_style = "MultipleParticles" +electrons.multiple_particles_pos_x = 0.05 0. +electrons.multiple_particles_pos_y = 0. 0.04 +electrons.multiple_particles_pos_z = 0.05 0.05 +electrons.multiple_particles_vel_x = 0. 0. +electrons.multiple_particles_vel_y = 0. 0. +electrons.multiple_particles_vel_z = vel_z/clight vel_z/clight +electrons.multiple_particles_weight = 1. 1. + +particles.E_ext_particle_init_style = repeated_plasma_lens +particles.B_ext_particle_init_style = repeated_plasma_lens +particles.repeated_plasma_lens_period = 0.5 +particles.repeated_plasma_lens_starts = 0.1 0.11 0.12 0.13 +particles.repeated_plasma_lens_lengths = 0.001 0.0011 0.0012 0.0013 +particles.repeated_plasma_lens_strengths_E = 60000000. 80000000. 60000000. 20000000. +particles.repeated_plasma_lens_strengths_B = 0.0 0.0 0.0 0.0 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 1 #84 +diag1.diag_type = Full +diag1.electrons.variables = ux uy uz diff --git a/Regression/Checksum/benchmarks_json/Plasma_lens_short.json b/Regression/Checksum/benchmarks_json/Plasma_lens_short.json new file mode 100644 index 00000000000..8ee5180baa9 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Plasma_lens_short.json @@ -0,0 +1,21 @@ +{ + "electrons": { + "particle_momentum_x": 7.536513520138313e-24, + "particle_momentum_y": 6.031954713799037e-24, + "particle_momentum_z": 2.7309245307375703e-22, + "particle_position_x": 0.04004540036766861, + "particle_position_y": 0.0320391685094304, + "particle_position_z": 3.89474872968395 + }, + "lev=0": { + "Bx": 3.448521518552731e-14, + "By": 3.441725193808431e-14, + "Bz": 3.3138806900734516e-16, + "Ex": 4.460232378185175e-06, + "Ey": 4.4889256557770925e-06, + "Ez": 9.270629731132272e-06, + "jx": 4.1993991820092705e-10, + "jy": 3.3589553305457244e-10, + "jz": 1.6492968045128927e-08 + } +} \ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 047d5082446..6263db15217 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -3078,6 +3078,24 @@ compareParticles = 1 particleTypes = electrons analysisRoutine = Examples/Tests/plasma_lens/analysis.py +[Plasma_lens_short] +buildDir = . 
+inputFile = Examples/Tests/plasma_lens/inputs_short_3d +runtime_params = +dim = 3 +addToCompileString = +cmakeSetupOpts = -DWarpX_DIMS=3 +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons +analysisRoutine = Examples/Tests/plasma_lens/analysis.py + [Python_plasma_lens] buildDir = . inputFile = Examples/Tests/plasma_lens/PICMI_inputs_3d.py diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index c7123c9f5c2..7f39cc36c2a 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -147,14 +147,11 @@ struct GetExternalEBField // Calculate the residence correction // frac will be 1 if the step is completely inside the lens, between 0 and 1 // when entering or leaving the lens, and otherwise 0. + // This accounts for the case when particles step over the element without landing in it. // This assumes that vzp > 0. - amrex::ParticleReal fl = 0.; - if (zl >= lens_start && zl < lens_end) fl = 1.; - amrex::ParticleReal fr = 0.; - if (zr >= lens_start && zr < lens_end) fr = 1.; - amrex::ParticleReal frac = fl; - if (fl > fr) frac = (lens_end - zl)/(zr - zl); - if (fr > fl) frac = (zr - lens_start)/(zr - zl); + amrex::ParticleReal const zl_bounded = std::min(std::max(zl, lens_start), lens_end); + amrex::ParticleReal const zr_bounded = std::min(std::max(zr, lens_start), lens_end); + amrex::ParticleReal const frac = (zr_bounded - zl_bounded)/(zr - zl); // Note that "+=" is used since the fields may have been set above // if a different E or Btype was specified. From 48c1a86047fb06b957474c5a92d15f104c77b039 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 26 Aug 2022 08:31:09 -0700 Subject: [PATCH 0030/1346] Fix Bugs w/ Current Correction and Vay Deposition (#3290) * Fix Bugs w/ Current Correction and Vay Deposition * Vay Deposition and Current Correction Cannot be Combined Together * Add Comment for Future Implementation of Vay Deposition w/ MR * Add Comment for Future Implementation of Vay Deposition w/ MR * Define SyncCurrentAndRho, Clean Up * Vay Deposition: Remove Extra FFT of Rho * Fix Bug in RZ Geometry (Double Filtering) * Add 2D Galilean Test w/o Periodic Single Box * Add RZ Galilean Test w/o Periodic Single Box * Add 3D Galilean Test w/o Periodic Single Box --- Examples/Tests/galilean/analysis.py | 19 ++- .../galilean_2d_psatd_current_correction.json | 30 ++-- ...ilean_2d_psatd_current_correction_psb.json | 31 ++++ .../galilean_3d_psatd_current_correction.json | 46 +++--- ...ilean_3d_psatd_current_correction_psb.json | 33 +++++ .../galilean_rz_psatd_current_correction.json | 42 +++--- ...ilean_rz_psatd_current_correction_psb.json | 30 ++++ Regression/WarpX-tests.ini | 60 +++++++- Source/Evolve/WarpXEvolve.cpp | 56 ++++++-- Source/FieldSolver/WarpXPushFieldsEM.cpp | 133 ++++++++++++++---- Source/WarpX.H | 16 ++- Source/WarpX.cpp | 18 ++- 12 files changed, 402 insertions(+), 112 deletions(-) create mode 100644 Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction_psb.json create mode 100644 Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction_psb.json create mode 100644 Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction_psb.json diff --git a/Examples/Tests/galilean/analysis.py b/Examples/Tests/galilean/analysis.py index 898ac1435d7..9fe6bab729d 100755 --- a/Examples/Tests/galilean/analysis.py 
+++ b/Examples/Tests/galilean/analysis.py @@ -28,6 +28,7 @@ # Parse some input arguments from output file 'warpx_used_inputs' current_correction = False time_averaging = False +periodic_single_box = False warpx_used_inputs = open('./warpx_used_inputs', 'r').read() if re.search('geometry.dims\s*=\s*2', warpx_used_inputs): dims = '2D' @@ -39,6 +40,8 @@ current_correction = True if re.search('psatd.do_time_averaging\s*=\s*1', warpx_used_inputs): time_averaging = True +if re.search('psatd.periodic_single_box_fft\s*=\s*1', warpx_used_inputs): + periodic_single_box = True ds = yt.load(filename) @@ -58,21 +61,31 @@ if dims == '2D': if not current_correction: energy_ref = 35657.41657683263 - if current_correction: + if current_correction and periodic_single_box: energy_ref = 35024.0275199999 + if current_correction and not periodic_single_box: + energy_ref = 35675.25563324745 + tol_energy = 2e-8 + tol_charge = 2e-4 if time_averaging: energy_ref = 26208.04843478073 tol_energy = 1e-6 elif dims == 'RZ': if not current_correction: energy_ref = 191002.6526271543 - if current_correction: + if current_correction and periodic_single_box: energy_ref = 472779.70801323955 + if current_correction and not periodic_single_box: + energy_ref = 511671.4108624746 + tol_charge = 2e-4 elif dims == '3D': if not current_correction: energy_ref = 661285.098907683 - if current_correction: + if current_correction and periodic_single_box: energy_ref = 856783.3007547935 + if current_correction and not periodic_single_box: + energy_ref = 875307.5138913819 + tol_charge = 1e-2 if time_averaging: energy_ref = 14.564631643496 tol_energy = 1e-4 diff --git a/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction.json index ce7e37dd1d4..8d43e0b512a 100644 --- a/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction.json +++ b/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction.json @@ -1,31 +1,31 @@ { "electrons": { - "particle_momentum_x": 1.5446768224262931e-21, + "particle_momentum_x": 1.5536145943675821e-21, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.7807675479103523e-16, - "particle_position_x": 1267465.6959779875, - "particle_position_y": 15724303.998550693, + "particle_momentum_z": 1.7807674983977113e-16, + "particle_position_x": 1267465.7449785147, + "particle_position_y": 15724304.003312223, "particle_weight": 1.6888332018290936e+18 }, "ions": { - "particle_momentum_x": 2.609389259207186e-18, + "particle_momentum_x": 2.6369229634007033e-18, "particle_momentum_y": 0.0, - "particle_momentum_z": 3.269760999127174e-13, - "particle_position_x": 1267465.6476913819, - "particle_position_y": 15724303.99882355, + "particle_momentum_z": 3.2697610879105043e-13, + "particle_position_x": 1267465.8546859026, + "particle_position_y": 15724304.003084136, "particle_weight": 1.6888332018290936e+18 }, "lev=0": { "Bx": 0.0, - "By": 0.002955847949540036, + "By": 0.0030245915907163154, "Bz": 0.0, - "Ex": 893534.7419589404, + "Ex": 914579.058236283, "Ey": 0.0, - "Ez": 65657.88089087335, - "divE": 1689738.9457807848, - "jx": 219.89616041000212, + "Ez": 69297.0731582175, + "divE": 1695716.5213981706, + "jx": 211.85815442883973, "jy": 0.0, - "jz": 4459.160944658457, - "rho": 1.4961265980553603e-05 + "jz": 4474.2194399497175, + "rho": 1.5014190692786408e-05 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction_psb.json 
b/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction_psb.json new file mode 100644 index 00000000000..ce7e37dd1d4 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/galilean_2d_psatd_current_correction_psb.json @@ -0,0 +1,31 @@ +{ + "electrons": { + "particle_momentum_x": 1.5446768224262931e-21, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.7807675479103523e-16, + "particle_position_x": 1267465.6959779875, + "particle_position_y": 15724303.998550693, + "particle_weight": 1.6888332018290936e+18 + }, + "ions": { + "particle_momentum_x": 2.609389259207186e-18, + "particle_momentum_y": 0.0, + "particle_momentum_z": 3.269760999127174e-13, + "particle_position_x": 1267465.6476913819, + "particle_position_y": 15724303.99882355, + "particle_weight": 1.6888332018290936e+18 + }, + "lev=0": { + "Bx": 0.0, + "By": 0.002955847949540036, + "Bz": 0.0, + "Ex": 893534.7419589404, + "Ey": 0.0, + "Ez": 65657.88089087335, + "divE": 1689738.9457807848, + "jx": 219.89616041000212, + "jy": 0.0, + "jz": 4459.160944658457, + "rho": 1.4961265980553603e-05 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction.json index 47f4e9c2f23..16814f47b07 100644 --- a/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction.json +++ b/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction.json @@ -1,33 +1,33 @@ { "electrons": { - "particle_momentum_x": 7.768538631989314e-22, - "particle_momentum_y": 7.844994340141792e-22, - "particle_momentum_z": 8.903837510927089e-17, - "particle_position_x": 158433.32170594894, - "particle_position_y": 158432.8515695265, - "particle_position_z": 5891662.9548929185, + "particle_momentum_x": 7.799828234013091e-22, + "particle_momentum_y": 7.853620523635122e-22, + "particle_momentum_z": 8.903838009194013e-17, + "particle_position_x": 158433.52653925028, + "particle_position_y": 158432.74997779692, + "particle_position_z": 5891662.962223673, "particle_weight": 2.041377132710917e+18 }, "ions": { - "particle_momentum_x": 1.3137653484757431e-18, - "particle_momentum_y": 1.3110225003256574e-18, - "particle_momentum_z": 1.6348803844352492e-13, - "particle_position_x": 158433.312978349, - "particle_position_y": 158432.84896819026, - "particle_position_z": 5891662.955099393, + "particle_momentum_x": 1.3150842145882957e-18, + "particle_momentum_y": 1.3043330137743231e-18, + "particle_momentum_z": 1.634880568866417e-13, + "particle_position_x": 158433.58054870443, + "particle_position_y": 158432.80156002127, + "particle_position_z": 5891662.961766562, "particle_weight": 2.041377132710917e+18 }, "lev=0": { - "Bx": 0.006449275564525033, - "By": 0.0064783778061885235, - "Bz": 0.0006158841538190647, - "Ex": 1950801.24799352, - "Ey": 1945623.9590479229, - "Ez": 150385.8857169562, - "divE": 6191274.556649665, - "jx": 505.8903778073368, - "jy": 510.11178001328403, - "jz": 16346.473505698516, - "rho": 5.481870772518483e-05 + "Bx": 0.0068073634628550315, + "By": 0.006697549542915319, + "Bz": 0.0005743262065396818, + "Ex": 2018478.8211576312, + "Ey": 2052642.246798101, + "Ez": 149912.8262920749, + "divE": 6615408.753505244, + "jx": 490.81504811208214, + "jy": 497.1861446484984, + "jz": 17469.798807376228, + "rho": 5.862037378889882e-05 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction_psb.json 
b/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction_psb.json new file mode 100644 index 00000000000..47f4e9c2f23 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/galilean_3d_psatd_current_correction_psb.json @@ -0,0 +1,33 @@ +{ + "electrons": { + "particle_momentum_x": 7.768538631989314e-22, + "particle_momentum_y": 7.844994340141792e-22, + "particle_momentum_z": 8.903837510927089e-17, + "particle_position_x": 158433.32170594894, + "particle_position_y": 158432.8515695265, + "particle_position_z": 5891662.9548929185, + "particle_weight": 2.041377132710917e+18 + }, + "ions": { + "particle_momentum_x": 1.3137653484757431e-18, + "particle_momentum_y": 1.3110225003256574e-18, + "particle_momentum_z": 1.6348803844352492e-13, + "particle_position_x": 158433.312978349, + "particle_position_y": 158432.84896819026, + "particle_position_z": 5891662.955099393, + "particle_weight": 2.041377132710917e+18 + }, + "lev=0": { + "Bx": 0.006449275564525033, + "By": 0.0064783778061885235, + "Bz": 0.0006158841538190647, + "Ex": 1950801.24799352, + "Ey": 1945623.9590479229, + "Ez": 150385.8857169562, + "divE": 6191274.556649665, + "jx": 505.8903778073368, + "jy": 510.11178001328403, + "jz": 16346.473505698516, + "rho": 5.481870772518483e-05 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction.json b/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction.json index 73cd096357e..3dd8810faa9 100644 --- a/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction.json +++ b/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction.json @@ -1,30 +1,30 @@ { "electrons": { - "particle_momentum_x": 7.005566900701696e-22, - "particle_momentum_y": 2.741432867448382e-22, - "particle_momentum_z": 8.903838637330695e-17, - "particle_position_x": 633733.8772916717, - "particle_position_y": 7862151.998406768, - "particle_theta": 51362.087114626614, + "particle_momentum_x": 7.040321706649852e-22, + "particle_momentum_y": 2.6350019627856963e-22, + "particle_momentum_z": 8.903841205118235e-17, + "particle_position_x": 633733.2382222114, + "particle_position_y": 7862152.022952429, + "particle_theta": 51150.20809917081, "particle_weight": 1.0261080645329302e+20 }, "ions": { - "particle_momentum_x": 1.3125873166811053e-18, - "particle_momentum_y": 2.7371989851752584e-22, - "particle_momentum_z": 1.6348804524259806e-13, - "particle_position_x": 633733.7424153683, - "particle_position_y": 7862151.997142274, - "particle_theta": 51470.93289837983, + "particle_momentum_x": 1.309068869366174e-18, + "particle_momentum_y": 2.6235118222356472e-22, + "particle_momentum_z": 1.6348804184310653e-13, + "particle_position_x": 633733.1816905431, + "particle_position_y": 7862151.995500945, + "particle_theta": 51448.117157711284, "particle_weight": 1.0261080645329302e+20 }, "lev=0": { - "By": 0.0017275299193154866, - "Ex": 520734.8193541992, - "Ey": 130008.65587120061, - "Ez": 36540.80980315481, - "divE": 1228895.3465882093, - "jx": 103.9586429924576, - "jz": 3245.258754763504, - "rho": 1.088087020096811e-05 + "By": 0.0017182252289440264, + "Ex": 518347.23478549306, + "Ey": 137106.07262027927, + "Ez": 35918.446653926556, + "divE": 1227626.4690642208, + "jx": 159.1245447865701, + "jz": 49252.95943703725, + "rho": 1.0869618748586227e-05 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction_psb.json 
b/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction_psb.json new file mode 100644 index 00000000000..73cd096357e --- /dev/null +++ b/Regression/Checksum/benchmarks_json/galilean_rz_psatd_current_correction_psb.json @@ -0,0 +1,30 @@ +{ + "electrons": { + "particle_momentum_x": 7.005566900701696e-22, + "particle_momentum_y": 2.741432867448382e-22, + "particle_momentum_z": 8.903838637330695e-17, + "particle_position_x": 633733.8772916717, + "particle_position_y": 7862151.998406768, + "particle_theta": 51362.087114626614, + "particle_weight": 1.0261080645329302e+20 + }, + "ions": { + "particle_momentum_x": 1.3125873166811053e-18, + "particle_momentum_y": 2.7371989851752584e-22, + "particle_momentum_z": 1.6348804524259806e-13, + "particle_position_x": 633733.7424153683, + "particle_position_y": 7862151.997142274, + "particle_theta": 51470.93289837983, + "particle_weight": 1.0261080645329302e+20 + }, + "lev=0": { + "By": 0.0017275299193154866, + "Ex": 520734.8193541992, + "Ey": 130008.65587120061, + "Ez": 36540.80980315481, + "divE": 1228895.3465882093, + "jx": 103.9586429924576, + "jz": 3245.258754763504, + "rho": 1.088087020096811e-05 + } +} diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 6263db15217..8d611da8003 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2372,7 +2372,7 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py -[galilean_2d_psatd_current_correction] +[galilean_2d_psatd_current_correction_psb] buildDir = . inputFile = Examples/Tests/galilean/inputs_2d runtime_params = psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE @@ -2390,6 +2390,24 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py +[galilean_2d_psatd_current_correction] +buildDir = . +inputFile = Examples/Tests/galilean/inputs_2d +runtime_params = psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE amr.max_grid_size=64 amr.blocking_factor=64 +dim = 2 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = Examples/Tests/galilean/analysis.py + [galilean_2d_psatd_hybrid] buildDir = . inputFile = Examples/Tests/galilean/inputs_2d_hybrid @@ -2444,7 +2462,7 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py -[galilean_rz_psatd_current_correction] +[galilean_rz_psatd_current_correction_psb] buildDir = . inputFile = Examples/Tests/galilean/inputs_rz runtime_params = psatd.periodic_single_box_fft=1 psatd.current_correction=1 electrons.random_theta=0 ions.random_theta=0 @@ -2462,6 +2480,24 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py +[galilean_rz_psatd_current_correction] +buildDir = . 
+inputFile = Examples/Tests/galilean/inputs_rz +runtime_params = psatd.periodic_single_box_fft=0 psatd.current_correction=1 electrons.random_theta=0 ions.random_theta=0 amr.max_grid_size=32 amr.blocking_factor=32 +dim = 2 +addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack +cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = Examples/Tests/galilean/analysis.py + [galilean_3d_psatd] buildDir = . inputFile = Examples/Tests/galilean/inputs_3d @@ -2480,7 +2516,7 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py -[galilean_3d_psatd_current_correction] +[galilean_3d_psatd_current_correction_psb] buildDir = . inputFile = Examples/Tests/galilean/inputs_3d runtime_params = warpx.numprocs=1 1 1 psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE @@ -2498,6 +2534,24 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py +[galilean_3d_psatd_current_correction] +buildDir = . +inputFile = Examples/Tests/galilean/inputs_3d +runtime_params = warpx.numprocs=1 1 2 psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE +dim = 3 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = Examples/Tests/galilean/analysis.py + [averaged_galilean_2d_psatd] buildDir = . inputFile = Examples/Tests/averaged_galilean/inputs_avg_2d diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index b7a8c64f11c..cc71e78d5c3 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -398,19 +398,9 @@ WarpX::OneStep_nosub (Real cur_time) ExecutePythonCallback("afterdeposition"); - // Synchronize J and rho: filter, exchange boundary, interpolate across levels. - // With Vay current deposition, the current deposited at this point is not yet - // the actual current J. This is computed later in WarpX::PushPSATD, by calling - // WarpX::PSATDVayDeposition. The function SyncCurrent is called after that, - // instead of here, so that we synchronize the correct current. - // With current centering, the nodal current is deposited in 'current_fp_nodal': - // SyncCurrent stores the result of its centering into 'current_fp' and then - // performs both filtering, if used, and exchange of guard cells. 
- if (WarpX::current_deposition_algo != CurrentDepositionAlgo::Vay) - { - SyncCurrent(current_fp, current_cp); - } - SyncRho(); + // Synchronize J and rho: + // filter (if used), exchange guard cells, interpolate across MR levels + SyncCurrentAndRho(); // At this point, J is up-to-date inside the domain, and E and B are // up-to-date including enough guard cells for first step of the field @@ -495,6 +485,46 @@ WarpX::OneStep_nosub (Real cur_time) ExecutePythonCallback("afterEsolve"); } +void WarpX::SyncCurrentAndRho () +{ + if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) + { + if (fft_periodic_single_box) + { + // With periodic single box, synchronize J and rho here, + // even with current correction or Vay deposition + if (current_deposition_algo == CurrentDepositionAlgo::Vay) + { + // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR + SyncCurrent(current_fp_vay, current_cp); + SyncRho(); + } + else + { + SyncCurrent(current_fp, current_cp); + SyncRho(); + } + } + else // no periodic single box + { + // Without periodic single box, synchronize J and rho here, + // except with current correction or Vay deposition: + // in these cases, synchronize later (in WarpX::PushPSATD) + if (current_correction == false && + current_deposition_algo != CurrentDepositionAlgo::Vay) + { + SyncCurrent(current_fp, current_cp); + SyncRho(); + } + } + } + else // FDTD + { + SyncCurrent(current_fp, current_cp); + SyncRho(); + } +} + void WarpX::OneStep_multiJ (const amrex::Real cur_time) { diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 6d604bc1564..7499ee140a2 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -270,7 +270,8 @@ WarpX::PSATDBackwardTransformG () void WarpX::PSATDForwardTransformJ ( const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp) + const amrex::Vector,3>>& J_cp, + const bool apply_kspace_filter) { SpectralFieldIndex Idx; int idx_jx, idx_jy, idx_jz; @@ -299,7 +300,7 @@ void WarpX::PSATDForwardTransformJ ( #ifdef WARPX_DIM_RZ // Apply filter in k space if needed - if (WarpX::use_kspace_filter) + if (use_kspace_filter && apply_kspace_filter) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -311,6 +312,8 @@ void WarpX::PSATDForwardTransformJ ( } } } +#else + amrex::ignore_unused(apply_kspace_filter); #endif } @@ -349,8 +352,10 @@ void WarpX::PSATDBackwardTransformJ ( void WarpX::PSATDForwardTransformRho ( const amrex::Vector>& charge_fp, const amrex::Vector>& charge_cp, - const int icomp, const int dcomp) + const int icomp, const int dcomp, const bool apply_kspace_filter) { + if (charge_fp[0] == nullptr) return; + const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; // Select index in k space @@ -368,7 +373,7 @@ void WarpX::PSATDForwardTransformRho ( #ifdef WARPX_DIM_RZ // Apply filter in k space if needed - if (WarpX::use_kspace_filter) + if (use_kspace_filter && apply_kspace_filter) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -380,6 +385,8 @@ void WarpX::PSATDForwardTransformRho ( } } } +#else + amrex::ignore_unused(apply_kspace_filter); #endif } @@ -635,46 +642,114 @@ WarpX::PushPSATD () "PushFieldsEM: PSATD solver selected but not built")); #else - PSATDForwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); - - amrex::Vector,3>>& J_fp = - (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) ? 
current_fp_vay : current_fp; + if (fft_periodic_single_box) + { + if (current_correction) + { + // FFT of J and rho + PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformRho(rho_fp, rho_cp, 0, 0); // rho old + PSATDForwardTransformRho(rho_fp, rho_cp, 1, 1); // rho new - PSATDForwardTransformJ(J_fp, current_cp); + // Correct J in k-space + PSATDCurrentCorrection(); - // Do rho FFTs only if needed - if (WarpX::update_with_rho || WarpX::current_correction || WarpX::do_dive_cleaning) - { - PSATDForwardTransformRho(rho_fp, rho_cp, 0,0); // rho old - PSATDForwardTransformRho(rho_fp, rho_cp, 1,1); // rho new + // Inverse FFT of J + PSATDBackwardTransformJ(current_fp, current_cp); + } + else if (current_deposition_algo == CurrentDepositionAlgo::Vay) + { + // FFT of D and rho (if used) + // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR + PSATDForwardTransformJ(current_fp_vay, current_cp); + PSATDForwardTransformRho(rho_fp, rho_cp, 0, 0); // rho old + PSATDForwardTransformRho(rho_fp, rho_cp, 1, 1); // rho new + + // Compute J from D in k-space + PSATDVayDeposition(); + + // Inverse FFT of J, subtract cumulative sums of D + PSATDBackwardTransformJ(current_fp, current_cp); + // TODO Cumulative sums need to be fixed with periodic single box + PSATDSubtractCurrentPartialSumsAvg(); + + // FFT of J after subtraction of cumulative sums + PSATDForwardTransformJ(current_fp, current_cp); + } + else // no current correction, no Vay deposition + { + // FFT of J and rho (if used) + PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformRho(rho_fp, rho_cp, 0, 0); // rho old + PSATDForwardTransformRho(rho_fp, rho_cp, 1, 1); // rho new + } } - - // Correct the current in Fourier space so that the continuity equation is satisfied, and - // transform back to real space so that the current correction is reflected in the diagnostics - if (WarpX::current_correction) + else // no periodic single box { - PSATDCurrentCorrection(); - PSATDBackwardTransformJ(current_fp, current_cp); - } + if (current_correction) + { + // FFT of J and rho +#ifdef WARPX_DIM_RZ + // In RZ geometry, do not apply filtering here, since it is + // applied in the subsequent calls to these functions (below) + const bool apply_kspace_filter = false; + PSATDForwardTransformJ(current_fp, current_cp, apply_kspace_filter); + PSATDForwardTransformRho(rho_fp, rho_cp, 0, 0, apply_kspace_filter); // rho old + PSATDForwardTransformRho(rho_fp, rho_cp, 1, 1, apply_kspace_filter); // rho new +#else + PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformRho(rho_fp, rho_cp, 0, 0); // rho old + PSATDForwardTransformRho(rho_fp, rho_cp, 1, 1); // rho new +#endif - // Compute the current in Fourier space according to the Vay deposition scheme, and - // transform back to real space so that the Vay deposition is reflected in the diagnostics - if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) - { - PSATDVayDeposition(); - PSATDBackwardTransformJ(current_fp, current_cp); - PSATDSubtractCurrentPartialSumsAvg(); - SyncCurrent(current_fp, current_cp); + // Correct J in k-space + PSATDCurrentCorrection(); + + // Inverse FFT of J + PSATDBackwardTransformJ(current_fp, current_cp); + + // Synchronize J and rho + SyncCurrent(current_fp, current_cp); + SyncRho(); + } + else if (current_deposition_algo == CurrentDepositionAlgo::Vay) + { + // FFT of D + PSATDForwardTransformJ(current_fp_vay, current_cp); + + // Compute J from D in k-space + PSATDVayDeposition(); + + // 
Inverse FFT of J, subtract cumulative sums of D + PSATDBackwardTransformJ(current_fp, current_cp); + PSATDSubtractCurrentPartialSumsAvg(); + + // Synchronize J and rho (if used) + SyncCurrent(current_fp, current_cp); + SyncRho(); + } + + // FFT of J and rho (if used) PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformRho(rho_fp, rho_cp, 0, 0); // rho old + PSATDForwardTransformRho(rho_fp, rho_cp, 1, 1); // rho new } + // FFT of E and B + PSATDForwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + #ifdef WARPX_DIM_RZ if (pml_rz[0]) pml_rz[0]->PushPSATD(0); #endif + // FFT of F and G if (WarpX::do_dive_cleaning) PSATDForwardTransformF(); if (WarpX::do_divb_cleaning) PSATDForwardTransformG(); + + // Update E, B, F, and G in k-space PSATDPushSpectralFields(); + + // Inverse FFT of E, B, F, and G PSATDBackwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); if (WarpX::fft_do_time_averaging) PSATDBackwardTransformEBavg(Efield_avg_fp, Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp); diff --git a/Source/WarpX.H b/Source/WarpX.H index c16f30408d0..7a051e30db8 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -646,6 +646,13 @@ public: void FillBoundaryG (int lev, amrex::IntVect ng, const bool nodal_sync = false); void FillBoundaryAux (int lev, amrex::IntVect ng); + /** + * \brief Synchronize J and rho: + * filter (if used), exchange guard cells, interpolate across MR levels. + * Contains separate calls to WarpX::SyncCurrent and WarpX::SyncRho. + */ + void SyncCurrentAndRho (); + /** * \brief Apply filter and sum guard cells across MR levels. * If current centering is used, center the current from a nodal grid @@ -1506,10 +1513,13 @@ private: * storing the fine patch current to be transformed * \param J_cp Vector of three-dimensional arrays (for each level) * storing the coarse patch current to be transformed + * \param[in] apply_kspace_filter Control whether to apply filtering + * (only used in RZ geometry to avoid double filtering) */ void PSATDForwardTransformJ ( const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp); + const amrex::Vector,3>>& J_cp, + const bool apply_kspace_filter=true); /** * \brief Backward FFT of J on all mesh refinement levels @@ -1531,11 +1541,13 @@ private: * \param charge_cp Vector (for each level) storing the coarse patch charge to be transformed * \param[in] icomp index of fourth component (0 for rho_old, 1 for rho_new) * \param[in] dcomp index of spectral component (0 for rho_old, 1 for rho_new) + * \param[in] apply_kspace_filter Control whether to apply filtering + * (only used in RZ geometry to avoid double filtering) */ void PSATDForwardTransformRho ( const amrex::Vector>& charge_fp, const amrex::Vector>& charge_cp, - const int icomp, const int dcomp); + const int icomp, const int dcomp, const bool apply_kspace_filter=true); /** * \brief Copy rho_new to rho_old in spectral space diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 12747b7b90b..86961dcaf31 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1194,6 +1194,13 @@ WarpX::ReadParameters () "Option algo.current_deposition=vay must be used with psatd.periodic_single_box_fft=0."); } + if (current_deposition_algo == CurrentDepositionAlgo::Vay) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + current_correction == false, + "Options algo.current_deposition=vay and psatd.current_correction=1 cannot be combined together."); + } + // Auxiliary: boosted_frame = true if warpx.gamma_boost is set in the inputs amrex::ParmParse pp_warpx("warpx"); const bool boosted_frame = 
pp_warpx.query("gamma_boost", gamma_boost); @@ -1328,10 +1335,15 @@ WarpX::ReadParameters () } } - // Fill guard cells with backward FFTs if Vay current deposition is used - if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) + // Without periodic single box, fill guard cells with backward FFTs, + // with current correction or Vay deposition + if (fft_periodic_single_box == false) { - WarpX::m_fill_guards_current = amrex::IntVect(1); + if (current_correction || + current_deposition_algo == CurrentDepositionAlgo::Vay) + { + WarpX::m_fill_guards_current = amrex::IntVect(1); + } } } From 052fde57cdf88bae83e5f5943eaeb845a73bbc35 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Sun, 28 Aug 2022 23:28:07 -0700 Subject: [PATCH 0031/1346] Docs: Add description of Python APIs in `libwarpx` (#3310) * add mention of callbacks and some libwarpx functions to the docs * reformatted various function docstrings in `_libwarpx.py` and added them to the docs * Add subsection Co-authored-by: Axel Huebl --- Docs/source/usage/python.rst | 96 +++++++++++++ Python/pywarpx/_libwarpx.py | 263 ++++++++++++++++++++++------------- Python/pywarpx/callbacks.py | 4 +- 3 files changed, 266 insertions(+), 97 deletions(-) diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index 3e65e545603..4052dc0896f 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -145,6 +145,102 @@ The input file should have the line ``sim.step()`` which runs the simulation. where ```` is the number of MPI ranks used, and ```` is the name of the script. + +Extending a Simulation from Python +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +When running WarpX directly from Python it is possible to interact with the simulation +by installing ``CallbackFunctions``, which will execute a given Python function at a +specific location in the WarpX simulation loop. + +.. autoclass:: pywarpx.callbacks.CallbackFunctions + +Places in the WarpX loop where callbacks are available include: +``afterinit``, ``beforecollisions``, ``aftercollisions``, ``beforeEsolve``, ``afterEsolve``, +``beforedeposition``, ``afterdeposition``, ``beforestep``, ``afterstep``, ``afterdiagnostics``, +``afterrestart`` and ``oncheckpointsignal``. +See the examples in *Examples/Tests/ParticleDataPython* for references on how to use +``callbacks``. + +There are several "hooks" available via the ``libwarpx`` shared library to access and manipulate +simulation objects (particles, fields and memory buffers) as well as general properties +(such as processor number). These "hooks" are accessible through the `Simulation.extension` object. + +.. autofunction:: pywarpx.picmi.Simulation.extension.getNProcs + +.. autofunction:: pywarpx.picmi.Simulation.extension.getMyProc + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_nattr + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_nattr_species + +.. autofunction:: pywarpx.picmi.Simulation.extension.getistep + +.. autofunction:: pywarpx.picmi.Simulation.extension.gett_new + +.. autofunction:: pywarpx.picmi.Simulation.extension.evolve + +.. autofunction:: pywarpx.picmi.Simulation.extension.finalize + +.. autofunction:: pywarpx.picmi.Simulation.extension.getistep + +.. autofunction:: pywarpx.picmi.Simulation.extension.gett_new + +.. autofunction:: pywarpx.picmi.Simulation.extension.evolve + +.. autofunction:: pywarpx.picmi.Simulation.extension.getProbLo + +.. autofunction:: pywarpx.picmi.Simulation.extension.getProbHi + +.. 
autofunction:: pywarpx.picmi.Simulation.extension.getCellSize + +Particles can be added to the simulation at specific positions and with specific +attribute values: + +.. autofunction:: pywarpx.picmi.Simulation.extension.add_particles + +Properties of the particles already in the simulation can be obtained with various +functions. + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_count + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_structs + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_arrays + +The ``get_particle_structs()`` and ``get_particle_arrays()`` functions are called +by several utility functions of the form ``get_particle_{comp_name}`` where +``comp_name`` is one of ``x``, ``y``, ``z``, ``r``, ``theta``, ``id``, ``cpu``, +``weight``, ``ux``, ``uy`` or ``uz``. + +The index of some specific component of the particle data can be obtained. + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_comp_index + +New components can be added via Python. + +.. autofunction:: pywarpx.picmi.Simulation.extension.add_real_comp + +Various diagnostics are also accessible from Python. +This includes getting the deposited or total charge density from a given species +as well as accessing the scraped particle buffer. See the example in +*Examples/Modules/ParticleBoudaryScrape* for a reference on how to interact +with scraped particle data. + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_species_charge_sum + +.. autofunction:: pywarpx.picmi.Simulation.extension.depositChargeDensity + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer_size + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer_size + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer_structs + +.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer + +.. autofunction:: pywarpx.picmi.Simulation.extension.clearParticleBoundaryBuffer + Using Python input as a preprocessor ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index fcf7f45d637..691288d0787 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -300,7 +300,7 @@ class Particle(ctypes.Structure): self.libwarpx_so.warpx_sett_new.argtypes = [ctypes.c_int, c_real] self.libwarpx_so.warpx_getdt.argtypes = [ctypes.c_int] - def get_boundary_number(self, boundary): + def _get_boundary_number(self, boundary): ''' Utility function to find the boundary number given a boundary name. @@ -308,13 +308,14 @@ def get_boundary_number(self, boundary): Parameters ---------- - boundary : the boundary from which to get the scraped particle data. - In the form x/y/z_hi/lo or eb. + boundary : str + The boundary from which to get the scraped particle data. In the + form x/y/z_hi/lo or eb. Returns ------- - - Integer index in the boundary scraper buffer for the given boundary. + int + Integer index in the boundary scraper buffer for the given boundary. ''' if self.geometry_dim == '3d': dimensions = {'x' : 0, 'y' : 1, 'z' : 2} @@ -348,7 +349,9 @@ def get_boundary_number(self, boundary): @staticmethod def _array1d_from_pointer(pointer, dtype, size): ''' + Function for converting a ctypes pointer to a numpy array + ''' if not pointer: raise Exception(f'_array1d_from_pointer: pointer is a nullptr') @@ -385,7 +388,7 @@ def getMyProc(self): def get_nattr(self): ''' - Get the number of extra attributes. 
+ Get the number of extra particle attributes. ''' # --- The -3 is because the comps include the velocites @@ -393,10 +396,15 @@ def get_nattr(self): def get_nattr_species(self, species_name): ''' - Get the number of real attributes for the given species. + Parameters + ---------- + + species_name: str + Name of the species ''' + return self.libwarpx_so.warpx_nCompsSpecies( ctypes.c_char_p(species_name.encode('utf-8'))) @@ -423,8 +431,7 @@ def amrex_init(self, argv, mpi_comm=None): def initialize(self, argv=None, mpi_comm=None): ''' - Initialize WarpX and AMReX. Must be called before - doing anything else. + Initialize WarpX and AMReX. Must be called before doing anything else. ''' if argv is None: @@ -450,27 +457,33 @@ def finalize(self, finalize_mpi=1): def getistep(self, level=0): ''' - Get the current time step number for the specified level Parameter --------- - level : the refinement level to reference + level : int + The refinement level to reference ''' + return self.libwarpx_so.warpx_getistep(level) def gett_new(self, level=0): ''' - Get the next time for the specified level + Get the next time for the specified level. + + Parameters + ---------- + level : int + The refinement level to reference ''' + return self.libwarpx_so.warpx_gett_new(level) def evolve(self, num_steps=-1): ''' - Evolve the simulation for num_steps steps. If num_steps=-1, the simulation will be run until the end as specified in the inputs file. @@ -478,21 +491,54 @@ def evolve(self, num_steps=-1): Parameters ---------- - num_steps: int, the number of steps to take - + num_steps: int + The number of steps to take ''' self.libwarpx_so.warpx_evolve(num_steps); def getProbLo(self, direction): + ''' + Get the values of the lower domain boundary. + + Parameters + ---------- + + direction : int + Direction of interest + ''' + assert 0 <= direction < self.dim, 'Inappropriate direction specified' return self.libwarpx_so.warpx_getProbLo(direction) def getProbHi(self, direction): + ''' + Get the values of the upper domain boundary. + + Parameters + ---------- + + direction : int + Direction of interest + ''' + assert 0 <= direction < self.dim, 'Inappropriate direction specified' return self.libwarpx_so.warpx_getProbHi(direction) def getCellSize(self, direction, level=0): + ''' + Get the cell size in the given direction and on the given level. + + Parameters + ---------- + + direction : int + Direction of interest + + level : int + The refinement level to reference + ''' + assert 0 <= direction < 3, 'Inappropriate direction specified' assert 0 <= level and level <= self.libwarpx_so.warpx_finestLevel(), 'Inappropriate level specified' return self.libwarpx_so.warpx_getCellSize(direction, level) @@ -538,25 +584,33 @@ def getCellSize(self, direction, level=0): # # self.libwarpx_so.warpx_ComputePMLFactors(lev, dt) - def add_particles(self, species_name, x=None, y=None, z=None, ux=None, uy=None, uz=None, w=None, - unique_particles=True, **kwargs): + def add_particles(self, species_name, x=None, y=None, z=None, ux=None, uy=None, + uz=None, w=None, unique_particles=True, **kwargs): ''' - A function for adding particles to the WarpX simulation. Parameters ---------- - species_name : the species to add the particle to - x, y, z : arrays or scalars of the particle positions (default = 0.) - ux, uy, uz : arrays or scalars of the particle momenta (default = 0.) - w : array or scalar of particle weights (default = 0.) - unique_particles : whether the particles are unique or duplicated on - several processes. 
(default = True) - kwargs : dictionary containing an entry for all the extra particle - attribute arrays. If an attribute is not given it will be - set to 0. + species_name : str + The type of species for which particles will be added + + x, y, z : arrays or scalars + The particle positions (default = 0.) + + ux, uy, uz : arrays or scalars + The particle momenta (default = 0.) + + w : array or scalars + Particle weights (default = 0.) + + unique_particles : bool + Whether the particles are unique or duplicated on several processes + (default = True) + kwargs : dict + Containing an entry for all the extra particle attribute arrays. If + an attribute is not given it will be set to 0. ''' # --- Get length of arrays, set to one for scalars @@ -646,30 +700,31 @@ def add_particles(self, species_name, x=None, y=None, z=None, ux=None, uy=None, def get_particle_count(self, species_name, local=False): ''' - - This returns the number of particles of the specified species in the - simulation. + Get the number of particles of the specified species in the simulation. Parameters ---------- - species_name : the species name that the number will be returned for - local : If True the particle count on this processor will - be returned. + species_name : str + The species name that the number will be returned for + + local : bool + If True the particle count on this processor will be returned. + Default False. Returns ------- + int An integer count of the number of particles - ''' + return self.libwarpx_so.warpx_getNumParticles( ctypes.c_char_p(species_name.encode('utf-8')), local ) def get_particle_structs(self, species_name, level): ''' - This returns a list of numpy arrays containing the particle struct data on each tile for this process. The particle data is represented as a structured numpy array and contains the particle 'x', 'y', 'z', 'id', and 'cpu'. @@ -680,13 +735,17 @@ def get_particle_structs(self, species_name, level): Parameters ---------- - species_name : the species name that the data will be returned for + species_name : str + The species name that the data will be returned for + + level : int + The refinement level to reference Returns ------- - A List of numpy arrays. - + List of numpy arrays + The requested particle struct data ''' particles_per_tile = _LP_c_int() @@ -709,7 +768,6 @@ def get_particle_structs(self, species_name, level): def get_particle_arrays(self, species_name, comp_name, level): ''' - This returns a list of numpy arrays containing the particle array data on each tile for this process. @@ -719,14 +777,20 @@ def get_particle_arrays(self, species_name, comp_name, level): Parameters ---------- - species_name : the species name that the data will be returned for - comp_name : the component of the array data that will be returned. + species_name : str + The species name that the data will be returned for + + comp_name : str + The component of the array data that will be returned + + level : int + The refinement level to reference Returns ------- - A List of numpy arrays. - + List of numpy arrays + The requested particle array data ''' particles_per_tile = _LP_c_int() @@ -887,7 +951,6 @@ def get_particle_theta(self, species_name, level=0): def get_particle_comp_index(self, species_name, pid_name): ''' - Get the component index for a given particle attribute. This is useful to get the corrent ordering of attributes when adding new particles using `add_particles()`. 
@@ -895,15 +958,19 @@ def get_particle_comp_index(self, species_name, pid_name): Parameters ---------- - species_name : the species name that the data will be returned for - pid_name : string that is used to identify the new component + species_name : str + The name of the species + + pid_name : str + Name of the component for which the index will be returned Returns ------- + int Integer corresponding to the index of the requested attribute - ''' + return self.libwarpx_so.warpx_getParticleCompIndex( ctypes.c_char_p(species_name.encode('utf-8')), ctypes.c_char_p(pid_name.encode('utf-8')) @@ -911,17 +978,21 @@ def get_particle_comp_index(self, species_name, pid_name): def add_real_comp(self, species_name, pid_name, comm=True): ''' - Add a real component to the particle data array. Parameters ---------- - species_name : the species name for which the new component will be added - pid_name : string that is used to identify the new component - comm : should the component be communicated + species_name : str + The species name for which the new component will be added + + pid_name : str + Name that can be used to identify the new component + comm : bool + Should the component be communicated ''' + self.libwarpx_so.warpx_addRealComp( ctypes.c_char_p(species_name.encode('utf-8')), ctypes.c_char_p(pid_name.encode('utf-8')), comm @@ -929,47 +1000,45 @@ def add_real_comp(self, species_name, pid_name, comm=True): def get_species_charge_sum(self, species_name, local=False): ''' - Returns the total charge in the simulation due to the given species. Parameters ---------- - species_name : the species name for which charge will be summed - local : If True return total charge per processor + species_name : str + The species name for which charge will be summed + local : bool + If True return total charge per processor ''' + return self.libwarpx_so.warpx_sumParticleCharge( ctypes.c_char_p(species_name.encode('utf-8')), local ) def get_particle_boundary_buffer_size(self, species_name, boundary): ''' - This returns the number of particles that have been scraped so far in the simulation from the specified boundary and of the specified species. Parameters ---------- - species_name : return the number of scraped particles of this species - boundary : the boundary from which to get the scraped particle data. - In the form x/y/z_hi/lo - - Returns - ------- - - The number of particles scraped so far from a boundary and of a species. + species_name : str + Return the number of scraped particles of this species + boundary : str + The boundary from which to get the scraped particle data in the + form x/y/z_hi/lo ''' + return self.libwarpx_so.warpx_getParticleBoundaryBufferSize( ctypes.c_char_p(species_name.encode('utf-8')), - self.get_boundary_number(boundary) + self._get_boundary_number(boundary) ) def get_particle_boundary_buffer_structs(self, species_name, boundary, level): ''' - This returns a list of numpy arrays containing the particle struct data for a species that has been scraped by a specific simulation boundary. The particle data is represented as a structured numpy array and contains the @@ -981,23 +1050,22 @@ def get_particle_boundary_buffer_structs(self, species_name, boundary, level): Parameters ---------- - species_name : the species name that the data will be returned for - boundary : the boundary from which to get the scraped particle data. - In the form x/y/z_hi/lo or eb. - level : Which AMR level to retrieve scraped particle data from. 
+ species_name : str + The species name that the data will be returned for - Returns - ------- - - A List of numpy arrays. + boundary : str + The boundary from which to get the scraped particle data in the + form x/y/z_hi/lo or eb. + level : int + Which AMR level to retrieve scraped particle data from. ''' particles_per_tile = _LP_c_int() num_tiles = ctypes.c_int(0) data = self.libwarpx_so.warpx_getParticleBoundaryBufferStructs( ctypes.c_char_p(species_name.encode('utf-8')), - self.get_boundary_number(boundary), level, + self._get_boundary_number(boundary), level, ctypes.byref(num_tiles), ctypes.byref(particles_per_tile) ) @@ -1014,7 +1082,6 @@ def get_particle_boundary_buffer_structs(self, species_name, boundary, level): def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level): ''' - This returns a list of numpy arrays containing the particle array data for a species that has been scraped by a specific simulation boundary. @@ -1024,33 +1091,34 @@ def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level) Parameters ---------- - species_name : the species name that the data will be returned for. - boundary : the boundary from which to get the scraped particle data. - In the form x/y/z_hi/lo or eb. - comp_name : the component of the array data that will be returned. - If "step_scraped" the special attribute holding the - timestep at which a particle was scraped will be - returned. - level : Which AMR level to retrieve scraped particle data from. + species_name : str + The species name that the data will be returned for. - Returns - ------- + boundary : str + The boundary from which to get the scraped particle data in the + form x/y/z_hi/lo or eb. - A List of numpy arrays. + comp_name : str + The component of the array data that will be returned. If + "step_scraped" the special attribute holding the timestep at + which a particle was scraped will be returned. + level : int + Which AMR level to retrieve scraped particle data from. ''' + particles_per_tile = _LP_c_int() num_tiles = ctypes.c_int(0) if comp_name == 'step_scraped': data = self.libwarpx_so.warpx_getParticleBoundaryBufferScrapedSteps( ctypes.c_char_p(species_name.encode('utf-8')), - self.get_boundary_number(boundary), level, + self._get_boundary_number(boundary), level, ctypes.byref(num_tiles), ctypes.byref(particles_per_tile) ) else: data = self.libwarpx_so.warpx_getParticleBoundaryBuffer( ctypes.c_char_p(species_name.encode('utf-8')), - self.get_boundary_number(boundary), level, + self._get_boundary_number(boundary), level, ctypes.byref(num_tiles), ctypes.byref(particles_per_tile), ctypes.c_char_p(comp_name.encode('utf-8')) ) @@ -1083,20 +1151,25 @@ def clearParticleBoundaryBuffer(self): def depositChargeDensity(self, species_name, level, clear_rho=True, sync_rho=True): ''' - Deposit the specified species' charge density in rho_fp in order to - access that data via pywarpx.fields.RhoFPWrapper() + access that data via pywarpx.fields.RhoFPWrapper(). Parameters ---------- - species_name : the species name that will be deposited. - level : Which AMR level to retrieve scraped particle data from. - clear_rho : If True, zero out rho_fp before deposition. - sync_rho : If True, perform MPI exchange and properly set boundary - cells for rho_fp. + species_name : str + The species name that will be deposited. + + level : int + Which AMR level to retrieve scraped particle data from. + clear_rho : bool + If True, zero out rho_fp before deposition. 
+ + sync_rho : bool + If True, perform MPI exchange and properly set boundary cells for rho_fp. ''' + if clear_rho: from . import fields fields.RhoFPWrapper(level, True)[...] = 0.0 diff --git a/Python/pywarpx/callbacks.py b/Python/pywarpx/callbacks.py index e9f034a36f4..ba13ef439dd 100644 --- a/Python/pywarpx/callbacks.py +++ b/Python/pywarpx/callbacks.py @@ -71,12 +71,12 @@ class CallbackFunctions(object): Note that for functions passed in that are methods of a class instance, a full reference of the instance is saved. This extra reference means - that the object will not actually deleted if the user deletes the + that the object will not actually be deleted if the user deletes the original reference. This is good since the user does not need to keep the reference to the object (for example it can be created using a local variable in a function). It may be bad if the user thinks an object was deleted, but it actually isn't since it had (unkown to the user) - installed a method in one of the call back lists + installed a method in one of the call back lists. """ def __init__(self,name=None,lcallonce=0): From 8f43825cda3ba4e3f85a0d868d0d0673970242a5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 29 Aug 2022 11:15:38 -0700 Subject: [PATCH 0032/1346] Python: 3.7+ (#3342) * Python: 3.7+ Python 3.6 is now end-of-life. Bump support to 3.7+. * CUDA CI: Python3 Update --- .github/workflows/cuda.yml | 4 ++++ Docs/source/dataanalysis/plot_parallel.rst | 3 +-- Docs/source/developers/gnumake/python.rst | 2 +- Docs/source/install/dependencies.rst | 2 +- Docs/source/usage/python.rst | 2 +- Python/setup.py | 2 +- setup.py | 5 ++--- 7 files changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index c73b7ef0717..70234a8966f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -24,6 +24,10 @@ jobs: SETUPTOOLS_USE_DISTUTILS: stdlib steps: - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + name: Install Python + with: + python-version: '3.x' - name: install dependencies run: | .github/workflows/dependencies/nvcc11.sh diff --git a/Docs/source/dataanalysis/plot_parallel.rst b/Docs/source/dataanalysis/plot_parallel.rst index 85ab8f1aea0..29644175ef7 100644 --- a/Docs/source/dataanalysis/plot_parallel.rst +++ b/Docs/source/dataanalysis/plot_parallel.rst @@ -12,8 +12,7 @@ Most of its dependencies are standard Python packages, that come with a default Anaconda installation or can be installed with ``pip`` or ``conda``: `os, matplotlib, sys, argparse, matplotlib, scipy`. -Additional dependencies are ``yt >= 3.5`` ( or ``yt >= 3.6`` if you are using -rigid injection, see section :doc:`yt` on how to install ``yt``), and ``mpi4py``. +Additional dependencies are ``yt >= 4.0.1`` and ``mpi4py``. Run serial ---------- diff --git a/Docs/source/developers/gnumake/python.rst b/Docs/source/developers/gnumake/python.rst index 3a979dd3755..09c84dfe552 100644 --- a/Docs/source/developers/gnumake/python.rst +++ b/Docs/source/developers/gnumake/python.rst @@ -3,7 +3,7 @@ Installing WarpX as a Python package ==================================== -A full Python installation of WarpX can be done, which includes a build of all of the C++ code, or a pure Python version can be made which only installs the Python scripts. WarpX requires Pythone version 3.6 or newer. +A full Python installation of WarpX can be done, which includes a build of all of the C++ code, or a pure Python version can be made which only installs the Python scripts. 
WarpX requires Pythone version 3.7 or newer. For a full Python installation of WarpX --------------------------------------- diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 910ed1d2b35..86f10cce863 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -27,7 +27,7 @@ Optional dependencies include: - see `optional I/O backends `__ - `CCache `__: to speed up rebuilds (For CUDA support, needs version 3.7.9+ and 4.2+ is recommended) - `Ninja `__: for faster parallel compiles -- `Python 3.6+ `__ +- `Python 3.7+ `__ - `mpi4py `__ - `numpy `__ diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index 4052dc0896f..16836c75418 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -4,7 +4,7 @@ Python (PICMI) ============== WarpX uses the `PICMI standard `__ for its Python input files. -Python version 3.6 or newer is required. +Python version 3.7 or newer is required. Example input files can be found in :ref:`the examples section `. The examples support running in both modes by commenting and uncommenting the appropriate lines. diff --git a/Python/setup.py b/Python/setup.py index 0dd904c748d..2db24d0bbfa 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -60,6 +60,6 @@ description = """Wrapper of WarpX""", package_data = package_data, install_requires = ['numpy', 'picmistandard==0.0.19', 'periodictable'], - python_requires = '>=3.6', + python_requires = '>=3.7', zip_safe=False ) diff --git a/setup.py b/setup.py index ac3ecc6171b..260f96f090e 100644 --- a/setup.py +++ b/setup.py @@ -297,7 +297,7 @@ def build_extension(self, ext): cmdclass=cmdclass, # scripts=['warpx_1d', 'warpx_2d', 'warpx_3d', 'warpx_rz'], zip_safe=False, - python_requires='>=3.6', + python_requires='>=3.7', # tests_require=['pytest'], install_requires=install_requires, # see: src/bindings/python/cli @@ -307,7 +307,7 @@ def build_extension(self, ext): # ] #}, extras_require={ - 'all': ['openPMD-api~=0.14.2', 'openPMD-viewer~=1.1', 'yt~=3.6,>=4.0.1', 'matplotlib'], + 'all': ['openPMD-api~=0.14.2', 'openPMD-viewer~=1.1', 'yt>=4.0.1', 'matplotlib'], }, # cmdclass={'test': PyTest}, # platforms='any', @@ -321,7 +321,6 @@ def build_extension(self, ext): 'Topic :: Scientific/Engineering :: Physics', 'Programming Language :: C++', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', From 9e9f5f84fc47f27523ced7b8bfa1eccdfc555e1b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 29 Aug 2022 16:07:57 -0700 Subject: [PATCH 0033/1346] AMReX: Weekly Update (#3348) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 70234a8966f..498b6f6c66c 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 8294c3afbcbbc503f77e493196d380fbe1666d02 && cd - + cd amrex && git checkout --detach 3d29fd7d0e816f3c436112d90bdefe815e0ff72a && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 91b5a11153b..38a7842abb1 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 8294c3afbcbbc503f77e493196d380fbe1666d02 +branch = 3d29fd7d0e816f3c436112d90bdefe815e0ff72a [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 8d611da8003..e85cddbdc9a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 8294c3afbcbbc503f77e493196d380fbe1666d02 +branch = 3d29fd7d0e816f3c436112d90bdefe815e0ff72a [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index be65ecf7ad4..ccfc1b041d3 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "8294c3afbcbbc503f77e493196d380fbe1666d02" +set(WarpX_amrex_branch "3d29fd7d0e816f3c436112d90bdefe815e0ff72a" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index d6ba50fd66e..733bc3c4721 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 8294c3afbcbbc503f77e493196d380fbe1666d02 && cd - +cd amrex && git checkout --detach 3d29fd7d0e816f3c436112d90bdefe815e0ff72a && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 3e47534613e02fd9bedbdda32892a2e0a7b76817 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 30 Aug 2022 02:09:23 +0200 Subject: [PATCH 0034/1346] Fix few "magic constants" & undefined variables found with clang-tidy (#3346) * fix few issues found with clang tidy * fix bug --- Source/Diagnostics/BTDiagnostics.cpp | 11 ++++-- .../Diagnostics/BackTransformedDiagnostic.cpp | 34 ++++++++++++------- .../Diagnostics/ReducedDiags/ReducedDiags.cpp | 3 +- Source/Diagnostics/SliceDiagnostic.cpp | 16 +++++---- 4 files changed, 41 insertions(+), 23 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 741ab6e6b6f..c9cb72c5e9c 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -43,6 +43,11 @@ using namespace amrex::literals; +namespace +{ + const int permission_flag_rwxrxrx = 0755; +} + BTDiagnostics::BTDiagnostics (int i, std::string name) : Diagnostics(i, name) { @@ -844,16 +849,16 @@ void BTDiagnostics::MergeBuffersForPlotfile (int i_snapshot) // Create directory only when the 
first buffer is flushed out. if (m_buffer_flush_counter[i_snapshot] == 0 ) { // Create Level_0 directory to store all Cell_D and Cell_H files - if (!amrex::UtilCreateDirectory(snapshot_Level0_path, 0755) ) + if (!amrex::UtilCreateDirectory(snapshot_Level0_path, permission_flag_rwxrxrx) ) amrex::CreateDirectoryFailed(snapshot_Level0_path); // Create directory for each species selected for diagnostic for (int i = 0; i < m_particles_buffer[i_snapshot].size(); ++i) { std::string snapshot_species_path = snapshot_path + "/" + m_output_species_names[i]; - if ( !amrex::UtilCreateDirectory(snapshot_species_path, 0755)) + if ( !amrex::UtilCreateDirectory(snapshot_species_path, permission_flag_rwxrxrx)) amrex::CreateDirectoryFailed(snapshot_species_path); // Create Level_0 directory for particles to store Particle_H and DATA files std::string species_Level0_path = snapshot_species_path + "/Level_0"; - if ( !amrex::UtilCreateDirectory(species_Level0_path, 0755)) + if ( !amrex::UtilCreateDirectory(species_Level0_path, permission_flag_rwxrxrx)) amrex::CreateDirectoryFailed(species_Level0_path); } std::string buffer_WarpXHeader_path = recent_Buffer_filepath + "/WarpXHeader"; diff --git a/Source/Diagnostics/BackTransformedDiagnostic.cpp b/Source/Diagnostics/BackTransformedDiagnostic.cpp index 48d1cabc0ba..79314d137c8 100644 --- a/Source/Diagnostics/BackTransformedDiagnostic.cpp +++ b/Source/Diagnostics/BackTransformedDiagnostic.cpp @@ -52,6 +52,11 @@ using namespace amrex; +namespace +{ + const int permission_flag_rwxrxrx = 0755; +} + #ifdef WARPX_USE_HDF5 /* @@ -532,7 +537,7 @@ LorentzTransformZ (MultiFab& data, Real gamma_boost, Real beta_boost) { // Transform the transverse E and B fields. Note that ez and bz are not // changed by the tranform. - Real e_lab, b_lab, j_lab, r_lab; + Real e_lab = 0.0_rt, b_lab = 0.0_rt, j_lab = 0.0_rt, r_lab = 0.0_rt; e_lab = gamma_boost * (arr(i, j, k, 0) + beta_boost*clight*arr(i, j, k, 4)); b_lab = gamma_boost * (arr(i, j, k, 4) + @@ -550,13 +555,16 @@ LorentzTransformZ (MultiFab& data, Real gamma_boost, Real beta_boost) arr(i, j, k, 3) = b_lab; // Transform the charge and current density. Only the z component of j is affected. 
- j_lab = gamma_boost*(arr(i, j, k, 8) + - beta_boost*clight*arr(i, j, k, 9)); - r_lab = gamma_boost*(arr(i, j, k, 9) + - beta_boost*arr(i, j, k, 8)/clight); + const int j_comp_index = 8; + const int r_comp_index = 9; + + j_lab = gamma_boost*(arr(i, j, k, j_comp_index) + + beta_boost*clight*arr(i, j, k, j_comp_index)); + r_lab = gamma_boost*(arr(i, j, k, r_comp_index) + + beta_boost*arr(i, j, k, r_comp_index)/clight); - arr(i, j, k, 8) = j_lab; - arr(i, j, k, 9) = r_lab; + arr(i, j, k, j_comp_index) = j_lab; + arr(i, j, k, r_comp_index) = r_lab; } ); } @@ -615,7 +623,7 @@ BackTransformedDiagnostic (Real zmin_lab, Real zmax_lab, Real v_window_lab, // Query fields to dump std::vector user_fields_to_dump; ParmParse pp_warpx("warpx"); - bool do_user_fields; + bool do_user_fields = false; do_user_fields = pp_warpx.queryarr("back_transformed_diag_fields", user_fields_to_dump); if (queryWithParser(pp_warpx, "buffer_size", m_num_buffer_)) { if (m_max_box_size_ < m_num_buffer_) m_max_box_size_ = m_num_buffer_; @@ -1138,7 +1146,7 @@ writeMetaData () if (ParallelDescriptor::IOProcessor()) { const std::string fullpath = WarpX::lab_data_directory + "/snapshots"; - if (!UtilCreateDirectory(fullpath, 0755)) + if (!UtilCreateDirectory(fullpath, permission_flag_rwxrxrx)) CreateDirectoryFailed(fullpath); VisMF::IO_Buffer io_buffer(VisMF::IO_Buffer_Size); @@ -1160,7 +1168,7 @@ writeMetaData () if (m_N_slice_snapshots_ > 0) { const std::string fullpath_slice = WarpX::lab_data_directory + "/slices"; - if (!UtilCreateDirectory(fullpath_slice, 0755)) + if (!UtilCreateDirectory(fullpath_slice, permission_flag_rwxrxrx)) CreateDirectoryFailed(fullpath_slice); VisMF::IO_Buffer io_buffer_slice(VisMF::IO_Buffer_Size); @@ -1285,13 +1293,13 @@ createLabFrameDirectories() { #else if (ParallelDescriptor::IOProcessor()) { - if (!UtilCreateDirectory(m_file_name, 0755)) + if (!UtilCreateDirectory(m_file_name, permission_flag_rwxrxrx)) CreateDirectoryFailed(m_file_name); const int nlevels = 1; for(int i = 0; i < nlevels; ++i) { const std::string &fullpath = LevelFullPath(i, m_file_name); - if (!UtilCreateDirectory(fullpath, 0755)) + if (!UtilCreateDirectory(fullpath, permission_flag_rwxrxrx)) CreateDirectoryFailed(fullpath); } @@ -1305,7 +1313,7 @@ createLabFrameDirectories() { const std::string& species_name = species_names[mypc.mapSpeciesBackTransformedDiagnostics(i)]; const std::string fullpath = m_file_name + "/" + species_name; - if (!UtilCreateDirectory(fullpath, 0755)) + if (!UtilCreateDirectory(fullpath, permission_flag_rwxrxrx)) CreateDirectoryFailed(fullpath); } } diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp index 5953647bb1b..93e4ad043cd 100644 --- a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp @@ -44,7 +44,8 @@ ReducedDiags::ReducedDiags (std::string rd_name) if (ParallelDescriptor::IOProcessor()) { // create folder - if (!UtilCreateDirectory(m_path, 0755)) + const int permission_flag_rwxrxrx = 0755; + if (!UtilCreateDirectory(m_path, permission_flag_rwxrxrx)) { CreateDirectoryFailed(m_path); } // replace / create output file diff --git a/Source/Diagnostics/SliceDiagnostic.cpp b/Source/Diagnostics/SliceDiagnostic.cpp index 6f27fe5d415..47fca30d601 100644 --- a/Source/Diagnostics/SliceDiagnostic.cpp +++ b/Source/Diagnostics/SliceDiagnostic.cpp @@ -83,7 +83,8 @@ CreateSlice( const MultiFab& mf, const Vector &dom_geom, const RealBox& real_box = dom_geom[0].ProbDomain(); RealBox 
slice_cc_nd_box; - int slice_grid_size = 32; + const int default_grid_size = 32; + int slice_grid_size = default_grid_size; bool interpolate = false; bool coarsen = false; @@ -304,6 +305,8 @@ CheckSliceInput( const RealBox real_box, RealBox &slice_cc_nd_box, warnMsg.str(), ablastr::warn_manager::WarnPriority::low); } + const auto very_small_number = 1E-10; + // Factor to ensure index values computation depending on index type // double fac = ( 1.0 - SliceType[idim] )*dom_geom[0].CellSize(idim)*0.5; // if dimension is reduced to one cell length // @@ -319,11 +322,11 @@ CheckSliceInput( const RealBox real_box, RealBox &slice_cc_nd_box, slice_lo[idim] = static_cast( floor( ( (slice_cc_nd_box.lo(idim) - (real_box.lo(idim) + fac ) ) - / dom_geom[0].CellSize(idim)) + fac * 1E-10) ); + / dom_geom[0].CellSize(idim)) + fac * very_small_number) ); slice_lo2[idim] = static_cast( ceil( ( (slice_cc_nd_box.lo(idim) - (real_box.lo(idim) + fac) ) - / dom_geom[0].CellSize(idim)) - fac * 1E-10 ) ); + / dom_geom[0].CellSize(idim)) - fac * very_small_number) ); } else { slice_lo[idim] = static_cast( @@ -353,9 +356,9 @@ CheckSliceInput( const RealBox real_box, RealBox &slice_cc_nd_box, else { // moving realbox.lo and reabox.hi to nearest coarsenable grid point // - auto index_lo = static_cast(floor(((slice_realbox.lo(idim) + 1E-10 + auto index_lo = static_cast(floor(((slice_realbox.lo(idim) + very_small_number - (real_box.lo(idim))) / dom_geom[0].CellSize(idim))) ); - auto index_hi = static_cast(ceil(((slice_realbox.hi(idim) - 1E-10 + auto index_hi = static_cast(ceil(((slice_realbox.hi(idim) - very_small_number - (real_box.lo(idim))) / dom_geom[0].CellSize(idim))) ); bool modify_cr = true; @@ -378,8 +381,9 @@ CheckSliceInput( const RealBox real_box, RealBox &slice_cc_nd_box, // If modified index.hi is > baselinebox.hi, move the point // // to the previous coarsenable point // + const auto small_number = 0.01; if ( (hi_new * dom_geom[0].CellSize(idim)) - > real_box.hi(idim) - real_box.lo(idim) + dom_geom[0].CellSize(idim)*0.01 ) + > real_box.hi(idim) - real_box.lo(idim) + dom_geom[0].CellSize(idim)*small_number) { hi_new = index_hi - mod_hi; } From 19dba606b11391c67e33857926db8b94ee60829c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 31 Aug 2022 11:29:07 -0700 Subject: [PATCH 0035/1346] Perlmutter: Work-Around CUDA-Aware MPI & Slurm (#3349) * Perlmutter: Work-Around CUDA-Aware MPI & Slurm There are known HPE bugs on Perlmutter that can blow up simulations (segfault) with CUDA-aware MPI. We avoid the respective Slurm options now and just manually control the exposed GPUs per MPI rank. 
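For illustration, the manual control amounts to the following sketch in the job script (with four ranks per node, `SLURM_LOCALID` is the node-local rank index, so each rank is meant to see exactly one device; the executable and input names are placeholders):

  # keep GPU-aware MPI enabled
  export MPICH_GPU_SUPPORT_ENABLED=1
  # expose one GPU per MPI rank instead of using --gpus-per-task/--gpu-bind
  export CUDA_VISIBLE_DEVICES=$SLURM_LOCALID
  srun ./warpx inputs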
* Add: `gpus-per-node` --- Tools/machines/perlmutter-nersc/perlmutter.sbatch | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch index 2c085364d7c..65777f30441 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch @@ -16,8 +16,7 @@ #SBATCH -C gpu #SBATCH -c 32 #SBATCH --ntasks-per-node=4 -#SBATCH --gpus-per-task=1 -#SBATCH --gpu-bind=single:1 +#SBATCH --gpus-per-node=4 #SBATCH -o WarpX.o%j #SBATCH -e WarpX.e%j @@ -42,6 +41,9 @@ # GPU-aware MPI export MPICH_GPU_SUPPORT_ENABLED=1 +# expose one GPU per MPI rank +export CUDA_VISIBLE_DEVICES=$SLURM_LOCALID + EXE=./warpx #EXE=../WarpX/build/bin/warpx.3d.MPI.CUDA.DP.OPMD.QED #EXE=./main3d.gnu.TPROF.MPI.CUDA.ex From dba95a94fa47ffdabaf14b1a5c853adbf0695e18 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 31 Aug 2022 20:39:08 +0200 Subject: [PATCH 0036/1346] make variables constexpr (#3354) --- Source/Diagnostics/BTDiagnostics.cpp | 2 +- Source/Diagnostics/BackTransformedDiagnostic.cpp | 2 +- Source/Diagnostics/ReducedDiags/ReducedDiags.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index c9cb72c5e9c..72b6244e8ae 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -45,7 +45,7 @@ using namespace amrex::literals; namespace { - const int permission_flag_rwxrxrx = 0755; + constexpr int permission_flag_rwxrxrx = 0755; } BTDiagnostics::BTDiagnostics (int i, std::string name) diff --git a/Source/Diagnostics/BackTransformedDiagnostic.cpp b/Source/Diagnostics/BackTransformedDiagnostic.cpp index 79314d137c8..5b28d61000e 100644 --- a/Source/Diagnostics/BackTransformedDiagnostic.cpp +++ b/Source/Diagnostics/BackTransformedDiagnostic.cpp @@ -54,7 +54,7 @@ using namespace amrex; namespace { - const int permission_flag_rwxrxrx = 0755; + constexpr int permission_flag_rwxrxrx = 0755; } #ifdef WARPX_USE_HDF5 diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp index 93e4ad043cd..68111df72b8 100644 --- a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp @@ -44,7 +44,7 @@ ReducedDiags::ReducedDiags (std::string rd_name) if (ParallelDescriptor::IOProcessor()) { // create folder - const int permission_flag_rwxrxrx = 0755; + constexpr int permission_flag_rwxrxrx = 0755; if (!UtilCreateDirectory(m_path, permission_flag_rwxrxrx)) { CreateDirectoryFailed(m_path); } From a10d4a27c399b930dc6c9e58030dd0663d42a7ac Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 31 Aug 2022 18:58:42 -0700 Subject: [PATCH 0037/1346] ABLASTR: Add Nodal Gather Functions (#3357) Add nodal field gather functions to ABLASTR for scalar and vector fields. This currently only implements 1st order (CIC) shape. 
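For illustration, a minimal usage sketch of the new helpers (the `Array4` views, geometry quantities and particle position below are placeholders; only first-order/CIC interpolation is implemented):

  #include <ablastr/particles/NodalFieldGather.H>

  // phi_arr, Ex_arr, Ey_arr, Ez_arr: amrex::Array4 views of *nodal* MultiFabs
  // plo, dxi: lower corner and inverse cell sizes of the current level geometry
  // (xp, yp, zp): one particle's position
  const amrex::Real phi_p =
      ablastr::particles::doGatherScalarFieldNodal(xp, yp, zp, phi_arr, dxi, plo);

  const auto E_p = ablastr::particles::doGatherVectorFieldNodal(
      xp, yp, zp, Ex_arr, Ey_arr, Ez_arr, dxi, plo);
  // E_p[0], E_p[1], E_p[2] hold the field interpolated to the particle position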
Co-authored-by: Remi Lehe Co-authored-by: Remi Lehe --- Source/EmbeddedBoundary/ParticleScraper.H | 21 ++++-- Source/Particles/ParticleBoundaryBuffer.cpp | 5 +- Source/ablastr/particles/DepositCharge.H | 7 +- .../particles/NodalFieldGather.H} | 64 ++++++++++++++++--- 4 files changed, 78 insertions(+), 19 deletions(-) rename Source/{Particles/Gather/ScalarFieldGather.H => ablastr/particles/NodalFieldGather.H} (68%) diff --git a/Source/EmbeddedBoundary/ParticleScraper.H b/Source/EmbeddedBoundary/ParticleScraper.H index cb3386c5f7e..b723fd2ce5e 100644 --- a/Source/EmbeddedBoundary/ParticleScraper.H +++ b/Source/EmbeddedBoundary/ParticleScraper.H @@ -7,12 +7,23 @@ #ifndef PARTICLESCRAPER_H_ #define PARTICLESCRAPER_H_ +#include "EmbeddedBoundary/DistanceToEB.H" +#include "Particles/Pusher/GetAndSetPosition.H" + +#include + #include -#include #include +#include +#include +#include +#include +#include + +#include +#include + -#include -#include "Particles/Gather/ScalarFieldGather.H" /** * \brief Interact particles with the embedded boundary walls. @@ -169,9 +180,9 @@ scrapeParticles (PC& pc, const amrex::Vector& distance_t int i, j, k; amrex::Real W[AMREX_SPACEDIM][2]; - compute_weights_nodal(xp, yp, zp, plo, dxi, i, j, k, W); + ablastr::particles::compute_weights_nodal(xp, yp, zp, plo, dxi, i, j, k, W); - amrex::Real phi_value = interp_field_nodal(i, j, k, W, phi); + amrex::Real phi_value = ablastr::particles::interp_field_nodal(i, j, k, W, phi); if (phi_value < 0.0) { diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index 0eecbe0d307..7877e33d3fc 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -9,10 +9,11 @@ #include "EmbeddedBoundary/DistanceToEB.H" #include "Particles/ParticleBoundaryBuffer.H" #include "Particles/MultiParticleContainer.H" -#include "Particles/Gather/ScalarFieldGather.H" #include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" +#include + #include #include #include @@ -296,7 +297,7 @@ void ParticleBoundaryBuffer::gatherParticles (MultiParticleContainer& mypc, amrex::ParticleReal xp, yp, zp; getPosition(ip, xp, yp, zp); - amrex::Real phi_value = doGatherScalarFieldNodal( + amrex::Real phi_value = ablastr::particles::doGatherScalarFieldNodal( xp, yp, zp, phiarr, dxi, plo ); return phi_value < 0.0 ? 1 : 0; diff --git a/Source/ablastr/particles/DepositCharge.H b/Source/ablastr/particles/DepositCharge.H index 5eb66be6098..e2e27c75622 100644 --- a/Source/ablastr/particles/DepositCharge.H +++ b/Source/ablastr/particles/DepositCharge.H @@ -22,8 +22,8 @@ #include -namespace ablastr { -namespace particles { +namespace ablastr::particles +{ /** Perform charge deposition for the particles on a tile. * @@ -208,7 +208,6 @@ deposit_charge (typename T_PC::ParIterType& pti, #endif } -} // namespace particles -} // namespace ablastr +} // namespace ablastr::particles #endif // ABLASTR_DEPOSIT_CHARGE_H_ diff --git a/Source/Particles/Gather/ScalarFieldGather.H b/Source/ablastr/particles/NodalFieldGather.H similarity index 68% rename from Source/Particles/Gather/ScalarFieldGather.H rename to Source/ablastr/particles/NodalFieldGather.H index 31ee35bd001..53c32960468 100644 --- a/Source/Particles/Gather/ScalarFieldGather.H +++ b/Source/ablastr/particles/NodalFieldGather.H @@ -1,19 +1,30 @@ -/* Copyright 2021 Modern Electron +/* Copyright 2019-2022 Modern Electron, Axel Huebl, Remi Lehe * * This file is part of WarpX. 
* * License: BSD-3-Clause-LBNL */ -#ifndef SCALARFIELDGATHER_H_ -#define SCALARFIELDGATHER_H_ +#ifndef ABLASTR_NODALFIELDGATHER_H_ +#define ABLASTR_NODALFIELDGATHER_H_ +#include +#include +#include +#include +#include + + +namespace ablastr::particles +{ /** * \brief Compute weight of each surrounding node in interpolating a nodal field * to the given coordinates. * + * This currently only does linear order. + * * \param xp,yp,zp Particle position coordinates * \param plo Index lower bounds of domain. - * \param dxi 3D cell spacing + * \param dxi inverse 3D cell spacing * \param i,j,k Variables to store indices of position on grid * \param W 2D array of weights to store each neighbouring node */ @@ -21,8 +32,8 @@ AMREX_GPU_HOST_DEVICE AMREX_INLINE void compute_weights_nodal (const amrex::ParticleReal xp, const amrex::ParticleReal yp, const amrex::ParticleReal zp, - amrex::GpuArray const& plo, - amrex::GpuArray const& dxi, + amrex::GpuArray const& plo, + amrex::GpuArray const& dxi, int& i, int& j, int& k, amrex::Real W[AMREX_SPACEDIM][2]) noexcept { using namespace amrex::literals; @@ -110,7 +121,7 @@ amrex::Real interp_field_nodal (int i, int j, int k, * * \param xp,yp,zp Particle position coordinates * \param scalar_field Array4 of the nodal scalar field, either full array or tile. - * \param dxi 3D cell spacing + * \param dxi inverse 3D cell spacing * \param lo Index lower bounds of domain. */ AMREX_GPU_HOST_DEVICE AMREX_INLINE @@ -128,4 +139,41 @@ amrex::Real doGatherScalarFieldNodal (const amrex::ParticleReal xp, return interp_field_nodal(ii, jj, kk, W, scalar_field); } -#endif // SCALARFIELDGATHER_H_ + +/** + * \brief Vector field gather for a single particle. The field has to be defined + * at the cell nodes (see https://amrex-codes.github.io/amrex/docs_html/Basics.html#id2) + * + * \param xp,yp,zp Particle position coordinates + * \param vector_field_x,vector_field_y,vector_field_z Array4 of nodal scalar fields, either full array or tile. + * \param dxi inverse 3D cell spacing + * \param lo Index lower bounds of domain. + */ +AMREX_GPU_HOST_DEVICE AMREX_INLINE +amrex::GpuArray +doGatherVectorFieldNodal (const amrex::ParticleReal xp, + const amrex::ParticleReal yp, + const amrex::ParticleReal zp, + amrex::Array4 const& vector_field_x, + amrex::Array4 const& vector_field_y, + amrex::Array4 const& vector_field_z, + amrex::GpuArray const& dxi, + amrex::GpuArray const& lo) noexcept +{ + // first find the weight of surrounding nodes to use during interpolation + int ii, jj, kk; + amrex::Real W[AMREX_SPACEDIM][2]; + compute_weights_nodal(xp, yp, zp, lo, dxi, ii, jj, kk, W); + + amrex::GpuArray const field_interp = { + interp_field_nodal(ii, jj, kk, W, vector_field_x), + interp_field_nodal(ii, jj, kk, W, vector_field_y), + interp_field_nodal(ii, jj, kk, W, vector_field_z) + }; + + return field_interp; +} + +} // namespace ablastr::particles + +#endif // ABLASTR_NODALFIELDGATHER_H_ From f98da67950e7fd34d89bb7996327a6c566338c9e Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Thu, 1 Sep 2022 10:30:42 -0700 Subject: [PATCH 0038/1346] Fix a bug in AddPlasma (#3351) When warpx.refine_plasma=1, there was an error in the logic like below. 
if (lrefine_injection) { Box fine_overlap_box = ...; if (fine_overlap_box.ok()) { pcounts[index] = ...; } // the else part was missing } else { pcounts[index] = ...; } --- .../Particles/PhysicalParticleContainer.cpp | 38 +++++++++---------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 5b0ea8ae547..981dce2113c 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -963,8 +963,10 @@ PhysicalParticleContainer::AddPlasma (int lev, RealBox part_realbox) Gpu::DeviceVector offset(overlap_box.numPts()); auto pcounts = counts.data(); int lrrfac = rrfac; - int lrefine_injection = refine_injection; - Box lfine_box = fine_injection_box; + Box fine_overlap_box; // default Box is NOT ok(). + if (refine_injection) { + fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); + } amrex::ParallelFor(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept { IntVect iv(AMREX_D_DECL(i, j, k)); @@ -977,16 +979,13 @@ PhysicalParticleContainer::AddPlasma (int lev, RealBox part_realbox) if (inj_pos->overlapsWith(lo, hi)) { auto index = overlap_box.index(iv); - if (lrefine_injection) { - Box fine_overlap_box = overlap_box & amrex::shift(lfine_box, -shifted); - if (fine_overlap_box.ok()) { - int r = (fine_overlap_box.contains(iv)) ? - AMREX_D_TERM(lrrfac,*lrrfac,*lrrfac) : 1; - pcounts[index] = num_ppc*r; - } + int r; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + r = AMREX_D_TERM(lrrfac,*lrrfac,*lrrfac); } else { - pcounts[index] = num_ppc; + r = 1; } + pcounts[index] = num_ppc*r; } #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::ignore_unused(k); @@ -1497,8 +1496,10 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) Gpu::DeviceVector offset(overlap_box.numPts()); auto pcounts = counts.data(); int lrrfac = rrfac; - int lrefine_injection = refine_injection; - Box lfine_box = fine_injection_box; + Box fine_overlap_box; // default Box is NOT ok(). + if (refine_injection) { + fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); + } amrex::ParallelForRNG(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { IntVect iv(AMREX_D_DECL(i, j, k)); @@ -1510,16 +1511,13 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) if (inj_pos->overlapsWith(lo, hi)) { auto index = overlap_box.index(iv); - if (lrefine_injection) { - Box fine_overlap_box = overlap_box & amrex::shift(lfine_box, -shifted); - if (fine_overlap_box.ok()) { - int r = (fine_overlap_box.contains(iv)) ? - AMREX_D_TERM(lrrfac,*lrrfac,*lrrfac) : 1; - pcounts[index] = num_ppc_int*r; - } + int r; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + r = AMREX_D_TERM(lrrfac,*lrrfac,*lrrfac); } else { - pcounts[index] = num_ppc_int; + r = 1; } + pcounts[index] = num_ppc_int*r; } #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::ignore_unused(k); From 082bc8994d62e1dc06e9918483ca42ac0a6306e5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 1 Sep 2022 13:15:18 -0700 Subject: [PATCH 0039/1346] Spack Envs: Debian (#3360) * Spack Envs: Debian Add default compilers on Debian to our Spack environments. 
* Debian: have `lsb-release` installed See: https://github.com/spack/spack/pull/32479 --- Docs/source/install/dependencies.rst | 2 +- Tools/machines/desktop/spack-debian-cuda.yaml | 1 + .../machines/desktop/spack-debian-openmp.yaml | 1 + Tools/machines/desktop/spack-debian-rocm.yaml | 1 + Tools/machines/desktop/spack-ubuntu-cuda.yaml | 42 +++++++++++++++++++ .../machines/desktop/spack-ubuntu-openmp.yaml | 41 ++++++++++++++++++ Tools/machines/desktop/spack-ubuntu-rocm.yaml | 41 ++++++++++++++++++ 7 files changed, 128 insertions(+), 1 deletion(-) create mode 120000 Tools/machines/desktop/spack-debian-cuda.yaml create mode 120000 Tools/machines/desktop/spack-debian-openmp.yaml create mode 120000 Tools/machines/desktop/spack-debian-rocm.yaml diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 86f10cce863..aa9086c3de1 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -84,7 +84,7 @@ Spack (macOS/Linux) First, download a `Spack desktop development environment `__ of your choice. For most desktop development, pick the OpenMP environment for CPUs unless you have a supported GPU. -* **Ubuntu** Linux: +* **Debian/Ubuntu** Linux: * OpenMP: ``system=ubuntu; compute=openmp`` (CPUs) * CUDA: ``system=ubuntu; compute=cuda`` (Nvidia GPUs) diff --git a/Tools/machines/desktop/spack-debian-cuda.yaml b/Tools/machines/desktop/spack-debian-cuda.yaml new file mode 120000 index 00000000000..f396febb4df --- /dev/null +++ b/Tools/machines/desktop/spack-debian-cuda.yaml @@ -0,0 +1 @@ +spack-ubuntu-cuda.yaml \ No newline at end of file diff --git a/Tools/machines/desktop/spack-debian-openmp.yaml b/Tools/machines/desktop/spack-debian-openmp.yaml new file mode 120000 index 00000000000..16868de7ebe --- /dev/null +++ b/Tools/machines/desktop/spack-debian-openmp.yaml @@ -0,0 +1 @@ +spack-ubuntu-openmp.yaml \ No newline at end of file diff --git a/Tools/machines/desktop/spack-debian-rocm.yaml b/Tools/machines/desktop/spack-debian-rocm.yaml new file mode 120000 index 00000000000..59dee26a055 --- /dev/null +++ b/Tools/machines/desktop/spack-debian-rocm.yaml @@ -0,0 +1 @@ +spack-ubuntu-rocm.yaml \ No newline at end of file diff --git a/Tools/machines/desktop/spack-ubuntu-cuda.yaml b/Tools/machines/desktop/spack-ubuntu-cuda.yaml index 12ce071a6df..51a0ea88fea 100644 --- a/Tools/machines/desktop/spack-ubuntu-cuda.yaml +++ b/Tools/machines/desktop/spack-ubuntu-cuda.yaml @@ -85,6 +85,48 @@ spack: modules: [] environment: {} extra_rpaths: [] + # Debian + - compiler: + # g++-10 gcc-10 gfortran-10 (or 11) for CUDA 11.7.0 + spec: gcc@10.4.0 + paths: + cc: /usr/bin/gcc-10 + cxx: /usr/bin/g++-10 + f77: /usr/bin/gfortran-10 + fc: /usr/bin/gfortran-10 + flags: {} + # unstable/sid + operating_system: debianunstable + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@10.2.1 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: debian11 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@8.3.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: debian10 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] # binary caches mirrors: diff --git a/Tools/machines/desktop/spack-ubuntu-openmp.yaml b/Tools/machines/desktop/spack-ubuntu-openmp.yaml index c66c2f5c4cd..6bb15446e7f 100644 --- 
a/Tools/machines/desktop/spack-ubuntu-openmp.yaml +++ b/Tools/machines/desktop/spack-ubuntu-openmp.yaml @@ -82,6 +82,47 @@ spack: modules: [] environment: {} extra_rpaths: [] + # Debian + - compiler: + spec: gcc@12.2.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + # unstable/sid + operating_system: debianunstable + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@10.2.1 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: debian11 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@8.3.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: debian10 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] # binary caches mirrors: diff --git a/Tools/machines/desktop/spack-ubuntu-rocm.yaml b/Tools/machines/desktop/spack-ubuntu-rocm.yaml index 2af0da1ebec..45c9b0f776e 100644 --- a/Tools/machines/desktop/spack-ubuntu-rocm.yaml +++ b/Tools/machines/desktop/spack-ubuntu-rocm.yaml @@ -86,6 +86,47 @@ spack: modules: [] environment: {} extra_rpaths: [] + # Debian + - compiler: + spec: gcc@12.2.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + # unstable/sid + operating_system: debianunstable + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@10.2.1 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: debian11 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] + - compiler: + spec: gcc@8.3.0 + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + flags: {} + operating_system: debian10 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] # binary caches mirrors: From 1c49cd4bf7180308cd37e65f21e6a0bf77a56dbb Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 1 Sep 2022 15:27:44 -0700 Subject: [PATCH 0040/1346] Perlmutter: Specify "regular" QOS (#3362) * Perlmutter: Specify "regular" QOS Regular seems to be not the default (anymore), which blocks submissions >1hr. 
* Clean up --- .../perlmutter-nersc/perlmutter.sbatch | 21 +------------------ 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch index 65777f30441..93e79f1516b 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch @@ -11,8 +11,7 @@ #SBATCH -J WarpX # note: must end on _g #SBATCH -A -# for m3906_g LBNL/AMP users: for large runs, comment in -##SBATCH -q early_science +#SBATCH -q regular #SBATCH -C gpu #SBATCH -c 32 #SBATCH --ntasks-per-node=4 @@ -20,24 +19,6 @@ #SBATCH -o WarpX.o%j #SBATCH -e WarpX.e%j -# ============ -# -N = nodes -# -n = tasks (MPI ranks, usually = G) -# -G = GPUs (full Perlmutter node, 4) -# -c = CPU per task (128 total threads on CPU, 32 per GPU) -# -# --ntasks-per-node= number of tasks (MPI ranks) per node (full node, 4) -# --gpus-per-task= number of GPUs per task (MPI rank) (full node, 4) -# --gpus-per-node= number of GPUs per node (full node, 4) -# -# --gpu-bind=single:1 sets only one GPU to be visible to each MPI rank -# (quiets AMReX init warnings) -# -# Recommend using --ntasks-per-node=4, --gpus-per-task=1 and --gpu-bind=single:1, -# as they are fixed values and allow for easy scaling with less adjustments. -# -# ============ - # GPU-aware MPI export MPICH_GPU_SUPPORT_ENABLED=1 From 09940d721ac2cbb6173f7a3855a969f1f6e98bce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Fri, 2 Sep 2022 18:06:32 +0200 Subject: [PATCH 0041/1346] Initialize user defined runtime attributes in AddPlasmaFlux (#3359) --- .../Particles/PhysicalParticleContainer.cpp | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 981dce2113c..97f890f33d2 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1392,8 +1392,10 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) InjectorPosition* inj_pos = plasma_injector->getInjectorPosition(); InjectorDensity* inj_rho = plasma_injector->getInjectorDensity(); InjectorMomentum* inj_mom = plasma_injector->getInjectorMomentum(); - Real density_min = plasma_injector->density_min; - Real density_max = plasma_injector->density_max; + const amrex::Real density_min = plasma_injector->density_min; + const amrex::Real density_max = plasma_injector->density_max; + constexpr int level_zero = 0; + const amrex::Real t = WarpX::GetInstance().gett_new(level_zero); #ifdef WARPX_DIM_RZ const int nmodes = WarpX::n_rz_azimuthal_modes; @@ -1558,6 +1560,26 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) pa[ia] = soa.GetRealData(ia).data() + old_size; } + // user-defined integer and real attributes + const int n_user_int_attribs = m_user_int_attribs.size(); + const int n_user_real_attribs = m_user_real_attribs.size(); + amrex::Gpu::DeviceVector pa_user_int(n_user_int_attribs); + amrex::Gpu::DeviceVector pa_user_real(n_user_real_attribs); + amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec(n_user_int_attribs); + amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec(n_user_real_attribs); + for (int ia = 0; ia < n_user_int_attribs; ++ia) { + pa_user_int[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; + user_int_attrib_parserexec[ia] = 
m_user_int_attrib_parser[ia]->compile<7>(); + } + for (int ia = 0; ia < n_user_real_attribs; ++ia) { + pa_user_real[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; + user_real_attrib_parserexec[ia] = m_user_real_attrib_parser[ia]->compile<7>(); + } + int** pa_user_int_data = pa_user_int.dataPtr(); + ParticleReal** pa_user_real_data = pa_user_real.dataPtr(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec.dataPtr(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec.dataPtr(); + int* p_ion_level = nullptr; if (do_field_ionization) { p_ion_level = soa.GetIntData(particle_icomps["ionizationLevel"]).data() + old_size; @@ -1719,6 +1741,14 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) p_optical_depth_BW[ip] = breit_wheeler_get_opt(engine); } #endif + // Initialize user-defined integers with user-defined parser + for (int ia = 0; ia < n_user_int_attribs; ++ia) { + pa_user_int_data[ia][ip] = static_cast(user_int_parserexec_data[ia](pos.x, pos.y, pos.z, u.x, u.y, u.z, t)); + } + // Initialize user-defined real attributes with user-defined parser + for (int ia = 0; ia < n_user_real_attribs; ++ia) { + pa_user_real_data[ia][ip] = user_real_parserexec_data[ia](pos.x, pos.y, pos.z, u.x, u.y, u.z, t); + } Real weight = dens * scale_fac * dt; #ifdef WARPX_DIM_RZ From d73bfa1f13efe9efcebc97f823aca8c96ac1f723 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 2 Sep 2022 18:08:06 +0200 Subject: [PATCH 0042/1346] Remove some magic numbers (#3355) * remove some magic numbers * fixed unreachable code issue * fixed issue with unreachable code * fixed issue with unreachable code * remove type traits * revert one change in Gaussian Laser * improved ParticleExtrema * fix bug --- .../ReducedDiags/ParticleExtrema.H | 11 ++ .../ReducedDiags/ParticleExtrema.cpp | 142 ++++++++---------- .../LaserProfileGaussian.cpp | 1 + .../LaserProfilesImpl/LaserProfileHarris.cpp | 17 ++- .../BackgroundStopping/BackgroundStopping.cpp | 5 +- .../BreitWheelerEngineWrapper.cpp | 23 ++- .../QEDInternals/QuantumSyncEngineWrapper.cpp | 26 +++- Source/Particles/Gather/GetExternalFields.cpp | 14 +- Source/Particles/ParticleBoundaryBuffer.cpp | 65 +++++--- 9 files changed, 178 insertions(+), 126 deletions(-) diff --git a/Source/Diagnostics/ReducedDiags/ParticleExtrema.H b/Source/Diagnostics/ReducedDiags/ParticleExtrema.H index e58138dd8f4..b6bfb7c5e22 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleExtrema.H +++ b/Source/Diagnostics/ReducedDiags/ParticleExtrema.H @@ -10,6 +10,7 @@ #include "ReducedDiags.H" +#include #include /** @@ -36,6 +37,16 @@ public: */ void ComputeDiags(int step) override final; +private: + /// auxiliary structure to store headers and indices of the reduced diagnostics + struct aux_header_index + { + std::string header; + int idx; + }; + + /// map to store header texts and indices of the reduced diagnostics + std::map m_headers_indices; }; #endif diff --git a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp index 3658a8c16d9..ed2d2ea1389 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp @@ -82,18 +82,45 @@ ParticleExtrema::ParticleExtrema (std::string rd_name) // get WarpXParticleContainer class object auto & myspc = mypc.GetParticleContainer(i_s); - if (myspc.DoQED()) - { - // resize data array for QED species - const int num_quantities = 18; - 
m_data.resize(num_quantities,0.0); - } else - { - // resize data array for regular species - const int num_quantities = 16; - m_data.resize(num_quantities,0.0); + auto all_diag_names = std::vector {}; + auto add_diag = [&,c=0] ( + const std::string& name, const std::string& header) mutable { + m_headers_indices[name] = aux_header_index{header, c++}; + all_diag_names.push_back(name); + }; + + add_diag("xmin", "xmin(m)"); + add_diag("xmax", "xmax(m)"); + add_diag("ymin", "ymin(m)"); + add_diag("ymax", "ymax(m)"); + add_diag("zmin", "zmin(m)"); + add_diag("zmax", "zmax(m)"); + add_diag("pxmin", "pxmin(kg*m/s)"); + add_diag("pxmax", "pxmax(kg*m/s)"); + add_diag("pymin", "pymin(kg*m/s)"); + add_diag("pymax", "pymax(kg*m/s)"); + add_diag("pzmin", "pzmin(kg*m/s)"); + add_diag("pzmax", "pzmax(kg*m/s)"); + add_diag("gmin", "gmin()"); + add_diag("gmax", "gmax()"); + +#if (defined WARPX_DIM_3D) + add_diag("wmin", "wmin()"); + add_diag("wmax", "wmax()"); +#elif (defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)) + add_diag("wmin", "wmin(1/m)"); + add_diag("wmax", "wmax(1/m)"); +#else + add_diag("wmin", "wmin(1/m^2)"); + add_diag("wmax", "wmax(1/m^2)"); +#endif + if (myspc.DoQED()){ + add_diag("chimin", "chimin()"); + add_diag("chimax", "chimax()"); } + m_data.resize(all_diag_names.size()); + if (ParallelDescriptor::IOProcessor()) { if ( m_IsNotRestart ) @@ -103,55 +130,14 @@ ParticleExtrema::ParticleExtrema (std::string rd_name) ofs.open(m_path + m_rd_name + "." + m_extension, std::ofstream::out | std::ofstream::app); // write header row - int c = 0; + int off = 0; ofs << "#"; - ofs << "[" << c++ << "]step()"; - ofs << m_sep; - ofs << "[" << c++ << "]time(s)"; - ofs << m_sep; - ofs << "[" << c++ << "]xmin(m)"; - ofs << m_sep; - ofs << "[" << c++ << "]xmax(m)"; - ofs << m_sep; - ofs << "[" << c++ << "]ymin(m)"; - ofs << m_sep; - ofs << "[" << c++ << "]ymax(m)"; - ofs << m_sep; - ofs << "[" << c++ << "]zmin(m)"; - ofs << m_sep; - ofs << "[" << c++ << "]zmax(m)"; - ofs << m_sep; - ofs << "[" << c++ << "]pxmin(kg*m/s)"; + ofs << "[" << off++ << "]step()"; ofs << m_sep; - ofs << "[" << c++ << "]pxmax(kg*m/s)"; - ofs << m_sep; - ofs << "[" << c++ << "]pymin(kg*m/s)"; - ofs << m_sep; - ofs << "[" << c++ << "]pymax(kg*m/s)"; - ofs << m_sep; - ofs << "[" << c++ << "]pzmin(kg*m/s)"; - ofs << m_sep; - ofs << "[" << c++ << "]pzmax(kg*m/s)"; - ofs << m_sep; - ofs << "[" << c++ << "]gmin()"; - ofs << m_sep; - ofs << "[" << c++ << "]gmax()"; - ofs << m_sep; -#if (defined WARPX_DIM_3D) - ofs << "[" << c++ << "]wmin()"; - ofs << m_sep; - ofs << "[" << c++ << "]wmax()"; -#else - ofs << "[" << c++ << "]wmin(1/m)"; - ofs << m_sep; - ofs << "[" << c++ << "]wmax(1/m)"; -#endif - if (myspc.DoQED()) - { - ofs << m_sep; - ofs << "[" << c++ << "]chimin()"; - ofs << m_sep; - ofs << "[" << c++ << "]chimax()"; + ofs << "[" << off++ << "]time(s)"; + for (const auto& name : all_diag_names){ + const auto& el = m_headers_indices[name]; + ofs << m_sep << "[" << el.idx + off << "]" << el.header; } ofs << std::endl; // close file @@ -424,7 +410,6 @@ void ParticleExtrema::ComputeDiags (int step) // declare external fields const int offset = 0; const auto getExternalEB = GetExternalEBField(pti, offset); - // define variables in preparation for field gathering amrex::Box box = pti.tilebox(); box.grow(ngEB); @@ -487,27 +472,32 @@ void ParticleExtrema::ComputeDiags (int step) ParallelDescriptor::ReduceRealMax(chimax_f); } #endif - m_data[0] = xmin; - m_data[1] = xmax; - m_data[2] = ymin; - m_data[3] = ymax; - m_data[4] = zmin; - m_data[5] = 
zmax; - m_data[6] = uxmin*m; - m_data[7] = uxmax*m; - m_data[8] = uymin*m; - m_data[9] = uymax*m; - m_data[10] = uzmin*m; - m_data[11] = uzmax*m; - m_data[12] = gmin; - m_data[13] = gmax; - m_data[14] = wmin; - m_data[15] = wmax; + + const auto get_idx = [&](const std::string& name){ + return m_headers_indices.at(name).idx; + }; + + m_data[get_idx("xmin")] = xmin; + m_data[get_idx("xmax")] = xmax; + m_data[get_idx("ymin")] = ymin; + m_data[get_idx("ymax")] = ymax; + m_data[get_idx("zmin")] = zmin; + m_data[get_idx("zmax")] = zmax; + m_data[get_idx("pxmin")] = uxmin*m; + m_data[get_idx("pxmax")] = uxmax*m; + m_data[get_idx("pymin")] = uymin*m; + m_data[get_idx("pymax")] = uymax*m; + m_data[get_idx("pzmin")] = uzmin*m; + m_data[get_idx("pzmax")] = uzmax*m; + m_data[get_idx("gmin")] = gmin; + m_data[get_idx("gmax")] = gmax; + m_data[get_idx("wmin")] = wmin; + m_data[get_idx("wmax")] = wmax; #if (defined WARPX_QED) if (myspc.DoQED()) { - m_data[16] = chimin_f; - m_data[17] = chimax_f; + m_data[get_idx("chimin")] = chimin_f; + m_data[get_idx("chimax")] = chimax_f; } #endif } diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp index fc7fc59aa19..33de9c1dabb 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp @@ -63,6 +63,7 @@ WarpXLaserProfiles::GaussianLaserProfile::init ( m_common_params.nvec.begin(), m_common_params.nvec.end(), m_params.stc_direction.begin(), 0.0); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(std::abs(dp2) < 1.0e-14, "stc_direction is not perpendicular to the laser plane vector"); diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp index 1440222c5cc..6ea8070dbd9 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp @@ -69,10 +69,21 @@ WarpXLaserProfiles::HarrisLaserProfile::fill_amplitude ( // time envelope is given by the Harris function Real time_envelope = 0.; + + constexpr auto norm = 1._rt/32._rt; + constexpr auto c_0 = 10._rt; + constexpr auto c_1 = -15._rt; + constexpr auto c_2 = 6._rt; + constexpr auto c_3 = -1._rt; + constexpr auto a_1 = 1._rt; + constexpr auto a_2 = 2._rt; + constexpr auto a_3 = 3._rt; + if (t < m_params.duration) - time_envelope = 1._rt/32._rt * (10._rt - 15._rt*std::cos(arg_env) + - 6._rt*std::cos(2._rt*arg_env) - - std::cos(3._rt*arg_env)); + time_envelope = norm * (c_0 + + c_1*std::cos(a_1*arg_env) + + c_2*std::cos(a_2*arg_env) + + c_3*std::cos(a_3*arg_env)); // Copy member variables to tmp copies for GPU runs. 
const auto tmp_e_max = m_common_params.e_max; diff --git a/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp b/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp index 939f098ee72..dbfc1b1d40d 100644 --- a/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp +++ b/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp @@ -59,8 +59,9 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) "For background stopping, the background temperature must be specified."); } - m_background_density_func = m_background_density_parser.compile<4>(); - m_background_temperature_func = m_background_temperature_parser.compile<4>(); + constexpr auto num_parser_args = 4; + m_background_density_func = m_background_density_parser.compile(); + m_background_temperature_func = m_background_temperature_parser.compile(); if (m_background_type == BackgroundStoppingType::ELECTRONS) { m_background_mass = PhysConst::m_e; diff --git a/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.cpp b/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.cpp index 59503d2ae95..f7e32dcbbde 100644 --- a/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.cpp +++ b/Source/Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.cpp @@ -159,10 +159,14 @@ void BreitWheelerEngine::compute_lookup_tables ( void BreitWheelerEngine::init_builtin_dndt_table() { + constexpr auto default_chi_phot_min = 0.02_prt; + constexpr auto default_chi_phot_max = 200.0_prt; + constexpr auto default_chi_phot_how_many = 64; + BW_dndt_table_params dndt_params; - dndt_params.chi_phot_min = 0.02_prt; - dndt_params.chi_phot_max = 200.0_prt; - dndt_params.chi_phot_how_many = 64; + dndt_params.chi_phot_min = default_chi_phot_min; + dndt_params.chi_phot_max = default_chi_phot_max; + dndt_params.chi_phot_how_many = default_chi_phot_how_many; const auto vals = amrex::Gpu::DeviceVector{ -1.34808e+02_prt, -1.16674e+02_prt, -1.01006e+02_prt, -8.74694e+01_prt, @@ -189,11 +193,16 @@ void BreitWheelerEngine::init_builtin_dndt_table() void BreitWheelerEngine::init_builtin_pair_prod_table() { + constexpr auto default_chi_phot_min = 0.02_prt; + constexpr auto default_chi_phot_max = 200.0_prt; + constexpr auto default_chi_phot_how_many = 64; + constexpr auto default_frac_how_many = 64; + BW_pair_prod_table_params pair_prod_params; - pair_prod_params.chi_phot_min = 0.02_prt; - pair_prod_params.chi_phot_max = 200.0_prt; - pair_prod_params.chi_phot_how_many = 64; - pair_prod_params.frac_how_many = 64; + pair_prod_params.chi_phot_min = default_chi_phot_min; + pair_prod_params.chi_phot_max = default_chi_phot_max; + pair_prod_params.chi_phot_how_many = default_chi_phot_how_many; + pair_prod_params.frac_how_many = default_frac_how_many; const auto vals = amrex::Gpu::DeviceVector{ 0.00000e+00_prt, 0.00000e+00_prt, 0.00000e+00_prt, 0.00000e+00_prt, diff --git a/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper.cpp b/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper.cpp index e93d757a3b3..c7b39a6fb4f 100644 --- a/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper.cpp +++ b/Source/Particles/ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper.cpp @@ -158,10 +158,14 @@ void QuantumSynchrotronEngine::compute_lookup_tables ( void QuantumSynchrotronEngine::init_builtin_dndt_table() { + constexpr auto default_chi_part_min = 1.0e-3_prt; + constexpr auto 
default_chi_part_max = 200.0_prt; + constexpr auto default_chi_part_how_many = 64; + QS_dndt_table_params dndt_params; - dndt_params.chi_part_min = 1.0e-3_prt; - dndt_params.chi_part_max = 200.0_prt; - dndt_params.chi_part_how_many = 64; + dndt_params.chi_part_min = default_chi_part_min; + dndt_params.chi_part_max = default_chi_part_max; + dndt_params.chi_part_how_many = default_chi_part_how_many; const auto vals = amrex::Gpu::DeviceVector{ @@ -190,12 +194,18 @@ void QuantumSynchrotronEngine::init_builtin_dndt_table() void QuantumSynchrotronEngine::init_builtin_phot_em_table() { + constexpr auto default_chi_part_min = 1.0e-3_prt; + constexpr auto default_chi_part_max = 200.0_prt; + constexpr auto default_frac_min = 1.0e-12_prt; + constexpr auto default_chi_part_how_many = 64; + constexpr auto default_frac_how_many = 64; + QS_phot_em_table_params phot_em_params; - phot_em_params.chi_part_min = 1.0e-3_prt; - phot_em_params.chi_part_max = 200.0_prt; - phot_em_params.frac_min = 1.0e-12_prt; - phot_em_params.chi_part_how_many = 64; - phot_em_params.frac_how_many = 64; + phot_em_params.chi_part_min = default_chi_part_min; + phot_em_params.chi_part_max = default_chi_part_max; + phot_em_params.frac_min = default_frac_min; + phot_em_params.chi_part_how_many = default_chi_part_how_many; + phot_em_params.frac_how_many = default_frac_how_many; const auto vals = amrex::Gpu::DeviceVector{ diff --git a/Source/Particles/Gather/GetExternalFields.cpp b/Source/Particles/Gather/GetExternalFields.cpp index 6f5c4cea2cc..9bf2c798188 100644 --- a/Source/Particles/Gather/GetExternalFields.cpp +++ b/Source/Particles/Gather/GetExternalFields.cpp @@ -53,17 +53,19 @@ GetExternalEBField::GetExternalEBField (const WarpXParIter& a_pti, int a_offset) if (mypc.m_E_ext_particle_s == "parse_e_ext_particle_function") { m_Etype = ExternalFieldInitType::Parser; - m_Exfield_partparser = mypc.m_Ex_particle_parser->compile<4>(); - m_Eyfield_partparser = mypc.m_Ey_particle_parser->compile<4>(); - m_Ezfield_partparser = mypc.m_Ez_particle_parser->compile<4>(); + constexpr auto num_arguments = 4; //x,y,z,t + m_Exfield_partparser = mypc.m_Ex_particle_parser->compile(); + m_Eyfield_partparser = mypc.m_Ey_particle_parser->compile(); + m_Ezfield_partparser = mypc.m_Ez_particle_parser->compile(); } if (mypc.m_B_ext_particle_s == "parse_b_ext_particle_function") { m_Btype = ExternalFieldInitType::Parser; - m_Bxfield_partparser = mypc.m_Bx_particle_parser->compile<4>(); - m_Byfield_partparser = mypc.m_By_particle_parser->compile<4>(); - m_Bzfield_partparser = mypc.m_Bz_particle_parser->compile<4>(); + constexpr auto num_arguments = 4; //x,y,z,t + m_Bxfield_partparser = mypc.m_Bx_particle_parser->compile(); + m_Byfield_partparser = mypc.m_By_particle_parser->compile(); + m_Bzfield_partparser = mypc.m_Bz_particle_parser->compile(); } if (mypc.m_E_ext_particle_s == "repeated_plasma_lens" || diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index 7877e33d3fc..f1ee1059375 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -74,24 +74,41 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () m_do_boundary_buffer[i].resize(numSpecies(), 0); } +#if defined(WARPX_DIM_1D_Z) + constexpr auto idx_zlo = 0; + constexpr auto idx_zhi = 1; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + constexpr auto idx_xlo = 0; + constexpr auto idx_xhi = 1; + constexpr auto idx_zlo = 2; + constexpr auto idx_zhi = 3; +#else + constexpr auto idx_xlo = 0; + 
constexpr auto idx_xhi = 1; + constexpr auto idx_ylo = 2; + constexpr auto idx_yhi = 3; + constexpr auto idx_zlo = 4; + constexpr auto idx_zhi = 5; +#endif + for (int ispecies = 0; ispecies < numSpecies(); ++ispecies) { amrex::ParmParse pp_species(getSpeciesNames()[ispecies]); #if defined(WARPX_DIM_1D_Z) - pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[0][ispecies]); - pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[1][ispecies]); + pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[idx_zlo][ispecies]); + pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[idx_zhi][ispecies]); #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - pp_species.query("save_particles_at_xlo", m_do_boundary_buffer[0][ispecies]); - pp_species.query("save_particles_at_xhi", m_do_boundary_buffer[1][ispecies]); - pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[2][ispecies]); - pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[3][ispecies]); + pp_species.query("save_particles_at_xlo", m_do_boundary_buffer[idx_xlo][ispecies]); + pp_species.query("save_particles_at_xhi", m_do_boundary_buffer[idx_xhi][ispecies]); + pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[idx_zlo][ispecies]); + pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[idx_zhi][ispecies]); #else - pp_species.query("save_particles_at_xlo", m_do_boundary_buffer[0][ispecies]); - pp_species.query("save_particles_at_xhi", m_do_boundary_buffer[1][ispecies]); - pp_species.query("save_particles_at_ylo", m_do_boundary_buffer[2][ispecies]); - pp_species.query("save_particles_at_yhi", m_do_boundary_buffer[3][ispecies]); - pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[4][ispecies]); - pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[5][ispecies]); + pp_species.query("save_particles_at_xlo", m_do_boundary_buffer[idx_xlo][ispecies]); + pp_species.query("save_particles_at_xhi", m_do_boundary_buffer[idx_xhi][ispecies]); + pp_species.query("save_particles_at_ylo", m_do_boundary_buffer[idx_ylo][ispecies]); + pp_species.query("save_particles_at_yhi", m_do_boundary_buffer[idx_yhi][ispecies]); + pp_species.query("save_particles_at_zlo", m_do_boundary_buffer[idx_zlo][ispecies]); + pp_species.query("save_particles_at_zhi", m_do_boundary_buffer[idx_zhi][ispecies]); #endif #ifdef AMREX_USE_EB pp_species.query("save_particles_at_eb", m_do_boundary_buffer[AMREX_SPACEDIM*2][ispecies]); @@ -103,20 +120,20 @@ ParticleBoundaryBuffer::ParticleBoundaryBuffer () } #if defined(WARPX_DIM_1D_Z) - m_boundary_names[0] = "zlo"; - m_boundary_names[1] = "zhi"; + m_boundary_names[idx_zlo] = "zlo"; + m_boundary_names[idx_zhi] = "zhi"; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - m_boundary_names[0] = "xlo"; - m_boundary_names[1] = "xhi"; - m_boundary_names[2] = "zlo"; - m_boundary_names[3] = "zhi"; + m_boundary_names[idx_xlo] = "xlo"; + m_boundary_names[idx_xhi] = "xhi"; + m_boundary_names[idx_zlo] = "zlo"; + m_boundary_names[idx_zhi] = "zhi"; #else - m_boundary_names[0] = "xlo"; - m_boundary_names[1] = "xhi"; - m_boundary_names[2] = "ylo"; - m_boundary_names[3] = "yhi"; - m_boundary_names[4] = "zlo"; - m_boundary_names[5] = "zhi"; + m_boundary_names[idx_xlo] = "xlo"; + m_boundary_names[idx_xhi] = "xhi"; + m_boundary_names[idx_ylo] = "ylo"; + m_boundary_names[idx_yhi] = "yhi"; + m_boundary_names[idx_zlo] = "zlo"; + m_boundary_names[idx_zhi] = "zhi"; #endif #ifdef AMREX_USE_EB m_boundary_names[AMREX_SPACEDIM*2] = "eb"; From 
2c30427121b5bccc5dd69ac9c87e4279af68ba60 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Fri, 2 Sep 2022 10:26:50 -0700 Subject: [PATCH 0043/1346] Docs: abort_on_unused_inputs (#3364) * Document abort_on_unused_inputs * Fix default Co-authored-by: Axel Huebl --- Docs/source/usage/parameters.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index b81b088c6b8..6d293b52f91 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -139,6 +139,10 @@ Overall simulation parameters Note that even with this set to ``1`` WarpX will not catch all out-of-memory events yet when operating close to maximum device memory. `Please also see the documentation in AMReX `_. +* ``amrex.abort_on_unused_inputs`` (``0`` or ``1``; default is ``0`` for false) + When set to ``1``, this option causes simulation to fail *after* its completion if there were unused parameters. + It is mainly intended for continuous integration and automated testing to check that all tests and inputs are adapted to API changes. + Signal Handling ^^^^^^^^^^^^^^^ From c98605eec5725d43d1a7b591f36ec8aca35f37ca Mon Sep 17 00:00:00 2001 From: Ryan Sandberg Date: Fri, 2 Sep 2022 10:53:19 -0700 Subject: [PATCH 0044/1346] add boosted frame notes to usage FAQ (#3245) * add boosted frame notes to usage FAQ * Update faq.rst * Update Docs/source/usage/faq.rst * adjust some wording * rearrange * include minimum boosted frame numerics * be clear that boosted numerics are tricky * Fix extra newlines Co-authored-by: Axel Huebl Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Docs/source/usage/faq.rst | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/Docs/source/usage/faq.rst b/Docs/source/usage/faq.rst index 3c40f6a939a..cdeedd5f1bf 100644 --- a/Docs/source/usage/faq.rst +++ b/Docs/source/usage/faq.rst @@ -27,6 +27,30 @@ We change the default in ``cmake/dependencies/AMReX.cmake``. Note that the tiny profiler adds literally no overhead to the simulation runtime, thus we enable it by default. +What design principles should I keep in mind when creating an input file? +------------------------------------------------------------------------- + +Leave a cushion between lasers, particles, and the edge of computational domain. +The laser antenna and plasma species ``zmin`` can be less than or greater than the ``geometry.prob_hi``, +but not exactly equal. + + +What do I need to know about using the boosted frame? +----------------------------------------------------- + +The input deck can be designed in the lab frame and little modification to the physical set-up is needed -- +most of the work is done internally. +Here are a few practical items to assist in designing boosted frame simulations: + +- Ions must be explicitly included +- Best practice is to separate counter-propagating objects; things moving to the right should start with :math:`z <= 0` and things stationary or moving to the left (moving to the left in the boosted frame) should start with :math:`z > 0` +- Don't forget the general design principles listed above +- The boosted frame simulation begins at boosted time :math:`t'=0` +- Numerics and algorithms need to be adjusted, as there are numerical instabilities that arise in the boosted frame. For example, setting ``particles.use_fdtd_nci_corr=1`` for an FDTD simulation or setting ``psatd.use_default_v_galilean=1`` for a PSATD simulation. 
Be careful as this is overly simplistic and these options will not work in all cases. Please see the :ref:`input parameters documentation ` and the :ref:`examples ` for more information + +An in-depth discussion of the boosted frame is provided in the :ref:`moving window and optimal Lorentz boosted frame ` section. + + What kinds of RZ output do you support? --------------------------------------- From 6ae2472850252835640fd427b1d543ef3a3d13c9 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 2 Sep 2022 11:08:32 -0700 Subject: [PATCH 0045/1346] Release 22.09 (#3365) * AMReX: 22.09 * PICSAR: 22.09 * WarpX: 22.09 --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- LICENSE.txt | 2 +- Python/setup.py | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/PICSAR.cmake | 2 +- run_test.sh | 2 +- setup.py | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 498b6f6c66c..55d5ab171ad 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 3d29fd7d0e816f3c436112d90bdefe815e0ff72a && cd - + cd amrex && git checkout --detach 22.09 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/CMakeLists.txt b/CMakeLists.txt index 8756e6646e7..ec47f693997 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.20.0) -project(WarpX VERSION 22.08) +project(WarpX VERSION 22.09) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index 56cc556c41a..c8944627df4 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -71,9 +71,9 @@ # built documents. # # The short X.Y version. -version = u'22.08' +version = u'22.09' # The full version, including alpha/beta/rc tags. -release = u'22.08' +release = u'22.09' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/LICENSE.txt b/LICENSE.txt index bed5da90896..6b92a5631be 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -WarpX v22.08 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +WarpX v22.09 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/Python/setup.py b/Python/setup.py index 2db24d0bbfa..f662a436777 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -54,7 +54,7 @@ package_data = {} setup(name = 'pywarpx', - version = '22.08', + version = '22.09', packages = ['pywarpx'], package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 38a7842abb1..31fe16e1247 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 3d29fd7d0e816f3c436112d90bdefe815e0ff72a +branch = 22.09 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index e85cddbdc9a..b3fc30397db 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 3d29fd7d0e816f3c436112d90bdefe815e0ff72a +branch = 22.09 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index ccfc1b041d3..95cfcb6a50e 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -221,7 +221,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 22.08 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) + find_package(AMReX 22.09 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) message(STATUS "AMReX: Found version '${AMReX_VERSION}'") endif() endmacro() @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "3d29fd7d0e816f3c436112d90bdefe815e0ff72a" +set(WarpX_amrex_branch "22.09" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index e514879680c..bd1ab53aa56 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -82,7 +82,7 @@ function(find_picsar) #message(STATUS "PICSAR: Using version '${PICSAR_VERSION}'") else() # not supported by PICSAR (yet) - #find_package(PICSAR 22.08 CONFIG REQUIRED QED) + #find_package(PICSAR 22.09 CONFIG REQUIRED QED) #message(STATUS "PICSAR: Found version '${PICSAR_VERSION}'") message(FATAL_ERROR "PICSAR: Cannot be used as externally installed " "library yet. 
" diff --git a/run_test.sh b/run_test.sh index 733bc3c4721..a2f78829ae3 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 3d29fd7d0e816f3c436112d90bdefe815e0ff72a && cd - +cd amrex && git checkout --detach 22.09 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git diff --git a/setup.py b/setup.py index 260f96f090e..2ddbf286dd4 100644 --- a/setup.py +++ b/setup.py @@ -272,7 +272,7 @@ def build_extension(self, ext): setup( name='pywarpx', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version = '22.08', + version = '22.09', packages = ['pywarpx'], package_dir = {'pywarpx': 'Python/pywarpx'}, author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.', From 4d3914dc2593c57305545be1f34e230a4623ff79 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 7 Sep 2022 10:40:25 -0700 Subject: [PATCH 0046/1346] CI: Test New v. Legacy BTD in `RigidInjection_BTD` (#3327) * Improve Testing of New v. Legacy BTD * openPMD: Flush Series before Accessing Arrays Co-authored-by: Ryan Sandberg * Add Checksums for RigidInjection_BTD (BTD data) * Compare Also Particle Momenta (BTD data) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review Co-authored-by: Ryan Sandberg * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Ryan Sandberg Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../analysis_rigid_injection_BoostedFrame.py | 87 +++++++++++++++---- .../RigidInjection/inputs_2d_BoostedFrame | 36 ++++---- .../benchmarks_json/RigidInjection_BTD.json | 22 +++++ 3 files changed, 107 insertions(+), 38 deletions(-) create mode 100644 Regression/Checksum/benchmarks_json/RigidInjection_BTD.json diff --git a/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py b/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py index ba04580afea..8881eac7ba9 100755 --- a/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py +++ b/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py @@ -14,40 +14,89 @@ A Gaussian electron beam starts from -5 microns, propagates rigidly up to 20 microns after which it expands due to emittance only (the focal position is 20 microns). The beam width is measured after ~50 microns, and compared with -the theory (with a 5% error allowed). +the theory (with a 1% relative error allowed). The simulation runs in a boosted frame, and the analysis is done in the lab frame, i.e., on the back-transformed diagnostics. 
''' +import os +import sys + import numpy as np +import openpmd_api as io import read_raw_data +from scipy.constants import m_e import yt yt.funcs.mylog.setLevel(0) -# Read data from back-transformed diagnostics +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +filename = sys.argv[1] + +# Tolerances to check consistency between legacy BTD and new BTD +rtol = 1e-16 +atol = 1e-16 + +# Read data from legacy back-transformed diagnostics snapshot = './lab_frame_data/snapshots/snapshot00001' -header = './lab_frame_data/snapshots/Header' -allrd, info = read_raw_data.read_lab_snapshot(snapshot, header) -z = np.mean( read_raw_data.get_particle_field(snapshot, 'beam', 'z') ) -w = np.std ( read_raw_data.get_particle_field(snapshot, 'beam', 'x') ) +x_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'x') +z_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'z') +ux_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'ux') +uy_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'uy') +uz_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'uz') + +# Read data from new back-transformed diagnostics (plotfile) +ds_plotfile = yt.load(filename) +x_plotfile = ds_plotfile.all_data()['beam', 'particle_position_x'].v +z_plotfile = ds_plotfile.all_data()['beam', 'particle_position_y'].v +ux_plotfile = ds_plotfile.all_data()['beam', 'particle_momentum_x'].v +uy_plotfile = ds_plotfile.all_data()['beam', 'particle_momentum_y'].v +uz_plotfile = ds_plotfile.all_data()['beam', 'particle_momentum_z'].v -# initial parameters +# Read data from new back-transformed diagnostics (openPMD) +series = io.Series("./diags/diag2/openpmd_%T.h5", io.Access.read_only) +ds_openpmd = series.iterations[1] +x_openpmd = ds_openpmd.particles['beam']['position']['x'][:] +z_openpmd = ds_openpmd.particles['beam']['position']['z'][:] +ux_openpmd = ds_openpmd.particles['beam']['momentum']['x'][:] +uy_openpmd = ds_openpmd.particles['beam']['momentum']['y'][:] +uz_openpmd = ds_openpmd.particles['beam']['momentum']['z'][:] +series.flush() + +# Sort and compare arrays to check consistency between legacy BTD and new BTD (plotfile) +assert(np.allclose(np.sort(x_legacy), np.sort(x_plotfile), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(z_legacy), np.sort(z_plotfile), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(ux_legacy*m_e), np.sort(ux_plotfile), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(uy_legacy*m_e), np.sort(uy_plotfile), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(uz_legacy*m_e), np.sort(uz_plotfile), rtol=rtol, atol=atol)) + +# Sort and compare arrays to check consistency between legacy BTD and new BTD (openPMD) +assert(np.allclose(np.sort(x_legacy), np.sort(x_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(z_legacy), np.sort(z_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(ux_legacy*m_e), np.sort(ux_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(uy_legacy*m_e), np.sort(uy_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(uz_legacy*m_e), np.sort(uz_openpmd), rtol=rtol, atol=atol)) + +# Initial parameters z0 = 20.e-6 -w0 = 1.e-6 +x0 = 1.e-6 theta0 = np.arcsin(0.1) -# Theoretical beam width after propagation if rigid ON -wth = np.sqrt( w0**2 + (z-z0)**2*theta0**2 ) -error_rel = np.abs((w-wth)/wth) -tolerance_rel = 0.03 - -# Print error and assert small error -print("Beam position: " + str(z)) -print("Beam width : " + str(w)) +# Theoretical beam width after propagation with rigid injection 
+z = np.mean(z_legacy) +x = np.std(x_legacy) +print(f'Beam position = {z}') +print(f'Beam width = {x}') -print("error_rel : " + str(error_rel)) -print("tolerance_rel: " + str(tolerance_rel)) +xth = np.sqrt(x0**2 + (z-z0)**2 * theta0**2) +err = np.abs((x-xth) / xth) +tol = 1e-2 +print(f'error = {err}') +print(f'tolerance = {tol}') +assert(err < tol) -assert( error_rel < tolerance_rel ) +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame index e2e53bc95b7..73301448ef8 100644 --- a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame +++ b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame @@ -49,27 +49,25 @@ beam.zinject_plane = 20.e-6 beam.rigid_advance = true # Diagnostics -diagnostics.diags_names = diag1 btd_pltfile btd_openpmd -diag1.intervals = 10000 -diag1.diag_type = Full +diagnostics.diags_names = diag1 diag2 -btd_openpmd.diag_type = BackTransformed -btd_openpmd.do_back_transformed_fields = 1 -btd_openpmd.num_snapshots_lab = 2 -btd_openpmd.dt_snapshots_lab = 1.8679589331096515e-13 -btd_openpmd.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho -btd_openpmd.format = openpmd -btd_openpmd.openpmd_backend = h5 -btd_openpmd.buffer_size = 32 +diag1.diag_type = BackTransformed +diag1.do_back_transformed_fields = 1 +diag1.num_snapshots_lab = 2 +diag1.dt_snapshots_lab = 1.8679589331096515e-13 +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho +diag1.format = plotfile +diag1.buffer_size = 32 +diag1.write_species = 1 -btd_pltfile.diag_type = BackTransformed -btd_pltfile.do_back_transformed_fields = 1 -btd_pltfile.num_snapshots_lab = 2 -btd_pltfile.dt_snapshots_lab = 1.8679589331096515e-13 -btd_pltfile.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho -btd_pltfile.format = plotfile -btd_pltfile.buffer_size = 32 -btd_pltfile.write_species = 1 +diag2.diag_type = BackTransformed +diag2.do_back_transformed_fields = 1 +diag2.num_snapshots_lab = 2 +diag2.dt_snapshots_lab = 1.8679589331096515e-13 +diag2.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho +diag2.format = openpmd +diag2.openpmd_backend = h5 +diag2.buffer_size = 32 # old BTD diagnostics warpx.do_back_transformed_diagnostics = 1 diff --git a/Regression/Checksum/benchmarks_json/RigidInjection_BTD.json b/Regression/Checksum/benchmarks_json/RigidInjection_BTD.json new file mode 100644 index 00000000000..90cf134201f --- /dev/null +++ b/Regression/Checksum/benchmarks_json/RigidInjection_BTD.json @@ -0,0 +1,22 @@ +{ + "beam": { + "particle_momentum_x": 2.2080215038948936e-16, + "particle_momentum_y": 2.18711072170811e-16, + "particle_momentum_z": 2.730924530737497e-15, + "particle_position_x": 0.0260823588888081, + "particle_position_y": 0.5049438607316916, + "particle_weight": 62415.090744607645 + }, + "lev=0": { + "Bx": 3.721807007218884e-05, + "By": 0.004860056238272468, + "Bz": 5.5335765596325185e-06, + "Ex": 1466447.517373168, + "Ey": 11214.10223280318, + "Ez": 283216.0961218869, + "jx": 16437877.898892513, + "jy": 2492340.3149980404, + "jz": 215102423.57036877, + "rho": 0.7246235591902177 + } +} \ No newline at end of file From afda3a41d37539929a712e453dffbf8482a21e9d Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 7 Sep 2022 13:51:58 -0700 Subject: [PATCH 0047/1346] Avoid division by 0 in plasma lenses (#3370) --- Source/Particles/Gather/GetExternalFields.H | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/Particles/Gather/GetExternalFields.H 
b/Source/Particles/Gather/GetExternalFields.H index 7f39cc36c2a..92f1a21bcc0 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -151,7 +151,7 @@ struct GetExternalEBField // This assumes that vzp > 0. amrex::ParticleReal const zl_bounded = std::min(std::max(zl, lens_start), lens_end); amrex::ParticleReal const zr_bounded = std::min(std::max(zr, lens_start), lens_end); - amrex::ParticleReal const frac = (zr_bounded - zl_bounded)/(zr - zl); + amrex::ParticleReal const frac = ((zr - zl) == 0._rt ? 1._rt : (zr_bounded - zl_bounded)/(zr - zl)); // Note that "+=" is used since the fields may have been set above // if a different E or Btype was specified. From f575d7cf2131c01987a84696879ff3677619c4af Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 8 Sep 2022 09:26:51 -0700 Subject: [PATCH 0048/1346] Update highlights.rst (#3373) --- Docs/source/highlights.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 79a9aa31ea7..4ba5f66fc07 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -16,8 +16,8 @@ Scientific works in laser-plasma and beam-plasma acceleration. #. Wang J, Zeng M, Li D, Wang X, Lu W, Gao J. **Injection induced by coaxial laser interference in laser wakefield accelerators**. - *preprint*. under review, 2022. - `arXiv:2205.12083 `__ + Matter and Radiation at Extremes 7, 054001, 2022. + `DOI:10.1063/5.0101098 < https://doi.org/10.1063/5.0101098>`__ #. Miao B, Shrock JE, Feder L, Hollinger RC, Morrison J, Nedbailo R, Picksley A, Song H, Wang S, Rocca JJ, Milchberg HM. **Multi-GeV electron bunches from an all-optical laser wakefield accelerator**. From 8da242b15dd5280fdafafd9a6898d762bf73d098 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 8 Sep 2022 09:27:58 -0700 Subject: [PATCH 0049/1346] Docs: Fix DOI Link Formatting --- Docs/source/highlights.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 4ba5f66fc07..cc14e297177 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -17,7 +17,7 @@ Scientific works in laser-plasma and beam-plasma acceleration. #. Wang J, Zeng M, Li D, Wang X, Lu W, Gao J. **Injection induced by coaxial laser interference in laser wakefield accelerators**. Matter and Radiation at Extremes 7, 054001, 2022. - `DOI:10.1063/5.0101098 < https://doi.org/10.1063/5.0101098>`__ + `DOI:10.1063/5.0101098 `__ #. Miao B, Shrock JE, Feder L, Hollinger RC, Morrison J, Nedbailo R, Picksley A, Song H, Wang S, Rocca JJ, Milchberg HM. **Multi-GeV electron bunches from an all-optical laser wakefield accelerator**. 
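For reference, the guarded expression introduced in #3370 above computes the fraction of a particle's longitudinal advance that overlaps the plasma lens: zl and zr are the particle's z at the start and end of the step, and zl_bounded/zr_bounded are the same values clamped to [lens_start, lens_end]. When the particle does not advance in z during the step (zr == zl), the old expression evaluated 0/0; the fix returns 1 in that case, so no NaN enters the gathered external field. A minimal Python sketch of the same logic (illustrative only, not the WarpX implementation):

def lens_overlap_fraction(zl, zr, lens_start, lens_end):
    """Fraction of the step [zl, zr] that lies inside [lens_start, lens_end]."""
    zl_bounded = min(max(zl, lens_start), lens_end)
    zr_bounded = min(max(zr, lens_start), lens_end)
    # Guard against zr == zl (no longitudinal motion): return 1 instead of computing 0/0.
    return 1.0 if (zr - zl) == 0.0 else (zr_bounded - zl_bounded) / (zr - zl)

# A particle that sits inside the lens without advancing in z previously produced NaN;
# with the guard it gets a well-defined weight of 1.
print(lens_overlap_fraction(1.0e-3, 1.0e-3, 0.5e-3, 1.5e-3))  # 1.0
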
From eb62c182c6dc82f35efdaae0ce0fb6f2ea763ac1 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 8 Sep 2022 09:42:49 -0700 Subject: [PATCH 0050/1346] CI: Add 2D/3D Langmuir Tests w/ Multi-J PSATD (#3363) * CI: Add 3D Langmuir Tests w/ Multi-J PSATD * CI: Remove Old 2D Tests w/ Multi-J PSATD * CI: Add 2D Langmuir Tests w/ Multi-J PSATD * Use Test Name in `analysisOutputImage` --- Examples/Tests/multi_J/inputs_2d | 132 ------------------ Examples/Tests/multi_J/inputs_2d_pml | 132 ------------------ .../Langmuir_multi_2d_psatd_multiJ.json | 29 ++++ .../Langmuir_multi_2d_psatd_multiJ_nodal.json | 29 ++++ .../Langmuir_multi_psatd_multiJ.json | 28 ++++ .../Langmuir_multi_psatd_multiJ_nodal.json | 28 ++++ .../benchmarks_json/multi_J_2d_psatd.json | 45 ------ .../benchmarks_json/multi_J_2d_psatd_pml.json | 45 ------ Regression/WarpX-tests.ini | 112 ++++++++++----- 9 files changed, 190 insertions(+), 390 deletions(-) delete mode 100644 Examples/Tests/multi_J/inputs_2d delete mode 100644 Examples/Tests/multi_J/inputs_2d_pml create mode 100644 Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json create mode 100644 Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json create mode 100644 Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json create mode 100644 Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json delete mode 100644 Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json delete mode 100644 Regression/Checksum/benchmarks_json/multi_J_2d_psatd_pml.json diff --git a/Examples/Tests/multi_J/inputs_2d b/Examples/Tests/multi_J/inputs_2d deleted file mode 100644 index e09615503ed..00000000000 --- a/Examples/Tests/multi_J/inputs_2d +++ /dev/null @@ -1,132 +0,0 @@ -# Iterations -max_step = 150 - -# Domain decomposition -amr.n_cell = 128 256 -warpx.numprocs = 1 2 - -# Mesh refinement and geometry -amr.max_level = 0 -geometry.dims = 2 -geometry.prob_lo = -100e-6 -220e-6 -geometry.prob_hi = 100e-6 10e-6 - -# Boundary condition -boundary.field_lo = periodic damped -boundary.field_hi = periodic damped - -# Algorithms -algo.current_deposition = direct -algo.field_gathering = energy-conserving -algo.maxwell_solver = psatd -algo.particle_pusher = vay -algo.particle_shape = 3 - -# Numerics -warpx.cfl = 3.19 -warpx.do_nodal = 1 -warpx.use_filter = 1 -warpx.verbose = 1 - -# Boosted frame -warpx.boost_direction = z -warpx.gamma_boost = 2.870114028490 - -# Moving window -warpx.do_moving_window = 1 -warpx.moving_window_dir = z -warpx.moving_window_v = 1. - -# Spectral solver -psatd.do_time_averaging = 1 -psatd.update_with_rho = 1 - -# Multi-J scheme -warpx.do_multi_J = 1 -warpx.do_multi_J_n_depositions = 2 -warpx.do_dive_cleaning = 1 -warpx.do_divb_cleaning = 1 - -# Particles -particles.species_names = driver driver_back plasma_e plasma_p -particles.use_fdtd_nci_corr = 0 -particles.rigid_injected_species = driver driver_back - -# Driver (electrons) -driver.species_type = electron -driver.injection_style = "gaussian_beam" -driver.x_rms = 5e-6 -driver.y_rms = 5e-6 -driver.z_rms = 20.1e-6 -driver.x_m = 0. -driver.y_m = 0. -driver.z_m = -80e-6 -driver.npart = 100000 -driver.q_tot = -1e-10 -driver.momentum_distribution_type = "constant" -driver.ux = 0. -driver.uy = 0. 
-driver.uz = 2e9 -driver.zinject_plane = 2e-3 -driver.rigid_advance = true -driver.initialize_self_fields = 0 -driver.do_symmetrize = 1 - -# Driver (positrons) -driver_back.species_type = positron -driver_back.injection_style = "gaussian_beam" -driver_back.x_rms = 5e-6 -driver_back.y_rms = 5e-6 -driver_back.z_rms = 20.1e-6 -driver_back.x_m = 0. -driver_back.y_m = 0. -driver_back.z_m = -80e-6 -driver_back.npart = 100000 -driver_back.q_tot = 1e-10 -driver_back.momentum_distribution_type = "constant" -driver_back.ux = 0. -driver_back.uy = 0. -driver_back.uz = 2e9 -driver_back.zinject_plane = 2e-3 -driver_back.rigid_advance = true -driver_back.initialize_self_fields = 0 -driver_back.do_symmetrize = 1 -driver_back.do_backward_propagation = true - -# Electrons -plasma_e.species_type = electron -plasma_e.injection_style = "NUniformPerCell" -plasma_e.zmin = 0. -plasma_e.zmax = 0.05 -plasma_e.xmin = -90e-6 -plasma_e.xmax = 90e-6 -plasma_e.ymin = -90e-6 -plasma_e.ymax = 90e-6 -plasma_e.profile = constant -plasma_e.density = 1e23 -plasma_e.num_particles_per_cell_each_dim = 1 1 1 -plasma_e.momentum_distribution_type = "at_rest" -plasma_e.do_continuous_injection = 1 - -# Hydrogen -plasma_p.species_type = hydrogen -plasma_p.injection_style = "NUniformPerCell" -plasma_p.zmin = 0. -plasma_p.zmax = 0.05 -plasma_p.xmin = -90e-6 -plasma_p.xmax = 90e-6 -plasma_p.ymin = -90e-6 -plasma_p.ymax = 90e-6 -plasma_p.profile = constant -plasma_p.density = 1e23 -plasma_p.num_particles_per_cell_each_dim = 1 1 1 -plasma_p.momentum_distribution_type = "at_rest" -plasma_p.do_continuous_injection = 1 - -# Diagnostics -diagnostics.diags_names = diag1 -diag1.intervals = 150 -diag1.diag_type = Full -diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz F G divE rho rho_driver rho_driver_back rho_plasma_e rho_plasma_p -diag1.write_species = 1 -diag1.species = driver plasma_e plasma_p diff --git a/Examples/Tests/multi_J/inputs_2d_pml b/Examples/Tests/multi_J/inputs_2d_pml deleted file mode 100644 index 6cde7b9d10d..00000000000 --- a/Examples/Tests/multi_J/inputs_2d_pml +++ /dev/null @@ -1,132 +0,0 @@ -# Iterations -max_step = 150 - -# Domain decomposition -amr.n_cell = 128 256 -warpx.numprocs = 1 2 - -# Mesh refinement and geometry -amr.max_level = 0 -geometry.dims = 2 -geometry.prob_lo = -100e-6 -220e-6 -geometry.prob_hi = 100e-6 10e-6 - -# Boundary condition -boundary.field_lo = periodic pml -boundary.field_hi = periodic pml - -# Algorithms -algo.current_deposition = direct -algo.field_gathering = energy-conserving -algo.maxwell_solver = psatd -algo.particle_pusher = vay -algo.particle_shape = 3 - -# Numerics -warpx.cfl = 3.19 -warpx.do_nodal = 1 -warpx.use_filter = 1 -warpx.verbose = 1 - -# Boosted frame -warpx.boost_direction = z -warpx.gamma_boost = 2.870114028490 - -# Moving window -warpx.do_moving_window = 1 -warpx.moving_window_dir = z -warpx.moving_window_v = 1. - -# Spectral solver -psatd.do_time_averaging = 0 -psatd.update_with_rho = 1 - -# Multi-J scheme -warpx.do_multi_J = 1 -warpx.do_multi_J_n_depositions = 2 -warpx.do_dive_cleaning = 1 -warpx.do_divb_cleaning = 1 - -# Particles -particles.species_names = driver driver_back plasma_e plasma_p -particles.use_fdtd_nci_corr = 0 -particles.rigid_injected_species = driver driver_back - -# Driver (electrons) -driver.species_type = electron -driver.injection_style = "gaussian_beam" -driver.x_rms = 5e-6 -driver.y_rms = 5e-6 -driver.z_rms = 20.1e-6 -driver.x_m = 0. -driver.y_m = 0. 
-driver.z_m = -80e-6 -driver.npart = 100000 -driver.q_tot = -1e-10 -driver.momentum_distribution_type = "constant" -driver.ux = 0. -driver.uy = 0. -driver.uz = 2e9 -driver.zinject_plane = 2e-3 -driver.rigid_advance = true -driver.initialize_self_fields = 0 -driver.do_symmetrize = 1 - -# Driver (positrons) -driver_back.species_type = positron -driver_back.injection_style = "gaussian_beam" -driver_back.x_rms = 5e-6 -driver_back.y_rms = 5e-6 -driver_back.z_rms = 20.1e-6 -driver_back.x_m = 0. -driver_back.y_m = 0. -driver_back.z_m = -80e-6 -driver_back.npart = 100000 -driver_back.q_tot = 1e-10 -driver_back.momentum_distribution_type = "constant" -driver_back.ux = 0. -driver_back.uy = 0. -driver_back.uz = 2e9 -driver_back.zinject_plane = 2e-3 -driver_back.rigid_advance = true -driver_back.initialize_self_fields = 0 -driver_back.do_symmetrize = 1 -driver_back.do_backward_propagation = true - -# Electrons -plasma_e.species_type = electron -plasma_e.injection_style = "NUniformPerCell" -plasma_e.zmin = 0. -plasma_e.zmax = 0.05 -plasma_e.xmin = -90e-6 -plasma_e.xmax = 90e-6 -plasma_e.ymin = -90e-6 -plasma_e.ymax = 90e-6 -plasma_e.profile = constant -plasma_e.density = 1e23 -plasma_e.num_particles_per_cell_each_dim = 1 1 1 -plasma_e.momentum_distribution_type = "at_rest" -plasma_e.do_continuous_injection = 1 - -# Hydrogen -plasma_p.species_type = hydrogen -plasma_p.injection_style = "NUniformPerCell" -plasma_p.zmin = 0. -plasma_p.zmax = 0.05 -plasma_p.xmin = -90e-6 -plasma_p.xmax = 90e-6 -plasma_p.ymin = -90e-6 -plasma_p.ymax = 90e-6 -plasma_p.profile = constant -plasma_p.density = 1e23 -plasma_p.num_particles_per_cell_each_dim = 1 1 1 -plasma_p.momentum_distribution_type = "at_rest" -plasma_p.do_continuous_injection = 1 - -# Diagnostics -diagnostics.diags_names = diag1 -diag1.intervals = 150 -diag1.diag_type = Full -diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz F G divE rho rho_driver rho_driver_back rho_plasma_e rho_plasma_p -diag1.write_species = 1 -diag1.species = driver plasma_e plasma_p diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json new file mode 100644 index 00000000000..b0c362f0fb6 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json @@ -0,0 +1,29 @@ +{ + "electrons": { + "particle_momentum_x": 5.664739488600762e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.664739488600764e-20, + "particle_position_x": 0.6553599999999999, + "particle_position_y": 0.65536, + "particle_weight": 3200000000000000.5 + }, + "lev=0": { + "Bx": 0.0, + "By": 3.4900393205053586, + "Bz": 0.0, + "Ex": 3771422651410.755, + "Ey": 0.0, + "Ez": 3771422651410.742, + "jx": 1.0095457953459832e+16, + "jy": 0.0, + "jz": 1.0095457953459836e+16 + }, + "positrons": { + "particle_momentum_x": 5.664739488600754e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.664739488600756e-20, + "particle_position_x": 0.6553599999999999, + "particle_position_y": 0.6553599999999999, + "particle_weight": 3200000000000000.5 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json new file mode 100644 index 00000000000..66c8e3e8035 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json @@ -0,0 +1,29 @@ +{ + "electrons": { + "particle_momentum_x": 5.668522616618711e-20, + 
"particle_momentum_y": 0.0, + "particle_momentum_z": 5.668522616618715e-20, + "particle_position_x": 0.6553600000002356, + "particle_position_y": 0.6553600000002355, + "particle_weight": 3200000000000000.5 + }, + "lev=0": { + "Bx": 0.0, + "By": 5.6351165293218966, + "Bz": 0.0, + "Ex": 3747153697353.926, + "Ey": 0.0, + "Ez": 3747153697353.9287, + "jx": 1.0088631639558242e+16, + "jy": 0.0, + "jz": 1.0088631639558248e+16 + }, + "positrons": { + "particle_momentum_x": 5.66852261661871e-20, + "particle_momentum_y": 0.0, + "particle_momentum_z": 5.668522616618714e-20, + "particle_position_x": 0.6553600000002356, + "particle_position_y": 0.6553600000002356, + "particle_weight": 3200000000000000.5 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json new file mode 100644 index 00000000000..c76d7cfc5c5 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json @@ -0,0 +1,28 @@ +{ + "electrons": { + "particle_momentum_x": 9.629015522300135e-20, + "particle_position_x": 2.621440000009873, + "particle_position_y": 2.621440000009873, + "particle_position_z": 2.6214399999999998, + "particle_weight": 128000000000.00002 + }, + "lev=0": { + "Bx": 79.96476923345703, + "By": 79.96476923350225, + "Bz": 79.96690317049361, + "Ex": 84753887916472.72, + "Ey": 84753887916472.66, + "Ez": 84753877853695.67, + "jx": 6.081254778189634e+16, + "jy": 6.081254778189637e+16, + "jz": 6.081251943036953e+16, + "part_per_cell": 524288.0, + "rho": 703417424.2676101 + }, + "positrons": { + "particle_momentum_z": 9.629011306229332e-20, + "particle_position_x": 2.621440000009873, + "particle_position_y": 2.621440000009873, + "particle_position_z": 2.6214399999999998 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json new file mode 100644 index 00000000000..1f89c4dcb14 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json @@ -0,0 +1,28 @@ +{ + "electrons": { + "particle_momentum_x": 9.3282651765877e-20, + "particle_position_x": 2.6214400000000015, + "particle_position_y": 2.621440000000001, + "particle_position_z": 2.621440000000001, + "particle_weight": 128000000000.00002 + }, + "lev=0": { + "Bx": 17.338468210649435, + "By": 17.338468210679473, + "Bz": 17.338468210708463, + "Ex": 86130544037694.12, + "Ey": 86130544037694.16, + "Ez": 86130544037694.16, + "jx": 5.808322546347548e+16, + "jy": 5.80832254634755e+16, + "jz": 5.8083225463475464e+16, + "part_per_cell": 524288.0, + "rho": 721143170.1131016 + }, + "positrons": { + "particle_momentum_z": 9.328265176587699e-20, + "particle_position_x": 2.6214400000000015, + "particle_position_y": 2.621440000000001, + "particle_position_z": 2.621440000000001 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json b/Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json deleted file mode 100644 index a23aa4ec4ee..00000000000 --- a/Regression/Checksum/benchmarks_json/multi_J_2d_psatd.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "driver": { - "particle_momentum_x": 1.4539941020025028e-16, - "particle_momentum_y": 0.0, - "particle_momentum_z": 9.822790382172444e-09, - "particle_position_x": 0.4000037672275025, - "particle_position_y": 30.100865423602578, - "particle_weight": 124830181489215.27 - }, - 
"lev=0": { - "Bx": 0.0, - "By": 923873.3689884299, - "Bz": 0.0, - "Ex": 271445959245867.38, - "Ey": 0.0, - "Ez": 93017410300609.08, - "F": 6525.329585276354, - "G": 0.0, - "divE": 2.511377423363695e+19, - "jx": 1.0071853348781318e+16, - "jy": 0.0, - "jz": 6.431583372170538e+16, - "rho": 220511427.01289138, - "rho_driver": 2562225.119933118, - "rho_driver_back": 0.0, - "rho_plasma_e": 1359622970.398378, - "rho_plasma_p": 1361225477.7925916 - }, - "plasma_e": { - "particle_momentum_x": 7.24535866217633e-19, - "particle_momentum_y": 0.0, - "particle_momentum_z": 2.3479181521728354e-17, - "particle_position_x": 1.3924195384912583, - "particle_position_y": 10.079133954833015, - "particle_weight": 6.643024495443786e+16 - }, - "plasma_p": { - "particle_momentum_x": 1.4515031430944076e-18, - "particle_momentum_y": 0.0, - "particle_momentum_z": 4.0084640226979506e-14, - "particle_position_x": 1.3456479857933001, - "particle_position_y": 10.082154761668846, - "particle_weight": 6.652881944445523e+16 - } -} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/multi_J_2d_psatd_pml.json b/Regression/Checksum/benchmarks_json/multi_J_2d_psatd_pml.json deleted file mode 100644 index bef0ac877ab..00000000000 --- a/Regression/Checksum/benchmarks_json/multi_J_2d_psatd_pml.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "driver": { - "particle_momentum_x": 1.4499925060974604e-16, - "particle_momentum_y": 0.0, - "particle_momentum_z": 9.82279038230039e-09, - "particle_position_x": 0.4000037675727589, - "particle_position_y": 30.100865423602578, - "particle_weight": 124830181489215.27 - }, - "lev=0": { - "Bx": 0.0, - "By": 921659.7003424203, - "Bz": 0.0, - "Ex": 270877199432990.72, - "Ey": 0.0, - "Ez": 92626589083860.39, - "F": 6184.029984601904, - "G": 0.0, - "divE": 2.519005022753751e+19, - "jx": 1.0172600337490698e+16, - "jy": 0.0, - "jz": 6.3852340601837256e+16, - "rho": 220316594.2695177, - "rho_driver": 2562225.1199331186, - "rho_driver_back": 0.0, - "rho_plasma_e": 1359476240.39297, - "rho_plasma_p": 1361225457.832027 - }, - "plasma_e": { - "particle_momentum_x": 7.183647425698553e-19, - "particle_momentum_y": 0.0, - "particle_momentum_z": 2.3464251836665685e-17, - "particle_position_x": 1.3918911743233844, - "particle_position_y": 10.076337229777174, - "particle_weight": 6.641680297852639e+16 - }, - "plasma_p": { - "particle_momentum_x": 1.4494644790319828e-18, - "particle_momentum_y": 0.0, - "particle_momentum_z": 4.008465449121937e-14, - "particle_position_x": 1.3456480352993405, - "particle_position_y": 10.082154831407932, - "particle_weight": 6.652881944445523e+16 - } -} \ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index b3fc30397db..8479b51ffd2 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -411,6 +411,44 @@ particleTypes = electrons positrons analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png +[Langmuir_multi_psatd_multiJ] +buildDir = . 
+inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium +dim = 3 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons positrons +analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisOutputImage = Langmuir_multi_psatd_multiJ.png + +[Langmuir_multi_psatd_multiJ_nodal] +buildDir = . +inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 +dim = 3 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons positrons +analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisOutputImage = Langmuir_multi_psatd_multiJ_nodal.png + [Langmuir_multi_psatd_div_cleaning] buildDir = . inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt @@ -658,6 +696,44 @@ particleTypes = electrons positrons analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png +[Langmuir_multi_2d_psatd_multiJ] +buildDir = . +inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium +dim = 2 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons positrons +analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisOutputImage = Langmuir_multi_2d_psatd_multiJ.png + +[Langmuir_multi_2d_psatd_multiJ_nodal] +buildDir = . +inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 +dim = 2 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons positrons +analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisOutputImage = Langmuir_multi_2d_psatd_multiJ_nodal.png + [Langmuir_multi_2d_psatd_momentum_conserving] buildDir = . inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt @@ -2624,42 +2700,6 @@ compareParticles = 1 particleTypes = electrons ions analysisRoutine = Examples/Tests/galilean/analysis.py -[multi_J_2d_psatd] -buildDir = . 
-inputFile = Examples/Tests/multi_J/inputs_2d -runtime_params = warpx.abort_on_warning_threshold=medium -dim = 2 -addToCompileString = USE_PSATD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 1 -particleTypes = driver driver_back plasma_e plasma_p -analysisRoutine = Examples/analysis_default_regression.py - -[multi_J_2d_psatd_pml] -buildDir = . -inputFile = Examples/Tests/multi_J/inputs_2d_pml -runtime_params = warpx.abort_on_warning_threshold=medium -dim = 2 -addToCompileString = USE_PSATD=TRUE -cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON -restartTest = 0 -useMPI = 1 -numprocs = 2 -useOMP = 1 -numthreads = 1 -compileTest = 0 -doVis = 0 -compareParticles = 1 -particleTypes = -analysisRoutine = Examples/analysis_default_regression.py - [multi_J_rz_psatd] buildDir = . inputFile = Examples/Tests/multi_J/inputs_rz From beab2f3fecd8b5aa236f79d49d0d28f83b32d692 Mon Sep 17 00:00:00 2001 From: kngott Date: Fri, 9 Sep 2022 09:51:38 -0700 Subject: [PATCH 0051/1346] Fix perlmutter script. (#3375) * Fix perlmutter script. * Fix perlmutter script. --- Tools/machines/perlmutter-nersc/perlmutter.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch index 93e79f1516b..489ca6ccc81 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch @@ -14,7 +14,7 @@ #SBATCH -q regular #SBATCH -C gpu #SBATCH -c 32 -#SBATCH --ntasks-per-node=4 +#SBATCH --ntasks-per-gpu=1 #SBATCH --gpus-per-node=4 #SBATCH -o WarpX.o%j #SBATCH -e WarpX.e%j From b1348f65ed7c0f2c9049a797f05beb29288bb7a9 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Sep 2022 09:54:33 -0700 Subject: [PATCH 0052/1346] Fix: ABLASTR nvcc Phi Fine/Coarse (#3374) Poisson solver: work around an NVCC compile error downstream in ImpactX. This is likely due to nesting of lambdas. --- Source/ablastr/fields/PoissonSolver.H | 56 +++++++++++++++++++++------ 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index dd21669bb5b..1674ac3f5b2 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -59,6 +59,41 @@ namespace ablastr::fields { +namespace details +{ + /** Local interpolation from phi_cp to phi[lev+1] + * + * This is needed to work-around an NVCC limitation in downstream code (ImpactX), + * when nesting lambdas. Otherwise this could be written directly into the + * ParallelFor. 
+ * + * @param[out] phi_fp_arr phi on the fine level + * @param[in] phi_cp_arr phi on the coarse level + * @param[in] refratio refinement ration + */ + struct PoissonInterpCPtoFP + { + PoissonInterpCPtoFP( + amrex::Array4 const phi_fp_arr, + amrex::Array4 const phi_cp_arr, + amrex::IntVect const refratio) + : m_phi_fp_arr(phi_fp_arr), m_phi_cp_arr(phi_cp_arr), m_refratio(refratio) + {} + + AMREX_GPU_DEVICE AMREX_FORCE_INLINE + void + operator() (long i, long j, long k) const noexcept + { + amrex::mf_nodebilin_interp(i, j, k, 0, m_phi_fp_arr, 0, m_phi_cp_arr, + 0, m_refratio); + } + + amrex::Array4 const m_phi_fp_arr; + amrex::Array4 const m_phi_cp_arr; + amrex::IntVect const m_refratio; + }; +} + /** Compute the potential `phi` by solving the Poisson equation * * Uses `rho` as a source, assuming that the source moves at a @@ -247,7 +282,7 @@ computePhi (amrex::Vector const & rho, // Copy from phi[lev] to phi_cp (in parallel) const amrex::IntVect& ng = amrex::IntVect::TheUnitVector(); const amrex::Periodicity& crse_period = geom[lev].periodicity(); - // TODO: move WarpXCommUtil.cpp over to ABLASTR + ablastr::utils::communication::ParallelCopy( phi_cp, *phi[lev], @@ -264,17 +299,14 @@ computePhi (amrex::Vector const & rho, #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (amrex::MFIter mfi(*phi[lev+1],amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - amrex::Array4 const& phi_fp_arr = phi[lev+1]->array(mfi); - amrex::Array4 const& phi_cp_arr = phi_cp.array(mfi); - - amrex::Box const& b = mfi.tilebox(phi[lev+1]->ixType().toIntVect()); - amrex::ParallelFor( b, - [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept - { - amrex::mf_nodebilin_interp(i, j, k, 0, phi_fp_arr, 0, phi_cp_arr, 0, refratio); - }); + for (amrex::MFIter mfi(*phi[lev + 1], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + amrex::Array4 const phi_fp_arr = phi[lev + 1]->array(mfi); + amrex::Array4 const phi_cp_arr = phi_cp.array(mfi); + + details::PoissonInterpCPtoFP const interp(phi_fp_arr, phi_cp_arr, refratio); + + amrex::Box const b = mfi.tilebox(phi[lev + 1]->ixType().toIntVect()); + amrex::ParallelFor(b, interp); } } From 03c7ee071b4a2344f7c485738914f69441ca5db4 Mon Sep 17 00:00:00 2001 From: Elisa Rheaume <35204125+TiberiusRheaume@users.noreply.github.com> Date: Fri, 9 Sep 2022 13:51:09 -0700 Subject: [PATCH 0053/1346] Updated name for Elisa Rheaume in zenodo & Field Probe files (#3379) --- .zenodo.json | 2 +- Examples/Tests/FieldProbe/analysis_field_probe.py | 2 +- Examples/Tests/FieldProbe/inputs_2d | 2 +- Source/Diagnostics/ReducedDiags/FieldProbe.H | 2 +- Source/Diagnostics/ReducedDiags/FieldProbe.cpp | 2 +- Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.H | 2 +- Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index bd831642e16..3fded899afc 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -130,7 +130,7 @@ }, { "affiliation": "Lawrence Berkeley National Laboratory", - "name": "Rheaume, Tiberius", + "name": "Rheaume, Elisa", "orcid": "0000-0002-6710-0650" }, { diff --git a/Examples/Tests/FieldProbe/analysis_field_probe.py b/Examples/Tests/FieldProbe/analysis_field_probe.py index 7d7eaa0eb3b..e038bfd0f48 100755 --- a/Examples/Tests/FieldProbe/analysis_field_probe.py +++ b/Examples/Tests/FieldProbe/analysis_field_probe.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright 2021-2022 Tiberius Rheaume +# Copyright 2021-2022 Elisa Rheaume # # This 
file is part of WarpX. # diff --git a/Examples/Tests/FieldProbe/inputs_2d b/Examples/Tests/FieldProbe/inputs_2d index 848dd5adff8..eb0f427e86c 100644 --- a/Examples/Tests/FieldProbe/inputs_2d +++ b/Examples/Tests/FieldProbe/inputs_2d @@ -79,4 +79,4 @@ FP_line.x1_probe = 1.5e-6 FP_line.z1_probe = 1.7e-6 FP_line.resolution = 201 -authors = "Tiberius Rheaume , Axel Huebl " +authors = "Elisa Rheaume , Axel Huebl " diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.H b/Source/Diagnostics/ReducedDiags/FieldProbe.H index 27c52618731..655ebc2a068 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.H +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.H @@ -1,5 +1,5 @@ /* Copyright 2021 Lorenzo Giacomel, Neil Zaim, Yinjian Zhao - * Tiberius Rheaume, Axel Huebl + * Elisa Rheaume, Axel Huebl * * This file is part of WarpX. * diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index d5541b7ac19..993a8fa9e1c 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -1,4 +1,4 @@ -/* Copyright 2021 Lorenzo Giacomel, Tiberius Rheaume, Axel Huebl +/* Copyright 2021 Lorenzo Giacomel, Elisa Rheaume, Axel Huebl * * This file is part of WarpX. * diff --git a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.H b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.H index 0b9ff481e05..dbb55fc0aff 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.H +++ b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.H @@ -1,4 +1,4 @@ -/* Copyright 2021 Tiberius Rheaume, Axel Huebl +/* Copyright 2021 Elisa Rheaume, Axel Huebl * * This file is part of WarpX. * diff --git a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp index b1f20c4e76d..a49e1e08eaa 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp @@ -1,4 +1,4 @@ -/* Copyright 2021 Tiberius Rheaume, Axel Huebl +/* Copyright 2021 Elisa Rheaume, Axel Huebl * * This file is part of WarpX. * From 1634f6387492ae6605ce98ca4079ef71a0976c2a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Sep 2022 15:12:07 -0700 Subject: [PATCH 0054/1346] Frontier/Crusher: rocFFT Cache Control (#3366) rocFFT in ROCm 5.1+ tries to [write to a cache](https://rocfft.readthedocs.io/en/latest/library.html#runtime-compilation) in the home area by default. This does not scale. --- Docs/source/install/hpc/crusher.rst | 10 ++++++++++ Docs/source/install/hpc/frontier.rst | 10 ++++++++++ Tools/machines/crusher-olcf/submit.sh | 7 ++++++- Tools/machines/frontier-olcf/submit.sh | 5 +++++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/Docs/source/install/hpc/crusher.rst b/Docs/source/install/hpc/crusher.rst index 34a67fa3374..3aa6efcbd0e 100644 --- a/Docs/source/install/hpc/crusher.rst +++ b/Docs/source/install/hpc/crusher.rst @@ -106,3 +106,13 @@ Known System Issues .. code-block:: bash export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + +.. warning:: + + Sep 2nd, 2022 (OLCFDEV-1079): + rocFFT in ROCm 5.1+ tries to `write to a cache `__ in the home area by default. + This does not scale, disable it via: + + .. 
code-block:: bash + + export ROCFFT_RTC_CACHE_PATH=/dev/null diff --git a/Docs/source/install/hpc/frontier.rst b/Docs/source/install/hpc/frontier.rst index bf7cfbe1f4d..45da9496a41 100644 --- a/Docs/source/install/hpc/frontier.rst +++ b/Docs/source/install/hpc/frontier.rst @@ -117,3 +117,13 @@ Known System Issues .. code-block:: bash export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + +.. warning:: + + Sep 2nd, 2022 (OLCFDEV-1079): + rocFFT in ROCm 5.1+ tries to `write to a cache `__ in the home area by default. + This does not scale, disable it via: + + .. code-block:: bash + + export ROCFFT_RTC_CACHE_PATH=/dev/null diff --git a/Tools/machines/crusher-olcf/submit.sh b/Tools/machines/crusher-olcf/submit.sh index 9fbb7748d0a..828c0fb68df 100644 --- a/Tools/machines/crusher-olcf/submit.sh +++ b/Tools/machines/crusher-olcf/submit.sh @@ -19,10 +19,15 @@ # (GCDs) for a total of 8 GCDs per node. The programmer can think of the 8 GCDs # as 8 separate GPUs, each having 64 GB of high-bandwidth memory (HBM2E). -# note (5-16-22) +# note (5-16-22, OLCFHELP-6888) # this environment setting is currently needed on Crusher to work-around a # known issue with Libfabric export FI_MR_CACHE_MAX_COUNT=0 +# note (9-2-22, OLCFDEV-1079) +# this environment setting is needed to avoid that rocFFT writes a cache in +# the home directory, which does not scale. +export ROCFFT_RTC_CACHE_PATH=/dev/null + export OMP_NUM_THREADS=8 srun ./warpx inputs > output.txt diff --git a/Tools/machines/frontier-olcf/submit.sh b/Tools/machines/frontier-olcf/submit.sh index d33b4658b83..b318dd1797a 100644 --- a/Tools/machines/frontier-olcf/submit.sh +++ b/Tools/machines/frontier-olcf/submit.sh @@ -28,6 +28,11 @@ # known issue with Libfabric (both in the May and June PE) export FI_MR_CACHE_MAX_COUNT=0 +# note (9-2-22, OLCFDEV-1079) +# this environment setting is needed to avoid that rocFFT writes a cache in +# the home directory, which does not scale. 
+export ROCFFT_RTC_CACHE_PATH=/dev/null + export OMP_NUM_THREADS=1 export WARPX_NMPI_PER_NODE=8 export TOTAL_NMPI=$(( ${SLURM_JOB_NUM_NODES} * ${WARPX_NMPI_PER_NODE} )) From 193180a57c403508beaa2e6787083b8593f5fd44 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 9 Sep 2022 16:22:16 -0700 Subject: [PATCH 0055/1346] `SyncCurrent`: Split Filter and Sum over Guard Cells (#3222) * Split ApplyFilterandSumBoundaryJ * Fix Bug * Fix Bug * Fix CI * Pass Correct Periodicity to SumBoundaryJ * Use ApplyFilterJ and SumBoundaryJ w/ MR * Fix Bug (duplicate loops over dimensions) * Use ApplyFilterJ and SumBoundaryJ w/ MR * Clean up Variable Names in ApplyFilterJ * Increase Guard Cells of All MultiFabs, Not Only J * Reset CI Checksums --- .../benchmarks_json/Langmuir_multi_2d_MR.json | 36 +-- .../Langmuir_multi_2d_MR_anisotropic.json | 36 +-- ...erAcceleration_single_precision_comms.json | 4 +- .../particles_in_pml_2d_MR.json | 10 +- .../particles_in_pml_3d_MR.json | 26 +-- Source/Evolve/WarpXEvolve.cpp | 6 +- Source/Initialization/WarpXInitData.cpp | 4 - Source/Parallelization/GuardCellManager.H | 4 +- Source/Parallelization/GuardCellManager.cpp | 9 +- Source/Parallelization/WarpXComm.cpp | 207 ++++++++++-------- Source/WarpX.H | 21 +- Source/WarpX.cpp | 13 +- 12 files changed, 212 insertions(+), 164 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR.json index a067edf6c7f..1fbf0e29f36 100644 --- a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR.json @@ -1,40 +1,40 @@ { "electrons": { - "particle_momentum_x": 4.245553180462133e-20, + "particle_momentum_x": 4.244331469283422e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 4.245553180462118e-20, - "particle_position_x": 0.6553607116882387, - "particle_position_y": 0.6553607116882386, + "particle_momentum_z": 4.244331469283411e-20, + "particle_position_x": 0.655360445407094, + "particle_position_y": 0.6553604454070939, "particle_weight": 3200000000000000.5 }, "lev=0": { "Bx": 0.0, - "By": 35.447497788711175, + "By": 34.880237093101385, "Bz": 0.0, - "Ex": 7569456532381.726, + "Ex": 7573738411730.345, "Ey": 0.0, - "Ez": 7569456532381.752, - "jx": 7305055873426885.0, + "Ez": 7573738411730.373, + "jx": 7301940679621768.0, "jy": 0.0, - "jz": 7305055873426942.0 + "jz": 7301940679621819.0 }, "lev=1": { "Bx": 0.0, - "By": 640.5385076158955, + "By": 663.9253192035617, "Bz": 0.0, - "Ex": 7591603030109.285, + "Ex": 7599937806859.2, "Ey": 0.0, - "Ez": 7591603030109.295, - "jx": 7225962509586529.0, + "Ez": 7599937806859.211, + "jx": 7111706209199148.0, "jy": 0.0, - "jz": 7225962509586578.0 + "jz": 7111706209199176.0 }, "positrons": { - "particle_momentum_x": 4.2453138721853093e-20, + "particle_momentum_x": 4.24387817954511e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 4.245313872185295e-20, - "particle_position_x": 0.6553599057578553, - "particle_position_y": 0.6553599057578553, + "particle_momentum_z": 4.243878179545099e-20, + "particle_position_x": 0.6553597738022557, + "particle_position_y": 0.6553597738022557, "particle_weight": 3200000000000000.5 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_anisotropic.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_anisotropic.json index 751568a0168..445f514723c 100644 --- 
a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_anisotropic.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_MR_anisotropic.json @@ -1,40 +1,40 @@ { "electrons": { - "particle_momentum_x": 4.241333654356747e-20, + "particle_momentum_x": 4.2405417073382216e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 4.243147160194918e-20, - "particle_position_x": 0.6553604355411431, - "particle_position_y": 0.6553604252756429, + "particle_momentum_z": 4.24319208902951e-20, + "particle_position_x": 0.6553602236954971, + "particle_position_y": 0.6553603460513587, "particle_weight": 3200000000000000.5 }, "lev=0": { "Bx": 0.0, - "By": 31.85950148151796, + "By": 29.016398075047086, "Bz": 0.0, - "Ex": 7575288819909.899, + "Ex": 7578443960945.405, "Ey": 0.0, - "Ez": 7572601899574.373, - "jx": 7297234592450874.0, + "Ez": 7572873581953.129, + "jx": 7295166572198360.0, "jy": 0.0, - "jz": 7303113293544192.0 + "jz": 7303381904136724.0 }, "lev=1": { "Bx": 0.0, - "By": 75.34189668301649, + "By": 72.99870260220474, "Bz": 0.0, - "Ex": 4601932334532.539, + "Ex": 4606426110840.904, "Ey": 0.0, - "Ez": 7011970862051.691, - "jx": 4494391278293099.5, + "Ez": 7017198453908.449, + "jx": 4489353143132240.0, "jy": 0.0, - "jz": 6848792132928508.0 + "jz": 6838171379723720.0 }, "positrons": { - "particle_momentum_x": 4.2408343046906606e-20, + "particle_momentum_x": 4.2397727290339454e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 4.243183197977201e-20, - "particle_position_x": 0.6553601964895763, - "particle_position_y": 0.6553595823177, + "particle_momentum_z": 4.2431934836877495e-20, + "particle_position_x": 0.6553599859378852, + "particle_position_y": 0.655359657073043, "particle_weight": 3200000000000000.5 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_single_precision_comms.json b/Regression/Checksum/benchmarks_json/LaserAcceleration_single_precision_comms.json index fce66bcd9bf..21b6da776b3 100644 --- a/Regression/Checksum/benchmarks_json/LaserAcceleration_single_precision_comms.json +++ b/Regression/Checksum/benchmarks_json/LaserAcceleration_single_precision_comms.json @@ -12,7 +12,7 @@ }, "lev=0": { "Bx": 5863879.02030842, - "By": 2411.495350685753, + "By": 2411.501904737579, "Bz": 116025.42462998998, "Ex": 6267728094111.663, "Ey": 1670763233105822.0, @@ -22,4 +22,4 @@ "jz": 1045267552192496.5, "rho": 2211742630.7074776 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/particles_in_pml_2d_MR.json b/Regression/Checksum/benchmarks_json/particles_in_pml_2d_MR.json index 2042f0e2bd5..8f05b63e7f2 100644 --- a/Regression/Checksum/benchmarks_json/particles_in_pml_2d_MR.json +++ b/Regression/Checksum/benchmarks_json/particles_in_pml_2d_MR.json @@ -1,22 +1,22 @@ { "lev=0": { "Bx": 0.0, - "By": 3.578051588629886e-09, + "By": 3.5780515886298844e-09, "Bz": 0.0, "Ex": 1.9699822913484977, "Ey": 0.0, - "Ez": 0.5356004212513488, + "Ez": 0.5356004212513481, "jx": 0.0, "jy": 0.0, "jz": 0.0 }, "lev=1": { "Bx": 0.0, - "By": 3.2059933548393633e-09, + "By": 3.2059888629493914e-09, "Bz": 0.0, - "Ex": 2.459735202099343, + "Ex": 2.459734923669491, "Ey": 0.0, - "Ez": 0.4991264331012938, + "Ez": 0.4991257213214868, "jx": 0.0, "jy": 0.0, "jz": 0.0 diff --git a/Regression/Checksum/benchmarks_json/particles_in_pml_3d_MR.json b/Regression/Checksum/benchmarks_json/particles_in_pml_3d_MR.json index 43aee659baf..d665b04110f 100644 --- a/Regression/Checksum/benchmarks_json/particles_in_pml_3d_MR.json +++ 
b/Regression/Checksum/benchmarks_json/particles_in_pml_3d_MR.json @@ -1,24 +1,24 @@ { "lev=0": { - "Bx": 8.432558629746657e-05, - "By": 0.026892609338185998, - "Bz": 0.026892609345544216, - "Ex": 9112221.410831584, - "Ey": 4342060.779985666, - "Ez": 4342060.7789522, + "Bx": 8.432558629750022e-05, + "By": 0.02689260933254305, + "Bz": 0.026892609332543057, + "Ex": 9112221.407998262, + "Ey": 4342060.778180814, + "Ez": 4342060.778180817, "jx": 0.0, "jy": 0.0, "jz": 0.0 }, "lev=1": { - "Bx": 0.00013616524695631051, - "By": 0.04038118398020402, - "Bz": 0.040381183972079154, - "Ex": 13672614.170638269, - "Ey": 6988497.830608286, - "Ez": 6988497.829006851, + "Bx": 0.00013616513675437372, + "By": 0.040381141101323965, + "Bz": 0.040381141101323924, + "Ex": 13672597.083734166, + "Ey": 6988489.814361327, + "Ez": 6988489.814361326, "jx": 0.0, "jy": 0.0, "jz": 0.0 } -} +} \ No newline at end of file diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index cc71e78d5c3..1a21360219b 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -716,7 +716,8 @@ WarpX::OneStep_sub1 (Real curtime) PushParticlesandDepose(fine_lev, curtime, DtType::FirstHalf); RestrictCurrentFromFineToCoarsePatch(current_fp, current_cp, fine_lev); RestrictRhoFromFineToCoarsePatch(rho_fp, rho_cp, fine_lev); - ApplyFilterandSumBoundaryJ(current_fp, current_cp, fine_lev, PatchType::fine); + if (use_filter) ApplyFilterJ(current_fp, fine_lev); + SumBoundaryJ(current_fp, fine_lev, Geom(fine_lev).periodicity()); ApplyFilterandSumBoundaryRho(rho_fp, rho_cp, fine_lev, PatchType::fine, 0, 2*ncomps); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -772,7 +773,8 @@ WarpX::OneStep_sub1 (Real curtime) PushParticlesandDepose(fine_lev, curtime+dt[fine_lev], DtType::SecondHalf); RestrictCurrentFromFineToCoarsePatch(current_fp, current_cp, fine_lev); RestrictRhoFromFineToCoarsePatch(rho_fp, rho_cp, fine_lev); - ApplyFilterandSumBoundaryJ(current_fp, current_cp, fine_lev, PatchType::fine); + if (use_filter) ApplyFilterJ(current_fp, fine_lev); + SumBoundaryJ(current_fp, fine_lev, Geom(fine_lev).periodicity()); ApplyFilterandSumBoundaryRho(rho_fp, rho_cp, fine_lev, PatchType::fine, 0, ncomps); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index d0d9465791d..7af31773dce 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -392,10 +392,6 @@ WarpX::InitData () WarpX::InitNCICorrector(); } - if (WarpX::use_filter) { - WarpX::InitFilter(); - } - BuildBufferMasks(); if (WarpX::em_solver_medium==1) { diff --git a/Source/Parallelization/GuardCellManager.H b/Source/Parallelization/GuardCellManager.H index 05d99de4df6..c122aaa96c9 100644 --- a/Source/Parallelization/GuardCellManager.H +++ b/Source/Parallelization/GuardCellManager.H @@ -71,7 +71,9 @@ public: const bool do_pml, const int do_pml_in_domain, const int pml_ncell, - const amrex::Vector& ref_ratios); + const amrex::Vector& ref_ratios, + const bool use_filter, + const amrex::IntVect& bilinear_filter_stencil_length); // Guard cells allocated for MultiFabs E and B amrex::IntVect ng_alloc_EB = amrex::IntVect::TheZeroVector(); diff --git a/Source/Parallelization/GuardCellManager.cpp b/Source/Parallelization/GuardCellManager.cpp index cfe2c0e4dd2..d32d696190e 100644 --- a/Source/Parallelization/GuardCellManager.cpp +++ 
b/Source/Parallelization/GuardCellManager.cpp @@ -53,7 +53,9 @@ guardCellManager::Init ( const bool do_pml, const int do_pml_in_domain, const int pml_ncell, - const amrex::Vector& ref_ratios) + const amrex::Vector& ref_ratios, + const bool use_filter, + const amrex::IntVect& bilinear_filter_stencil_length) { // When using subcycling, the particles on the fine level perform two pushes // before being redistributed ; therefore, we need one extra guard cell @@ -160,6 +162,11 @@ guardCellManager::Init ( ng_depos_J = ng_alloc_J; ng_depos_rho = ng_alloc_Rho; + if (use_filter) + { + ng_alloc_J += bilinear_filter_stencil_length - amrex::IntVect(1); + } + // After pushing particle int ng_alloc_F_int = (do_moving_window) ? 2 : 0; // CKC solver requires one additional guard cell diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 27c01b6b3f1..373cb281aa5 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -953,43 +953,79 @@ void WarpX::RestrictCurrentFromFineToCoarsePatch ( CoarsenMR::Coarsen( *crse[2], *fine[2], refinement_ratio ); } -void WarpX::ApplyFilterandSumBoundaryJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, +void WarpX::ApplyFilterJ ( + const amrex::Vector,3>>& current, const int lev, - PatchType patch_type) + const int idim) { - const int glev = (patch_type == PatchType::fine) ? lev : lev-1; - const amrex::Periodicity& period = Geom(glev).periodicity(); - const std::array,3>& j = (patch_type == PatchType::fine) ? - J_fp[lev] : J_cp[lev]; - for (int idim = 0; idim < 3; ++idim) { - IntVect ng = j[idim]->nGrowVect(); - IntVect ng_depos_J = get_ng_depos_J(); - if (WarpX::do_current_centering) - { + amrex::MultiFab& J = *current[lev][idim]; + + const int ncomp = J.nComp(); + const amrex::IntVect ngrow = J.nGrowVect(); + amrex::MultiFab Jf(J.boxArray(), J.DistributionMap(), ncomp, ngrow); + bilinear_filter.ApplyStencil(Jf, J, lev); + + const int srccomp = 0; + const int dstcomp = 0; + amrex::MultiFab::Copy(J, Jf, srccomp, dstcomp, ncomp, ngrow); +} + +void WarpX::ApplyFilterJ ( + const amrex::Vector,3>>& current, + const int lev) +{ + for (int idim=0; idim<3; ++idim) + { + ApplyFilterJ(current, lev, idim); + } +} + +void WarpX::SumBoundaryJ ( + const amrex::Vector,3>>& current, + const int lev, + const int idim, + const amrex::Periodicity& period) +{ + amrex::MultiFab& J = *current[lev][idim]; + + amrex::IntVect ng = J.nGrowVect(); + amrex::IntVect ng_depos_J = get_ng_depos_J(); + + if (WarpX::do_current_centering) + { #if defined(WARPX_DIM_1D_Z) - ng_depos_J[0] += WarpX::current_centering_noz / 2; + ng_depos_J[0] += WarpX::current_centering_noz / 2; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - ng_depos_J[0] += WarpX::current_centering_nox / 2; - ng_depos_J[1] += WarpX::current_centering_noz / 2; + ng_depos_J[0] += WarpX::current_centering_nox / 2; + ng_depos_J[1] += WarpX::current_centering_noz / 2; #elif defined(WARPX_DIM_3D) - ng_depos_J[0] += WarpX::current_centering_nox / 2; - ng_depos_J[1] += WarpX::current_centering_noy / 2; - ng_depos_J[2] += WarpX::current_centering_noz / 2; + ng_depos_J[0] += WarpX::current_centering_nox / 2; + ng_depos_J[1] += WarpX::current_centering_noy / 2; + ng_depos_J[2] += WarpX::current_centering_noz / 2; #endif - } - if (use_filter) { - ng += bilinear_filter.stencil_length_each_dir-1; - ng_depos_J += bilinear_filter.stencil_length_each_dir-1; - ng_depos_J.min(ng); - MultiFab jf(j[idim]->boxArray(), j[idim]->DistributionMap(), 
j[idim]->nComp(), ng); - bilinear_filter.ApplyStencil(jf, *j[idim], lev); - WarpXSumGuardCells(*(j[idim]), jf, period, ng_depos_J, 0, (j[idim])->nComp()); - } else { - ng_depos_J.min(ng); - WarpXSumGuardCells(*(j[idim]), period, ng_depos_J, 0, (j[idim])->nComp()); - } + } + + if (use_filter) + { + ng_depos_J += bilinear_filter.stencil_length_each_dir - amrex::IntVect(1); + } + + ng_depos_J.min(ng); + + const amrex::IntVect src_ngrow = ng_depos_J; + const int icomp = 0; + const int ncomp = J.nComp(); + WarpXSumGuardCells(J, period, src_ngrow, icomp, ncomp); +} + +void WarpX::SumBoundaryJ ( + const amrex::Vector,3>>& current, + const int lev, + const amrex::Periodicity& period) +{ + for (int idim=0; idim<3; ++idim) + { + SumBoundaryJ(current, lev, idim, period); } } @@ -1011,88 +1047,73 @@ void WarpX::AddCurrentFromFineLevelandSumBoundary ( const amrex::Vector,3>>& J_cp, const int lev) { - ApplyFilterandSumBoundaryJ(J_fp, J_cp, lev, PatchType::fine); + const amrex::Periodicity& period = Geom(lev).periodicity(); - if (lev < finest_level) { + if (use_filter) + { + ApplyFilterJ(J_fp, lev); + } + SumBoundaryJ(J_fp, lev, period); + + if (lev < finest_level) + { // When there are current buffers, unlike coarse patch, // we don't care about the final state of them. - const amrex::Periodicity& period = Geom(lev).periodicity(); - for (int idim = 0; idim < 3; ++idim) { + for (int idim=0; idim<3; ++idim) + { MultiFab mf(J_fp[lev][idim]->boxArray(), J_fp[lev][idim]->DistributionMap(), J_fp[lev][idim]->nComp(), 0); mf.setVal(0.0); + IntVect ng = J_cp[lev+1][idim]->nGrowVect(); - IntVect ng_depos_J = get_ng_depos_J(); - if (WarpX::do_current_centering) - { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - ng_depos_J[0] += WarpX::current_centering_nox / 2; - ng_depos_J[1] += WarpX::current_centering_noz / 2; -#elif defined(WARPX_DIM_3D) - ng_depos_J[0] += WarpX::current_centering_nox / 2; - ng_depos_J[1] += WarpX::current_centering_noy / 2; - ng_depos_J[2] += WarpX::current_centering_noz / 2; -#endif - } + if (use_filter && current_buf[lev+1][idim]) { - // coarse patch of fine level - ng += bilinear_filter.stencil_length_each_dir-1; - ng_depos_J += bilinear_filter.stencil_length_each_dir-1; - ng_depos_J.min(ng); - MultiFab jfc(J_cp[lev+1][idim]->boxArray(), - J_cp[lev+1][idim]->DistributionMap(), J_cp[lev+1][idim]->nComp(), ng); - bilinear_filter.ApplyStencil(jfc, *J_cp[lev+1][idim], lev+1); - - // buffer patch of fine level - MultiFab jfb(current_buf[lev+1][idim]->boxArray(), - current_buf[lev+1][idim]->DistributionMap(), current_buf[lev+1][idim]->nComp(), ng); - bilinear_filter.ApplyStencil(jfb, *current_buf[lev+1][idim], lev+1); - - MultiFab::Add(jfb, jfc, 0, 0, current_buf[lev+1][idim]->nComp(), ng); - ablastr::utils::communication::ParallelAdd(mf, jfb, 0, 0, current_buf[lev + 1][idim]->nComp(), - ng, IntVect::TheZeroVector(), WarpX::do_single_precision_comms, period); - - WarpXSumGuardCells(*J_cp[lev+1][idim], jfc, period, ng_depos_J, 0, J_cp[lev+1][idim]->nComp()); + ApplyFilterJ(J_cp, lev+1, idim); + ApplyFilterJ(current_buf, lev+1, idim); + + MultiFab::Add( + *current_buf[lev+1][idim], *J_cp[lev+1][idim], + 0, 0, current_buf[lev+1][idim]->nComp(), ng); + + ablastr::utils::communication::ParallelAdd( + mf, *current_buf[lev+1][idim], 0, 0, + current_buf[lev+1][idim]->nComp(), + ng, amrex::IntVect(0), + do_single_precision_comms, period); } else if (use_filter) // but no buffer { - // coarse patch of fine level - ng += bilinear_filter.stencil_length_each_dir-1; - ng_depos_J += 
bilinear_filter.stencil_length_each_dir-1; - ng_depos_J.min(ng); - MultiFab jf(J_cp[lev+1][idim]->boxArray(), - J_cp[lev+1][idim]->DistributionMap(), J_cp[lev+1][idim]->nComp(), ng); - bilinear_filter.ApplyStencil(jf, *J_cp[lev+1][idim], lev+1); - - ablastr::utils::communication::ParallelAdd(mf, jf, 0, 0, J_cp[lev + 1][idim]->nComp(), ng, - IntVect::TheZeroVector(), WarpX::do_single_precision_comms, period); - WarpXSumGuardCells(*J_cp[lev+1][idim], jf, period, ng_depos_J, 0, J_cp[lev+1][idim]->nComp()); + ApplyFilterJ(J_cp, lev+1, idim); + + ablastr::utils::communication::ParallelAdd( + mf, *J_cp[lev+1][idim], 0, 0, + J_cp[lev+1][idim]->nComp(), + ng, amrex::IntVect(0), + do_single_precision_comms, period); } else if (current_buf[lev+1][idim]) // but no filter { - ng_depos_J.min(ng); - MultiFab::Add(*current_buf[lev+1][idim], - *J_cp [lev+1][idim], 0, 0, current_buf[lev+1][idim]->nComp(), - J_cp[lev+1][idim]->nGrowVect()); - ablastr::utils::communication::ParallelAdd(mf, *current_buf[lev + 1][idim], 0, 0, - current_buf[lev + 1][idim]->nComp(), - current_buf[lev + 1][idim]->nGrowVect(), - IntVect::TheZeroVector(), WarpX::do_single_precision_comms, - period); - WarpXSumGuardCells(*(J_cp[lev+1][idim]), period, ng_depos_J, 0, J_cp[lev+1][idim]->nComp()); + MultiFab::Add( + *current_buf[lev+1][idim], *J_cp[lev+1][idim], + 0, 0, current_buf[lev+1][idim]->nComp(), ng); + + ablastr::utils::communication::ParallelAdd( + mf, *current_buf[lev+1][idim], 0, 0, + current_buf[lev+1][idim]->nComp(), + ng, amrex::IntVect(0), + do_single_precision_comms, period); } else // no filter, no buffer { - ng_depos_J.min(ng); - ablastr::utils::communication::ParallelAdd(mf, *J_cp[lev + 1][idim], 0, 0, - J_cp[lev + 1][idim]->nComp(), - J_cp[lev + 1][idim]->nGrowVect(), - IntVect::TheZeroVector(), WarpX::do_single_precision_comms, - period); - WarpXSumGuardCells(*(J_cp[lev+1][idim]), period, ng_depos_J, 0, J_cp[lev+1][idim]->nComp()); + ablastr::utils::communication::ParallelAdd( + mf, *J_cp[lev+1][idim], 0, 0, + J_cp[lev+1][idim]->nComp(), + ng, amrex::IntVect(0), + do_single_precision_comms, period); } + SumBoundaryJ(J_cp, lev+1, idim, period); MultiFab::Add(*J_fp[lev][idim], mf, 0, 0, J_fp[lev+1][idim]->nComp(), 0); } } diff --git a/Source/WarpX.H b/Source/WarpX.H index 7a051e30db8..a0f0f22f990 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -974,11 +974,22 @@ private: const int lev); void StoreCurrent (const int lev); void RestoreCurrent (const int lev); - void ApplyFilterandSumBoundaryJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + void ApplyFilterJ ( + const amrex::Vector,3>>& current, const int lev, - PatchType patch_type); + const int idim); + void ApplyFilterJ ( + const amrex::Vector,3>>& current, + const int lev); + void SumBoundaryJ ( + const amrex::Vector,3>>& current, + const int lev, + const int idim, + const amrex::Periodicity& period); + void SumBoundaryJ ( + const amrex::Vector,3>>& current, + const int lev, + const amrex::Periodicity& period); void NodalSyncJ ( const amrex::Vector,3>>& J_fp, const amrex::Vector,3>>& J_cp, @@ -1094,7 +1105,7 @@ private: const int centering_noz); void AllocLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - const amrex::IntVect& ngEB, const amrex::IntVect& ngJ, + const amrex::IntVect& ngEB, amrex::IntVect& ngJ, const amrex::IntVect& ngRho, const amrex::IntVect& ngF, const amrex::IntVect& ngG, const bool aux_is_nodal); #ifdef WARPX_USE_PSATD diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 
86961dcaf31..1cdd2e3e0d7 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1626,6 +1626,13 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d amrex::RealVect dx = {WarpX::CellSize(lev)[0], WarpX::CellSize(lev)[1], WarpX::CellSize(lev)[2]}; #endif + // Initialize filter before guard cells manager + // (needs info on length of filter's stencil) + if (use_filter) + { + InitFilter(); + } + guard_cells.Init( dt[lev], dx, @@ -1648,7 +1655,9 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d WarpX::isAnyBoundaryPML(), WarpX::do_pml_in_domain, WarpX::pml_ncell, - this->refRatio()); + this->refRatio(), + use_filter, + bilinear_filter.stencil_length_each_dir); #ifdef AMREX_USE_EB @@ -1683,7 +1692,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d void WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm, - const IntVect& ngEB, const IntVect& ngJ, const IntVect& ngRho, + const IntVect& ngEB, IntVect& ngJ, const IntVect& ngRho, const IntVect& ngF, const IntVect& ngG, const bool aux_is_nodal) { // Declare nodal flags From c50bb930fc685666929ad7cc28f55a1344d8a57a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 12 Sep 2022 07:38:51 -0700 Subject: [PATCH 0056/1346] Docs: Update Crusher (OLCF) (#3380) Use the newest available ROCm and CCE modules. --- .../crusher-olcf/crusher_warpx.profile.example | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Tools/machines/crusher-olcf/crusher_warpx.profile.example b/Tools/machines/crusher-olcf/crusher_warpx.profile.example index eeac15cdeec..1e3f90695a1 100644 --- a/Tools/machines/crusher-olcf/crusher_warpx.profile.example +++ b/Tools/machines/crusher-olcf/crusher_warpx.profile.example @@ -2,11 +2,11 @@ #export proj= # required dependencies -module load cmake/3.22.1 +module load cmake/3.23.2 module load craype-accel-amd-gfx90a -module load rocm/5.0.0 +module load rocm/5.2.0 module load cray-mpich -#module load cce/14.0.0 # must be loaded after rocm +module load cce/14.0.2 # must be loaded after rocm # optional: faster builds module load ccache @@ -20,14 +20,14 @@ module load nano #module load lapackpp # optional: for QED lookup table generation support -module load boost/1.78.0-cxx17 +#module load boost/1.78.0-cxx17 # optional: for openPMD support -module load adios2/2.7.1 +module load adios2/2.8.1 module load cray-hdf5-parallel/1.12.1.1 # optional: for Python bindings or libEnsemble -module load cray-python/3.9.4.2 +module load cray-python/3.9.12.1 # fix system defaults: do not escape $ with a \ on tab completion shopt -s direxpand @@ -51,4 +51,4 @@ export CXX=$(which CC) export FC=$(which ftn) export CFLAGS="-I${ROCM_PATH}/include" export CXXFLAGS="-I${ROCM_PATH}/include" -export LDFLAGS="-L${ROCM_PATH}/lib -lamdhip64" +#export LDFLAGS="-L${ROCM_PATH}/lib -lamdhip64" From 936faddb9a8593d5987a2421b519239050152810 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 12 Sep 2022 13:36:27 -0700 Subject: [PATCH 0057/1346] Docs: Crusher (OLCF) for PSATD+RZ (#3386) Document BLAS++/LAPACK++ usage with HIP on OLCF Crusher. 
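As a rough, non-authoritative sketch of how the installed libraries are then consumed (the actual compile commands live in crusher.rst and are not shown in the hunk below): after building BLAS++ and LAPACK++ as documented, a PSATD+RZ configure step would look roughly like this. The -DWarpX_COMPUTE=HIP flag and the -j 10 parallelism are assumptions for illustration only; WarpX_DIMS and WarpX_PSATD are the CMake options referenced elsewhere in this series.

    # sketch only: configure and build WarpX with the PSATD solver in RZ geometry on AMD GPUs
    # (-DWarpX_COMPUTE=HIP is an assumption; WarpX_DIMS/WarpX_PSATD appear in CMakeLists.txt)
    cmake -S . -B build -DWarpX_COMPUTE=HIP -DWarpX_DIMS=RZ -DWarpX_PSATD=ON
    cmake --build build -j 10
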
--- Docs/source/install/hpc/crusher.rst | 19 ++++++++++++++++++- .../crusher_warpx.profile.example | 8 +++++++- Tools/machines/crusher-olcf/submit.sh | 1 + 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/Docs/source/install/hpc/crusher.rst b/Docs/source/install/hpc/crusher.rst index 3aa6efcbd0e..6ee03f88a8b 100644 --- a/Docs/source/install/hpc/crusher.rst +++ b/Docs/source/install/hpc/crusher.rst @@ -18,7 +18,7 @@ If you are new to this system, **please see the following resources**: * `Production directories `_: * ``$PROJWORK/$proj/``: shared with all members of a project, purged every 90 days (recommended) - * ``$MEMBERWORK/$proj/``: single user, purged every 90 days(usually smaller quota) + * ``$MEMBERWORK/$proj/``: single user, purged every 90 days (usually smaller quota) * ``$WORLDWORK/$proj/``: shared with all users, purged every 90 days * Note that the ``$HOME`` directory is mounted as read-only on compute nodes. That means you cannot run in your ``$HOME``. @@ -45,6 +45,21 @@ We recommend to store the above lines in a file, such as ``$HOME/crusher_warpx.p source $HOME/crusher_warpx.profile +And since Crusher does not yet provide a module for them, install BLAS++ and LAPACK++: + +.. code-block:: bash + + # BLAS++ (for PSATD+RZ) + git clone https://bitbucket.org/icl/blaspp.git src/blaspp + rm -rf src/blaspp-crusher-build + cmake -S src/blaspp -B src/blaspp-crusher-build -Duse_openmp=OFF -Dgpu_backend=hip -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/crusher/blaspp-master + cmake --build src/blaspp-crusher-build --target install --parallel 10 + + # LAPACK++ (for PSATD+RZ) + git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + rm -rf src/lapackpp-crusher-build + cmake -S src/lapackpp -B src/lapackpp-crusher-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/crusher/lapackpp-master + cmake --build src/lapackpp-crusher-build --target install --parallel 10 Then, ``cd`` into the directory ``$HOME/src/warpx`` and use the following commands to compile: @@ -69,6 +84,8 @@ Running MI250X GPUs (2x64 GB) ^^^^^^^^^^^^^^^^^^^^^ +ECP WarpX project members, use the ``aph114`` project ID. + After requesting an interactive node with the ``getNode`` alias above, run a simulation like this, here using 8 MPI ranks and a single node: .. 
code-block:: bash diff --git a/Tools/machines/crusher-olcf/crusher_warpx.profile.example b/Tools/machines/crusher-olcf/crusher_warpx.profile.example index 1e3f90695a1..197f360c2e8 100644 --- a/Tools/machines/crusher-olcf/crusher_warpx.profile.example +++ b/Tools/machines/crusher-olcf/crusher_warpx.profile.example @@ -1,7 +1,9 @@ # please set your project account +# note: WarpX ECP members use aph114_crusher #export proj= # required dependencies +module load cpe/22.08 module load cmake/3.23.2 module load craype-accel-amd-gfx90a module load rocm/5.2.0 @@ -18,13 +20,17 @@ module load nano # optional: for PSATD in RZ geometry support (not yet available) #module load blaspp #module load lapackpp +export CMAKE_PREFIX_PATH=$HOME/sw/crusher/blaspp-master:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=$HOME/sw/crusher/lapackpp-master:$CMAKE_PREFIX_PATH +export LD_LIBRARY_PATH=$HOME/sw/crusher/blaspp-master/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=$HOME/sw/crusher/lapackpp-master/lib64:$LD_LIBRARY_PATH # optional: for QED lookup table generation support #module load boost/1.78.0-cxx17 # optional: for openPMD support module load adios2/2.8.1 -module load cray-hdf5-parallel/1.12.1.1 +module load cray-hdf5-parallel/1.12.1.5 # optional: for Python bindings or libEnsemble module load cray-python/3.9.12.1 diff --git a/Tools/machines/crusher-olcf/submit.sh b/Tools/machines/crusher-olcf/submit.sh index 828c0fb68df..1fb13c1a274 100644 --- a/Tools/machines/crusher-olcf/submit.sh +++ b/Tools/machines/crusher-olcf/submit.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash #SBATCH -A +# note: WarpX ECP members use aph114 #SBATCH -J warpx #SBATCH -o %x-%j.out #SBATCH -t 00:10:00 From 70f9a863469f5e005d5742433c5207d723d3ac42 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 12 Sep 2022 15:01:44 -0700 Subject: [PATCH 0058/1346] AMReX: Weekly Update (#3387) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 55d5ab171ad..9463b30a82f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 22.09 && cd - + cd amrex && git checkout --detach 9aa23c202a13eee489a06030b9aeda6b89856944 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 31fe16e1247..d2fff710688 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 22.09 +branch = 9aa23c202a13eee489a06030b9aeda6b89856944 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 8479b51ffd2..e123efef29c 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 22.09 +branch = 9aa23c202a13eee489a06030b9aeda6b89856944 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 95cfcb6a50e..2db3d33e87c 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "22.09" +set(WarpX_amrex_branch "9aa23c202a13eee489a06030b9aeda6b89856944" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index a2f78829ae3..5dc4713a889 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 22.09 && cd - +cd amrex && git checkout --detach 9aa23c202a13eee489a06030b9aeda6b89856944 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 425d22a5b596eb1ceb8e5a07d1ed3766870f9d9e Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 12 Sep 2022 15:32:15 -0700 Subject: [PATCH 0059/1346] ABLASTR: Move Used Inputs Helper (#3376) Move the helper to write a file for used inputs to ABLASTR. 
--- Source/Initialization/WarpXInitData.cpp | 11 ++------- Source/ablastr/utils/CMakeLists.txt | 3 ++- Source/ablastr/utils/Make.package | 5 +++-- Source/ablastr/utils/UsedInputsFile.H | 27 ++++++++++++++++++++++ Source/ablastr/utils/UsedInputsFile.cpp | 30 +++++++++++++++++++++++++ 5 files changed, 64 insertions(+), 12 deletions(-) create mode 100644 Source/ablastr/utils/UsedInputsFile.H create mode 100644 Source/ablastr/utils/UsedInputsFile.cpp diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 7af31773dce..812f0bf01df 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -29,6 +29,7 @@ #include "Utils/WarpXUtil.H" #include +#include #include #include @@ -64,7 +65,6 @@ #include #include #include -#include #include #include #include @@ -350,14 +350,7 @@ WarpX::PrintMainPICparameters () void WarpX::WriteUsedInputsFile (std::string const & filename) const { - amrex::Print() << "For full input parameters, see the file: " << filename << "\n\n"; - - if (ParallelDescriptor::IOProcessor()) { - std::ofstream jobInfoFile; - jobInfoFile.open(filename.c_str(), std::ios::out); - ParmParse::dumpTable(jobInfoFile, true); - jobInfoFile.close(); - } + ablastr::utils::write_used_inputs_file(filename); } void diff --git a/Source/ablastr/utils/CMakeLists.txt b/Source/ablastr/utils/CMakeLists.txt index 9ac122caea4..29e7cfb6d20 100644 --- a/Source/ablastr/utils/CMakeLists.txt +++ b/Source/ablastr/utils/CMakeLists.txt @@ -1,8 +1,9 @@ target_sources(ablastr PRIVATE Communication.cpp - TextMsg.cpp SignalHandling.cpp + TextMsg.cpp + UsedInputsFile.cpp ) add_subdirectory(msg_logger) diff --git a/Source/ablastr/utils/Make.package b/Source/ablastr/utils/Make.package index c9be0153acc..6db5f150b94 100644 --- a/Source/ablastr/utils/Make.package +++ b/Source/ablastr/utils/Make.package @@ -1,6 +1,7 @@ -CEXE_sources += TextMsg.cpp -CEXE_sources += SignalHandling.cpp CEXE_sources += Communication.cpp +CEXE_sources += SignalHandling.cpp +CEXE_sources += TextMsg.cpp +CEXE_sources += UsedInputsFile.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/ablastr/utils diff --git a/Source/ablastr/utils/UsedInputsFile.H b/Source/ablastr/utils/UsedInputsFile.H new file mode 100644 index 00000000000..543ae98a256 --- /dev/null +++ b/Source/ablastr/utils/UsedInputsFile.H @@ -0,0 +1,27 @@ +/* Copyright 2022 Axel Huebl + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef ABLASTR_USED_INPUTS_FILE_H +#define ABLASTR_USED_INPUTS_FILE_H + +#include + + +namespace ablastr::utils +{ + /** Write a file that record all inputs: inputs file + command line options + * + * This uses the same syntax as amrex::ParmParse inputs files. + * Only the AMReX IOProcessor writes. + * + * @param filename the name of the text file to write + */ + void + write_used_inputs_file (std::string const & filename); +} + +#endif // ABLASTR_USED_INPUTS_FILE_H diff --git a/Source/ablastr/utils/UsedInputsFile.cpp b/Source/ablastr/utils/UsedInputsFile.cpp new file mode 100644 index 00000000000..175c67619e7 --- /dev/null +++ b/Source/ablastr/utils/UsedInputsFile.cpp @@ -0,0 +1,30 @@ +/* Copyright 2022 Axel Huebl + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ + +#include "UsedInputsFile.H" + +#include +#include +#include + +#include +#include +#include + + +void +ablastr::utils::write_used_inputs_file (std::string const & filename) +{ + amrex::Print() << "For full input parameters, see the file: " << filename << "\n\n"; + + if (amrex::ParallelDescriptor::IOProcessor()) { + std::ofstream jobInfoFile; + jobInfoFile.open(filename.c_str(), std::ios::out); + amrex::ParmParse::dumpTable(jobInfoFile, true); + jobInfoFile.close(); + } +} From 1c7b2b6159c280a97f469f26777cd4fdbdce8eb7 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Tue, 13 Sep 2022 20:39:58 -0700 Subject: [PATCH 0060/1346] use getWithParser (#3394) --- Source/Initialization/PlasmaInjector.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index 3eced8f5d96..e56d5a5daab 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -240,7 +240,7 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) // so that inj_pos->getPositionUnitBox calls // InjectorPosition[Random or Regular].getPositionUnitBox. else if (injection_style == "nrandompercell") { - queryWithParser(pp_species_name, "num_particles_per_cell", num_particles_per_cell); + getWithParser(pp_species_name, "num_particles_per_cell", num_particles_per_cell); #if WARPX_DIM_RZ if (WarpX::n_rz_azimuthal_modes > 1) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -258,7 +258,7 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) parseMomentum(pp_species_name); } else if (injection_style == "nfluxpercell") { surface_flux = true; - queryWithParser(pp_species_name, "num_particles_per_cell", num_particles_per_cell_real); + getWithParser(pp_species_name, "num_particles_per_cell", num_particles_per_cell_real); #ifdef WARPX_DIM_RZ if (WarpX::n_rz_azimuthal_modes > 1) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( From 0f344d617d7393b16c955018d95e1aa5d47ebb94 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 13 Sep 2022 20:41:42 -0700 Subject: [PATCH 0061/1346] ABLASTR: Fix Stray Include in DepositCharge (#3393) This include into WarpX should not be here and is unused. --- Source/ablastr/particles/DepositCharge.H | 1 - 1 file changed, 1 deletion(-) diff --git a/Source/ablastr/particles/DepositCharge.H b/Source/ablastr/particles/DepositCharge.H index e2e27c75622..67113a6b73c 100644 --- a/Source/ablastr/particles/DepositCharge.H +++ b/Source/ablastr/particles/DepositCharge.H @@ -9,7 +9,6 @@ #include "ablastr/profiler/ProfilerWrapper.H" #include "Parallelization/KernelTimer.H" -#include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/ShapeFactors.H" #include "Particles/Deposition/ChargeDeposition.H" #include "ablastr/utils/TextMsg.H" From 04b6f67caab8ee95428a569c529110a12b2527f3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 14 Sep 2022 14:02:22 -0700 Subject: [PATCH 0062/1346] Frontier/Crusher: Less Invasive libFabric Work-Around (#3396) From Steve Abbott (HPE): > There's a known problem with the default libfabrics memory > registration cache monitor that impacts codes that allocate and > free MPI buffers frequently. What you're doing now, > FI_MR_CACHE_MAX_COUNT=0 is a big hammer that disables the memory > registration cache all together. 
That can have a negative > performance impact, because memory registration is a heavy > operation, but it doesn't seem to be hitting WarpX very hard. If > you're mostly following an allocate-communicate-free pattern, the > memory registration cache won't help you anyway. > > An alternative is to set FI_MR_CACHE_MONITOR=memhooks , which uses > an alternative cache monitor that doesn't have the same problem. I > tested on an 8 node WarpX case we have in a bug and only saw a 2% > speedup over FI_MR_CACHE_MAX_COUNT=0, and that speedup was in > FillBoundary which I'm guessing is the only place you might have > some MPI buffer reuse. If you start to scale out you may want to > try it. > > We're working on a new default cache monitor that won't have this > problem but I'm not sure the timeline for it. We'll make sure that > when it gets pushed out we'll let you know, but for now you have to > keep using either of these two environment variables. --- Docs/source/install/hpc/crusher.rst | 4 +++- Docs/source/install/hpc/frontier.rst | 4 +++- Tools/machines/crusher-olcf/submit.sh | 4 +++- Tools/machines/frontier-olcf/submit.sh | 4 +++- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/Docs/source/install/hpc/crusher.rst b/Docs/source/install/hpc/crusher.rst index 6ee03f88a8b..a33ae0acf11 100644 --- a/Docs/source/install/hpc/crusher.rst +++ b/Docs/source/install/hpc/crusher.rst @@ -122,7 +122,9 @@ Known System Issues .. code-block:: bash - export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + #export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + # or, less invasive: + export FI_MR_CACHE_MONITOR=memhooks # alternative cache monitor .. warning:: diff --git a/Docs/source/install/hpc/frontier.rst b/Docs/source/install/hpc/frontier.rst index 45da9496a41..1263d681151 100644 --- a/Docs/source/install/hpc/frontier.rst +++ b/Docs/source/install/hpc/frontier.rst @@ -116,7 +116,9 @@ Known System Issues .. code-block:: bash - export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + #export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + # or, less invasive: + export FI_MR_CACHE_MONITOR=memhooks # alternative cache monitor .. 
warning:: diff --git a/Tools/machines/crusher-olcf/submit.sh b/Tools/machines/crusher-olcf/submit.sh index 1fb13c1a274..cefd25c776e 100644 --- a/Tools/machines/crusher-olcf/submit.sh +++ b/Tools/machines/crusher-olcf/submit.sh @@ -23,7 +23,9 @@ # note (5-16-22, OLCFHELP-6888) # this environment setting is currently needed on Crusher to work-around a # known issue with Libfabric -export FI_MR_CACHE_MAX_COUNT=0 +#export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching +# or, less invasive: +export FI_MR_CACHE_MONITOR=memhooks # alternative cache monitor # note (9-2-22, OLCFDEV-1079) # this environment setting is needed to avoid that rocFFT writes a cache in diff --git a/Tools/machines/frontier-olcf/submit.sh b/Tools/machines/frontier-olcf/submit.sh index b318dd1797a..f5b52a712d3 100644 --- a/Tools/machines/frontier-olcf/submit.sh +++ b/Tools/machines/frontier-olcf/submit.sh @@ -26,7 +26,9 @@ # note (5-16-22 and 7-12-22) # this environment setting is currently needed on Frontier to work-around a # known issue with Libfabric (both in the May and June PE) -export FI_MR_CACHE_MAX_COUNT=0 +#export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching +# or, less invasive: +export FI_MR_CACHE_MONITOR=memhooks # alternative cache monitor # note (9-2-22, OLCFDEV-1079) # this environment setting is needed to avoid that rocFFT writes a cache in From ac2521aa4c23e999715931331a817c41e416ddd2 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 15 Sep 2022 15:15:09 -0700 Subject: [PATCH 0063/1346] Use blaspp::gemm on GPU for Hankel transform (#3383) * Use gemm on GPU for Hankel transform * Add stream synchronization * Add `amrex` * blas::gemm call: add `queue` with device id * CMake: BLAS++ Missing Deps * Update installation instructions for Summit * CMake: BLAS++ should not need curand * Add paths to blaspp/lapackpp * Move Queue Constructor to Constructor * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct name of profiled area * Use gemm for inverse Hankel transform * Add missing comma * Clean up the code so that it compiles for CPU * Clean up code ; update documentation * Update Comment Co-authored-by: Remi Lehe * Update Tools/machines/summit-olcf/summit_warpx.profile.example Co-authored-by: Axel Huebl * Add stream synchronization * Switch to streamsynchronize * Update comments Co-authored-by: Axel Huebl Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CMakeLists.txt | 6 ++ Docs/source/install/hpc/perlmutter.rst | 4 +- Docs/source/install/hpc/summit.rst | 16 ++++ .../SpectralHankelTransform/HankelTransform.H | 12 +++ .../HankelTransform.cpp | 85 ++++++++----------- .../summit-olcf/summit_warpx.profile.example | 9 +- 6 files changed, 77 insertions(+), 55 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ec47f693997..8afc3834e83 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -259,6 +259,12 @@ if(WarpX_PSATD) if(WarpX_DIMS STREQUAL RZ) target_link_libraries(ablastr PUBLIC blaspp) target_link_libraries(ablastr PUBLIC lapackpp) + + # BLAS++ forgets to declare cuBLAS and cudaRT dependencies + if(WarpX_COMPUTE STREQUAL CUDA) + find_package(CUDAToolkit REQUIRED) + target_link_libraries(ablastr PUBLIC CUDA::cudart CUDA::cublas) + endif() endif() endif() diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index dee1909b577..43c047c62ad 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -66,13 +66,13 
@@ And since Perlmutter does not yet provide a module for them, install ADIOS2, BLA # BLAS++ (for PSATD+RZ) git clone https://bitbucket.org/icl/blaspp.git src/blaspp rm -rf src/blaspp-pm-build - CXX=$(which CC) cmake -S src/blaspp -B src/blaspp-pm-build -Duse_openmp=ON -Dgpu_backend=CUDA -Duse_cmake_find_blas=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/perlmutter/blaspp-master + CXX=$(which CC) cmake -S src/blaspp -B src/blaspp-pm-build -Duse_openmp=OFF -Dgpu_backend=cuda -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/perlmutter/blaspp-master cmake --build src/blaspp-pm-build --target install --parallel 16 # LAPACK++ (for PSATD+RZ) git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp rm -rf src/lapackpp-pm-build - CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-pm-build -Duse_cmake_find_lapack=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DLAPACK_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/perlmutter/lapackpp-master + CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-pm-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/perlmutter/lapackpp-master cmake --build src/lapackpp-pm-build --target install --parallel 16 Optionally, download and install Python packages for :ref:`PICMI ` or dynamic ensemble optimizations (:ref:`libEnsemble `): diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst index e1212804e7d..195744fb906 100644 --- a/Docs/source/install/hpc/summit.rst +++ b/Docs/source/install/hpc/summit.rst @@ -45,6 +45,22 @@ We recommend to store the above lines in a file, such as ``$HOME/summit_warpx.pr source $HOME/summit_warpx.profile +For PSATD+RZ simulations, you will need to build BLAS++ and LAPACK++: + +.. code-block:: bash + + # BLAS++ (for PSATD+RZ) + git clone https://bitbucket.org/icl/blaspp.git src/blaspp + rm -rf src/blaspp-summit-build + cmake -S src/blaspp -B src/blaspp-summit-build -Duse_openmp=OFF -Dgpu_backend=cuda -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/summit/blaspp-master + cmake --build src/blaspp-summit-build --target install --parallel 10 + + # LAPACK++ (for PSATD+RZ) + git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + rm -rf src/lapackpp-summit-build + cmake -S src/lapackpp -B src/lapackpp-summit-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/summit/lapackpp-master + cmake --build src/lapackpp-summit-build --target install --parallel 10 + Optionally, download and install Python packages for :ref:`PICMI ` or dynamic ensemble optimizations (:ref:`libEnsemble `): .. code-block:: bash diff --git a/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.H b/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.H index 5a87031c40c..3f74f5d82ab 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.H +++ b/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.H @@ -8,6 +8,14 @@ #define WARPX_HANKEL_TRANSFORM_H_ #include +#include +#include + +#ifdef AMREX_USE_GPU +# include +#endif + +#include /* \brief This defines the class that performs the Hankel transform. 
* Original authors: Remi Lehe, Manuel Kirchen @@ -45,6 +53,10 @@ class HankelTransform RealVector m_invM; RealVector m_M; + +#ifdef AMREX_USE_GPU + std::unique_ptr m_queue; +#endif }; #endif diff --git a/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.cpp b/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.cpp index 43b26f2ee33..ddd07acad29 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralHankelTransform/HankelTransform.cpp @@ -11,6 +11,8 @@ #include "Utils/WarpXConst.H" #include "WarpX.H" +#include "Utils/WarpXProfilerWrapper.H" + #include #include @@ -23,10 +25,20 @@ HankelTransform::HankelTransform (int const hankel_order, : m_nr(nr), m_nk(nr) { + WARPX_PROFILE("HankelTransform::HankelTransform"); + // Check that azimuthal_mode has a valid value WARPX_ALWAYS_ASSERT_WITH_MESSAGE(hankel_order-1 <= azimuthal_mode && azimuthal_mode <= hankel_order+1, "azimuthal_mode must be either hankel_order-1, hankel_order or hankel_order+1"); +#ifdef AMREX_USE_GPU + // BLAS setup + // SYCL note: we need to double check AMReX device ID conventions and + // BLAS++ device ID conventions are the same + int const device_id = amrex::Gpu::Device::deviceId(); + m_queue = std::make_unique( device_id, 0 ); +#endif + amrex::Vector alphas; amrex::Vector alpha_errors; @@ -186,6 +198,8 @@ void HankelTransform::HankelForwardTransform (amrex::FArrayBox const& F, int const F_icomp, amrex::FArrayBox & G, int const G_icomp) { + WARPX_PROFILE("HankelTransform::HankelForwardTransform"); + amrex::Box const& F_box = F.box(); amrex::Box const& G_box = G.box(); @@ -198,37 +212,24 @@ HankelTransform::HankelForwardTransform (amrex::FArrayBox const& F, int const F_ AMREX_ALWAYS_ASSERT(ngr >= 0); AMREX_ALWAYS_ASSERT(F_box.bigEnd(0)+1 >= m_nr); -#ifndef AMREX_USE_GPU - // On CPU, the blas::gemm is significantly faster + // We perform stream synchronization since `gemm` may be running + // on a different stream. + amrex::Gpu::streamSynchronize(); // Note that M is flagged to be transposed since it has dimensions (m_nr, m_nk) blas::gemm(blas::Layout::ColMajor, blas::Op::Trans, blas::Op::NoTrans, m_nk, nz, m_nr, 1._rt, m_M.dataPtr(), m_nk, F.dataPtr(F_icomp)+ngr, nrF, 0._rt, - G.dataPtr(G_icomp), m_nk); - -#else - // On GPU, the explicit loop is significantly faster - // It is not clear if the GPU gemm wasn't build properly, it is cycling data out and back - // in to the device, or if it is because gemm is launching its own threads. - - amrex::Real const * M_arr = m_M.dataPtr(); - amrex::Array4 const & F_arr = F.array(); - amrex::Array4< amrex::Real> const & G_arr = G.array(); - - int const nr = m_nr; - - amrex::ParallelFor(G_box, - [=] AMREX_GPU_DEVICE(int ik, int iz, int k3d) noexcept { - G_arr(ik,iz,k3d,G_icomp) = 0.; - for (int ir=0 ; ir < nr ; ir++) { - int const ii = ir + ik*nr; - G_arr(ik,iz,k3d,G_icomp) += M_arr[ii]*F_arr(ir,iz,k3d,F_icomp); - } - }); - + G.dataPtr(G_icomp), m_nk +#ifdef AMREX_USE_GPU + , *m_queue // Calls the GPU version of blas::gemm #endif + ); + + // We perform stream synchronization since `gemm` may be running + // on a different stream. 
+ amrex::Gpu::streamSynchronize(); } @@ -236,6 +237,8 @@ void HankelTransform::HankelInverseTransform (amrex::FArrayBox const& G, int const G_icomp, amrex::FArrayBox & F, int const F_icomp) { + WARPX_PROFILE("HankelTransform::HankelInverseTransform"); + amrex::Box const& G_box = G.box(); amrex::Box const& F_box = F.box(); @@ -248,36 +251,22 @@ HankelTransform::HankelInverseTransform (amrex::FArrayBox const& G, int const G_ AMREX_ALWAYS_ASSERT(ngr >= 0); AMREX_ALWAYS_ASSERT(F_box.bigEnd(0)+1 >= m_nr); -#ifndef AMREX_USE_GPU - // On CPU, the blas::gemm is significantly faster + // We perform stream synchronization since `gemm` may be running + // on a different stream. + amrex::Gpu::streamSynchronize(); // Note that m_invM is flagged to be transposed since it has dimensions (m_nk, m_nr) blas::gemm(blas::Layout::ColMajor, blas::Op::Trans, blas::Op::NoTrans, m_nr, nz, m_nk, 1._rt, m_invM.dataPtr(), m_nr, G.dataPtr(G_icomp), m_nk, 0._rt, - F.dataPtr(F_icomp)+ngr, nrF); - -#else - // On GPU, the explicit loop is significantly faster - // It is not clear if the GPU gemm wasn't build properly, it is cycling data out and back - // in to the device, or if it is because gemm is launching its own threads. - - amrex::Real const * invM_arr = m_invM.dataPtr(); - amrex::Array4 const & G_arr = G.array(); - amrex::Array4< amrex::Real> const & F_arr = F.array(); - - int const nk = m_nk; - - amrex::ParallelFor(G_box, - [=] AMREX_GPU_DEVICE(int ir, int iz, int k3d) noexcept { - F_arr(ir,iz,k3d,F_icomp) = 0.; - for (int ik=0 ; ik < nk ; ik++) { - int const ii = ik + ir*nk; - F_arr(ir,iz,k3d,F_icomp) += invM_arr[ii]*G_arr(ik,iz,k3d,G_icomp); - } - }); - + F.dataPtr(F_icomp)+ngr, nrF +#ifdef AMREX_USE_GPU + , *m_queue // Calls the GPU version of blas::gemm #endif + ); + // We perform stream synchronization since `gemm` may be running + // on a different stream. 
+ amrex::Gpu::streamSynchronize(); } diff --git a/Tools/machines/summit-olcf/summit_warpx.profile.example b/Tools/machines/summit-olcf/summit_warpx.profile.example index faef6b2145a..216fe9b6f10 100644 --- a/Tools/machines/summit-olcf/summit_warpx.profile.example +++ b/Tools/machines/summit-olcf/summit_warpx.profile.example @@ -13,11 +13,10 @@ module load cuda/11.3.1 module load ccache # optional: for PSATD in RZ geometry support -module load blaspp/2021.04.01-cpu -module load lapackpp/2021.04.00-cpu - -# optional: for PSATD support (CPU only) -#module load fftw/3.3.9 +export CMAKE_PREFIX_PATH=$HOME/sw/summit/blaspp-master:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=$HOME/sw/summit/lapackpp-master:$CMAKE_PREFIX_PATH +export LD_LIBRARY_PATH=$HOME/sw/summit/blaspp-master/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=$HOME/sw/summit/lapackpp-master/lib64:$LD_LIBRARY_PATH # optional: for QED lookup table generation support module load boost/1.76.0 From 47eef0b6e103ec54947daf82c690138f4e2a5cb4 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 16 Sep 2022 00:25:20 +0200 Subject: [PATCH 0064/1346] add species name to a couple of error messages (#3381) --- Source/Initialization/PlasmaInjector.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index e56d5a5daab..cad2bbbe2d3 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -156,7 +156,8 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) charge_is_specified || species_is_specified || (injection_style == "external_file"), - "Need to specify at least one of species_type or charge" + "Need to specify at least one of species_type or charge for species '" + + species_name + "'." ); if ( mass_is_specified && species_is_specified ){ @@ -170,7 +171,8 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) mass_is_specified || species_is_specified || (injection_style == "external_file"), - "Need to specify at least one of species_type or mass" + "Need to specify at least one of species_type or mass for species '" + + species_name + "'." ); num_particles_per_cell_each_dim.assign(3, 0); From 68eb515ac1befd76e52718dd23d80195a61f7137 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Sun, 18 Sep 2022 21:42:21 -0700 Subject: [PATCH 0065/1346] Update highlights with new PRX paper (#3408) * Update highlights with new PRX paper * Update Docs/source/highlights.rst --- Docs/source/highlights.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index cc14e297177..0fbb3d549ce 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -21,8 +21,8 @@ Scientific works in laser-plasma and beam-plasma acceleration. #. Miao B, Shrock JE, Feder L, Hollinger RC, Morrison J, Nedbailo R, Picksley A, Song H, Wang S, Rocca JJ, Milchberg HM. **Multi-GeV electron bunches from an all-optical laser wakefield accelerator**. - *preprint*. under review, 2021. - `arXiv:2112.03489 `__ + Physical Review X **12**, 031038, 2022. + `DOI:10.1103/PhysRevX.12.031038 `__ #. Mirani F, Calzolari D, Formenti A, Passoni M. **Superintense laser-driven photon activation analysis**. 
From 3bf1a33bbbe81e39c987743c852679ba787e6014 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 19 Sep 2022 05:43:32 -0700 Subject: [PATCH 0066/1346] ABLASTR: Constants (#3405) * ABLASTR: Constants Add a new header for constants to ABLASTR. Move over constants from WarpX as its content. * Cleanup: Use amrex::Real Literals * Add Conversion: ElectronVolts Conversions for energy, momentum and mass --- Source/Utils/WarpXConst.H | 36 +++------------- Source/ablastr/constant.H | 87 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 31 deletions(-) create mode 100644 Source/ablastr/constant.H diff --git a/Source/Utils/WarpXConst.H b/Source/Utils/WarpXConst.H index fc40be2908f..d6f016ede4f 100644 --- a/Source/Utils/WarpXConst.H +++ b/Source/Utils/WarpXConst.H @@ -1,5 +1,5 @@ -/* Copyright 2019 Andrew Myers, Luca Fedeli, Maxence Thevenet - * Weiqun Zhang +/* Copyright 2019-2022 Andrew Myers, Luca Fedeli, Maxence Thevenet, + * Weiqun Zhang, Axel Huebl * * This file is part of WarpX. * @@ -8,43 +8,17 @@ #ifndef WARPX_CONST_H_ #define WARPX_CONST_H_ -#include +#include -#include -#include -#include - -// Math constants namespace MathConst { - static constexpr amrex::Real pi = static_cast(3.14159265358979323846); + using namespace ablastr::constant::math; } -// Physical constants. Values are the 2018 CODATA recommended values -// https://physics.nist.gov/cuu/Constants/index.html -// -// New additions here should also be considered for addition to -// `warpx_constants` in WarpXUtil.cpp's `makeParser`, so that they're -// available in parsing and evaluation of PICMI expressions, as well -// as the corresponding Python definitions namespace PhysConst { - static constexpr auto c = static_cast( 299'792'458. ); - static constexpr auto ep0 = static_cast( 8.8541878128e-12 ); - static constexpr auto mu0 = static_cast( 1.25663706212e-06 ); - static constexpr auto q_e = static_cast( 1.602176634e-19 ); - static constexpr auto m_e = static_cast( 9.1093837015e-31 ); - static constexpr auto m_p = static_cast( 1.67262192369e-27 ); - static constexpr auto m_u = static_cast( 1.66053906660e-27 ); - static constexpr auto hbar = static_cast( 1.054571817e-34 ); - static constexpr auto alpha = static_cast( 0.007297352573748943 );//mu0/(4*MathConst::pi)*q_e*q_e*c/hbar; - static constexpr auto r_e = static_cast( 2.817940326204929e-15 );//1./(4*MathConst::pi*ep0) * q_e*q_e/(m_e*c*c); - static constexpr double xi = 1.3050122447005176e-52; //(2.*alpha*alpha*ep0*ep0*hbar*hbar*hbar)/(45.*m_e*m_e*m_e*m_e*c*c*c*c*c); - static constexpr auto xi_c2 = static_cast( 1.1728865132395492e-35 ); // This should be usable for single precision, though - // very close to smallest number possible (1.2e-38) - - static constexpr auto kb = static_cast( 1.380649e-23 ); // Boltzmann's constant, J/K (exact) + using namespace ablastr::constant::SI; } #endif diff --git a/Source/ablastr/constant.H b/Source/ablastr/constant.H new file mode 100644 index 00000000000..3a05a8d5e14 --- /dev/null +++ b/Source/ablastr/constant.H @@ -0,0 +1,87 @@ +/* Copyright 2019-2022 Andrew Myers, Luca Fedeli, Maxence Thevenet, + * Weiqun Zhang, Axel Huebl + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef ABLASTR_CONSTANT_H_ +#define ABLASTR_CONSTANT_H_ + +#include + + +/** Numerical compile-time constants */ +namespace ablastr::constant +{ + /** Mathematical constants */ + namespace math + { + using namespace amrex::literals; + + //! 
ratio of a circle's circumference to its diameter + static constexpr amrex::Real pi = 3.14159265358979323846_rt; + + //! https://tauday.com/tau-manifesto + static constexpr amrex::Real tau = 2.0 * 3.14159265358979323846_rt; + } // namespace math + + /** Physical constants + * + * Values are the 2018 CODATA recommended values + * https://physics.nist.gov/cuu/Constants/index.html + * + * New additions here should also be considered for addition to + * `warpx_constants` in WarpXUtil.cpp's `makeParser`, so that they're + * available in parsing and evaluation of PICMI expressions, as well + * as the corresponding Python definitions + */ + namespace SI + { + using namespace amrex::literals; + + //! vacuum speed of light [m/s] + static constexpr auto c = 299'792'458._rt; + //! vacuum permittivity: dielectric permittivity of vacuum [F/m] + static constexpr auto ep0 = 8.8541878128e-12_rt; + //! vacuum permeability: magnetic permeability of vacuum = 4.0e-7 * pi [H/m] + static constexpr auto mu0 = 1.25663706212e-06_rt; + //! elementary charge [C] + static constexpr auto q_e = 1.602176634e-19_rt; + //! electron mass [kg] + static constexpr auto m_e = 9.1093837015e-31_rt; + //! proton mass [kg] + static constexpr auto m_p = 1.67262192369e-27_rt; + //! dalton: unified atomic mass unit [kg] + static constexpr auto m_u = 1.66053906660e-27_rt; + + //! reduced Planck Constant = h / tau [J*s] + static constexpr auto hbar = 1.054571817e-34_rt; + //! mu0/(4*MathConst::pi)*q_e*q_e*c/hbar + static constexpr auto alpha = 0.007297352573748943_rt; + //! 1./(4*MathConst::pi*ep0) * q_e*q_e/(m_e*c*c) + static constexpr auto r_e = 2.817940326204929e-15_rt; + //! (2.*alpha*alpha*ep0*ep0*hbar*hbar*hbar)/(45.*m_e*m_e*m_e*m_e*c*c*c*c*c) + static constexpr double xi = 1.3050122447005176e-52; + //! This should be usable for single precision instead of xi; very close to smallest float32 number possible (1.2e-38) + static constexpr auto xi_c2 = 1.1728865132395492e-35_rt; + + //! Boltzmann constant (exact) [J/K] + static constexpr auto kb = 1.380649e-23_rt; + + //! 1 eV in [J] + static constexpr auto eV = q_e; + //! 1 MeV in [J] + static constexpr auto MeV = q_e * 1e6_rt; + //! 1 eV/c in [kg*m/s] + static constexpr auto eV_invc = eV / c; + //! 1 MeV/c in [kg*m/s] + static constexpr auto MeV_invc = MeV / c; + //! 1 eV/c^2 in [kg] + static constexpr auto eV_invc2 = eV / (c * c); + //! 1 MeV/c^2 in [kg] + static constexpr auto MeV_invc2 = MeV / (c * c); + } // namespace SI +} // namespace ablastr::constant + +#endif // ABLASTR_CONSTANT_H_ From 276e5743b29eb0194cd3cfb48a6e7a09d520dfaf Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 19 Sep 2022 09:03:59 -0700 Subject: [PATCH 0067/1346] ABLASTR: Fix Stray Include in ChargeDeposition (#3406) This include into WarpX should not be here and is unused. Same as #3393. 
--- Source/ablastr/particles/DepositCharge.H | 1 + 1 file changed, 1 insertion(+) diff --git a/Source/ablastr/particles/DepositCharge.H b/Source/ablastr/particles/DepositCharge.H index 67113a6b73c..e2e27c75622 100644 --- a/Source/ablastr/particles/DepositCharge.H +++ b/Source/ablastr/particles/DepositCharge.H @@ -9,6 +9,7 @@ #include "ablastr/profiler/ProfilerWrapper.H" #include "Parallelization/KernelTimer.H" +#include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/ShapeFactors.H" #include "Particles/Deposition/ChargeDeposition.H" #include "ablastr/utils/TextMsg.H" From 6e75516aba4b58179b551467663f3d9e3d3dc05f Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Mon, 19 Sep 2022 18:36:11 +0200 Subject: [PATCH 0068/1346] Display an ASCII art logo on standard output (#3382) * display WarpX ascii art * fix bug * function is now noexcept * fix bug * fixed bug --- Source/Initialization/WarpXInitData.cpp | 6 +++++- Source/Utils/CMakeLists.txt | 2 ++ Source/Utils/Logo/CMakeLists.txt | 4 ++++ Source/Utils/Logo/GetLogo.H | 19 +++++++++++++++++++ Source/Utils/Logo/GetLogo.cpp | 15 +++++++++++++++ Source/Utils/Logo/Make.package | 3 +++ Source/Utils/Make.package | 2 ++ 7 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 Source/Utils/Logo/CMakeLists.txt create mode 100644 Source/Utils/Logo/GetLogo.H create mode 100644 Source/Utils/Logo/GetLogo.cpp create mode 100644 Source/Utils/Logo/Make.package diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 812f0bf01df..6f9178af707 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -21,6 +21,7 @@ #include "Filter/BilinearFilter.H" #include "Filter/NCIGodfreyFilter.H" #include "Particles/MultiParticleContainer.H" +#include "Utils/Logo/GetLogo.H" #include "Utils/MPIInitHelpers.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -359,11 +360,14 @@ WarpX::InitData () WARPX_PROFILE("WarpX::InitData()"); utils::warpx_check_mpi_thread_level(); - Print() << "WarpX (" << WarpX::Version() << ")\n"; #ifdef WARPX_QED Print() << "PICSAR (" << WarpX::PicsarVersion() << ")\n"; #endif + Print() << "WarpX (" << WarpX::Version() << ")\n"; + + Print() << utils::logo::get_logo(); + if (restart_chkfile.empty()) { ComputeDt(); diff --git a/Source/Utils/CMakeLists.txt b/Source/Utils/CMakeLists.txt index b2754cdaf99..9253f3d0d7f 100644 --- a/Source/Utils/CMakeLists.txt +++ b/Source/Utils/CMakeLists.txt @@ -13,3 +13,5 @@ target_sources(WarpX WarpXUtil.cpp WarpXVersion.cpp ) + +add_subdirectory(Logo) diff --git a/Source/Utils/Logo/CMakeLists.txt b/Source/Utils/Logo/CMakeLists.txt new file mode 100644 index 00000000000..62b5371ee98 --- /dev/null +++ b/Source/Utils/Logo/CMakeLists.txt @@ -0,0 +1,4 @@ +target_sources(WarpX + PRIVATE + GetLogo.cpp +) diff --git a/Source/Utils/Logo/GetLogo.H b/Source/Utils/Logo/GetLogo.H new file mode 100644 index 00000000000..70a3e38ff29 --- /dev/null +++ b/Source/Utils/Logo/GetLogo.H @@ -0,0 +1,19 @@ +#ifndef WARPX_GET_LOGO_H_ +#define WARPX_GET_LOGO_H_ + +#include + +namespace utils +{ + namespace logo + { + /** + * \brief provides an ASCII art logo for WarpX + * + * \return a string containing an ASCII art logo + */ + std::string get_logo () noexcept; + } +} + +#endif diff --git a/Source/Utils/Logo/GetLogo.cpp b/Source/Utils/Logo/GetLogo.cpp new file mode 100644 index 00000000000..c515ceac1a7 --- /dev/null +++ b/Source/Utils/Logo/GetLogo.cpp @@ -0,0 +1,15 @@ +#include "GetLogo.H" + +std::string 
utils::logo::get_logo () noexcept +{ + return +R"( + __ __ __ __ + \ \ / /_ _ _ __ _ __\ \/ / + \ \ /\ / / _` | '__| '_ \\ / + \ V V / (_| | | | |_) / \ + \_/\_/ \__,_|_| | .__/_/\_\ + |_| + +)"; +} diff --git a/Source/Utils/Logo/Make.package b/Source/Utils/Logo/Make.package new file mode 100644 index 00000000000..2aa69ae46da --- /dev/null +++ b/Source/Utils/Logo/Make.package @@ -0,0 +1,3 @@ +CEXE_sources += GetLogo.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/Utils/Logo diff --git a/Source/Utils/Make.package b/Source/Utils/Make.package index c3771508390..b20a1abe291 100644 --- a/Source/Utils/Make.package +++ b/Source/Utils/Make.package @@ -11,4 +11,6 @@ CEXE_sources += MPIInitHelpers.cpp CEXE_sources += RelativeCellPosition.cpp CEXE_sources += ParticleUtils.cpp +include $(WARPX_HOME)/Source/Utils/Logo/Make.package + VPATH_LOCATIONS += $(WARPX_HOME)/Source/Utils From adeebe893caf87ff115ca784109e40893913db28 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Mon, 19 Sep 2022 19:54:29 +0200 Subject: [PATCH 0069/1346] Improve docstrings for some physical constants (#3410) * improve documentation for numerical constants * add Antonin Sainte-Marie's suggestion for a docstring * Update tau (from PR review) --- Source/ablastr/constant.H | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Source/ablastr/constant.H b/Source/ablastr/constant.H index 3a05a8d5e14..477f57f9040 100644 --- a/Source/ablastr/constant.H +++ b/Source/ablastr/constant.H @@ -23,7 +23,7 @@ namespace ablastr::constant static constexpr amrex::Real pi = 3.14159265358979323846_rt; //! https://tauday.com/tau-manifesto - static constexpr amrex::Real tau = 2.0 * 3.14159265358979323846_rt; + static constexpr amrex::Real tau = 2.0_rt * pi; } // namespace math /** Physical constants @@ -57,13 +57,13 @@ namespace ablastr::constant //! reduced Planck Constant = h / tau [J*s] static constexpr auto hbar = 1.054571817e-34_rt; - //! mu0/(4*MathConst::pi)*q_e*q_e*c/hbar + //! fine-structure constant = mu0/(4*MathConst::pi)*q_e*q_e*c/hbar [dimensionless] static constexpr auto alpha = 0.007297352573748943_rt; - //! 1./(4*MathConst::pi*ep0) * q_e*q_e/(m_e*c*c) + //! classical electron radius = 1./(4*MathConst::pi*ep0) * q_e*q_e/(m_e*c*c) [m] static constexpr auto r_e = 2.817940326204929e-15_rt; - //! (2.*alpha*alpha*ep0*ep0*hbar*hbar*hbar)/(45.*m_e*m_e*m_e*m_e*c*c*c*c*c) + //! xi: nonlinearity parameter of Heisenberg-Euler effective theory = (2.*alpha*alpha*ep0*ep0*hbar*hbar*hbar)/(45.*m_e*m_e*m_e*m_e*c*c*c*c*c) static constexpr double xi = 1.3050122447005176e-52; - //! This should be usable for single precision instead of xi; very close to smallest float32 number possible (1.2e-38) + //! xi times c2 = xi*c*c. This should be usable for single precision instead of xi; very close to smallest float32 number possible (1.2e-38) static constexpr auto xi_c2 = 1.1728865132395492e-35_rt; //! 
Boltzmann constant (exact) [J/K] From 5deed41b5788a7f59da1cdc55cb30b2cea781441 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 19 Sep 2022 13:11:54 -0700 Subject: [PATCH 0070/1346] Fixes to Physics_applications/capacitive_discharge/PICMI* (#3413) --- .../capacitive_discharge/PICMI_inputs_1d.py | 2 +- .../capacitive_discharge/PICMI_inputs_2d.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py index 5eac1e172ff..b353af9ade7 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py @@ -60,7 +60,7 @@ def initialize_inputs(self): super(PoissonSolver1D, self).initialize_inputs() - self.nz = self.grid.nx + self.nz = self.grid.number_of_cells[0] self.dz = (self.grid.xmax - self.grid.xmin) / self.nz self.nxguardphi = 1 diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py index d5a18071ad4..f35ecfe0d67 100755 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py @@ -91,8 +91,8 @@ def initialize_inputs(self): super(PoissonSolverPseudo1D, self).initialize_inputs() - self.nx = self.grid.nx - self.nz = self.grid.ny + self.nx = self.grid.number_of_cells[0] + self.nz = self.grid.number_of_cells[1] self.dx = (self.grid.xmax - self.grid.xmin) / self.nx self.dz = (self.grid.ymax - self.grid.ymin) / self.nz From f841e67748fdc2bf8238d83ad2e6d04e8ffad694 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 19 Sep 2022 14:26:31 -0700 Subject: [PATCH 0071/1346] Zenodo: Add Marco Garten (#3414) Add Marco to our Zenodo file. --- .zenodo.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.zenodo.json b/.zenodo.json index 3fded899afc..6aa723ea0e3 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -38,6 +38,11 @@ "name": "Fedeli, Luca", "orcid": "0000-0002-7215-4178" }, + { + "affiliation": "Lawrence Berkeley National Laboratory", + "name": "Garten, Marco", + "orcid": "0000-0001-6994-2475" + }, { "affiliation": "SLAC National Accelerator Laboratory", "name": "Ge, Lixin", From a57fe9657daa6fb0769fad5555f9f0ab7b55b97c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Tue, 20 Sep 2022 00:11:39 +0200 Subject: [PATCH 0072/1346] Fix value of particle container m_do_back_transformed_particles when there are multiple BT diagnostics (#3184) --- Source/Diagnostics/BTDiagnostics.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 72b6244e8ae..f9f3315d997 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -112,9 +112,11 @@ void BTDiagnostics::DerivedInitData () // Turn on do_back_transformed_particles in the particle containers so that // the tmp_particle_data is allocated and the data of the corresponding species is // copied and stored in tmp_particle_data before particles are pushed. 
- for (auto const& species : m_output_species_names){ + if (m_do_back_transformed_particles) { mpc.SetDoBackTransformedParticles(m_do_back_transformed_particles); - mpc.SetDoBackTransformedParticles(species, m_do_back_transformed_particles); + for (auto const& species : m_output_species_names){ + mpc.SetDoBackTransformedParticles(species, m_do_back_transformed_particles); + } } m_particles_buffer.resize(m_num_buffers); m_totalParticles_flushed_already.resize(m_num_buffers); From 33fd1fe1232d280b96a0aff102b1106b779543e5 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Mon, 19 Sep 2022 16:46:03 -0700 Subject: [PATCH 0073/1346] Add beta function to BeamRelevant (#3372) * Add beta function to BeamRelevant * Beta-function: add user docs --- Docs/source/usage/parameters.rst | 41 +++++++++++-------- .../Diagnostics/ReducedDiags/BeamRelevant.cpp | 20 ++++++--- 2 files changed, 38 insertions(+), 23 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 6d293b52f91..bf6772ee2f6 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2454,39 +2454,42 @@ Reduced Diagnostics sum of the particles' weight of each species. * ``BeamRelevant`` - This type computes properties of a particle beam relevant for particle accelerators, - like position, momentum, emittance, etc. + This type computes properties of a particle beam relevant for particle accelerators, like position, momentum, emittance, etc. - ``.species`` must be provided, - such that the diagnostics are done for this (beam-like) species only. + ``.species`` must be provided, such that the diagnostics are done for this (beam-like) species only. + + The output columns (for 3D-XYZ) are the following, where the average is done over the whole species (typical usage: the particle beam is in a separate species): + + [0]: simulation step (iteration). - The output columns (for 3D-XYZ) are the following, where the average is done over - the whole species (typical usage: the particle beam is in a separate species): + [1]: time (s). - [1], [2], [3]: The mean values of beam positions (m) - :math:`\langle x \rangle`, :math:`\langle y \rangle`, + [2], [3], [4]: The mean values of beam positions (m) + :math:`\langle x \rangle`, + :math:`\langle y \rangle`, :math:`\langle z \rangle`. - [4], [5], [6]: The mean values of beam relativistic momenta (kg m/s) - :math:`\langle p_x \rangle`, :math:`\langle p_y \rangle`, + [5], [6], [7]: The mean values of beam relativistic momenta (kg m/s) + :math:`\langle p_x \rangle`, + :math:`\langle p_y \rangle`, :math:`\langle p_z \rangle`. - [7]: The mean Lorentz factor :math:`\langle \gamma \rangle`. + [8]: The mean Lorentz factor :math:`\langle \gamma \rangle`. - [8], [9], [10]: The RMS values of beam positions (m) + [9], [10], [11]: The RMS values of beam positions (m) :math:`\delta_x = \sqrt{ \langle (x - \langle x \rangle)^2 \rangle }`, :math:`\delta_y = \sqrt{ \langle (y - \langle y \rangle)^2 \rangle }`, :math:`\delta_z = \sqrt{ \langle (z - \langle z \rangle)^2 \rangle }`. - [11], [12], [13]: The RMS values of beam relativistic momenta (kg m/s) + [12], [13], [14]: The RMS values of beam relativistic momenta (kg m/s) :math:`\delta_{px} = \sqrt{ \langle (p_x - \langle p_x \rangle)^2 \rangle }`, :math:`\delta_{py} = \sqrt{ \langle (p_y - \langle p_y \rangle)^2 \rangle }`, :math:`\delta_{pz} = \sqrt{ \langle (p_z - \langle p_z \rangle)^2 \rangle }`. 
- [14]: The RMS value of the Lorentz factor + [15]: The RMS value of the Lorentz factor :math:`\sqrt{ \langle (\gamma - \langle \gamma \rangle)^2 \rangle }`. - [15], [16], [17]: beam projected transverse RMS normalized emittance (m) + [16], [17], [18]: beam projected transverse RMS normalized emittance (m) :math:`\epsilon_x = \dfrac{1}{mc} \sqrt{\delta_x^2 \delta_{px}^2 - \Big\langle (x-\langle x \rangle) (p_x-\langle p_x \rangle) \Big\rangle^2}`, :math:`\epsilon_y = \dfrac{1}{mc} \sqrt{\delta_y^2 \delta_{py}^2 - @@ -2494,12 +2497,16 @@ Reduced Diagnostics :math:`\epsilon_z = \dfrac{1}{mc} \sqrt{\delta_z^2 \delta_{pz}^2 - \Big\langle (z-\langle z \rangle) (p_z-\langle p_z \rangle) \Big\rangle^2}`. - [18]: The charge of the beam (C). + [19], [20]: beta function for the transverse directions (m) + :math:`\beta_x = \dfrac{{\delta_x}^2}{\epsilon_x}`, + :math:`\beta_y = \dfrac{{\delta_y}^2}{\epsilon_y}`. + + [21]: The charge of the beam (C). For 2D-XZ, :math:`\langle y \rangle`, :math:`\delta_y`, and - :math:`\epsilon_y` will not be outputed. + :math:`\epsilon_y` will not be outputted. * ``LoadBalanceCosts`` This type computes the cost, used in load balancing, for each box on the domain. diff --git a/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp b/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp index 03ce90ac357..4b4d404ba8c 100644 --- a/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp +++ b/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp @@ -48,8 +48,9 @@ BeamRelevant::BeamRelevant (std::string rd_name) // 10,11,12: rms px,py,pz // 13: rms gamma // 14,15,16: emittance x,y,z - // 17: charge - m_data.resize(18, 0.0_rt); + // 17,18: beta-function x,y + // 19: charge + m_data.resize(20, 0.0_rt); #elif (defined WARPX_DIM_XZ) // 0, 1: mean x,z // 2, 3, 4: mean px,py,pz @@ -58,8 +59,9 @@ BeamRelevant::BeamRelevant (std::string rd_name) // 8, 9,10: rms px,py,pz // 11: rms gamma // 12,13: emittance x,z - // 14: charge - m_data.resize(15, 0.0_rt); + // 14: beta-function x + // 15: charge + m_data.resize(16, 0.0_rt); #elif (defined WARPX_DIM_1D_Z) // 0 : mean z // 1,2,3 : mean px,py,pz @@ -101,6 +103,8 @@ BeamRelevant::BeamRelevant (std::string rd_name) ofs << "[" << c++ << "]emittance_x(m)"; ofs << m_sep; ofs << "[" << c++ << "]emittance_y(m)"; ofs << m_sep; ofs << "[" << c++ << "]emittance_z(m)"; ofs << m_sep; + ofs << "[" << c++ << "]beta_x(m)"; ofs << m_sep; + ofs << "[" << c++ << "]beta_y(m)"; ofs << m_sep; ofs << "[" << c++ << "]charge(C)"; ofs << std::endl; #elif (defined WARPX_DIM_XZ) int c = 0; @@ -121,6 +125,7 @@ BeamRelevant::BeamRelevant (std::string rd_name) ofs << "[" << c++ << "]gamma_rms()"; ofs << m_sep; ofs << "[" << c++ << "]emittance_x(m)"; ofs << m_sep; ofs << "[" << c++ << "]emittance_z(m)"; ofs << m_sep; + ofs << "[" << c++ << "]beta_x(m)"; ofs << m_sep; ofs << "[" << c++ << "]charge(C)"; ofs << std::endl; #elif (defined WARPX_DIM_1D_Z) int c = 0; @@ -369,7 +374,9 @@ void BeamRelevant::ComputeDiags (int step) m_data[14] = std::sqrt(x_ms*ux_ms-xux*xux) / PhysConst::c; m_data[15] = std::sqrt(y_ms*uy_ms-yuy*yuy) / PhysConst::c; m_data[16] = std::sqrt(z_ms*uz_ms-zuz*zuz) / PhysConst::c; - m_data[17] = charge; + m_data[17] = (PhysConst::c * x_ms) / std::sqrt(x_ms*ux_ms-xux*xux); + m_data[18] = (PhysConst::c * y_ms) / std::sqrt(y_ms*uy_ms-yuy*yuy); + m_data[19] = charge; #elif (defined WARPX_DIM_XZ) m_data[0] = x_mean; m_data[1] = z_mean; @@ -385,7 +392,8 @@ void BeamRelevant::ComputeDiags (int step) m_data[11] = std::sqrt(gm_ms); m_data[12] = std::sqrt(x_ms*ux_ms-xux*xux) / 
PhysConst::c; m_data[13] = std::sqrt(z_ms*uz_ms-zuz*zuz) / PhysConst::c; - m_data[14] = charge; + m_data[14] = (PhysConst::c * x_ms) / std::sqrt(x_ms*ux_ms-xux*xux); + m_data[15] = charge; amrex::ignore_unused(y_mean, y_ms, yuy); #elif (defined WARPX_DIM_1D_Z) m_data[0] = z_mean; From 5e1790664e93452720f57be9648401d55b4453b7 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 19 Sep 2022 17:06:42 -0700 Subject: [PATCH 0074/1346] More fixes for capacitive_discharge PICMI tests (#3416) --- .../capacitive_discharge/PICMI_inputs_1d.py | 2 +- .../capacitive_discharge/PICMI_inputs_2d.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py index b353af9ade7..8fa450e016f 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py @@ -61,7 +61,7 @@ def initialize_inputs(self): super(PoissonSolver1D, self).initialize_inputs() self.nz = self.grid.number_of_cells[0] - self.dz = (self.grid.xmax - self.grid.xmin) / self.nz + self.dz = (self.grid.upper_bound[0] - self.grid.lower_bound[0]) / self.nz self.nxguardphi = 1 self.nzguardphi = 1 diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py index f35ecfe0d67..85cc04148d3 100755 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_2d.py @@ -93,8 +93,8 @@ def initialize_inputs(self): self.nx = self.grid.number_of_cells[0] self.nz = self.grid.number_of_cells[1] - self.dx = (self.grid.xmax - self.grid.xmin) / self.nx - self.dz = (self.grid.ymax - self.grid.ymin) / self.nz + self.dx = (self.grid.upper_bound[0] - self.grid.lower_bound[0]) / self.nx + self.dz = (self.grid.upper_bound[1] - self.grid.lower_bound[1]) / self.nz if not np.isclose(self.dx, self.dz): raise RuntimeError('Direct solver requires dx = dz.') From 2fed2828933831ee464f0ca5d02a23dd2df54aad Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 20 Sep 2022 10:06:53 -0700 Subject: [PATCH 0075/1346] Correct and test fusion module in RZ geometry (#3255) * Add particle rotation in NuclearFusionFunc.H * Minor * indent * initial work * fixed bugs and added species * update documentation * delete unused file * Add properties for neutron, hydrogen isotopes, helium isotopes * Update code to be more consistent * Correct typo * Parse deuterium-tritium fusion * Start putting in place the files for deuterium-tritium * Update documentation * Prepare structures for deuterium tritium * Fix typo * Fix compilation * Add neutron * Add correct formula for the cross-section * Correct compilation error * Fix nuclear fusion * Reset benchmarks * Prepare creation functor for 2-product fusion * First implementation of momentum initialization * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use utility function for fusion * Minor modification of variable names * Fix GPU compilation * Fix single precision compilation * Update types * Use util function in P-B fusion * Correct compilation errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct errors * Update values of mass and charge * Correct compilation error * [pre-commit.ci] auto fixes from pre-commit.com 
hooks for more information, see https://pre-commit.ci * Correct compilation error * Correct compilation error * Correct compilation error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reset benchmark * Use helium particle in proton-boron, to avoid resetting benchmark * Fixed proton-boron test * Revert "Fixed proton-boron test" This reverts commit 73c8d9d0be8417d5cd08a23daeebbc322c984808. * Incorporate Neil's recommendations * Reset benchmarks * Correct compilation errors * Add new deuterium tritium automated test * Correct formula of cross-section * Correct cross-section * Improve analysis script * Add test of energy conservation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test of conservation of momentum * Progress in analysis script * Fix error in the initial energy of the deuterium particles * Add check of isotropy * Clean up the test script * Rewrite p_sq formula in a way that avoids machine-precision negative numbers * Add checksum * Clean up code * Add test for fusion in RZ geometry * Update code to take into account actual timestep * Update RZ test * Update RZ test * Increase number of particles * Impart radial memory on DT particles * Correct RZ momenta * Remove unused file * Update test * Fix definition of theta * Add new test * Add checksum * Update test * Update tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix Python analysis script * Remove CPU and ID from new benchmark Co-authored-by: Yinjian Zhao Co-authored-by: Luca Fedeli Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> ---
 .../analysis_deuterium_tritium_fusion.py      |  51 +++++--
 .../inputs_deuterium_tritium_rz               | 129 ++++++++++++++++++
 .../Deuterium_Tritium_Fusion_3D.json          |   2 +-
 .../Deuterium_Tritium_Fusion_RZ.json          |  77 +++++++++++
 Regression/WarpX-tests.ini                    |  16 +++
 .../NuclearFusion/NuclearFusionFunc.H         |  31 +++++
 6 files changed, 290 insertions(+), 16 deletions(-)
 create mode 100644 Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz
 create mode 100644 Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json

diff --git a/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py b/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py
index f218df54d70..b5f5da683b2 100755
--- a/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py
+++ b/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py
@@ -6,6 +6,7 @@
 # License: BSD-3-Clause-LBNL

 import os
+import re
 import sys

 import yt
@@ -66,15 +67,30 @@

 E_fusion = 17.5893*MeV_to_Joule # Energy released during the fusion reaction

+## Checks whether this is the 2D or the 3D test
+warpx_used_inputs = open('./warpx_used_inputs', 'r').read()
+if re.search('geometry.dims = RZ', warpx_used_inputs):
+    is_RZ = True
+else:
+    is_RZ = False
+
 ## Some numerical parameters for this test
 size_x = 8
 size_y = 8
 size_z = 16
-dV_total = size_x*size_y*size_z # Total simulation volume
+if is_RZ:
+    dV_slice = np.pi * size_x**2
+    yt_z_string = "particle_position_y"
+    nppcell_1 = 10000*8
+    nppcell_2 = 900*8
+else:
+    dV_slice = size_x*size_y
+    yt_z_string = "particle_position_z"
+    nppcell_1 = 10000
+    nppcell_2 = 900
 # Volume of a slice corresponding to a single cell in the z direction.
In tests 1 and 2, all the # particles of a given species in the same slice have the exact same momentum -dV_slice = size_x*size_y -dt = 1./(scc.c*np.sqrt(3.)) + # In test 1 and 2, the energy in cells number i (in z direction) is typically Energy_step * i**2 Energy_step = 22.*keV_to_Joule @@ -89,7 +105,7 @@ def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix) data_dict[prefix+"_w_"+suffix] = yt_ad[species_name, "particle_weight"].v data_dict[prefix+"_id_"+suffix] = yt_ad[species_name, "particle_id"].v data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v - data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, "particle_position_z"].v + data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, yt_z_string].v def add_empty_species_to_dict(data_dict, species_name, prefix, suffix): data_dict[prefix+"_px_"+suffix] = np.empty(0) @@ -263,8 +279,11 @@ def expected_weight_com(E_com, reactant0_density, reactant1_density, dV, dt): def check_macroparticle_number(data, fusion_probability_target_value, num_pair_per_cell): ## Checks that the number of macroparticles is as expected for the first and second tests - ## The first slice 0 < z < 1 does not contribute to product species creation - numcells = dV_total - dV_slice + ## The first slice 0 < z < 1 does not contribute to alpha creation + if is_RZ: + numcells = size_x*(size_z-1) + else: + numcells = size_x*size_y*(size_z-1) ## In these tests, the fusion_multiplier is so high that the fusion probability per pair is ## equal to the parameter fusion_probability_target_value fusion_probability_per_pair = fusion_probability_target_value @@ -315,7 +334,7 @@ def compute_E_com2(data): p_reactant0_sq = 2.*mass[reactant_species[0]]*(Energy_step*np.arange(size_z)**2) return p_sq_reactant1_frame_to_E_COM_frame(p_reactant0_sq) -def check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density, reactant1_density): +def check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density, reactant1_density, dt): ## Checks that the fusion yield is as expected for the first and second tests. product_weight_theory = expected_weight_com(E_com/keV_to_Joule, reactant0_density, reactant1_density, dV_slice, dt) @@ -330,24 +349,25 @@ def check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density, r assert(np.all(is_close(product_weight_theory, product_weight_simulation, rtol = 5.*relative_std_weight))) -def specific_check1(data): - check_isotropy(data, relative_tolerance = 3.e-2) +def specific_check1(data, dt): + if not is_RZ: + check_isotropy(data, relative_tolerance = 3.e-2) expected_fusion_number = check_macroparticle_number(data, fusion_probability_target_value = 0.002, - num_pair_per_cell = 10000) + num_pair_per_cell = nppcell_1) E_com = compute_E_com1(data) check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density = 1., - reactant1_density = 1.) 
+ reactant1_density = 1., dt=dt) -def specific_check2(data): +def specific_check2(data, dt): check_xy_isotropy(data) ## Only 900 particles pairs per cell here because we ignore the 10% of reactants that are at rest expected_fusion_number = check_macroparticle_number(data, fusion_probability_target_value = 0.02, - num_pair_per_cell = 900) + num_pair_per_cell = nppcell_2) E_com = compute_E_com2(data) check_fusion_yield(data, expected_fusion_number, E_com, reactant0_density = 1.e20, - reactant1_density = 1.e26) + reactant1_density = 1.e26, dt=dt) def check_charge_conservation(rho_start, rho_end): assert(np.all(is_close(rho_start, rho_end, rtol=2.e-11))) @@ -359,6 +379,7 @@ def main(): ds_start = yt.load(filename_start) ad_end = ds_end.all_data() ad_start = ds_start.all_data() + dt = float(ds_end.current_time - ds_start.current_time) field_data_end = ds_end.covering_grid(level=0, left_edge=ds_end.domain_left_edge, dims=ds_end.domain_dimensions) field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge, @@ -379,7 +400,7 @@ def main(): generic_check(data) # Checks that are specific to test number i - eval("specific_check"+str(i)+"(data)") + eval("specific_check"+str(i)+"(data, dt)") rho_start = field_data_start["rho"].to_ndarray() rho_end = field_data_end["rho"].to_ndarray() diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz b/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz new file mode 100644 index 00000000000..fb581c82535 --- /dev/null +++ b/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz @@ -0,0 +1,129 @@ +################################# +####### GENERAL PARAMETERS ###### +################################# +## With these parameters, each cell has a size of exactly 1 by 1 by 1 +max_step = 1 +amr.n_cell = 8 16 +amr.max_grid_size = 8 +amr.blocking_factor = 8 +amr.max_level = 0 +geometry.dims = RZ +geometry.prob_lo = 0. 0. +geometry.prob_hi = 8. 16. + +################################# +###### Boundary Condition ####### +################################# +boundary.field_lo = none periodic +boundary.field_hi = pec periodic + +################################# +############ NUMERICS ########### +################################# +warpx.verbose = 1 +warpx.cfl = 1.0 + +# Order of particle shape factors +algo.particle_shape = 1 + +################################# +############ PLASMA ############# +################################# +particles.species_names = deuterium1 tritium1 helium1 neutron1 deuterium2 tritium2 helium2 neutron2 + +my_constants.m_deuterium = 2.01410177812*m_u +my_constants.m_tritium = 3.0160492779*m_u +my_constants.m_reduced = m_deuterium*m_tritium/(m_deuterium+m_tritium) +my_constants.keV_to_J = 1.e3*q_e +my_constants.Energy_step = 22. * keV_to_J + +deuterium1.species_type = deuterium +deuterium1.injection_style = "NRandomPerCell" +deuterium1.num_particles_per_cell = 80000 +deuterium1.profile = constant +deuterium1.density = 1. 
+deuterium1.momentum_distribution_type = parse_momentum_function +## Thanks to the floor, all particles in the same cell have the exact same momentum +deuterium1.momentum_function_ux(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight); if(x*x+y*y>0.0, -u*y/sqrt(x*x+y*y), 0.0)" # azimuthal velocity +deuterium1.momentum_function_uy(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight); if(x*x+y*y>0.0, u*x/sqrt(x*x+y*y), 0.0)" # azimuthal velocity +deuterium1.momentum_function_uz(x,y,z) = "0" +deuterium1.do_not_push = 1 +deuterium1.do_not_deposit = 1 + +tritium1.species_type = tritium +tritium1.injection_style = "NRandomPerCell" +tritium1.num_particles_per_cell = 80000 +tritium1.profile = constant +tritium1.density = 1. +tritium1.momentum_distribution_type = "parse_momentum_function" +## Thanks to the floor, all particles in the same cell have the exact same momentum +tritium1.momentum_function_ux(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight); if(x*x+y*y>0.0, u*y/sqrt(x*x+y*y), 0.0)" # counter-streaming azimuthal velocity +tritium1.momentum_function_uy(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight); if(x*x+y*y>0.0, -u*x/sqrt(x*x+y*y), 0.0)" # counter-streaming azimuthal velocity +tritium1.momentum_function_uz(x,y,z) = 0 +tritium1.do_not_push = 1 +tritium1.do_not_deposit = 1 + +helium1.species_type = helium4 +helium1.do_not_push = 1 +helium1.do_not_deposit = 1 + +neutron1.species_type = neutron +neutron1.do_not_push = 1 +neutron1.do_not_deposit = 1 + +my_constants.background_dens = 1.e26 +my_constants.beam_dens = 1.e20 + +deuterium2.species_type = deuterium +deuterium2.injection_style = "NRandomPerCell" +deuterium2.num_particles_per_cell = 8000 +deuterium2.profile = "parse_density_function" +## A tenth of the macroparticles in each cell is made of immobile high-density background deuteriums. +## The other nine tenths are made of fast low-density beam deuteriums. +deuterium2.density_function(x,y,z) = if(y - floor(y) < 0.1, 10.*background_dens, 10./9.*beam_dens) +deuterium2.momentum_distribution_type = "parse_momentum_function" +deuterium2.momentum_function_ux(x,y,z) = 0. +deuterium2.momentum_function_uy(x,y,z) = 0. 
+deuterium2.momentum_function_uz(x,y,z) = "if(y - floor(y) < 0.1, + 0., sqrt(2*m_deuterium*Energy_step*(floor(z)**2))/(m_deuterium*clight))" +deuterium2.do_not_push = 1 +deuterium2.do_not_deposit = 1 + +tritium2.species_type = tritium +tritium2.injection_style = "NRandomPerCell" +tritium2.num_particles_per_cell = 800 +tritium2.profile = constant +tritium2.density = background_dens +tritium2.momentum_distribution_type = "constant" +tritium2.do_not_push = 1 +tritium2.do_not_deposit = 1 + +helium2.species_type = helium4 +helium2.do_not_push = 1 +helium2.do_not_deposit = 1 + +neutron2.species_type = neutron +neutron2.do_not_push = 1 +neutron2.do_not_deposit = 1 + +################################# +############ COLLISION ########## +################################# +collisions.collision_names = DTF1 DTF2 + +DTF1.species = deuterium1 tritium1 +DTF1.product_species = helium1 neutron1 +DTF1.type = nuclearfusion +DTF1.fusion_multiplier = 1.e50 + +DTF2.species = deuterium2 tritium2 +DTF2.product_species = helium2 neutron2 +DTF2.type = nuclearfusion +DTF2.fusion_multiplier = 1.e15 +DTF2.fusion_probability_target_value = 0.02 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 1 +diag1.diag_type = Full +diag1.fields_to_plot = rho diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json index eec472698da..2c640846857 100644 --- a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json +++ b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json @@ -74,4 +74,4 @@ "particle_position_z": 819255.8152412223, "particle_weight": 1.0239999999424347e+29 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json new file mode 100644 index 00000000000..a5136dda644 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json @@ -0,0 +1,77 @@ +{ + "deuterium1": { + "particle_momentum_x": 1.8388106511899905e-13, + "particle_momentum_y": 1.837868790009435e-13, + "particle_momentum_z": 0.0, + "particle_position_x": 40959919.499819286, + "particle_position_y": 81919224.48541151, + "particle_theta": 32166860.23003994, + "particle_weight": 3216.984554806547 + }, + "deuterium2": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 3.336364094249911e-14, + "particle_position_x": 4095908.9083809257, + "particle_position_y": 8192069.080030457, + "particle_theta": 3216444.348910214, + "particle_weight": 3.1898417901971444e+29 + }, + "helium1": { + "particle_momentum_x": 1.858124399143442e-15, + "particle_momentum_y": 1.876715110797694e-15, + "particle_momentum_z": 1.7098432207359157e-15, + "particle_position_x": 152920.23233108618, + "particle_position_y": 323733.9138644398, + "particle_theta": 120064.13771707338, + "particle_weight": 1.603083276067953e-27 + }, + "helium2": { + "particle_momentum_x": 1.5195006688950936e-15, + "particle_momentum_y": 1.52430083815551e-15, + "particle_momentum_z": 1.7654865863613367e-15, + "particle_position_x": 136867.63803188328, + "particle_position_y": 286903.30393175944, + "particle_theta": 107912.20520382549, + "particle_weight": 2.0862696876352987e+19 + }, + "lev=0": { + "rho": 0.0 + }, + "neutron1": { + "particle_momentum_x": 1.7160671487712845e-15, + "particle_momentum_y": 1.7154753069055672e-15, + "particle_momentum_z": 1.7098432207359157e-15, + 
"particle_position_x": 152920.23233108618, + "particle_position_y": 323733.9138644398, + "particle_theta": 120064.13771707338, + "particle_weight": 1.603083276067953e-27 + }, + "neutron2": { + "particle_momentum_x": 1.5195006688950936e-15, + "particle_momentum_y": 1.52430083815551e-15, + "particle_momentum_z": 1.5463311225724366e-15, + "particle_position_x": 136867.63803188328, + "particle_position_y": 286903.30393175944, + "particle_theta": 107912.20520382549, + "particle_weight": 2.0862696876352987e+19 + }, + "tritium1": { + "particle_momentum_x": 1.8384658063720362e-13, + "particle_momentum_y": 1.8381593257898129e-13, + "particle_momentum_z": 0.0, + "particle_position_x": 40961278.052658774, + "particle_position_y": 81919046.8061561, + "particle_theta": 32163925.891884565, + "particle_weight": 3217.0912552970394 + }, + "tritium2": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_position_x": 409793.9651940968, + "particle_position_y": 819237.3558155322, + "particle_theta": 321974.4557387621, + "particle_weight": 3.218514276139388e+29 + } +} diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index e123efef29c..e274b9058ca 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2291,6 +2291,22 @@ compileTest = 0 doVis = 0 analysisRoutine = Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py +[Deuterium_Tritium_Fusion_RZ] +buildDir = . +inputFile = Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz +runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 +dim = 2 +addToCompileString = USE_RZ=TRUE +cmakeSetupOpts = -DWarpX_DIMS=RZ +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +analysisRoutine = Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py + [Maxwell_Hybrid_QED_solver] buildDir = . inputFile = Examples/Tests/Maxwell_Hybrid_QED/inputs_2d diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H index a807a426296..75946242745 100644 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H @@ -196,12 +196,36 @@ public: multiplier_ratio = max_N; } +#if (defined WARPX_DIM_RZ) + /* This momentum rotation is analogous to the one in ElasticCollisionPerez.H. */ + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(WarpX::n_rz_azimuthal_modes==1, + "RZ mode `warpx.n_rz_azimuthal_modes` must be 1 when using the binary nuclear fusion module."); + amrex::ParticleReal * const AMREX_RESTRICT theta1 = soa_1.m_rdata[PIdx::theta]; + amrex::ParticleReal * const AMREX_RESTRICT theta2 = soa_2.m_rdata[PIdx::theta]; +#endif + for (int k = 0; k < max_N; ++k) { // c1k : how many times the current particle of species 1 is paired with a particle // of species 2. Same for c2k. const int c1k = (k%NI1 < max_N%NI1) ? c1 + 1: c1; const int c2k = (k%NI2 < max_N%NI2) ? c2 + 1: c2; + +#if (defined WARPX_DIM_RZ) + /* In RZ geometry, macroparticles can collide with other macroparticles + * in the same *cylindrical* cell. For this reason, collisions between macroparticles + * are actually not local in space. In this case, the underlying assumption is that + * particles within the same cylindrical cell represent a cylindrically-symmetry + * momentum distribution function. 
Therefore, here, we temporarily rotate the + * momentum of one of the macroparticles in agreement with this cylindrical symmetry. + * (This is technically only valid if we use only the m=0 azimuthal mode in the simulation; + * there is a corresponding assert statement at initialization.) */ + amrex::ParticleReal const theta = theta2[I2[i2]]-theta1[I1[i1]]; + amrex::ParticleReal const u1xbuf = u1x[I1[i1]]; + u1x[I1[i1]] = u1xbuf*std::cos(theta) - u1y[I1[i1]]*std::sin(theta); + u1y[I1[i1]] = u1xbuf*std::sin(theta) + u1y[I1[i1]]*std::cos(theta); +#endif + SingleNuclearFusionEvent( u1x[ I1[i1] ], u1y[ I1[i1] ], u1z[ I1[i1] ], u2x[ I2[i2] ], u2y[ I2[i2] ], u2z[ I2[i2] ], @@ -211,6 +235,13 @@ public: m_probability_threshold, m_probability_target_value, m_fusion_type, engine); + +#if (defined WARPX_DIM_RZ) + amrex::ParticleReal const u1xbuf_new = u1x[I1[i1]]; + u1x[I1[i1]] = u1xbuf_new*std::cos(-theta) - u1y[I1[i1]]*std::sin(-theta); + u1y[I1[i1]] = u1xbuf_new*std::sin(-theta) + u1y[I1[i1]]*std::cos(-theta); +#endif + p_pair_indices_1[pair_index] = I1[i1]; p_pair_indices_2[pair_index] = I2[i2]; ++i1; if ( i1 == static_cast(I1e) ) { i1 = I1s; } From 5761b4bf998eab84ea0f7e4b132026593f3ddf9f Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Tue, 20 Sep 2022 16:28:39 -0700 Subject: [PATCH 0076/1346] PSATD: More Options for Time Dependency of J, Rho (#3242) * Rename PsatdAlgorithm as PsatdAlgorithmJConstantInTime * Add New Inputs: psatd.J_in_time, psatd.rho_in_time * Update PSATD Classes * Fix Bug for FDTD Build * Fix Warning for RZ PSATD Build * Fix Multi-J Loop w/ J Constant in Time * Clean up * Fix Error Message * Fix Time of Charge Deposition for Rho Linear * Add 3D Langmuir CI Tests w/ Multi-J Algo * Fix Checksums of New CI Tests * Remove Extra CI Tests (added in #3363) * Fix CI Tests w/ Multi-J PSATD * Add Docs for New Input Parameters * Galilean/Comoving PSATD Not Compatible w/ J Linear --- Docs/source/usage/parameters.rst | 6 +++ Regression/WarpX-tests.ini | 10 ++--- Source/BoundaryConditions/PML.H | 2 +- Source/BoundaryConditions/PML.cpp | 8 ++-- Source/Evolve/WarpXEvolve.cpp | 43 ++++++++++++------- .../SpectralAlgorithms/CMakeLists.txt | 2 +- .../SpectralAlgorithms/Make.package | 2 +- ...ithm.H => PsatdAlgorithmJConstantInTime.H} | 12 +++--- ....cpp => PsatdAlgorithmJConstantInTime.cpp} | 18 ++++---- .../SpectralAlgorithms/PsatdAlgorithmRZ.H | 5 ++- .../SpectralAlgorithms/PsatdAlgorithmRZ.cpp | 33 +++++++------- .../SpectralSolver/SpectralFieldData.H | 10 ++++- .../SpectralSolver/SpectralFieldData.cpp | 10 ++++- .../SpectralSolver/SpectralSolver.H | 10 +++-- .../SpectralSolver/SpectralSolver.cpp | 24 ++++++----- .../SpectralSolver/SpectralSolverRZ.H | 3 +- .../SpectralSolver/SpectralSolverRZ.cpp | 10 +++-- Source/FieldSolver/WarpXPushFieldsEM.cpp | 12 +++--- Source/Initialization/WarpXInitData.cpp | 4 +- Source/Utils/WarpXAlgorithmSelection.H | 14 ++++++ Source/Utils/WarpXAlgorithmSelection.cpp | 16 +++++++ Source/WarpX.H | 4 ++ Source/WarpX.cpp | 37 +++++++++++++--- 23 files changed, 198 insertions(+), 97 deletions(-) rename Source/FieldSolver/SpectralSolver/SpectralAlgorithms/{PsatdAlgorithm.H => PsatdAlgorithmJConstantInTime.H} (94%) rename Source/FieldSolver/SpectralSolver/SpectralAlgorithms/{PsatdAlgorithm.cpp => PsatdAlgorithmJConstantInTime.cpp} (98%) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index bf6772ee2f6..f943e5d26ab 100644 --- a/Docs/source/usage/parameters.rst +++ 
b/Docs/source/usage/parameters.rst @@ -1829,6 +1829,12 @@ Numerics and algorithms Note that the update with and without rho is also supported in RZ geometry. +* ``psatd.J_in_time`` (``constant`` or ``linear``; default ``constant``) + This determines whether the current density is assumed to be constant or linear in time, within the time step over which the electromagnetic fields are evolved. + +* ``psatd.rho_in_time`` (``linear``; default ``linear``) + This determines whether the charge density is assumed to be linear in time, within the time step over which the electromagnetic fields are evolved. + * ``psatd.v_galilean`` (`3 floats`, in units of the speed of light; default ``0. 0. 0.``) Defines the Galilean velocity. A non-zero velocity activates the Galilean algorithm, which suppresses numerical Cherenkov instabilities (NCI) in boosted-frame simulations (see the section :ref:`Numerical Stability and alternate formulation in a Galilean frame ` for more information). diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index e274b9058ca..9d1ae29a3cb 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -414,7 +414,7 @@ analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_multiJ] buildDir = . inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -433,7 +433,7 @@ analysisOutputImage = Langmuir_multi_psatd_multiJ.png [Langmuir_multi_psatd_multiJ_nodal] buildDir = . inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -699,7 +699,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_multiJ] buildDir = . inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON @@ -718,7 +718,7 @@ analysisOutputImage = Langmuir_multi_2d_psatd_multiJ.png [Langmuir_multi_2d_psatd_multiJ_nodal] buildDir = . 
inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 dim = 2 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON @@ -2719,7 +2719,7 @@ analysisRoutine = Examples/Tests/galilean/analysis.py [multi_J_rz_psatd] buildDir = . inputFile = Examples/Tests/multi_J/inputs_rz -runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 warpx.abort_on_warning_threshold=medium +runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 warpx.abort_on_warning_threshold=medium psatd.J_in_time=linear dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=RZ -DWarpX_PSATD=ON diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 840f9d825bd..4688c19e508 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -131,7 +131,7 @@ public: int ncell, int delta, amrex::IntVect ref_ratio, amrex::Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal, int do_moving_window, int pml_has_particles, int do_pml_in_domain, - const bool do_multi_J, + const int J_in_time, const int rho_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 9c3c17b0bf7..712e287f0a7 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -548,7 +548,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri int ncell, int delta, amrex::IntVect ref_ratio, Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal, int do_moving_window, int /*pml_has_particles*/, int do_pml_in_domain, - const bool do_multi_J, + const int J_in_time, const int rho_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, @@ -738,7 +738,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { #ifndef WARPX_USE_PSATD - amrex::ignore_unused(lev, dt, do_multi_J); + amrex::ignore_unused(lev, dt, J_in_time, rho_in_time); # if(AMREX_SPACEDIM!=3) amrex::ignore_unused(noy_fft); # endif @@ -759,7 +759,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri spectral_solver_fp = std::make_unique(lev, realspace_ba, dm, nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, dx, dt, in_pml, periodic_single_box, update_with_rho, - fft_do_time_averaging, do_multi_J, m_dive_cleaning, m_divb_cleaning); + fft_do_time_averaging, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning); #endif } @@ -878,7 +878,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri spectral_solver_cp = std::make_unique(lev, realspace_cba, cdm, nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, cdx, dt, in_pml, periodic_single_box, update_with_rho, - 
fft_do_time_averaging, do_multi_J, m_dive_cleaning, m_divb_cleaning); + fft_do_time_averaging, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning); #endif } } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 1a21360219b..202e91643bc 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -564,16 +564,19 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // 4) Deposit J at relative time -dt with time step dt // (dt[0] denotes the time step on mesh refinement level 0) - auto& current = (WarpX::do_current_centering) ? current_fp_nodal : current_fp; - mypc->DepositCurrent(current, dt[0], -dt[0]); - // Synchronize J: filter, exchange boundary, and interpolate across levels. - // With current centering, the nodal current is deposited in 'current', - // namely 'current_fp_nodal': SyncCurrent stores the result of its centering - // into 'current_fp' and then performs both filtering, if used, and exchange - // of guard cells. - SyncCurrent(current_fp, current_cp); - // Forward FFT of J - PSATDForwardTransformJ(current_fp, current_cp); + if (J_in_time == JInTime::Linear) + { + auto& current = (WarpX::do_current_centering) ? current_fp_nodal : current_fp; + mypc->DepositCurrent(current, dt[0], -dt[0]); + // Synchronize J: filter, exchange boundary, and interpolate across levels. + // With current centering, the nodal current is deposited in 'current', + // namely 'current_fp_nodal': SyncCurrent stores the result of its centering + // into 'current_fp' and then performs both filtering, if used, and exchange + // of guard cells. + SyncCurrent(current_fp, current_cp); + // Forward FFT of J + PSATDForwardTransformJ(current_fp, current_cp); + } // Number of depositions for multi-J scheme const int n_depose = WarpX::do_multi_J_n_depositions; @@ -587,13 +590,21 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) for (int i_depose = 0; i_depose < n_loop; i_depose++) { // Move J deposited previously, from new to old - PSATDMoveJNewToJOld(); + if (J_in_time == JInTime::Linear) + { + PSATDMoveJNewToJOld(); + } + + const amrex::Real t_depose_current = (J_in_time == JInTime::Linear) ? + (i_depose-n_depose+1)*sub_dt : (i_depose-n_depose+0.5_rt)*sub_dt; - const amrex::Real t_depose = (i_depose-n_depose+1)*sub_dt; + // TODO Update this when rho quadratic in time is implemented + const amrex::Real t_depose_charge = (i_depose-n_depose+1)*sub_dt; - // Deposit new J at relative time t_depose with time step dt + // Deposit new J at relative time t_depose_current with time step dt // (dt[0] denotes the time step on mesh refinement level 0) - mypc->DepositCurrent(current, dt[0], t_depose); + auto& current = (WarpX::do_current_centering) ? current_fp_nodal : current_fp; + mypc->DepositCurrent(current, dt[0], t_depose_current); // Synchronize J: filter, exchange boundary, and interpolate across levels. 
// With current centering, the nodal current is deposited in 'current', // namely 'current_fp_nodal': SyncCurrent stores the result of its centering @@ -609,8 +620,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Move rho deposited previously, from new to old PSATDMoveRhoNewToRhoOld(); - // Deposit rho at relative time t_depose - mypc->DepositCharge(rho_fp, t_depose); + // Deposit rho at relative time t_depose_charge + mypc->DepositCharge(rho_fp, t_depose_charge); // Filter, exchange boundary, and interpolate across levels SyncRho(); // Forward FFT of rho_new diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt index eb88b1f4e3c..a370d4b2d8d 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt @@ -1,6 +1,6 @@ target_sources(WarpX PRIVATE - PsatdAlgorithm.cpp + PsatdAlgorithmJConstantInTime.cpp PsatdAlgorithmJLinearInTime.cpp PsatdAlgorithmPml.cpp SpectralBaseAlgorithm.cpp diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package index 7c2bf428109..c798ffb01f5 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package @@ -1,5 +1,5 @@ CEXE_sources += SpectralBaseAlgorithm.cpp -CEXE_sources += PsatdAlgorithm.cpp +CEXE_sources += PsatdAlgorithmJConstantInTime.cpp CEXE_sources += PsatdAlgorithmJLinearInTime.cpp CEXE_sources += PsatdAlgorithmPml.cpp CEXE_sources += PsatdAlgorithmComoving.cpp diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H similarity index 94% rename from Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H rename to Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H index dd9c6a7fd37..0d2d67434b6 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H @@ -4,8 +4,8 @@ * * License: BSD-3-Clause-LBNL */ -#ifndef WARPX_PSATD_ALGORITHM_H_ -#define WARPX_PSATD_ALGORITHM_H_ +#ifndef WARPX_PSATD_ALGORITHM_J_CONSTANT_IN_TIME_H_ +#define WARPX_PSATD_ALGORITHM_J_CONSTANT_IN_TIME_H_ #include "FieldSolver/SpectralSolver/SpectralFieldData.H" #include "FieldSolver/SpectralSolver/SpectralKSpace.H" @@ -24,12 +24,12 @@ /* \brief Class that updates the field in spectral space * and stores the coefficients of the corresponding update equation. 
*/ -class PsatdAlgorithm : public SpectralBaseAlgorithm +class PsatdAlgorithmJConstantInTime : public SpectralBaseAlgorithm { public: /** - * \brief Constructor of the class PsatdAlgorithm + * \brief Constructor of the class PsatdAlgorithmJConstantInTime * * \param[in] spectral_kspace spectral space * \param[in] dm distribution mapping @@ -45,7 +45,7 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm * \param[in] dive_cleaning Update F as part of the field update, so that errors in divE=rho propagate away at the speed of light * \param[in] divb_cleaning Update G as part of the field update, so that errors in divB=0 propagate away at the speed of light */ - PsatdAlgorithm ( + PsatdAlgorithmJConstantInTime ( const SpectralKSpace& spectral_kspace, const amrex::DistributionMapping& dm, const SpectralFieldIndex& spectral_index, @@ -142,4 +142,4 @@ class PsatdAlgorithm : public SpectralBaseAlgorithm bool m_is_galilean; }; #endif // WARPX_USE_PSATD -#endif // WARPX_PSATD_ALGORITHM_H_ +#endif // WARPX_PSATD_ALGORITHM_J_CONSTANT_IN_TIME_H_ diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp similarity index 98% rename from Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp rename to Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp index 1cbc27f0b1e..8971061f6ce 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithm.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp @@ -4,7 +4,7 @@ * * License: BSD-3-Clause-LBNL */ -#include "PsatdAlgorithm.H" +#include "PsatdAlgorithmJConstantInTime.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" @@ -27,7 +27,7 @@ using namespace amrex; -PsatdAlgorithm::PsatdAlgorithm( +PsatdAlgorithmJConstantInTime::PsatdAlgorithmJConstantInTime( const SpectralKSpace& spectral_kspace, const DistributionMapping& dm, const SpectralFieldIndex& spectral_index, @@ -110,7 +110,7 @@ PsatdAlgorithm::PsatdAlgorithm( } void -PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const +PsatdAlgorithmJConstantInTime::pushSpectralFields (SpectralFieldData& f) const { const bool update_with_rho = m_update_with_rho; const bool time_averaging = m_time_averaging; @@ -340,7 +340,7 @@ PsatdAlgorithm::pushSpectralFields (SpectralFieldData& f) const } } -void PsatdAlgorithm::InitializeSpectralCoefficients ( +void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficients ( const SpectralKSpace& spectral_kspace, const amrex::DistributionMapping& dm, const amrex::Real dt) @@ -542,7 +542,7 @@ void PsatdAlgorithm::InitializeSpectralCoefficients ( } } -void PsatdAlgorithm::InitializeSpectralCoefficientsAveraging ( +void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficientsAveraging ( const SpectralKSpace& spectral_kspace, const amrex::DistributionMapping& dm, const amrex::Real dt) @@ -733,10 +733,10 @@ void PsatdAlgorithm::InitializeSpectralCoefficientsAveraging ( } } -void PsatdAlgorithm::CurrentCorrection (SpectralFieldData& field_data) +void PsatdAlgorithmJConstantInTime::CurrentCorrection (SpectralFieldData& field_data) { // Profiling - BL_PROFILE("PsatdAlgorithm::CurrentCorrection"); + BL_PROFILE("PsatdAlgorithmJConstantInTime::CurrentCorrection"); const SpectralFieldIndex& Idx = m_spectral_index; @@ -833,10 +833,10 @@ void PsatdAlgorithm::CurrentCorrection (SpectralFieldData& field_data) } void 
-PsatdAlgorithm::VayDeposition (SpectralFieldData& field_data) +PsatdAlgorithmJConstantInTime::VayDeposition (SpectralFieldData& field_data) { // Profiling - BL_PROFILE("PsatdAlgorithm::VayDeposition()"); + BL_PROFILE("PsatdAlgorithmJConstantInTime::VayDeposition()"); const SpectralFieldIndex& Idx = m_spectral_index; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H index 608da5fd500..097a1a9d69b 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.H @@ -23,7 +23,8 @@ class PsatdAlgorithmRZ : public SpectralBaseAlgorithmRZ bool const nodal, amrex::Real const dt_step, bool const update_with_rho, const bool time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning); // Redefine functions from base class @@ -62,7 +63,7 @@ class PsatdAlgorithmRZ : public SpectralBaseAlgorithmRZ amrex::Real m_dt; bool m_update_with_rho; bool m_time_averaging; - bool m_do_multi_J; + int m_J_in_time; bool m_dive_cleaning; bool m_divb_cleaning; SpectralRealCoefficients C_coef, S_ck_coef, X1_coef, X2_coef, X3_coef; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp index fec0ebc8f62..55b58821ce9 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp @@ -6,6 +6,7 @@ */ #include "PsatdAlgorithmRZ.H" #include "Utils/TextMsg.H" +#include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" @@ -23,7 +24,8 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace, bool const nodal, amrex::Real const dt, bool const update_with_rho, const bool time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning) // Initialize members of base class @@ -32,10 +34,11 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace, m_dt(dt), m_update_with_rho(update_with_rho), m_time_averaging(time_averaging), - m_do_multi_J(do_multi_J), + m_J_in_time(J_in_time), m_dive_cleaning(dive_cleaning), m_divb_cleaning(divb_cleaning) { + amrex::ignore_unused(rho_in_time); // Allocate the arrays of coefficients amrex::BoxArray const & ba = spectral_kspace.spectralspace_ba; @@ -47,28 +50,28 @@ PsatdAlgorithmRZ::PsatdAlgorithmRZ (SpectralKSpaceRZ const & spectral_kspace, coefficients_initialized = false; - if (time_averaging && do_multi_J) + if (time_averaging && J_in_time == JInTime::Linear) { X5_coef = SpectralRealCoefficients(ba, dm, n_rz_azimuthal_modes, 0); X6_coef = SpectralRealCoefficients(ba, dm, n_rz_azimuthal_modes, 0); } - if (time_averaging && !do_multi_J) + if (time_averaging && J_in_time != JInTime::Linear) { amrex::Abort(Utils::TextMsg::Err( - "RZ PSATD: psatd.do_time_averaging = 1 implemented only with warpx.do_multi_J = 1")); + "RZ PSATD: psatd.do_time_averaging=1 implemented only with psatd.J_in_time=linear")); } - if (dive_cleaning && !do_multi_J) + if (dive_cleaning && J_in_time != JInTime::Linear) { amrex::Abort(Utils::TextMsg::Err( - "RZ PSATD: warpx.do_dive_cleaning = 1 implemented only with warpx.do_multi_J = 1")); + "RZ 
PSATD: warpx.do_dive_cleaning=1 implemented only with psatd.J_in_time=linear")); } - if (divb_cleaning && !do_multi_J) + if (divb_cleaning && J_in_time != JInTime::Linear) { amrex::Abort(Utils::TextMsg::Err( - "RZ PSATD: warpx.do_divb_cleaning = 1 implemented only with warpx.do_multi_J = 1")); + "RZ PSATD: warpx.do_divb_cleaning=1 implemented only with psatd.J_in_time=linear")); } } @@ -80,7 +83,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) const bool update_with_rho = m_update_with_rho; const bool time_averaging = m_time_averaging; - const bool do_multi_J = m_do_multi_J; + const bool J_in_time_linear = (m_J_in_time == JInTime::Linear) ? true : false; const bool dive_cleaning = m_dive_cleaning; const bool divb_cleaning = m_divb_cleaning; @@ -109,7 +112,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) amrex::Array4 X5_arr; amrex::Array4 X6_arr; - if (time_averaging && do_multi_J) + if (time_averaging && J_in_time_linear) { X5_arr = X5_coef[mfi].array(); X6_arr = X6_coef[mfi].array(); @@ -235,7 +238,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) G_old = fields(i,j,k,G_m); } - if (do_multi_J) + if (J_in_time_linear) { const int Jp_m_new = Idx.Jx_new + Idx.n_fields*mode; const int Jm_m_new = Idx.Jy_new + Idx.n_fields*mode; @@ -332,7 +335,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const & f) { const bool time_averaging = m_time_averaging; - const bool do_multi_J = m_do_multi_J; + const bool J_in_time_linear = (m_J_in_time == JInTime::Linear) ? true : false; // Fill them with the right values: // Loop over boxes and allocate the corresponding coefficients @@ -353,7 +356,7 @@ void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const amrex::Array4 X5; amrex::Array4 X6; - if (time_averaging && do_multi_J) + if (time_averaging && J_in_time_linear) { X5 = X5_coef[mfi].array(); X6 = X6_coef[mfi].array(); @@ -392,7 +395,7 @@ void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const X3(i,j,k,mode) = - c*c * dt*dt / (3._rt*ep0); } - if (time_averaging && do_multi_J) + if (time_averaging && J_in_time_linear) { constexpr amrex::Real c2 = PhysConst::c; const amrex::Real dt3 = dt * dt * dt; diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index 811fdd4d73c..4ab88f1a378 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -55,7 +55,8 @@ class SpectralFieldIndex */ SpectralFieldIndex (const bool update_with_rho, const bool time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning, const bool pml, @@ -89,8 +90,13 @@ class SpectralFieldIndex int Ex_avg = -1, Ey_avg = -1, Ez_avg = -1; int Bx_avg = -1, By_avg = -1, Bz_avg = -1; - // Multi-J, div(E) and div(B) cleaning + // J linear in time int Jx_new = -1, Jy_new = -1, Jz_new = -1; + + // rho quadratic in time + int rho_mid = -1; + + // div(E) and div(B) cleaning int F = -1, G = -1; // PML diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp index 00346a2c772..56189e0ff06 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp @@ -35,7 +35,8 @@ using namespace amrex; 
SpectralFieldIndex::SpectralFieldIndex (const bool update_with_rho, const bool time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning, const bool pml, @@ -68,13 +69,18 @@ SpectralFieldIndex::SpectralFieldIndex (const bool update_with_rho, if (divb_cleaning) G = c++; - if (do_multi_J) + if (J_in_time == JInTime::Linear) { Jx_new = c++; Jy_new = c++; Jz_new = c++; } + if (rho_in_time == RhoInTime::Quadratic) + { + rho_mid = c++; + } + if (pml_rz) { Er_pml = c++; Et_pml = c++; diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index 684cf9586b8..38b2420105a 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -58,9 +58,10 @@ class SpectralSolver * (no domain decomposition) * \param[in] update_with_rho whether rho is used in the field update equations * \param[in] fft_do_time_averaging whether the time averaging algorithm is used - * \param[in] do_multi_J whether the multi-J algorithm is used (hence two currents - * computed at the beginning and the end of the time interval - * instead of one current computed at half time) + * \param[in] J_in_time integer that corresponds to the time dependency of J + * (constant, linear) for the PSATD algorithm + * \param[in] rho_in_time integer that corresponds to the time dependency of rho + * (linear, quadratic) for the PSATD algorithm * \param[in] dive_cleaning whether to use div(E) cleaning to account for errors in * Gauss law (new field F in the update equations) * \param[in] divb_cleaning whether to use div(B) cleaning to account for errors in @@ -79,7 +80,8 @@ class SpectralSolver const bool periodic_single_box, const bool update_with_rho, const bool fft_do_time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning); diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp index 75c82319c11..c0d7b89412b 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp @@ -8,7 +8,7 @@ #include "FieldSolver/SpectralSolver/SpectralFieldData.H" #include "SpectralAlgorithms/PsatdAlgorithmComoving.H" #include "SpectralAlgorithms/PsatdAlgorithmPml.H" -#include "SpectralAlgorithms/PsatdAlgorithm.H" +#include "SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H" #include "SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H" #include "SpectralKSpace.H" #include "SpectralSolver.H" @@ -30,7 +30,8 @@ SpectralSolver::SpectralSolver( const bool pml, const bool periodic_single_box, const bool update_with_rho, const bool fft_do_time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning) { @@ -41,8 +42,9 @@ SpectralSolver::SpectralSolver( // as well as the value of the corresponding k coordinates) const SpectralKSpace k_space= SpectralKSpace(realspace_ba, dm, dx); - m_spectral_index = SpectralFieldIndex(update_with_rho, fft_do_time_averaging, - do_multi_J, dive_cleaning, divb_cleaning, pml); + m_spectral_index = SpectralFieldIndex( + update_with_rho, fft_do_time_averaging, J_in_time, rho_in_time, + dive_cleaning, divb_cleaning, pml); // - Select the algorithm depending on the input parameters // Initialize the corresponding coefficients over k space @@ -64,18 +66,18 @@ 
SpectralSolver::SpectralSolver( } else // PSATD algorithms: standard, Galilean, averaged Galilean, multi-J { - if (do_multi_J) + if (J_in_time == JInTime::Constant) { - algorithm = std::make_unique( + algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, - dt, fft_do_time_averaging, dive_cleaning, divb_cleaning); + v_galilean, dt, update_with_rho, fft_do_time_averaging, + dive_cleaning, divb_cleaning); } - else // standard, Galilean, averaged Galilean + else // J linear in time { - algorithm = std::make_unique( + algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, - v_galilean, dt, update_with_rho, fft_do_time_averaging, - dive_cleaning, divb_cleaning); + dt, fft_do_time_averaging, dive_cleaning, divb_cleaning); } } } diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H index 30c92251253..45b55d9d4da 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H @@ -37,7 +37,8 @@ class SpectralSolverRZ bool const with_pml, bool const update_with_rho, const bool fft_do_time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning); diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp index 3f4bddd8b16..23d93fe045b 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp @@ -35,7 +35,8 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev, bool const with_pml, bool const update_with_rho, const bool fft_do_time_averaging, - const bool do_multi_J, + const int J_in_time, + const int rho_in_time, const bool dive_cleaning, const bool divb_cleaning) : k_space(realspace_ba, dm, dx) @@ -47,8 +48,9 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev, // as well as the value of the corresponding k coordinates. const bool is_pml = false; - m_spectral_index = SpectralFieldIndex(update_with_rho, fft_do_time_averaging, - do_multi_J, dive_cleaning, divb_cleaning, is_pml, with_pml); + m_spectral_index = SpectralFieldIndex( + update_with_rho, fft_do_time_averaging, J_in_time, rho_in_time, + dive_cleaning, divb_cleaning, is_pml, with_pml); // - Select the algorithm depending on the input parameters // Initialize the corresponding coefficients over k space @@ -60,7 +62,7 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev, // v_galilean is 0: use standard PSATD algorithm algorithm = std::make_unique( k_space, dm, m_spectral_index, n_rz_azimuthal_modes, norder_z, nodal, dt, - update_with_rho, fft_do_time_averaging, do_multi_J, dive_cleaning, divb_cleaning); + update_with_rho, fft_do_time_averaging, J_in_time, rho_in_time, dive_cleaning, divb_cleaning); } else { // Otherwise: use the Galilean algorithm algorithm = std::make_unique( diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 7499ee140a2..e8f20661a2d 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -280,9 +280,9 @@ void WarpX::PSATDForwardTransformJ ( { Idx = spectral_solver_fp[lev]->m_spectral_index; - idx_jx = (WarpX::do_multi_J) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx); - idx_jy = (WarpX::do_multi_J) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy); - idx_jz = (WarpX::do_multi_J) ? 
static_cast(Idx.Jz_new) : static_cast(Idx.Jz); + idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx); + idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy); + idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz); ForwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], idx_jx, idx_jy, idx_jz); @@ -290,9 +290,9 @@ void WarpX::PSATDForwardTransformJ ( { Idx = spectral_solver_cp[lev]->m_spectral_index; - idx_jx = (WarpX::do_multi_J) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx); - idx_jy = (WarpX::do_multi_J) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy); - idx_jz = (WarpX::do_multi_J) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz); + idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx); + idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy); + idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz); ForwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], idx_jx, idx_jy, idx_jz); } diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 6f9178af707..b28c9bd5957 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -497,7 +497,7 @@ WarpX::InitPML () pml_ncell, pml_delta, amrex::IntVect::TheZeroVector(), dt[0], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, - do_multi_J, + J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), guard_cells.ng_FieldSolver.max(), @@ -534,7 +534,7 @@ WarpX::InitPML () pml_ncell, pml_delta, refRatio(lev-1), dt[lev], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, - do_multi_J, do_pml_dive_cleaning, do_pml_divb_cleaning, + J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), guard_cells.ng_FieldSolver.max(), v_particle_pml, diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index ba752e012b7..97dd3b89fe3 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -83,6 +83,20 @@ struct GatheringAlgo { }; }; +struct JInTime { + enum { + Constant = 0, + Linear = 1 + }; +}; + +struct RhoInTime { + enum { + Linear = 1, + Quadratic = 2 + }; +}; + /** Strategy to compute weights for use in load balance. 
*/ struct LoadBalanceCostsUpdateAlgo { diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp index d0db13e12e7..088d5322f77 100644 --- a/Source/Utils/WarpXAlgorithmSelection.cpp +++ b/Source/Utils/WarpXAlgorithmSelection.cpp @@ -62,6 +62,18 @@ const std::map gathering_algo_to_int = { {"default", GatheringAlgo::EnergyConserving } }; +const std::map J_in_time_to_int = { + {"constant", JInTime::Constant}, + {"linear", JInTime::Linear}, + {"default", JInTime::Constant} +}; + +const std::map rho_in_time_to_int = { + {"linear", RhoInTime::Linear}, + {"quadratic", RhoInTime::Quadratic}, + {"default", RhoInTime::Linear} +}; + const std::map load_balance_costs_update_algo_to_int = { {"timers", LoadBalanceCostsUpdateAlgo::Timers }, {"gpuclock", LoadBalanceCostsUpdateAlgo::GpuClock }, @@ -131,6 +143,10 @@ GetAlgorithmInteger( amrex::ParmParse& pp, const char* pp_search_key ){ algo_to_int = charge_deposition_algo_to_int; } else if (0 == std::strcmp(pp_search_key, "field_gathering")) { algo_to_int = gathering_algo_to_int; + } else if (0 == std::strcmp(pp_search_key, "J_in_time")) { + algo_to_int = J_in_time_to_int; + } else if (0 == std::strcmp(pp_search_key, "rho_in_time")) { + algo_to_int = rho_in_time_to_int; } else if (0 == std::strcmp(pp_search_key, "load_balance_costs_update")) { algo_to_int = load_balance_costs_update_algo_to_int; } else if (0 == std::strcmp(pp_search_key, "em_solver_medium")) { diff --git a/Source/WarpX.H b/Source/WarpX.H index a0f0f22f990..5c143a1611c 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -186,6 +186,10 @@ public: */ static amrex::Vector particle_boundary_hi; + //! Integers that correspond to the time dependency of J (constant, linear) + //! and rho (linear, quadratic) for the PSATD algorithm + static short J_in_time; + static short rho_in_time; //! If true, the current is deposited on a nodal grid and then centered onto a staggered grid //! 
using finite centering of order given by #current_centering_nox, #current_centering_noy, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 1cdd2e3e0d7..aa6301a6e4b 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -121,6 +121,8 @@ short WarpX::charge_deposition_algo; short WarpX::field_gathering_algo; short WarpX::particle_pusher_algo; short WarpX::maxwell_solver_id; +short WarpX::J_in_time; +short WarpX::rho_in_time; short WarpX::load_balance_costs_update_algo; bool WarpX::do_dive_cleaning = false; bool WarpX::do_divb_cleaning = false; @@ -1154,6 +1156,11 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE(noz_fft > 0, "PSATD order must be finite unless psatd.periodic_single_box_fft is used"); } + // Integers that correspond to the time dependency of J (constant, linear) + // and rho (linear, quadratic) for the PSATD algorithm + J_in_time = GetAlgorithmInteger(pp_psatd, "J_in_time"); + rho_in_time = GetAlgorithmInteger(pp_psatd, "rho_in_time"); + // Current correction activated by default, unless a charge-conserving // current deposition (Esirkepov, Vay) or the div(E) cleaning scheme // are used @@ -1308,10 +1315,28 @@ WarpX::ReadParameters () v_galilean_is_zero, "Multi-J algorithm not implemented with Galilean PSATD" ); + } - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(update_with_rho, - "psatd.update_with_rho must be set to 1 when warpx.do_multi_J = 1" - ); + if (J_in_time == JInTime::Constant) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + rho_in_time == RhoInTime::Linear, + "psatd.J_in_time=constant supports only psatd.rho_in_time=linear"); + } + + if (J_in_time == JInTime::Linear) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + update_with_rho, + "psatd.update_with_rho must be set to 1 when psatd.J_in_time=linear"); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + v_galilean_is_zero, + "psatd.J_in_time=linear not implemented with Galilean PSATD"); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + v_comoving_is_zero, + "psatd.J_in_time=linear not implemented with comoving PSATD"); } for (int dir = 0; dir < AMREX_SPACEDIM; dir++) @@ -2216,7 +2241,8 @@ void WarpX::AllocLevelSpectralSolverRZ (amrex::Vector Date: Tue, 20 Sep 2022 16:54:42 -0700 Subject: [PATCH 0077/1346] Add option to deposit laser on main grid (#3235) * Add option to deposit laser on main grid * Use `deposit_on_main_grid` * Update Source/Particles/LaserParticleContainer.cpp * Add documentation * Update documentation --- Docs/source/usage/parameters.rst | 5 +++++ Source/Particles/LaserParticleContainer.cpp | 16 ++++++++++------ Source/Particles/MultiParticleContainer.H | 1 + Source/Particles/MultiParticleContainer.cpp | 16 ++++++++++++++++ 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index f943e5d26ab..36c6dc3b252 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -1269,6 +1269,11 @@ Laser initialization per angular mode. The laser particles are loaded into radial spokes, with the number of spokes given by min_particles_per_mode*(warpx.n_rz_azimuthal_modes-1). +* ``lasers.deposit_on_main_grid`` (`int`) optional (default `0`) + When using mesh refinement, whether the antenna that emits the laser + deposits charge/current only on the main grid (i.e. level 0), or also + on the higher mesh-refinement levels. + * ``warpx.num_mirrors`` (`int`) optional (default `0`) Users can input perfect mirror condition inside the simulation domain. The number of mirrors is given by ``warpx.num_mirrors``. 
The mirrors are diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index 565ac3ee1b8..a8df886be83 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -550,6 +550,8 @@ LaserParticleContainer::Evolve (int lev, amrex::LayoutData* cost = WarpX::getCosts(lev); + const bool has_buffer = cjx; + #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif @@ -578,18 +580,21 @@ LaserParticleContainer::Evolve (int lev, auto& uzp = attribs[PIdx::uz]; const long np = pti.numParticles(); - // For now, laser particles do not take the current buffers into account - const long np_current = np; - plane_Xp.resize(np); plane_Yp.resize(np); amplitude_E.resize(np); + // Determine whether particles will deposit on the fine or coarse level + long np_current = np; + if (lev > 0 && m_deposit_on_main_grid && has_buffer) { + np_current = 0; + } + if (rho && ! skip_deposition && ! do_not_deposit) { int* AMREX_RESTRICT ion_lev = nullptr; DepositCharge(pti, wp, ion_lev, rho, 0, 0, np_current, thread_num, lev, lev); - if (crho) { + if (has_buffer) { DepositCharge(pti, wp, ion_lev, crho, 0, np_current, np-np_current, thread_num, lev, lev-1); } @@ -628,7 +633,6 @@ LaserParticleContainer::Evolve (int lev, 0, np_current, thread_num, lev, lev, dt, relative_time); - const bool has_buffer = cjx; if (has_buffer) { // Deposit in buffers @@ -643,7 +647,7 @@ LaserParticleContainer::Evolve (int lev, int* AMREX_RESTRICT ion_lev = nullptr; DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current, thread_num, lev, lev); - if (crho) { + if (has_buffer) { DepositCharge(pti, wp, ion_lev, crho, 1, np_current, np-np_current, thread_num, lev, lev-1); } diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index bf68dc0681f..e2edc4dd71e 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -371,6 +371,7 @@ protected: //! instead of depositing (current, charge) on the finest patch level, deposit to the coarsest grid std::vector m_deposit_on_main_grid; + std::vector m_laser_deposit_on_main_grid; //! 
instead of gathering fields from the finest patch level, gather from the coarsest std::vector m_gather_from_main_grid; diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index c6cb9032e1d..662e0b45f6d 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -112,6 +112,7 @@ MultiParticleContainer::MultiParticleContainer (AmrCore* amr_core) for (int i = nspecies; i < nspecies+nlasers; ++i) { allcontainers[i] = std::make_unique(amr_core, i, lasers_names[i-nspecies]); + allcontainers[i]->m_deposit_on_main_grid = m_laser_deposit_on_main_grid[i-nspecies]; } pc_tmp = std::make_unique(amr_core); @@ -347,6 +348,21 @@ MultiParticleContainer::ReadParameters () ParmParse pp_lasers("lasers"); pp_lasers.queryarr("names", lasers_names); + auto const nlasers = lasers_names.size(); + // Get lasers to deposit on main grid + m_laser_deposit_on_main_grid.resize(nlasers, false); + std::vector tmp; + pp_lasers.queryarr("deposit_on_main_grid", tmp); + for (auto const& name : tmp) { + auto it = std::find(lasers_names.begin(), lasers_names.end(), name); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + it != lasers_names.end(), + "laser '" + name + + "' in lasers.deposit_on_main_grid must be part of lasers.lasers_names"); + int i = std::distance(lasers_names.begin(), it); + m_laser_deposit_on_main_grid[i] = true; + } + #ifdef WARPX_QED ParmParse pp_warpx("warpx"); From 9aca2a6054476df203aaf25a3f3573d7c97a4e6f Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 21 Sep 2022 16:16:01 -0700 Subject: [PATCH 0078/1346] Fix compilation of RZ version on GPU (#3418) --- .../BinaryCollision/NuclearFusion/NuclearFusionFunc.H | 3 --- 1 file changed, 3 deletions(-) diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H index 75946242745..391682b469e 100644 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H @@ -197,9 +197,6 @@ public: } #if (defined WARPX_DIM_RZ) - /* This momentum rotation is analogous to the one in ElasticCollisionPerez.H. 
*/ - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(WarpX::n_rz_azimuthal_modes==1, - "RZ mode `warpx.n_rz_azimuthal_modes` must be 1 when using the binary nuclear fusion module."); amrex::ParticleReal * const AMREX_RESTRICT theta1 = soa_1.m_rdata[PIdx::theta]; amrex::ParticleReal * const AMREX_RESTRICT theta2 = soa_2.m_rdata[PIdx::theta]; #endif From 797e78d7ef6f42e315c0fbeab4556ede38fb0a1b Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 21 Sep 2022 16:28:58 -0700 Subject: [PATCH 0079/1346] Fix update of particles flushed already in BTD (#3419) --- Source/Diagnostics/BTDiagnostics.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index f9f3315d997..1bd200fb4ee 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -1192,7 +1192,7 @@ void BTDiagnostics::UpdateTotalParticlesFlushed(int i_buffer) { for (int isp = 0; isp < m_totalParticles_flushed_already[i_buffer].size(); ++isp) { - m_totalParticles_flushed_already[i_buffer][isp] += m_totalParticles_in_buffer[i_buffer][isp]; + m_totalParticles_flushed_already[i_buffer][isp] += m_particles_buffer[i_buffer][isp]->TotalNumberOfParticles(); } } From 90b72e804c96eea345896125b7eb7d3c3272711b Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 21 Sep 2022 16:50:39 -0700 Subject: [PATCH 0080/1346] CI: Test New v. Legacy BTD in `BTD_ReducedSliceDiag` (#3371) * CI: Test New BTD in `BTD_ReducedSliceDiag` * Compare Legacy BTD vs. New BTD * Fix Bug in Loading of openPMD Array Data * Fix CI Analysis --- .../analysis_3Dbacktransformed_diag.py | 67 ++++++++++++++----- .../Modules/boosted_diags/inputs_3d_slice | 40 ++++++----- .../benchmarks_json/BTD_ReducedSliceDiag.json | 32 +++++++++ 3 files changed, 102 insertions(+), 37 deletions(-) create mode 100644 Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json diff --git a/Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py b/Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py index 2dac6d245b2..6fa4e9c936f 100755 --- a/Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py +++ b/Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py @@ -16,30 +16,65 @@ between the full back-transformed diagnostic and the reduced diagnostic (i.e., x-z slice) . ''' +import os +import sys + import numpy as np +import openpmd_api as io import read_raw_data +import yt + +yt.funcs.mylog.setLevel(0) + +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +filename = sys.argv[1] -# Read data from back-transformed diagnostics of entire domain -snapshot = './lab_frame_data/snapshots/snapshot00002' +# Tolerances to check consistency between legacy BTD and new BTD +rtol = 1e-16 +atol = 1e-16 + +# Read data from legacy back-transformed diagnostics (entire domain) +snapshot = './lab_frame_data/snapshots/snapshot00003' header = './lab_frame_data/snapshots/Header' allrd, info = read_raw_data.read_lab_snapshot(snapshot, header) -F = allrd['Ez'] -print("F.shape ", F.shape) -F_1D = np.squeeze(F[F.shape[0]//2,F.shape[1]//2,:]) - +Ez_legacy = allrd['Ez'] +print(f'Ez_legacy.shape = {Ez_legacy.shape}') +Ez_legacy_1D = np.squeeze(Ez_legacy[Ez_legacy.shape[0]//2,Ez_legacy.shape[1]//2,:]) -# Read data from reduced back-transformed diagnostics (i.e. 
slice) -snapshot_slice = './lab_frame_data/slices/slice00002' +# Read data from reduced back-transformed diagnostics (slice) +snapshot_slice = './lab_frame_data/slices/slice00003' header_slice = './lab_frame_data/slices/Header' allrd, info = read_raw_data.read_lab_snapshot(snapshot_slice, header_slice) -Fs = allrd['Ez'] -print("Fs.shape", Fs.shape) -Fs_1D = np.squeeze(Fs[Fs.shape[0]//2,1,:]) +Ez_legacy_slice = allrd['Ez'] +print(f'Ez_legacy_slice.shape = {Ez_legacy_slice.shape}') +Ez_legacy_slice_1D = np.squeeze(Ez_legacy_slice[Ez_legacy_slice.shape[0]//2,1,:]) + +# Read data from new back-transformed diagnostics (plotfile) +ds_plotfile = yt.load(filename) +data = ds_plotfile.covering_grid( + level=0, + left_edge=ds_plotfile.domain_left_edge, + dims=ds_plotfile.domain_dimensions) +Ez_plotfile = data[('mesh', 'Ez')].to_ndarray() + +# Read data from new back-transformed diagnostics (openPMD) +series = io.Series("./diags/diag2/openpmd_%T.h5", io.Access.read_only) +ds_openpmd = series.iterations[3] +Ez_openpmd = ds_openpmd.meshes['E']['z'].load_chunk() +Ez_openpmd = Ez_openpmd.transpose() +series.flush() -error_rel = np.max(np.abs(Fs_1D - F_1D)) / np.max(np.abs(F_1D)) -tolerance_rel = 1E-15 +# Compare arrays to check consistency between new BTD formats (plotfile and openPMD) +assert(np.allclose(Ez_plotfile, Ez_openpmd, rtol=rtol, atol=atol)) -print("error_rel : " + str(error_rel)) -print("tolerance_rel: " + str(tolerance_rel)) +# Check slicing +err = np.max(np.abs(Ez_legacy_slice_1D-Ez_legacy_1D)) / np.max(np.abs(Ez_legacy_1D)) +tol = 1e-16 +print(f'error = {err}') +print(f'tolerance = {tol}') +assert(err < tol) -assert( error_rel < tolerance_rel ) +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Modules/boosted_diags/inputs_3d_slice b/Examples/Modules/boosted_diags/inputs_3d_slice index 5c774fc893e..445bc8a807f 100644 --- a/Examples/Modules/boosted_diags/inputs_3d_slice +++ b/Examples/Modules/boosted_diags/inputs_3d_slice @@ -113,27 +113,25 @@ slice.dt_slice_snapshots_lab = 3.3356409519815207e-12 slice.particle_slice_width_lab = 2.e-6 # Diagnostics -diagnostics.diags_names = diag1 btd_openpmd btd_pltfile -diag1.intervals = 10000 -diag1.diag_type = Full - -btd_openpmd.diag_type = BackTransformed -btd_openpmd.do_back_transformed_fields = 1 -btd_openpmd.num_snapshots_lab = 4 -btd_openpmd.dz_snapshots_lab = 0.001 -btd_openpmd.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho -btd_openpmd.format = openpmd -btd_openpmd.buffer_size = 32 -btd_openpmd.openpmd_backend = h5 - -btd_pltfile.diag_type = BackTransformed -btd_pltfile.do_back_transformed_fields = 1 -btd_pltfile.num_snapshots_lab = 4 -btd_pltfile.dz_snapshots_lab = 0.001 -btd_pltfile.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho -btd_pltfile.format = plotfile -btd_pltfile.buffer_size = 32 -btd_pltfile.write_species = 1 +diagnostics.diags_names = diag1 diag2 + +diag1.diag_type = BackTransformed +diag1.do_back_transformed_fields = 1 +diag1.num_snapshots_lab = 4 +diag1.dz_snapshots_lab = 0.001 +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho +diag1.format = plotfile +diag1.buffer_size = 32 +diag1.write_species = 1 + +diag2.diag_type = BackTransformed +diag2.do_back_transformed_fields = 1 +diag2.num_snapshots_lab = 4 +diag2.dz_snapshots_lab = 0.001 +diag2.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho +diag2.format = openpmd +diag2.buffer_size = 32 +diag2.openpmd_backend = h5 # old BTD diagnostics warpx.do_back_transformed_diagnostics = 1 diff --git 
a/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json b/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json new file mode 100644 index 00000000000..43b609e30d8 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json @@ -0,0 +1,32 @@ +{ + "beam": { + "particle_momentum_x": 1.1691227280755267e-17, + "particle_momentum_y": 2.3913921202517482e-17, + "particle_momentum_z": 5.159955875755814e-14, + "particle_position_x": 0.0005606444019625324, + "particle_position_y": 0.0008259894145730366, + "particle_position_z": 2.980004880402436, + "particle_weight": 62415.090744607616 + }, + "electrons": { + "particle_momentum_x": 9.678379481090198e-20, + "particle_momentum_y": 2.5764224165967887e-19, + "particle_momentum_z": 3.0774186320322946e-19, + "particle_position_x": 0.0025853613792120563, + "particle_position_y": 0.0037728464704368673, + "particle_position_z": 0.1901073014926564, + "particle_weight": 1787508743647.5366 + }, + "lev=0": { + "Bx": 498944441.0100795, + "By": 243006013.74301538, + "Bz": 18073698.419804543, + "Ex": 7.004940326170131e+16, + "Ey": 1.4626998046065405e+17, + "Ez": 2.144506972885858e+16, + "jx": 8.583168660352884e+17, + "jy": 2.1430893961423186e+18, + "jz": 2.3459512897800802e+19, + "rho": 74967092858.04996 + } +} \ No newline at end of file From 6febc63b4d58e7bacc38d1a2a98d5852663d1b31 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 23 Sep 2022 23:11:18 +0200 Subject: [PATCH 0081/1346] fix labels in inputfiles (#3422) --- Examples/Physics_applications/laser_acceleration/inputs_1d | 2 +- Examples/Physics_applications/laser_acceleration/inputs_2d | 2 +- Examples/Physics_applications/laser_acceleration/inputs_rz | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_1d b/Examples/Physics_applications/laser_acceleration/inputs_1d index 8d92bfa356c..95e54c7d43e 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_1d +++ b/Examples/Physics_applications/laser_acceleration/inputs_1d @@ -51,7 +51,7 @@ electrons.addIntegerAttributes = regionofinterest electrons.attribute.regionofinterest(x,y,z,ux,uy,uz,t) = " (z>12.0e-6) * (z<13.0e-6)" ################################# -############ PLASMA ############# +############ LASER ############## ################################# lasers.names = laser1 laser1.profile = Gaussian diff --git a/Examples/Physics_applications/laser_acceleration/inputs_2d b/Examples/Physics_applications/laser_acceleration/inputs_2d index 422e33ddf57..edcb8aba279 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_2d +++ b/Examples/Physics_applications/laser_acceleration/inputs_2d @@ -68,7 +68,7 @@ beam.uy_th = 2. beam.uz_th = 50. ################################# -############ PLASMA ############# +############ LASER ############## ################################# lasers.names = laser1 laser1.profile = Gaussian diff --git a/Examples/Physics_applications/laser_acceleration/inputs_rz b/Examples/Physics_applications/laser_acceleration/inputs_rz index 971f1b538cd..44350882193 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_rz +++ b/Examples/Physics_applications/laser_acceleration/inputs_rz @@ -70,7 +70,7 @@ beam.uy_th = 2. beam.uz_th = 50. 
################################# -############ PLASMA ############# +############ LASER ############## ################################# lasers.names = laser1 laser1.profile = Gaussian From cf74a5b3e6de44a030bdc004802050b9427dcdbc Mon Sep 17 00:00:00 2001 From: Ryan Sandberg Date: Fri, 23 Sep 2022 20:01:19 -0700 Subject: [PATCH 0082/1346] BTD diagnostics specified by intervals (#3367) * BTD diagnostics specified by intervals * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * address CI errors * refactor and test * no duplicate or out-of-order snapshots * update documentation and tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * clean for CI * still fighting CI * Update Source/Diagnostics/BTDiagnostics.cpp Co-authored-by: Axel Huebl * Apply suggestions from code review Co-authored-by: Axel Huebl * refactor includes * add an explicit constexpr * Apply suggestions from code review Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> * describe algorithm for BTD intervals list * revert to old description of num_snapshots_lab * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * preserve behavior of num_snapshots_lab Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 12 +++ .../RigidInjection/inputs_2d_BoostedFrame | 2 +- .../Modules/boosted_diags/inputs_3d_slice | 2 +- Source/Diagnostics/BTDiagnostics.H | 3 + Source/Diagnostics/BTDiagnostics.cpp | 16 ++- Source/Utils/IntervalsParser.H | 58 ++++++++++- Source/Utils/IntervalsParser.cpp | 99 ++++++++++++++++++- 7 files changed, 185 insertions(+), 7 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 36c6dc3b252..f87795a776a 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2159,6 +2159,18 @@ BackTransformed Diagnostics (with support for Plotfile/openPMD output) * ``.num_snapshots_lab`` (`integer`) Only used when ``.diag_type`` is ``BackTransformed``. The number of lab-frame snapshots that will be written. + Only this option or ``intervals`` should be specified; + a run-time error occurs if the user attempts to set both ``num_snapshots_lab`` and ``intervals``. + +* ``.intervals`` (`string`) + Only used when ``.diag_type`` is ``BackTransformed``. + Using the `Intervals parser`_ syntax, this string defines the lab frame times at which data is dumped, + given as multiples of the step size ``dt_snapshots_lab`` or ``dz_snapshots_lab`` described below. + Example: ``btdiag1.intervals = 10:11,20:24:2`` and ``btdiag1.dt_snapshots_lab = 1.e-12`` + indicate to dump at lab times ``1e-11``, ``1.1e-11``, ``2e-11``, ``2.2e-11``, and ``2.4e-11`` seconds. + Note that the stop interval, the second number in the slice, must always be specified. + Only this option or ``num_snapshots_lab`` should be specified; + a run-time error occurs if the user attempts to set both ``num_snapshots_lab`` and ``intervals``. * ``.dt_snapshots_lab`` (`float`, in seconds) Only used when ``.diag_type`` is ``BackTransformed``. 
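To make the documented example concrete, the following is a minimal Python sketch (an editorial illustration, not part of this patch; the helper name expand_btd_intervals is hypothetical and not a WarpX API) of how an ``intervals`` string expands into sorted, duplicate-free snapshot indices and the corresponding lab-frame dump times. It ignores the boost-dependent time offset that BTDiagnostics adds to each snapshot internally.

    def expand_btd_intervals(intervals, dt_snapshots_lab):
        # Each comma-separated slice is start:stop[:period]; stop is mandatory for BTD.
        indices = set()
        for slc in intervals.split(','):
            parts = slc.strip().split(':')
            start = int(parts[0]) if parts[0] else 0
            stop = int(parts[1])
            period = int(parts[2]) if len(parts) > 2 and parts[2] else 1
            indices.update(range(start, stop + 1, period))
        indices = sorted(indices)  # increasing order, no duplicates
        return indices, [i * dt_snapshots_lab for i in indices]

    # Documented example: btdiag1.intervals = 10:11,20:24:2 with dt_snapshots_lab = 1.e-12
    # gives indices [10, 11, 20, 22, 24] and lab times 1e-11, 1.1e-11, 2e-11, 2.2e-11, 2.4e-11 s.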
diff --git a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame index 73301448ef8..bd8ce220acb 100644 --- a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame +++ b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame @@ -53,7 +53,7 @@ diagnostics.diags_names = diag1 diag2 diag1.diag_type = BackTransformed diag1.do_back_transformed_fields = 1 -diag1.num_snapshots_lab = 2 +diag1.intervals = :1 diag1.dt_snapshots_lab = 1.8679589331096515e-13 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho diag1.format = plotfile diff --git a/Examples/Modules/boosted_diags/inputs_3d_slice b/Examples/Modules/boosted_diags/inputs_3d_slice index 445bc8a807f..c7938991935 100644 --- a/Examples/Modules/boosted_diags/inputs_3d_slice +++ b/Examples/Modules/boosted_diags/inputs_3d_slice @@ -126,7 +126,7 @@ diag1.write_species = 1 diag2.diag_type = BackTransformed diag2.do_back_transformed_fields = 1 -diag2.num_snapshots_lab = 4 +diag2.intervals = 0:4:2, 1:3:2 diag2.dz_snapshots_lab = 0.001 diag2.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho diag2.format = openpmd diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index beb6c17ec21..fcfc8f13b17 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -10,6 +10,7 @@ #include "Diagnostics.H" #include "Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor.H" #include "Utils/WarpXConst.H" +#include "Utils/IntervalsParser.H" #include #include @@ -37,6 +38,8 @@ private: bool m_plot_raw_fields_guards = false; /** Read relevant parameters for BTD */ void ReadParameters (); + /** Determines timesteps at which BTD diagnostics are written to file */ + BTDIntervalsParser m_intervals; /** \brief Flush m_mf_output and particles to file. * Currently, a temporary customized output format for the buffer * data is implemented and called in this function. 
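For reference, the backward-compatible path added to BTDiagnostics::ReadParameters (in the next hunk) turns a legacy ``num_snapshots_lab`` value into a single slice string, so the two specifications below select the same snapshots. The snippet only illustrates that conversion and is not WarpX code.

    num_snapshots_lab = 4
    intervals = ":" + str(num_snapshots_lab - 1)  # -> ":3", i.e. snapshots 0, 1, 2 and 3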
diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 1bd200fb4ee..beb367309b6 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -161,8 +161,18 @@ BTDiagnostics::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_do_back_transformed_fields, " fields must be turned on for the new back-transformed diagnostics"); if (m_do_back_transformed_fields == false) m_varnames.clear(); - getWithParser(pp_diag_name, "num_snapshots_lab", m_num_snapshots_lab); - m_num_buffers = m_num_snapshots_lab; + + std::vector intervals_string_vec = {"0"}; + bool const num_snapshots_specified = queryWithParser(pp_diag_name, "num_snapshots_lab", m_num_snapshots_lab); + bool const intervals_specified = pp_diag_name.queryarr("intervals", intervals_string_vec); + if (num_snapshots_specified) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!intervals_specified, + "For back-transformed diagnostics, user should specify either num_snapshots_lab or intervals, not both"); + intervals_string_vec = {":" + std::to_string(m_num_snapshots_lab-1)}; + } + m_intervals = BTDIntervalsParser(intervals_string_vec); + m_num_buffers = m_intervals.NumSnapshots(); // Read either dz_snapshots_lab or dt_snapshots_lab bool snapshot_interval_is_specified = false; @@ -241,7 +251,7 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev) auto & warpx = WarpX::GetInstance(); // Lab-frame time for the i^th snapshot amrex::Real zmax_0 = warpx.Geom(lev).ProbHi(m_moving_window_dir); - m_t_lab.at(i_buffer) = i_buffer * m_dt_snapshots_lab + m_t_lab.at(i_buffer) = m_intervals.GetBTDIteration(i_buffer) * m_dt_snapshots_lab + m_gamma_boost*m_beta_boost*zmax_0/PhysConst::c; // Define buffer domain in boosted frame at level, lev, with user-defined lo and hi diff --git a/Source/Utils/IntervalsParser.H b/Source/Utils/IntervalsParser.H index c91fe271abc..06258b10999 100644 --- a/Source/Utils/IntervalsParser.H +++ b/Source/Utils/IntervalsParser.H @@ -21,7 +21,7 @@ public: * (0 for the starting point, std::numeric_limits::max() for the stopping point and 1 for * the period). For example SliceParser(":1000:") is equivalent to SliceParser("0:1000:1"). */ - SliceParser (const std::string& instr); + SliceParser (const std::string& instr, bool isBTD=false); /** * \brief A method that returns true if the input integer is contained in the slice. (e.g. if @@ -66,7 +66,14 @@ public: */ int getStop () const; + /** + * @brief A method that returns the number of integers contained by the slice. + * + */ + int numContained() const; + private: + bool m_isBTD = false; int m_start = 0; int m_stop = std::numeric_limits::max(); int m_period = 1; @@ -148,4 +155,53 @@ private: bool m_activated = false; }; +/** + * \brief This class is a parser for multiple slices of the form x,y,z,... where x, y and z are + * slices of the form i:j:k, as defined in the SliceParser class. This class contains a vector of + * SliceParsers. The supported function set differs from the IntervalsParser + */ +class BTDIntervalsParser +{ +public: + /** + * \brief Default constructor of the BTDIntervalsParser class. + */ + BTDIntervalsParser () = default; + + /** + * \brief Constructor of the BTDIntervalsParser class. + * + * @param[in] instr_vec an input vector string, which when concatenated is of the form + * "x,y,z,...". This will call the constructor of SliceParser using x, y and z as input + * arguments. 
+ */ + BTDIntervalsParser (const std::vector& instr_vec); + + /** + * @brief Return the total number of unique labframe snapshots + */ + int NumSnapshots (); + + /** + * @brief Return the iteration number stored at index i_buffer + * + * @param i_buffer buffer or iteration index, between 0 and NumSnapshots + */ + int GetBTDIteration(int i_buffer); + + /** + * \brief A method that returns true if any of the slices contained by the IntervalsParser + * has a strictly positive period. + */ + bool isActivated () const; + +private: + std::vector m_btd_iterations; + std::vector m_slices; + std::vector m_slice_starting_i_buffer; + int m_n_snapshots; + static constexpr char m_separator = ','; + bool m_activated = false; +}; + #endif // WARPX_INTERVALSPARSER_H_ diff --git a/Source/Utils/IntervalsParser.cpp b/Source/Utils/IntervalsParser.cpp index e8425421f29..4da0142a03e 100644 --- a/Source/Utils/IntervalsParser.cpp +++ b/Source/Utils/IntervalsParser.cpp @@ -7,15 +7,18 @@ #include #include -SliceParser::SliceParser (const std::string& instr) +SliceParser::SliceParser (const std::string& instr, const bool isBTD) { + m_isBTD = isBTD; // split string and trim whitespaces auto insplit = WarpXUtilStr::split>(instr, m_separator, true); if(insplit.size() == 1){ // no colon in input string. The input is the period. + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_isBTD, "must specify interval stop for BTD"); m_period = parseStringtoInt(insplit[0], "interval period");} else if(insplit.size() == 2) // 1 colon in input string. The input is start:stop { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_isBTD || !insplit[1].empty(), "must specify interval stop for BTD"); if (!insplit[0].empty()){ m_start = parseStringtoInt(insplit[0], "interval start");} if (!insplit[1].empty()){ @@ -23,6 +26,7 @@ SliceParser::SliceParser (const std::string& instr) } else // 2 colons in input string. 
The input is start:stop:period { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_isBTD || !insplit[1].empty(), "must specify interval stop for BTD"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( insplit.size() == 3, instr + "' is not a valid syntax for a slice."); @@ -64,6 +68,8 @@ int SliceParser::getStart () const {return m_start;} int SliceParser::getStop () const {return m_stop;} +int SliceParser::numContained () const {return (m_stop - m_start) / m_period + 1;} + IntervalsParser::IntervalsParser (const std::vector& instr_vec) { std::string inconcatenated; @@ -116,3 +122,94 @@ int IntervalsParser::localPeriod (const int n) const } bool IntervalsParser::isActivated () const {return m_activated;} + +BTDIntervalsParser::BTDIntervalsParser (const std::vector& instr_vec) +{ + std::string inconcatenated; + for (const auto& instr_element : instr_vec) inconcatenated +=instr_element; + + auto const insplit = WarpXUtilStr::split>(inconcatenated, std::string(1,m_separator)); + + // parse the Intervals string into Slices and store each slice in m_slices, + // in order of increasing Slice start value + for(const auto& inslc : insplit) + { + bool isBTD = true; + SliceParser temp_slice(inslc, isBTD); + if (m_slices.size() > 0) + { + // find the last index i_slice where + // the start value of m_slices[i_slice] is greater than temp_slices' start_value + int i_slice = 0; + while (temp_slice.getStart() > m_slices[i_slice].getStart() && i_slice < static_cast(m_slices.size())) + { + i_slice++; + } + m_slices.insert(m_slices.begin() + i_slice, temp_slice); + } + else + { + m_slices.push_back(temp_slice); + } + } + // from the vector of slices, m_slices, + // create a vector of integers, m_btd_iterations, containing + // the iteration of every back-transformed snapshot that will be saved + // the iteration values in m_btd_iterations are + // 1. saved in increasing order + // 2. unique, i.e. 
no duplicate iterations are saved + for (const auto& temp_slice : m_slices) + { + const int start = temp_slice.getStart(); + const int period = temp_slice.getPeriod(); + int btd_iter_ind; + // for Slice temp_slice in m_slices, + // determine the index in m_btd_iterations where temp_slice's starting value goes + // + // Implementation note: + // assuming the user mostly lists slices in ascending order, + // start at the end of m_btd_iterations and search backward + if (m_btd_iterations.size() == 0) + { + btd_iter_ind = 0; + } + else + { + btd_iter_ind = m_btd_iterations.size() - 1; + while (start < m_btd_iterations[btd_iter_ind] and btd_iter_ind>0) + { + btd_iter_ind--; + } + } + // insert each iteration contained in temp_slice into m_btd_iterations + // adding them in increasing sorted order and not adding any iterations + // already contained in m_btd_iterations + for (int ii = start; ii <= temp_slice.getStop(); ii += period) + { + if (m_btd_iterations.size() > 0) + { + // find where iteration ii should go in m_btd_iterations + while (ii > m_btd_iterations[btd_iter_ind] && btd_iter_ind < static_cast(m_btd_iterations.size())) + { + btd_iter_ind++; + } + if (ii != m_btd_iterations[btd_iter_ind]) + { + m_btd_iterations.insert(m_btd_iterations.begin() + btd_iter_ind, ii); + } + } else + { + m_btd_iterations.push_back(ii); + } + } + if ((temp_slice.getPeriod() > 0) && + (temp_slice.getStop() >= start)) m_activated = true; + } +} + +int BTDIntervalsParser::NumSnapshots () { return m_btd_iterations.size(); } + +int BTDIntervalsParser::GetBTDIteration(int i_buffer) +{ + return m_btd_iterations[i_buffer]; +} From 3fe406c9701f61e07b23f7123cf0a7bad492c6dc Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Mon, 26 Sep 2022 17:10:42 +0200 Subject: [PATCH 0083/1346] enforce 3 components for some laser parameters (#3423) --- Source/Particles/LaserParticleContainer.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index a8df886be83..94e67065d9f 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -99,6 +99,13 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, getArrWithParser(pp_laser_name, "direction", m_nvec); getArrWithParser(pp_laser_name, "polarization", m_p_X); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_position.size() == 3, + m_laser_name + ".position must have three components."); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_nvec.size() == 3, + m_laser_name + ".direction must have three components."); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_p_X.size() == 3, + m_laser_name + ".polarization must have three components."); + getWithParser(pp_laser_name, "wavelength", m_wavelength); AMREX_ALWAYS_ASSERT_WITH_MESSAGE( m_wavelength > 0, "The laser wavelength must be >0."); From 1b9ba80bea0009bd7f81dd2d31bbaf10aae9167a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 28 Sep 2022 11:30:58 -0700 Subject: [PATCH 0084/1346] AMReX/PICSAR: Weekly Update (#3412) * AMReX: Weekly Update * Roundoff Domain: Reset Checksums: - [x] 2D: `background_mcc_dp_psp` - [x] 3D: `pml_psatd_dive_divb_cleaning` - [x] SP: `reduced_diags_single_precision` * AMReX: Weekly Update Fix #3429 --- .github/workflows/cuda.yml | 2 +- .../background_mcc_dp_psp.json | 26 ++++----- .../pml_psatd_dive_divb_cleaning.json | 14 ++--- .../reduced_diags_single_precision.json | 54 +++++++++---------- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- 
cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 8 files changed, 52 insertions(+), 52 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 9463b30a82f..3902034c672 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 9aa23c202a13eee489a06030b9aeda6b89856944 && cd - + cd amrex && git checkout --detach b84d7c069cef7470f195b250926ca0e84ec46fb2 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/Checksum/benchmarks_json/background_mcc_dp_psp.json b/Regression/Checksum/benchmarks_json/background_mcc_dp_psp.json index 6029c19495a..3c54db63b95 100644 --- a/Regression/Checksum/benchmarks_json/background_mcc_dp_psp.json +++ b/Regression/Checksum/benchmarks_json/background_mcc_dp_psp.json @@ -1,22 +1,22 @@ { "electrons": { - "particle_momentum_x": 1.011437782317433e-18, - "particle_momentum_y": 2.818489864433302e-19, - "particle_momentum_z": 2.810005819942864e-19, - "particle_position_x": 17134.12344905036, - "particle_position_y": 935.6698553080840, + "particle_momentum_x": 1.0114377818220718e-18, + "particle_momentum_y": 2.8184898646509405e-19, + "particle_momentum_z": 2.810005821001351e-19, + "particle_position_x": 17134.123449403327, + "particle_position_y": 935.6698625905589, "particle_weight": 61112621534.71875 }, "he_ions": { - "particle_momentum_x": 2.882592880031919e-18, - "particle_momentum_y": 2.1960207748059422e-18, - "particle_momentum_z": 2.1982341415215343e-18, - "particle_position_x": 17605.83295166959, - "particle_position_y": 1099.9805173814, + "particle_momentum_x": 2.8825928799866215e-18, + "particle_momentum_y": 2.1960207749440838e-18, + "particle_momentum_z": 2.198234141518127e-18, + "particle_position_x": 17605.832952557976, + "particle_position_y": 1099.9805177808296, "particle_weight": 71973184795.40625 }, "lev=0": { - "rho_electrons": 0.03566096818084508, - "rho_he_ions": 0.04192385953761138 + "rho_electrons": 0.03566096817628402, + "rho_he_ions": 0.04192385953761396 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json b/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json index cfae22ad7d6..12f56218081 100644 --- a/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json +++ b/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json @@ -1,11 +1,11 @@ { "lev=0": { - "Bx": 1.2401714071102193e-06, - "By": 1.21931343936974e-06, - "Bz": 1.2293954248570514e-06, - "Ex": 614.2618449422512, - "Ey": 622.4788672892859, - "Ez": 613.5910967525642, - "rho": 4.90369639123331e-05 + "Bx": 1.482006352778953e-07, + "By": 1.48205157883426e-07, + "Bz": 1.4954704195856524e-07, + "Ex": 11.789793626679334, + "Ey": 11.78688532983594, + "Ez": 11.770112090435557, + "rho": 4.903696256562049e-05 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/reduced_diags_single_precision.json b/Regression/Checksum/benchmarks_json/reduced_diags_single_precision.json index 670fb8b44e2..b2162221aba 100644 --- a/Regression/Checksum/benchmarks_json/reduced_diags_single_precision.json +++ b/Regression/Checksum/benchmarks_json/reduced_diags_single_precision.json @@ -1,43 +1,43 @@ { "electrons": { - "particle_momentum_x": 2.4417212503278005e-19, - 
"particle_momentum_y": 2.4564848534619294e-19, - "particle_momentum_z": 2.434157491249898e-19, - "particle_position_x": 16379.247699920481, - "particle_position_y": 16382.836164549284, - "particle_position_z": 16382.833915042822, + "particle_momentum_x": 2.441720869016668e-19, + "particle_momentum_y": 2.456481313629533e-19, + "particle_momentum_z": 2.434157770814882e-19, + "particle_position_x": 16379.247624470314, + "particle_position_y": 16382.836430045034, + "particle_position_z": 16382.83386090216, "particle_weight": 800000003014656.0 }, "lev=0": { - "Bx": 0.08424062892077444, - "By": 0.08475186723996586, - "Bz": 0.08796440505066272, - "Ex": 106872540.64410019, - "Ey": 107805501.93848419, - "Ez": 107783078.61432838, - "jx": 728220.4602749646, - "jy": 736403.7514596283, - "jz": 734813.9692403525, - "rho": 0.027750860941553768, - "rho_electrons": 0.525001227832945, - "rho_protons": 0.5250012272590538 + "Bx": 0.08424077006868202, + "By": 0.08475162318143958, + "Bz": 0.0879638695257583, + "Ex": 106872406.23206902, + "Ey": 107805796.05268478, + "Ez": 107783300.6896553, + "jx": 728221.4094335437, + "jy": 736401.7841227949, + "jz": 734813.4235675633, + "rho": 0.02775090560003779, + "rho_electrons": 0.5250012280648662, + "rho_protons": 0.5250012270062143 }, "photons": { "particle_momentum_x": 1.428291590249666e-18, "particle_momentum_y": 1.4222174024686332e-18, "particle_momentum_z": 1.4246585206989104e-18, - "particle_position_x": 16376.414799568243, - "particle_position_y": 16409.71378429825, - "particle_position_z": 16378.407592666219, + "particle_position_x": 16376.414787647314, + "particle_position_y": 16409.71380140478, + "particle_position_z": 16378.407592070173, "particle_weight": 800000003014656.0 }, "protons": { - "particle_momentum_x": 1.4104802628011656e-19, - "particle_momentum_y": 1.4120349291716893e-19, - "particle_momentum_z": 1.3903171427087353e-19, - "particle_position_x": 16383.951009619981, - "particle_position_y": 16383.99123002775, - "particle_position_z": 16384.033095447347, + "particle_momentum_x": 1.4104804792534928e-19, + "particle_momentum_y": 1.4120362404591128e-19, + "particle_momentum_z": 1.3903172753601875e-19, + "particle_position_x": 16383.951009282842, + "particle_position_y": 16383.991230854765, + "particle_position_z": 16384.03309576772, "particle_weight": 800000003014656.0 } } \ No newline at end of file diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index d2fff710688..e16e49a7883 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 9aa23c202a13eee489a06030b9aeda6b89856944 +branch = b84d7c069cef7470f195b250926ca0e84ec46fb2 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 9d1ae29a3cb..8b25935a907 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 9aa23c202a13eee489a06030b9aeda6b89856944 +branch = b84d7c069cef7470f195b250926ca0e84ec46fb2 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 2db3d33e87c..8598c48b4b0 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ 
set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "9aa23c202a13eee489a06030b9aeda6b89856944" +set(WarpX_amrex_branch "b84d7c069cef7470f195b250926ca0e84ec46fb2" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 5dc4713a889..ae5411469a5 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 9aa23c202a13eee489a06030b9aeda6b89856944 && cd - +cd amrex && git checkout --detach b84d7c069cef7470f195b250926ca0e84ec46fb2 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 7953200f895bdfbb3c99d90991e7efadfbd7134f Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Thu, 29 Sep 2022 00:44:07 -0700 Subject: [PATCH 0085/1346] Add 1d support to `_libwarpx.py` functions `get_particle_X` (#3421) * add 1d support to _libwarpx.py functions `get_particle_X` * add coverage of 1d `_libwarpx.get_particle_z` to test suite --- .../capacitive_discharge/PICMI_inputs_1d.py | 25 ++++++++++--------- Python/pywarpx/_libwarpx.py | 16 ++++++++---- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py index 8fa450e016f..035be70cc10 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py @@ -77,18 +77,6 @@ def decompose_matrix(self): system.""" self.nsolve = self.nz + 1 - # Set up the tridiagonal computation matrix in order to solve A*phi = - # rho for phi. - self.A_ldiag = np.ones(self.nsolve-1) / self.dz**2 - self.A_mdiag = -2.*np.ones(self.nsolve) / self.dz**2 - self.A_udiag = np.ones(self.nsolve-1) / self.dz**2 - - self.A_mdiag[0] = 1. - self.A_udiag[0] = 0.0 - - self.A_mdiag[-1] = 1. 
- self.A_ldiag[-1] = 0.0 - # Set up the computation matrix in order to solve A*phi = rho A = np.zeros((self.nsolve, self.nsolve)) idx = np.arange(self.nsolve) @@ -358,6 +346,19 @@ def run_sim(self): if self.sim.extension.getMyProc() == 0: np.save(f'ion_density_case_{self.n+1}.npy', self.ion_density_array) + # query the particle z-coordinates if this is run during CI testing + # to cover that functionality + if self.test: + nparts = self.sim.extension.get_particle_count( + 'he_ions', local=True + ) + z_coords = np.concatenate( + self.sim.extension.get_particle_z('he_ions') + ) + assert len(z_coords) == nparts + assert np.all(z_coords >= 0.0) and np.all(z_coords <= self.gap) + + ########################## # parse input parameters ########################## diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 691288d0787..d8f7f077f7c 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -831,6 +831,8 @@ def get_particle_x(self, species_name, level=0): return [struct['x'] for struct in structs] elif self.geometry_dim == 'rz': return [struct['x']*np.cos(theta) for struct, theta in zip(structs, self.get_particle_theta(species_name))] + elif self.geometry_dim == '1d': + raise Exception('get_particle_x: There is no x coordinate with 1D Cartesian') def get_particle_y(self, species_name, level=0): ''' @@ -840,10 +842,12 @@ def get_particle_y(self, species_name, level=0): ''' structs = self.get_particle_structs(species_name, level) - if self.geometry_dim == '3d' or self.geometry_dim == '2d': + if self.geometry_dim == '3d': return [struct['y'] for struct in structs] elif self.geometry_dim == 'rz': return [struct['x']*np.sin(theta) for struct, theta in zip(structs, self.get_particle_theta(species_name))] + elif self.geometry_dim == '1d' or self.geometry_dim == '2d': + raise Exception('get_particle_y: There is no y coordinate with 1D or 2D Cartesian') def get_particle_r(self, species_name, level=0): ''' @@ -857,8 +861,8 @@ def get_particle_r(self, species_name, level=0): return [struct['x'] for struct in structs] elif self.geometry_dim == '3d': return [np.sqrt(struct['x']**2 + struct['y']**2) for struct in structs] - elif self.geometry_dim == '2d': - raise Exception('get_particle_r: There is no r coordinate with 2D Cartesian') + elif self.geometry_dim == '2d' or self.geometry_dim == '1d': + raise Exception('get_particle_r: There is no r coordinate with 1D or 2D Cartesian') def get_particle_z(self, species_name, level=0): ''' @@ -872,6 +876,8 @@ def get_particle_z(self, species_name, level=0): return [struct['z'] for struct in structs] elif self.geometry_dim == 'rz' or self.geometry_dim == '2d': return [struct['y'] for struct in structs] + elif self.geometry_dim == '1d': + return [struct['x'] for struct in structs] def get_particle_id(self, species_name, level=0): ''' @@ -946,8 +952,8 @@ def get_particle_theta(self, species_name, level=0): elif self.geometry_dim == '3d': structs = self.get_particle_structs(species_name, level) return [np.arctan2(struct['y'], struct['x']) for struct in structs] - elif self.geometry_dim == '2d': - raise Exception('get_particle_r: There is no theta coordinate with 2D Cartesian') + elif self.geometry_dim == '2d' or self.geometry_dim == '1d': + raise Exception('get_particle_theta: There is no theta coordinate with 1D or 2D Cartesian') def get_particle_comp_index(self, species_name, pid_name): ''' From 6b467022d556225f537f2557512cedfc09282c07 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Thu, 29 Sep 2022 00:44:48 -0700 
Subject: [PATCH 0086/1346] Add quiet option to Summit post-proc. docs (#3434) The quiet option drastically reduces the in-cell output of package installation with mamba but does not completely silence. --- Docs/source/install/hpc/summit.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst index 195744fb906..0b2cff56b6e 100644 --- a/Docs/source/install/hpc/summit.rst +++ b/Docs/source/install/hpc/summit.rst @@ -335,6 +335,10 @@ For post-processing, most users use Python via OLCFs's `Jupyter service Date: Fri, 30 Sep 2022 09:44:26 -0700 Subject: [PATCH 0087/1346] Implement tridiag solver for 1D (#3431) * Added tridiag solve for 1D * Added tridiag solver to WarpX.H * Updated PICMI_inputs_1d.py to use float64 This was done so that it will compare to the tridiag to machine precision * Updated 1D capacitive_discharge test, adding pythonsolver flag * Update capacitive_discharge analysis_1d.py file with updated reference data * Add periodic boundary conditions, plus other fixes * Added CI test Python_background_mcc_1d_tridiag * Fix for parallel * Fixed CI test file prefix for PICMI_inputs_1d.py * Fixed comments --- .../capacitive_discharge/PICMI_inputs_1d.py | 35 ++- .../capacitive_discharge/analysis_1d.py | 68 +++--- Regression/WarpX-tests.ini | 19 ++ Source/FieldSolver/ElectrostaticSolver.cpp | 206 +++++++++++++++++- Source/WarpX.H | 2 + 5 files changed, 280 insertions(+), 50 deletions(-) diff --git a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py index 035be70cc10..21a631505dd 100644 --- a/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py @@ -17,8 +17,9 @@ class PoissonSolver1D(picmi.ElectrostaticSolver): - """This can be removed and the MLMG solver used once - https://github.com/ECP-WarpX/WarpX/issues/3123 is addressed.""" + """This solver is maintained as an example of the use of Python callbacks. + However, it is not necessarily needed since the 1D code has the direct tridiagonal + solver implemented.""" def __init__(self, grid, **kwargs): """Direct solver for the Poisson equation using superLU. 
This solver is @@ -89,7 +90,7 @@ def decompose_matrix(self): A[0, 0] = 1.0 A[-1, -1] = 1.0 - A = csc_matrix(A, dtype=np.float32) + A = csc_matrix(A, dtype=np.float64) self.lu = sla.splu(A) def _run_solve(self): @@ -112,7 +113,7 @@ def solve(self): # Construct b vector rho = -self.rho_data / constants.ep0 - b = np.zeros(rho.shape[0], dtype=np.float32) + b = np.zeros(rho.shape[0], dtype=np.float64) b[:] = rho * self.dz**2 b[0] = left_voltage @@ -157,10 +158,11 @@ class CapacitiveDischargeExample(object): # Time (in seconds) between diagnostic evaluations diag_interval = 32 / freq - def __init__(self, n=0, test=False): + def __init__(self, n=0, test=False, pythonsolver=False): """Get input parameters for the specific case (n) desired.""" self.n = n self.test = test + self.pythonsolver = pythonsolver # Case specific input parameters self.voltage = f"{self.voltage[n]}*sin(2*pi*{self.freq:.5e}*t)" @@ -209,11 +211,11 @@ def setup_run(self): # Field solver # ####################################################################### - # self.solver = picmi.ElectrostaticSolver( - # grid=self.grid, method='Multigrid', required_precision=1e-6, - # warpx_self_fields_verbosity=2 - # ) - self.solver = PoissonSolver1D(grid=self.grid) + if self.pythonsolver: + self.solver = PoissonSolver1D(grid=self.grid) + else: + # This will use the tridiagonal solver + self.solver = picmi.ElectrostaticSolver(grid=self.grid) ####################################################################### # Particle types setup # @@ -317,13 +319,18 @@ def setup_run(self): # Add diagnostics for the CI test to be happy # ####################################################################### + if self.pythonsolver: + file_prefix = 'Python_background_mcc_1d_plt' + else: + file_prefix = 'Python_background_mcc_1d_tridiag_plt' + field_diag = picmi.FieldDiagnostic( name='diag1', grid=self.grid, period=0, data_list=['rho_electrons', 'rho_he_ions'], write_dir='.', - warpx_file_prefix='Python_background_mcc_1d_plt' + warpx_file_prefix=file_prefix ) self.sim.add_diagnostic(field_diag) @@ -372,11 +379,15 @@ def run_sim(self): '-n', help='Test number to run (1 to 4)', required=False, type=int, default=1 ) +parser.add_argument( + '--pythonsolver', help='toggle whether to use the Python level solver', + action='store_true' +) args, left = parser.parse_known_args() sys.argv = sys.argv[:1]+left if args.n < 1 or args.n > 4: raise AttributeError('Test number must be an integer from 1 to 4.') -run = CapacitiveDischargeExample(n=args.n-1, test=args.test) +run = CapacitiveDischargeExample(n=args.n-1, test=args.test, pythonsolver=args.pythonsolver) run.run_sim() diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py index f9e8be0844e..73b1ce6d600 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py @@ -1,43 +1,43 @@ #!/usr/bin/env python3 -# Copyright 2021 Modern Electron +# Copyright 2022 Modern Electron, David Grote import numpy as np ref_density = np.array([ - 1.29556695e+14, 2.24358819e+14, 2.55381744e+14, 2.55655005e+14, - 2.55796267e+14, 2.55819109e+14, 2.55819687e+14, 2.55751184e+14, - 2.55920806e+14, 2.56072344e+14, 2.55937266e+14, 2.55849080e+14, - 2.55918981e+14, 2.55980835e+14, 2.56054153e+14, 2.56074693e+14, - 2.56036953e+14, 2.56181152e+14, 2.56322617e+14, 2.56253541e+14, - 2.56196222e+14, 2.56353087e+14, 2.56256025e+14, 2.55928999e+14, - 2.56110985e+14, 
2.56658916e+14, 2.56832588e+14, 2.56551873e+14, - 2.56491185e+14, 2.56469926e+14, 2.56418623e+14, 2.56541073e+14, - 2.56513772e+14, 2.56424505e+14, 2.56302757e+14, 2.56242393e+14, - 2.56270399e+14, 2.56178952e+14, 2.56071408e+14, 2.56141949e+14, - 2.56419807e+14, 2.56606936e+14, 2.56437775e+14, 2.56252446e+14, - 2.56309518e+14, 2.56383487e+14, 2.56265139e+14, 2.56167672e+14, - 2.56466917e+14, 2.56924869e+14, 2.56901785e+14, 2.56631493e+14, - 2.56643456e+14, 2.56523464e+14, 2.56378270e+14, 2.56571296e+14, - 2.56794304e+14, 2.56788544e+14, 2.56549715e+14, 2.56303160e+14, - 2.56210813e+14, 2.56418356e+14, 2.57314522e+14, 2.58471390e+14, - 2.58169771e+14, 2.56946438e+14, 2.56726546e+14, 2.56853122e+14, - 2.56613699e+14, 2.56509534e+14, 2.56692972e+14, 2.56705133e+14, - 2.56372142e+14, 2.56167556e+14, 2.56296946e+14, 2.56498752e+14, - 2.56523102e+14, 2.56404334e+14, 2.56227096e+14, 2.56398997e+14, - 2.56614907e+14, 2.56436657e+14, 2.56388606e+14, 2.56553679e+14, - 2.56637914e+14, 2.56407785e+14, 2.56104131e+14, 2.56082340e+14, - 2.56095275e+14, 2.56278448e+14, 2.56808134e+14, 2.57127897e+14, - 2.56858174e+14, 2.56326990e+14, 2.56296032e+14, 2.56563349e+14, - 2.56482273e+14, 2.56667481e+14, 2.57072448e+14, 2.56767530e+14, - 2.56433245e+14, 2.56586570e+14, 2.56636412e+14, 2.56765628e+14, - 2.56868130e+14, 2.56783441e+14, 2.56714518e+14, 2.56651014e+14, - 2.56528394e+14, 2.56227520e+14, 2.56163301e+14, 2.56408207e+14, - 2.56433120e+14, 2.56374745e+14, 2.56542028e+14, 2.56748796e+14, - 2.56715201e+14, 2.56298164e+14, 2.56042658e+14, 2.56292455e+14, - 2.56352282e+14, 2.56370562e+14, 2.56487458e+14, 2.56483667e+14, - 2.56741201e+14, 2.56665100e+14, 2.56523784e+14, 2.24741564e+14, - 1.28486944e+14 + 1.29556694e+14, 2.24358818e+14, 2.55381745e+14, 2.55655005e+14, + 2.55796268e+14, 2.55819108e+14, 2.55819686e+14, 2.55751184e+14, + 2.55920806e+14, 2.56072344e+14, 2.55937266e+14, 2.55849080e+14, + 2.55918981e+14, 2.55980835e+14, 2.56054153e+14, 2.56074694e+14, + 2.56036953e+14, 2.56181153e+14, 2.56322618e+14, 2.56253541e+14, + 2.56196224e+14, 2.56353090e+14, 2.56256022e+14, 2.55928997e+14, + 2.56110988e+14, 2.56658917e+14, 2.56832584e+14, 2.56551871e+14, + 2.56491186e+14, 2.56469928e+14, 2.56418625e+14, 2.56541071e+14, + 2.56513773e+14, 2.56424507e+14, 2.56302757e+14, 2.56242392e+14, + 2.56270399e+14, 2.56178952e+14, 2.56071407e+14, 2.56141949e+14, + 2.56419808e+14, 2.56606936e+14, 2.56437774e+14, 2.56252443e+14, + 2.56309513e+14, 2.56383484e+14, 2.56265140e+14, 2.56167674e+14, + 2.56466922e+14, 2.56924871e+14, 2.56901781e+14, 2.56631494e+14, + 2.56643458e+14, 2.56523464e+14, 2.56378273e+14, 2.56571301e+14, + 2.56794308e+14, 2.56788543e+14, 2.56549712e+14, 2.56303156e+14, + 2.56210811e+14, 2.56418363e+14, 2.57314552e+14, 2.58471405e+14, + 2.58169740e+14, 2.56946418e+14, 2.56726550e+14, 2.56853119e+14, + 2.56613698e+14, 2.56509538e+14, 2.56692976e+14, 2.56705132e+14, + 2.56372135e+14, 2.56167561e+14, 2.56296953e+14, 2.56498746e+14, + 2.56523099e+14, 2.56404333e+14, 2.56227098e+14, 2.56399004e+14, + 2.56614905e+14, 2.56436650e+14, 2.56388608e+14, 2.56553683e+14, + 2.56637912e+14, 2.56407782e+14, 2.56104130e+14, 2.56082338e+14, + 2.56095272e+14, 2.56278448e+14, 2.56808134e+14, 2.57127896e+14, + 2.56858173e+14, 2.56326991e+14, 2.56296032e+14, 2.56563348e+14, + 2.56482274e+14, 2.56667483e+14, 2.57072448e+14, 2.56767529e+14, + 2.56433245e+14, 2.56586564e+14, 2.56636403e+14, 2.56765624e+14, + 2.56868122e+14, 2.56783435e+14, 2.56714527e+14, 2.56651030e+14, + 2.56528399e+14, 2.56227514e+14, 2.56163300e+14, 
2.56408217e+14, + 2.56433124e+14, 2.56374737e+14, 2.56542023e+14, 2.56748800e+14, + 2.56715205e+14, 2.56298166e+14, 2.56042658e+14, 2.56292458e+14, + 2.56352283e+14, 2.56370559e+14, 2.56487462e+14, 2.56483655e+14, + 2.56741185e+14, 2.56665111e+14, 2.56523794e+14, 2.24741566e+14, + 1.28486948e+14 ]) density_data = np.load( 'ion_density_case_1.npy' ) diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 8b25935a907..43c9c8f88c7 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -3285,6 +3285,25 @@ analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_2d buildDir = . inputFile = Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py runtime_params = +customRunCmd = python3 PICMI_inputs_1d.py --test --pythonsolver +dim = 1 +addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE +cmakeSetupOpts = -DWarpX_DIMS=1 -DWarpX_APP=OFF -DWarpX_OPENPMD=ON -DWarpX_QED=OFF +target = pip_install +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 0 +analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_1d.py + +[Python_background_mcc_1d_tridiag] +buildDir = . +inputFile = Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py +runtime_params = customRunCmd = python3 PICMI_inputs_1d.py --test dim = 1 addToCompileString = USE_PYTHON_MAIN=TRUE USE_OPENPMD=TRUE QED=FALSE diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index e50098e7d22..3a16b8b70f8 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -242,10 +242,24 @@ WarpX::AddSpaceChargeFieldLabFrame () setPhiBC(phi_fp); // Compute the potential phi, by solving the Poisson equation - if ( IsPythonCallBackInstalled("poissonsolver") ) ExecutePythonCallback("poissonsolver"); - else computePhi( rho_fp, phi_fp, beta, self_fields_required_precision, - self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity ); + if (IsPythonCallBackInstalled("poissonsolver")) { + + // Use the Python level solver (user specified) + ExecutePythonCallback("poissonsolver"); + + } else { + +#if defined(WARPX_DIM_1D_Z) + // Use the tridiag solver with 1D + computePhiTriDiagonal(rho_fp, phi_fp); +#else + // Use the AMREX MLMG solver otherwise + computePhi(rho_fp, phi_fp, beta, self_fields_required_precision, + self_fields_absolute_tolerance, self_fields_max_iters, + self_fields_verbosity); +#endif + + } // Compute the electric field. Note that if an EB is used the electric // field will be calculated in the computePhi call. @@ -671,6 +685,190 @@ WarpX::computeB (amrex::Vector, 3> > } } +/* \brief Compute the potential by solving Poisson's equation with + a 1D tridiagonal solve. 
+ + \param[in] rho The charge density a given species + \param[out] phi The potential to be computed by this function +*/ +void +WarpX::computePhiTriDiagonal (const amrex::Vector >& rho, + amrex::Vector >& phi) const +{ + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(max_level == 0, + "The tridiagonal solver cannot be used with mesh refinement"); + + const int lev = 0; + + const amrex::Real* dx = Geom(lev).CellSize(); + const amrex::Real xmin = Geom(lev).ProbLo(0); + const amrex::Real xmax = Geom(lev).ProbHi(0); + const int nx_full_domain = static_cast( (xmax - xmin)/dx[0] + 0.5_rt ); + + int nx_solve_min = 1; + int nx_solve_max = nx_full_domain - 1; + + auto field_boundary_lo0 = WarpX::field_boundary_lo[0]; + auto field_boundary_hi0 = WarpX::field_boundary_hi[0]; + if (field_boundary_lo0 == FieldBoundaryType::None || field_boundary_lo0 == FieldBoundaryType::Periodic) { + // Neumann or periodic boundary condition + // Solve for the point on the lower boundary + nx_solve_min = 0; + } + if (field_boundary_hi0 == FieldBoundaryType::None || field_boundary_hi0 == FieldBoundaryType::Periodic) { + // Neumann or periodic boundary condition + // Solve for the point on the upper boundary + nx_solve_max = nx_full_domain; + } + + // Create a 1-D MultiFab that covers all of x, including guard cells on each end. + // The tridiag solve will be done in this MultiFab and then copied out afterwards. + const amrex::IntVect lo_full_domain(AMREX_D_DECL(-1,0,0)); + const amrex::IntVect hi_full_domain(AMREX_D_DECL(nx_full_domain+1,0,0)); + const amrex::Box box_full_domain(lo_full_domain, hi_full_domain); + const BoxArray ba_full_domain(box_full_domain); + amrex::DistributionMapping dm_full_domain; + amrex::Vector pmap = {0}; // The data will only be on processor 0 + dm_full_domain.define(pmap); + const int ncomps1d = 1; + const amrex::IntVect nguard1d(AMREX_D_DECL(1,0,0)); + const BoxArray ba_full_domain_node = amrex::convert(ba_full_domain, amrex::IntVect::TheNodeVector()); + + // Put the data in the pinned arena since the tridiag solver will be done on the CPU, but have + // the data readily accessible from the GPU. + auto phi1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, ncomps1d, nguard1d, MFInfo().SetArena(The_Pinned_Arena())); + auto zwork1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, ncomps1d, nguard1d, MFInfo().SetArena(The_Pinned_Arena())); + auto rho1d_mf = MultiFab(ba_full_domain_node, dm_full_domain, ncomps1d, nguard1d, MFInfo().SetArena(The_Pinned_Arena())); + + // Copy previous phi to get the boundary values + phi1d_mf.ParallelCopy(*phi[lev], 0, 0, 1, Geom(lev).periodicity()); + rho1d_mf.ParallelCopy(*rho[lev], 0, 0, 1, Geom(lev).periodicity()); + + // Multiplier on the charge density + const amrex::Real norm = dx[0]*dx[0]/PhysConst::ep0; + rho1d_mf.mult(norm); + + // Use the MFIter loop since when parallel, only process zero has a FAB. + // This skips the loop on all other processors. 
+ for (MFIter mfi(phi1d_mf); mfi.isValid(); ++mfi) { + + const auto& phi1d_arr = phi1d_mf[mfi].array(); + const auto& zwork1d_arr = zwork1d_mf[mfi].array(); + const auto& rho1d_arr = rho1d_mf[mfi].array(); + + // The loops are always performed on the CPU + + amrex::Real diag = 2._rt; + + // The initial values depend on the boundary condition + if (field_boundary_lo0 == FieldBoundaryType::PEC) { + + phi1d_arr(1,0,0) = (phi1d_arr(0,0,0) + rho1d_arr(1,0,0))/diag; + + } else if (field_boundary_lo0 == FieldBoundaryType::None) { + + // Neumann boundary condition + phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag; + + zwork1d_arr(1,0,0) = 2._rt/diag; + diag = 2._rt - zwork1d_arr(1,0,0); + phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag; + + } else if (field_boundary_lo0 == FieldBoundaryType::Periodic) { + + phi1d_arr(0,0,0) = rho1d_arr(0,0,0)/diag; + + zwork1d_arr(1,0,0) = 1._rt/diag; + diag = 2._rt - zwork1d_arr(1,0,0); + phi1d_arr(1,0,0) = (rho1d_arr(1,0,0) - (-1._rt)*phi1d_arr(1-1,0,0))/diag; + + } + + // Loop upward, calculating the Gaussian elimination multipliers and right hand sides + for (int i_up = 2 ; i_up < nx_solve_max ; i_up++) { + + zwork1d_arr(i_up,0,0) = 1._rt/diag; + diag = 2._rt - zwork1d_arr(i_up,0,0); + phi1d_arr(i_up,0,0) = (rho1d_arr(i_up,0,0) - (-1._rt)*phi1d_arr(i_up-1,0,0))/diag; + + } + + // The last value depend on the boundary condition + int const imax = nx_solve_max; + amrex::Real zwork_product = 1.; // Needed for parallel boundaries + if (field_boundary_hi0 == FieldBoundaryType::PEC) { + + zwork1d_arr(imax,0,0) = 1._rt/diag; + diag = 2._rt - zwork1d_arr(imax,0,0); + phi1d_arr(imax,0,0) = (phi1d_arr(imax+1,0,0) + rho1d_arr(imax,0,0) - (-1._rt)*phi1d_arr(imax-1,0,0))/diag; + + } else if (field_boundary_hi0 == FieldBoundaryType::None) { + + // Neumann boundary condition + zwork1d_arr(imax,0,0) = 1._rt/diag; + diag = 2._rt - 2._rt*zwork1d_arr(imax,0,0); + if (diag == 0._rt) { + // This happens if the lower boundary is also Neumann. + // It this case, the potential is relative to an arbitrary constant, + // so set the upper boundary to zero to force a value. 
+ phi1d_arr(imax,0,0) = 0.; + } else { + phi1d_arr(imax,0,0) = (rho1d_arr(imax,0,0) - (-1._rt)*phi1d_arr(imax-1,0,0))/diag; + } + + } else if (field_boundary_hi0 == FieldBoundaryType::Periodic) { + + zwork1d_arr(imax,0,0) = 1._rt/diag; + + for (int i = 1 ; i <= nx_solve_max ; i++) { + zwork_product *= zwork1d_arr(i,0,0); + } + + diag = 2._rt - zwork1d_arr(imax,0,0) - zwork_product; + phi1d_arr(imax,0,0) = (rho1d_arr(imax,0,0) - (-1._rt)*phi1d_arr(imax-1,0,0))/diag; + + } + + // Loop downward to calculate the phi + if (field_boundary_lo0 == FieldBoundaryType::Periodic) { + + // With periodic, the right hand column adds an extra term for all rows + for (int i_down = nx_solve_max-1 ; i_down >= nx_solve_min ; i_down--) { + zwork_product /= zwork1d_arr(i_down+1,0,0); + phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0) + zwork_product*phi1d_arr(imax,0,0); + } + + } else { + + for (int i_down = nx_solve_max-1 ; i_down >= nx_solve_min ; i_down--) { + phi1d_arr(i_down,0,0) = phi1d_arr(i_down,0,0) + zwork1d_arr(i_down+1,0,0)*phi1d_arr(i_down+1,0,0); + } + + } + + // Set the value in the guard cells + // The periodic case is handled in the ParallelCopy below + if (field_boundary_lo0 == FieldBoundaryType::PEC) { + phi1d_arr(-1,0,0) = phi1d_arr(0,0,0); + } else if (field_boundary_lo0 == FieldBoundaryType::None) { + phi1d_arr(-1,0,0) = phi1d_arr(1,0,0); + } + + if (field_boundary_hi0 == FieldBoundaryType::PEC) { + phi1d_arr(nx_full_domain+1,0,0) = phi1d_arr(nx_full_domain,0,0); + } else if (field_boundary_hi0 == FieldBoundaryType::None) { + phi1d_arr(nx_full_domain+1,0,0) = phi1d_arr(nx_full_domain-1,0,0); + } + + } + + // Copy phi1d to phi, including the x guard cell + const IntVect xghost(AMREX_D_DECL(1,0,0)); + phi[lev]->ParallelCopy(phi1d_mf, 0, 0, 1, xghost, xghost, Geom(lev).periodicity()); + +} + void ElectrostaticSolver::PoissonBoundaryHandler::definePhiBCs ( ) { int dim_start = 0; diff --git a/Source/WarpX.H b/Source/WarpX.H index 5c143a1611c..d4acf916e39 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -808,6 +808,8 @@ public: void computeB (amrex::Vector, 3> >& B, const amrex::Vector >& phi, std::array const beta = {{0,0,0}} ) const; + void computePhiTriDiagonal (const amrex::Vector >& rho, + amrex::Vector >& phi) const; /** * \brief From e7c33be580f9d277df86372bd21158151ea47a9d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 2 Oct 2022 14:20:16 -0700 Subject: [PATCH 0088/1346] Docs: BELLA MVA PoP & Ion PRAB Published (#3435) * Docs: BELLA MVA PoP Published Update the science hightlights with the final, published paper. * Levy et al. PRAB published, too --- Docs/source/highlights.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 0fbb3d549ce..98f92a24b45 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -26,7 +26,7 @@ Scientific works in laser-plasma and beam-plasma acceleration. #. Mirani F, Calzolari D, Formenti A, Passoni M. **Superintense laser-driven photon activation analysis**. - Nature Communications Physics volume **4**.185, 2021 + Nature Communications Physics volume **4**.185, 2021. `DOI:10.1038/s42005-021-00685-2 `__ @@ -37,12 +37,13 @@ Scientific works in laser-ion acceleration and laser-matter interaction. #. Hakimi S, Obst-Huebl L, Huebl A, Nakamura K, Bulanov SS, Steinke S, Leemans WP, Kober Z, Ostermayr TM, Schenkel T, Gonsalves AJ, Vay J-L, Tilborg Jv, Toth C, Schroeder CB, Esarey E, Geddes CGR. 
**Laser-solid interaction studies enabled by the new capabilities of the iP2 BELLA PW beamline**. - under review, 2022 + Physics of Plasmas **29**, 083102, 2022. + `DOI:10.1063/5.0089331 `__ -#. Levy D, Andriyash IA, Haessler S, Ouille M, Kaur J, Flacco A, Kroupp E, Malka V, Lopez-Martens R. +#. Levy D, Andriyash IA, Haessler S, Kaur J, Ouille M, Flacco A, Kroupp E, Malka V, Lopez-Martens R. **Low-divergence MeV-class proton beams from kHz-driven laser-solid interactions**. - *preprint*. under review, 2021. - `arXiv:2112.12581 `__ + Phys. Rev. Accel. Beams **25**, 093402, 2022. + `DOI:10.1103/PhysRevAccelBeams.25.093402 `__ Particle Accelerator & Beam Physics From 87d48519e0db55c81eeeced5fe80612e6cb1fcf5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 2 Oct 2022 14:20:56 -0700 Subject: [PATCH 0089/1346] Lassen (LLNL): HDF5 1.12.2 (#3378) The HDF5 1.10.4 module on Lassen (LLNL) has severe performance issues. We saw that 1.10.5+ fixed those. We now have a new module, 1.12.2, that we can use. --- Tools/machines/lassen-llnl/lassen_warpx.profile.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tools/machines/lassen-llnl/lassen_warpx.profile.example b/Tools/machines/lassen-llnl/lassen_warpx.profile.example index 293b60258d6..61e8f971689 100644 --- a/Tools/machines/lassen-llnl/lassen_warpx.profile.example +++ b/Tools/machines/lassen-llnl/lassen_warpx.profile.example @@ -13,7 +13,7 @@ module load fftw/3.3.8 module load boost/1.70.0 # optional: for openPMD support -module load hdf5-parallel/1.10.4 +module load hdf5-parallel/1.12.2 export CMAKE_PREFIX_PATH=$HOME/sw/lassen/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/lassen/adios2-2.7.1:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=$HOME/sw/lassen/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH From 3d0f943371ed67cf2414afc5eed84e06d1a1beb7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 2 Oct 2022 14:22:28 -0700 Subject: [PATCH 0090/1346] Sphinx Extension: Sphinx-Design (#3361) --- Docs/requirements.txt | 1 + Docs/source/conf.py | 1 + Docs/spack.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index 1eedc4e08f2..a41632f3ca1 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -17,4 +17,5 @@ picmistandard==0.0.19 pygments recommonmark sphinx>=2.0 +sphinx-design sphinx_rtd_theme>=0.3.1 diff --git a/Docs/source/conf.py b/Docs/source/conf.py index c8944627df4..e00ae479af4 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -46,6 +46,7 @@ extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', + 'sphinx_design', 'breathe' ] diff --git a/Docs/spack.yaml b/Docs/spack.yaml index 434d0820929..1e953943610 100644 --- a/Docs/spack.yaml +++ b/Docs/spack.yaml @@ -23,4 +23,5 @@ spack: - py-breathe - py-recommonmark - py-pygments + - py-sphinx-design - py-sphinx-rtd-theme From 45ec9e3550755f375a6564ae03b90bc323e15abc Mon Sep 17 00:00:00 2001 From: David Grote Date: Sun, 2 Oct 2022 14:23:46 -0700 Subject: [PATCH 0091/1346] Major update of the Python/picmi documentation (#3329) * Include WarpX specific documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Undo the precommit change of import in __init__.py * Docs: Install Pure Python WarpX PICMI Allows to use these classes in our Sphinx autodocs in the manual. 
* Sphinx: Add Napoleon Extension https://sphinxcontrib-napoleon.readthedocs.io * PICMI Doc Strings: Fix Some Warnings * Removed the DocumentedMetaClass since it was moved to picmistandard * Defined the languate in Docs/source/conf.py * Cleaned up minor issues in the rst files * Updated existing doc strings in picmi.py * Reformatted doc strings in Regression/Checksum/checksumAPI.py * Reformatted doc strings in Regression/Checksum * First set of WarpX specific documentation * Updated to picmistandard version 0.0.20 * Cleaned up the Python document page Fixed up the text to be more clear. Added section headers for each picmi class (for easy reference). * Further updates to picmi.py doc strings Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl --- Docs/requirements.txt | 8 +- Docs/source/conf.py | 3 +- Docs/source/developers/documentation.rst | 4 +- Docs/source/developers/particles.rst | 4 +- Docs/source/developers/profiling.rst | 2 +- Docs/source/developers/testing.rst | 2 +- Docs/source/highlights.rst | 2 +- Docs/source/usage/python.rst | 162 +++++-- Python/pywarpx/__init__.py | 3 + Python/pywarpx/picmi.py | 558 +++++++++++++++++++++-- Python/setup.py | 2 +- Regression/Checksum/benchmark.py | 37 +- Regression/Checksum/checksum.py | 60 ++- Regression/Checksum/checksumAPI.py | 72 ++- requirements.txt | 2 +- 15 files changed, 776 insertions(+), 145 deletions(-) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index a41632f3ca1..3da427f88b4 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -4,6 +4,8 @@ # # License: BSD-3-Clause-LBNL +# WarpX PICMI bindings w/o C++ component (used for autoclass docs) +-e ../Python breathe # docutils 0.17 breaks HTML tags & RTD theme # https://github.com/sphinx-doc/sphinx/issues/9001 @@ -11,11 +13,13 @@ docutils<=0.16 # PICMI API docs # note: keep in sync with version in ../requirements.txt -picmistandard==0.0.19 +picmistandard==0.0.20 # for development against an unreleased PICMI version, use: -#picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python +# picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python + pygments recommonmark sphinx>=2.0 sphinx-design sphinx_rtd_theme>=0.3.1 +sphinxcontrib-napoleon diff --git a/Docs/source/conf.py b/Docs/source/conf.py index e00ae479af4..a726152064a 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -45,6 +45,7 @@ # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', + 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinx_design', 'breathe' @@ -81,7 +82,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. diff --git a/Docs/source/developers/documentation.rst b/Docs/source/developers/documentation.rst index e8cad8e1b7d..f7929eb9dd1 100644 --- a/Docs/source/developers/documentation.rst +++ b/Docs/source/developers/documentation.rst @@ -41,9 +41,11 @@ Breathe documentation --------------------- Your Doxygen documentation is not only useful for people looking into the code, it is also part of the `WarpX online documentation `_ based on `Sphinx `_! 
-This is done using the Python module `Breathe `_, that allows you to read Doxygen documentation dorectly in the source and include it in your Sphinx documentation, by calling Breathe functions. +This is done using the Python module `Breathe `_, that allows you to write Doxygen documentation directly in the source and have it included it in your Sphinx documentation, by calling Breathe functions. For instance, the following line will get the Doxygen documentation for ``WarpXParticleContainer`` in ``Source/Particles/WarpXParticleContainer.H`` and include it to the html page generated by Sphinx: +.. code-block:: rst + .. doxygenclass:: WarpXParticleContainer Building the documentation diff --git a/Docs/source/developers/particles.rst b/Docs/source/developers/particles.rst index ef5ae3f13f4..988313079d4 100644 --- a/Docs/source/developers/particles.rst +++ b/Docs/source/developers/particles.rst @@ -85,11 +85,9 @@ On a loop over particles it can be useful to access the fields on the box we are Main functions -------------- -.. doxygenfunction:: PhysicalParticleContainer::FieldGather - .. doxygenfunction:: PhysicalParticleContainer::PushPX -.. doxygenfunction:: WarpXParticleContainer::DepositCurrent +.. doxygenfunction:: WarpXParticleContainer::DepositCurrent(amrex::Vector, 3>> &J, const amrex::Real dt, const amrex::Real relative_time) .. note:: The current deposition is used both by ``PhysicalParticleContainer`` and ``LaserParticleContainer``, so it is in the parent class ``WarpXParticleContainer``. diff --git a/Docs/source/developers/profiling.rst b/Docs/source/developers/profiling.rst index cfc0d2ad4ea..69933a0b423 100644 --- a/Docs/source/developers/profiling.rst +++ b/Docs/source/developers/profiling.rst @@ -66,7 +66,7 @@ behavior of *each* individual MPI rank. The workflow for doing so is the followi cmake -S . -B build -DAMReX_BASE_PROFILE=OFF -DAMReX_TINY_PROFILE=ON - Run the simulation to be profiled. Note that the WarpX executable will create -and new folder `bl_prof`, which contains the profiling data. + a new folder `bl_prof`, which contains the profiling data. .. note:: diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 76259389165..06a4cf1ccb9 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -25,7 +25,7 @@ For example, if you like to change the compiler to compilation to build on Nvidi branch = development cmakeSetupOpts = -DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON -DWarpX_COMPUTE=CUDA -We also support changing compilation options :ref:`via the usual build enviroment variables `__. +We also support changing compilation options via the usual :ref:`build enviroment variables `. For instance, compiling with ``clang++ -Werror`` would be: .. code-block:: sh diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 98f92a24b45..49a7b1122ca 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -5,7 +5,7 @@ Science Highlights WarpX can be used in many domains of laser-plasma science, plasma physics, accelerator physics and beyond. Below, we collect a series of scientific publications that used WarpX. -Please :ref:`acknowledge WarpX in your works`, so we can find your works. +Please :ref:`acknowledge WarpX in your works `, so we can find your works. Is your publication missing? :ref:`Contact us ` or edit this page via a pull request. 
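The numpydoc-style docstrings added throughout this patch are rendered through the sphinx.ext.napoleon extension enabled in conf.py above, and they all follow the same layout. A minimal sketch of that layout, using a hypothetical class and option name purely for illustration:

    class ExampleOption:
        """
        One-line summary of the class.

        Parameters
        ----------
        warpx_example_flag: bool, default=False
            Hypothetical WarpX-style option; napoleon renders the
            "name: type, default" line and this indented description
            as a single parameter entry in the generated documentation.
        """
        def __init__(self, warpx_example_flag=False):
            # Store the option so the sketch is a complete, runnable class.
            self.example_flag = warpx_example_flag
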
diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index 16836c75418..afd8c5c0cac 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -7,38 +7,25 @@ WarpX uses the `PICMI standard `__ for Python version 3.7 or newer is required. Example input files can be found in :ref:`the examples section `. -The examples support running in both modes by commenting and uncommenting the appropriate lines. +In the input file, instances of classes are created defining the various aspects of the simulation. +The `Simulation` object is the central object, where the instances are passed, +defining the simulation time, field solver, registered species, etc. .. _usage-picmi-parameters: -Parameters +Classes ---------- Simulation and grid setup ^^^^^^^^^^^^^^^^^^^^^^^^^ -The `Simulation` object is the central object in a PICMI script. -It defines the simulation time, field solver, registered species, etc. - -.. autoclass:: picmistandard.PICMI_Simulation +Simulation +"""""""""" +.. autoclass:: pywarpx.picmi.Simulation :members: step, add_species, add_laser, write_input_file -Field solvers define the updates of electric and magnetic fields. - -.. autoclass:: picmistandard.PICMI_ElectromagneticSolver - -.. autoclass:: picmistandard.PICMI_ElectrostaticSolver - -Grid define the geometry and discretization. - -.. autoclass:: picmistandard.PICMI_Cartesian3DGrid - -.. autoclass:: picmistandard.PICMI_Cartesian2DGrid - -.. autoclass:: picmistandard.PICMI_Cartesian1DGrid - -.. autoclass:: picmistandard.PICMI_CylindricalGrid - +Constants +""""""""" For convenience, the PICMI interface defines the following constants, which can be used directly inside any PICMI script. The values are in SI units. @@ -49,27 +36,83 @@ which can be used directly inside any PICMI script. The values are in SI units. - ``picmi.constants.m_e``: The electron mass - ``picmi.constants.m_p``: The proton mass -Additionally to self-consistent fields from the field solver, external fields can be applied. +Field solvers define the updates of electric and magnetic fields. + +ElectromagneticSolver +""""""""""""""""""""" +.. autoclass:: pywarpx.picmi.ElectromagneticSolver -.. autoclass:: picmistandard.PICMI_ConstantAppliedField +ElectrostaticSolver +""""""""""""""""""" +.. autoclass:: pywarpx.picmi.ElectrostaticSolver -.. autoclass:: picmistandard.PICMI_AnalyticAppliedField +Cartesian3DGrid +""""""""""""""" +.. autoclass:: pywarpx.picmi.Cartesian3DGrid -.. autoclass:: picmistandard.PICMI_Mirror +Cartesian2DGrid +""""""""""""""" +.. autoclass:: pywarpx.picmi.Cartesian2DGrid -Diagnostics can be used to output data. +Cartesian1DGrid +""""""""""""""" +.. autoclass:: pywarpx.picmi.Cartesian1DGrid -.. autoclass:: picmistandard.PICMI_ParticleDiagnostic +CylindricalGrid +""""""""""""""" +.. autoclass:: pywarpx.picmi.CylindricalGrid -.. autoclass:: picmistandard.PICMI_FieldDiagnostic +EmbeddedBoundary +"""""""""""""""" +.. autoclass:: pywarpx.picmi.EmbeddedBoundary -.. autoclass:: picmistandard.PICMI_ElectrostaticFieldDiagnostic +Applied fields +^^^^^^^^^^^^^^ + +ConstantAppliedField +"""""""""""""""""""" +.. autoclass:: pywarpx.picmi.ConstantAppliedField + +AnalyticAppliedField +"""""""""""""""""""" +.. autoclass:: pywarpx.picmi.AnalyticAppliedField + +PlasmaLens +"""""""""" +.. autoclass:: pywarpx.picmi.PlasmaLens + +Mirror +"""""" +.. autoclass:: pywarpx.picmi.Mirror + +Diagnostics +^^^^^^^^^^^ + +ParticleDiagnostic +"""""""""""""""""" +.. 
autoclass:: pywarpx.picmi.ParticleDiagnostic + +FieldDiagnostic +""""""""""""""" +.. autoclass:: pywarpx.picmi.FieldDiagnostic + +ElectrostaticFieldDiagnostic +"""""""""""""""""""""""""""" +.. autoclass:: pywarpx.picmi.ElectrostaticFieldDiagnostic Lab-frame diagnostics diagnostics are used when running boosted-frame simulations. -.. autoclass:: picmistandard.PICMI_LabFrameParticleDiagnostic +LabFrameParticleDiagnostic +"""""""""""""""""""""""""" +.. autoclass:: pywarpx.picmi.LabFrameParticleDiagnostic + +LabFrameFieldDiagnostic +""""""""""""""""""""""" +.. autoclass:: pywarpx.picmi.LabFrameFieldDiagnostic -.. autoclass:: picmistandard.PICMI_LabFrameFieldDiagnostic +Checkpoint +"""""""""" +.. autoclass:: pywarpx.picmi.Checkpoint Particles ^^^^^^^^^ @@ -77,38 +120,70 @@ Particles Species objects are a collection of particles with similar properties. For instance, background plasma electrons, background plasma ions and an externally injected beam could each be their own particle species. -.. autoclass:: picmistandard.PICMI_Species +Species +""""""" +.. autoclass:: pywarpx.picmi.Species -.. autoclass:: picmistandard.PICMI_MultiSpecies +MultiSpecies +"""""""""""" +.. autoclass:: pywarpx.picmi.MultiSpecies Particle distributions can be used for to initialize particles in a particle species. -.. autoclass:: picmistandard.PICMI_GaussianBunchDistribution +GaussianBunchDistribution +""""""""""""""""""""""""" +.. autoclass:: pywarpx.picmi.GaussianBunchDistribution -.. autoclass:: picmistandard.PICMI_UniformDistribution +UniformDistribution +""""""""""""""""""" +.. autoclass:: pywarpx.picmi.UniformDistribution -.. autoclass:: picmistandard.PICMI_AnalyticDistribution +AnalyticDistribution +"""""""""""""""""""" +.. autoclass:: pywarpx.picmi.AnalyticDistribution -.. autoclass:: picmistandard.PICMI_ParticleListDistribution +ParticleListDistribution +"""""""""""""""""""""""" +.. autoclass:: pywarpx.picmi.ParticleListDistribution Particle layouts determine how to microscopically place macro particles in a grid cell. -.. autoclass:: picmistandard.PICMI_GriddedLayout +GriddedLayout +""""""""""""" +.. autoclass:: pywarpx.picmi.GriddedLayout + +PseudoRandomLayout +"""""""""""""""""" +.. autoclass:: pywarpx.picmi.PseudoRandomLayout -.. autoclass:: picmistandard.PICMI_PseudoRandomLayout +Other operations related to particles + +CoulombCollisions +""""""""""""""""" +.. autoclass:: pywarpx.picmi.CoulombCollisions + +MCCCollisions +""""""""""""" +.. autoclass:: pywarpx.picmi.MCCCollisions Lasers ^^^^^^ Laser profiles can be used to initialize laser pulses in the simulation. -.. autoclass:: picmistandard.PICMI_GaussianLaser +GaussianLaser +""""""""""""" +.. autoclass:: pywarpx.picmi.GaussianLaser -.. autoclass:: picmistandard.PICMI_AnalyticLaser +AnalyticLaser +""""""""""""" +.. autoclass:: pywarpx.picmi.AnalyticLaser Laser injectors control where to initialize laser pulses on the simulation grid. -.. autoclass:: picmistandard.PICMI_LaserAntenna +LaserAntenna +"""""""""""" +.. autoclass:: pywarpx.picmi.LaserAntenna .. _usage-picmi-run: @@ -119,6 +194,7 @@ Running WarpX can be run in one of two modes. It can run as a preprocessor, using the Python input file to generate an input file to be used by the C++ version, or it can be run directly from Python. +The examples support running in both modes by commenting and uncommenting the appropriate lines. In either mode, if using a `virtual environment `__, be sure to activate it before compiling and running WarpX. 
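A minimal sketch of the two run modes described above, using only classes documented on this page; the cell count, bounds, boundary conditions and time step are illustrative placeholders rather than recommended values (a complete working 1D setup is in Examples/Physics_applications/capacitive_discharge/PICMI_inputs_1d.py):

    from pywarpx import picmi

    # Illustrative 1D Cartesian grid; all numbers are placeholders.
    grid = picmi.Cartesian1DGrid(
        number_of_cells=[128],
        lower_bound=[0.], upper_bound=[0.1],
        lower_boundary_conditions=['dirichlet'],
        upper_boundary_conditions=['dirichlet'],
        lower_boundary_conditions_particles=['absorbing'],
        upper_boundary_conditions_particles=['absorbing'])

    solver = picmi.ElectrostaticSolver(grid=grid)
    sim = picmi.Simulation(solver=solver, time_step_size=1.e-12, max_steps=10)

    # Preprocessor mode: write a native WarpX input file and run the C++ executable separately.
    sim.write_input_file(file_name='inputs_from_picmi')

    # Direct mode: initialize WarpX through pywarpx and advance the time steps from Python.
    sim.step()
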
diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 5590c7d6fc5..8f49604563c 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -19,3 +19,6 @@ from .Particles import electrons, newspecies, particles, positrons, protons from .WarpX import warpx from ._libwarpx import libwarpx + +# This is a circular import and must happen after the import of libwarpx +from . import picmi # isort:skip diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index a9a8f80af04..4640a2e2e4d 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -39,7 +39,84 @@ class constants: picmistandard.register_constants(constants) + class Species(picmistandard.PICMI_Species): + """ + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_boost_adjust_transverse_positions: bool, default=False + Whether to adjust transverse positions when applying the boost + to the simulation frame + + warpx_self_fields_required_precision: float, default=1.e-11 + Relative precision on the electrostatic solver + (when using the relativistic solver) + + warpx_self_fields_absolute_tolerance: float, default=0. + Absolute precision on the electrostatic solver + (when using the relativistic solver) + + warpx_self_fields_max_iters: integer, default=200 + Maximum number of iterations for the electrostatic + solver for the species + + warpx_self_fields_verbosity: integer, default=2 + Level of verbosity for the electrostatic solver + + warpx_save_previous_position: bool, default=False + Whether to save the old particle positions + + warpx_do_not_deposit: bool, default=False + Whether or not to deposit the charge and current density + for this species + + warpx_reflection_model_xlo: string, default='0.' + Expression (in terms of the velocity "v") specifying the probability + that the particle will reflect on the lower x boundary + + warpx_reflection_model_xhi: string, default='0.' + Expression (in terms of the velocity "v") specifying the probability + that the particle will reflect on the upper x boundary + + warpx_reflection_model_ylo: string, default='0.' + Expression (in terms of the velocity "v") specifying the probability + that the particle will reflect on the lower y boundary + + warpx_reflection_model_yhi: string, default='0.' + Expression (in terms of the velocity "v") specifying the probability + that the particle will reflect on the upper y boundary + + warpx_reflection_model_zlo: string, default='0.' + Expression (in terms of the velocity "v") specifying the probability + that the particle will reflect on the lower z boundary + + warpx_reflection_model_zhi: string, default='0.'
+ Expression (in terms of the velocity "v") specifying the probability + that the particle will reflect on the upper z boundary + + warpx_save_particles_at_xlo: bool, default=False + Whether to save particles lost at the lower x boundary + + warpx_save_particles_at_xhi: bool, default=False + Whether to save particles lost at the upper x boundary + + warpx_save_particles_at_ylo: bool, default=False + Whether to save particles lost at the lower y boundary + + warpx_save_particles_at_yhi: bool, default=False + Whether to save particles lost at the upper y boundary + + warpx_save_particles_at_zlo: bool, default=False + Whether to save particles lost at the lower z boundary + + warpx_save_particles_at_zhi: bool, default=False + Whether to save particles lost at the upper z boundary + + warpx_save_particles_at_eb: bool, default=False + Whether to save particles lost at the embedded boundary + """ def init(self, kw): if self.particle_type == 'electron': @@ -404,7 +481,42 @@ def initialize_inputs(self, solver): class CylindricalGrid(picmistandard.PICMI_CylindricalGrid): - """This assumes that WarpX was compiled with USE_RZ = TRUE + """ + This assumes that WarpX was compiled with USE_RZ = TRUE + + See `Input Parameters `_ for more information. + + Parameters + --------- + warpx_max_grid_size: integer, default=32 + Maximum block size in either direction + + warpx_max_grid_size_x: integer, optional + Maximum block size in radial direction + + warpx_max_grid_size_y: integer, optional + Maximum block size in longitudinal direction + + warpx_blocking_factor: integer, optional + Blocking factor (which controls the block size) + + warpx_blocking_factor_x: integer, optional + Blocking factor (which controls the block size) in the radial direction + + warpx_blocking_factor_y: integer, optional + Blocking factor (which controls the block size) in the longitudinal direction + + warpx_potential_lo_r: float, default=0. + Electrostatic potential on the lower radial boundary + + warpx_potential_hi_r: float, default=0. + Electrostatic potential on the upper radial boundary + + warpx_potential_lo_z: float, default=0. + Electrostatic potential on the lower longitudinal boundary + + warpx_potential_hi_z: float, default=0. + Electrostatic potential on the upper longitudinal boundary """ def init(self, kw): self.max_grid_size = kw.pop('warpx_max_grid_size', 32) @@ -472,6 +584,29 @@ def initialize_inputs(self): class Cartesian1DGrid(picmistandard.PICMI_Cartesian1DGrid): + """ + See `Input Parameters `_ for more information. + + Parameters + --------- + warpx_max_grid_size: integer, default=32 + Maximum block size in either direction + + warpx_max_grid_size_x: integer, optional + Maximum block size in longitudinal direction + + warpx_blocking_factor: integer, optional + Blocking factor (which controls the block size) + + warpx_blocking_factor_x: integer, optional + Blocking factor (which controls the block size) in the longitudinal direction + + warpx_potential_lo_z: float, default=0. + Electrostatic potential on the lower longitudinal boundary + + warpx_potential_hi_z: float, default=0. + Electrostatic potential on the upper longitudinal boundary + """ def init(self, kw): self.max_grid_size = kw.pop('warpx_max_grid_size', 32) self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) @@ -525,6 +660,41 @@ def initialize_inputs(self): pywarpx.amr.max_level = 0 class Cartesian2DGrid(picmistandard.PICMI_Cartesian2DGrid): + """ + See `Input Parameters `_ for more information. 
+ + Parameters + --------- + warpx_max_grid_size: integer, default=32 + Maximum block size in either direction + + warpx_max_grid_size_x: integer, optional + Maximum block size in x direction + + warpx_max_grid_size_y: integer, optional + Maximum block size in z direction + + warpx_blocking_factor: integer, optional + Blocking factor (which controls the block size) + + warpx_blocking_factor_x: integer, optional + Blocking factor (which controls the block size) in the x direction + + warpx_blocking_factor_y: integer, optional + Blocking factor (which controls the block size) in the z direction + + warpx_potential_lo_x: float, default=0. + Electrostatic potential on the lower x boundary + + warpx_potential_hi_x: float, default=0. + Electrostatic potential on the upper x boundary + + warpx_potential_lo_z: float, default=0. + Electrostatic potential on the lower z boundary + + warpx_potential_hi_z: float, default=0. + Electrostatic potential on the upper z boundary + """ def init(self, kw): self.max_grid_size = kw.pop('warpx_max_grid_size', 32) self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) @@ -586,6 +756,53 @@ def initialize_inputs(self): class Cartesian3DGrid(picmistandard.PICMI_Cartesian3DGrid): + """ + See `Input Parameters `_ for more information. + + Parameters + --------- + warpx_max_grid_size: integer, default=32 + Maximum block size in either direction + + warpx_max_grid_size_x: integer, optional + Maximum block size in x direction + + warpx_max_grid_size_y: integer, optional + Maximum block size in y direction + + warpx_max_grid_size_z: integer, optional + Maximum block size in z direction + + warpx_blocking_factor: integer, optional + Blocking factor (which controls the block size) + + warpx_blocking_factor_x: integer, optional + Blocking factor (which controls the block size) in the x direction + + warpx_blocking_factor_y: integer, optional + Blocking factor (which controls the block size) in the y direction + + warpx_blocking_factor_z: integer, optional + Blocking factor (which controls the block size) in the z direction + + warpx_potential_lo_x: float, default=0. + Electrostatic potential on the lower x boundary + + warpx_potential_hi_x: float, default=0. + Electrostatic potential on the upper x boundary + + warpx_potential_lo_y: float, default=0. + Electrostatic potential on the lower y boundary + + warpx_potential_hi_y: float, default=0. + Electrostatic potential on the upper y boundary + + warpx_potential_lo_z: float, default=0. + Electrostatic potential on the lower z boundary + + warpx_potential_hi_z: float, default=0. + Electrostatic potential on the upper z boundary + """ def init(self, kw): self.max_grid_size = kw.pop('warpx_max_grid_size', 32) self.max_grid_size_x = kw.pop('warpx_max_grid_size_x', None) @@ -653,6 +870,38 @@ def initialize_inputs(self): pywarpx.amr.max_level = 0 class ElectromagneticSolver(picmistandard.PICMI_ElectromagneticSolver): + """ + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_pml_ncell: integer, optional + The depth of the PML, in number of cells + + warpx_periodic_single_box_fft: bool, default=False + Whether to do the spectral solver FFTs assuming a single + simulation block + + warpx_current_correction: bool, default=True + Whether to do the current correction for the spectral solver. + See documentation for exceptions to the default value.
+ + warpx_psatd_update_with_rho: bool, optional + Whether to update with the actual rho for the spectral solver + + warpx_psatd_do_time_averaging: bool, optional + Whether to do the time averaging for the spectral solver + + warpx_do_pml_in_domain: bool, default=False + Whether to do the PML boundaries within the domain (versus + in the guard cells) + + warpx_pml_has_particles: bool, default=False + Whether to allow particles in the PML region + + warpx_do_pml_j_damping: bool, default=False + Whether to do damping of J in the PML + """ def init(self, kw): assert self.method is None or self.method in ['Yee', 'CKC', 'PSATD', 'ECT'], Exception("Only 'Yee', 'CKC', 'PSATD', and 'ECT' are supported") @@ -718,6 +967,20 @@ def initialize_inputs(self): pywarpx.warpx.do_pml_j_damping = self.do_pml_j_damping class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): + """ + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_relativistic: bool, default=False + Whether to use the relativistic solver or lab frame solver + + warpx_absolute_tolerance: float, default=0. + Absolute tolerance on the lab fram solver + + warpx_self_fields_verbosity: integer, default=2 + Level of verbosity for the lab frame solver + """ def init(self, kw): self.relativistic = kw.pop('warpx_relativistic', False) self.absolute_tolerance = kw.pop('warpx_absolute_tolerance', None) @@ -862,10 +1125,26 @@ def initialize_inputs(self): class CoulombCollisions(picmistandard.base._ClassWithInit): - """Custom class to handle setup of binary Coulmb collisions in WarpX. If + """ + Custom class to handle setup of binary Coulmb collisions in WarpX. If collision initialization is added to picmistandard this can be changed to - inherit that functionality.""" + inherit that functionality. + + Parameters + ---------- + name: string + Name of instance (used in the inputs file) + species: list of species instances + The species involved in the collision. Must be of length 2. + + CoulombLog: float, optional + Value of the Coulomb log to use in the collision cross section. + If not supplied, it is calculated from the local conditions. + + ndt: integer, optional + The collisions will be applied every "ndt" steps. Must be 1 or larger. + """ def __init__(self, name, species, CoulombLog=None, ndt=None, **kw): self.name = name self.species = species @@ -883,9 +1162,35 @@ def initialize_inputs(self): class MCCCollisions(picmistandard.base._ClassWithInit): - """Custom class to handle setup of MCC collisions in WarpX. If collision + """ + Custom class to handle setup of MCC collisions in WarpX. If collision initialization is added to picmistandard this can be changed to inherit - that functionality.""" + that functionality. + + Parameters + ---------- + name: string + Name of instance (used in the inputs file) + + species: species instance + The species involved in the collision + + background_density: float + The density of the background + + background_temperature: float + The temperature of the background + + scattering_processes: dictionary + The scattering process to use and any needed information + + background_mass: float, optional + The mass of the background particle. If not supplied, the default depends + on the type of scattering process. + + ndt: integer, optional + The collisions will be applied every "ndt" steps. Must be 1 or larger. 
+ """ def __init__(self, name, species, background_density, background_temperature, scattering_processes, @@ -924,14 +1229,31 @@ class EmbeddedBoundary(picmistandard.base._ClassWithInit): changed to inherit that functionality. The geometry can be specified either as an implicit function or as an STL file (ASCII or binary). In the latter case the geometry specified in the STL file can be scaled, translated and inverted. - - implicit_function: Analytic expression describing the embedded boundary - - stl_file: STL file path (string), file contains the embedded boundary geometry - - stl_scale: factor by which the STL geometry is scaled (pure number) - - stl_center: vector by which the STL geometry is translated (in meters) - - stl_reverse_normal: if True inverts the orientation of the STL geometry - - potential: Analytic expression defining the potential. Can only be specified - when the solver is electrostatic. Optional, defaults to 0. - Parameters used in the expressions should be given as additional keyword arguments. + + Parameters + ---------- + implicit_function: string + Analytic expression describing the embedded boundary + + stl_file: string + STL file path (string), file contains the embedded boundary geometry + + stl_scale: float + Factor by which the STL geometry is scaled + + stl_center: vector of floats + Vector by which the STL geometry is translated (in meters) + + stl_reverse_normal: bool + If True inverts the orientation of the STL geometry + + potential: string, default=0. + Analytic expression defining the potential. Can only be specified + when the solver is electrostatic. + + + Parameters used in the analytic expressions should be given as additional keyword arguments. + """ def __init__(self, implicit_function=None, stl_file=None, stl_scale=None, stl_center=None, stl_reverse_normal=False, potential=None, **kw): @@ -990,11 +1312,36 @@ def initialize_inputs(self, solver): class PlasmaLens(picmistandard.base._ClassWithInit): """ Custom class to setup a plasma lens lattice. - The applied fields are dependent on the transverse position - - Ex = x*stengths_E - - Ey = y*stengths_E - - Bx = +y*stengths_B - - By = -x*stengths_B + The applied fields are dependent only on the transverse position. + + Parameters + ---------- + period: float + Periodicity of the lattice (in lab frame, in meters) + + starts: list of floats + The start of each lens relative to the periodic repeat + + lengths: list of floats + The length of each lens + + strengths_E=None: list of floats, default = 0. + The electric field strength of each lens + + strengths_B=None: list of floats, default = 0. + The magnetic field strength of each lens + + + The field that is applied depends on the transverse position of the particle, (x,y) + + - Ex = x*stengths_E + + - Ey = y*stengths_E + + - Bx = +y*stengths_B + + - By = -x*stengths_B + """ def __init__(self, period, starts, lengths, strengths_E=None, strengths_B=None, **kw): self.period = period @@ -1020,6 +1367,78 @@ def initialize_inputs(self): class Simulation(picmistandard.PICMI_Simulation): + """ + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_current_deposition_algo: {'direct', 'esirkepov', and 'vay'}, optional + Current deposition algorithm. The default depends on conditions. + + warpx_charge_deposition_algo: {'standard'}, optional + Charge deposition algorithm. + + warpx_field_gathering_algo: {'energy-conserving', 'momentum-conserving'}, optional + Field gathering algorithm. The default depends on conditions. 
+ + warpx_particle_pusher_algo: {'boris', 'vay', 'higuera'}, default='boris' + Particle pushing algorithm. + + warpx_use_filter: bool, optional + Whether to use filtering. The default depends on the conditions. + + warpx_serialize_initial_conditions: bool, default=False + Controls the random numbers used for initialization. + This parameter should only be used for testing and continuous integration. + + warpx_do_dynamic_scheduling: bool, default=True + Whether to do dynamic scheduling with OpenMP + + warpx_load_balance_intervals: string, default='0' + The intervals for doing load balancing + + warpx_load_balance_efficiency_ratio_threshold: float, default=1.1 + (See documentation) + + warpx_load_balance_with_sfc: bool, default=0 + (See documentation) + + warpx_load_balance_knapsack_factor: float, default=1.24 + (See documentation) + + warpx_load_balance_costs_update: {'heuristic' or 'timers' or 'gpuclock'}, optional + (See documentation) + + warpx_costs_heuristic_particles_wt: float, optional + (See documentation) + + warpx_costs_heuristic_cells_wt: float, optional + (See documentation) + + warpx_use_fdtd_nci_corr: bool, optional + Whether to use the NCI correction when using the FDTD solver + + warpx_amr_check_input: bool, optional + Whether AMReX should perform checks on the input + (primarily related to the max grid size and blocking factors) + + warpx_amr_restart: string, optional + The name of the restart to use + + warpx_zmax_plasma_to_compute_max_step: float, optional + Sets the simulation run time based on the maximum z value + + warpx_collisions: collision instance, optional + The collision instance specifying the particle collisions + + warpx_embedded_boundary: embedded boundary instance, optional + + warpx_break_signals: list of strings + Signals on which to break + + warpx_checkpoint_signals: list of strings + Signals on which to write out a checkpoint + """ # Set the C++ WarpX interface (see _libwarpx.LibWarpX) as an extension to # Simulation objects. In the future, LibWarpX objects may actually be owned @@ -1178,6 +1597,32 @@ def finalize(self): class FieldDiagnostic(picmistandard.PICMI_FieldDiagnostic): + """ + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_plot_raw_fields: bool, optional + Flag whether to dump the raw fields + + warpx_plot_raw_fields_guards: bool, optional + Flag whether the raw fields should include the guard cells + + warpx_format: {plotfile, checkpoint, openpmd, ascent, sensei}, optional + Diagnostic file format + + warpx_openpmd_backend: {bp, h5, json}, optional + Openpmd backend file format + + warpx_file_prefix: string, optional + Prefix on the diagnostic file name + + warpx_file_min_digits: integer, optional + Minimum number of digits for the time step number in the file name + + warpx_dump_rz_modes: bool, optional + Flag whether to dump the data for all RZ modes + """ def init(self, kw): self.plot_raw_fields = kw.pop('warpx_plot_raw_fields', None) @@ -1274,6 +1719,20 @@ def initialize_inputs(self): class Checkpoint(picmistandard.base._ClassWithInit): + """ + Sets up checkpointing of the simulation, allowing for later restarts + + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_file_prefix: string + The prefix to the checkpoint directory names + + warpx_file_min_digits: integer + Minimum number of digits for the time step number in the checkpoint + directory name. 
+ """ def __init__(self, period = 1, write_dir = None, name = None, **kw): @@ -1307,6 +1766,32 @@ def initialize_inputs(self): self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) class ParticleDiagnostic(picmistandard.PICMI_ParticleDiagnostic): + """ + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_format: {plotfile, checkpoint, openpmd, ascent, sensei}, optional + Diagnostic file format + + warpx_openpmd_backend: {bp, h5, json}, optional + Openpmd backend file format + + warpx_file_prefix: string, optional + Prefix on the diagnostic file name + + warpx_file_min_digits: integer, optional + Minimum number of digits for the time step number in the file name + + warpx_random_fraction: float, optional + Random fraction of particles to include in the diagnostic + + warpx_uniform_stride: integer, optional + Stride to down select to the particles to include in the diagnostic + + warpx_plot_filter_function: string, optional + Analytic expression to down select the particles to in the diagnostic + """ def init(self, kw): self.format = kw.pop('warpx_format', 'plotfile') @@ -1407,17 +1892,34 @@ def initialize_inputs(self): class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic): """ - Warp specific arguments: - - warpx_new_BTD: Use the new BTD diagnostics - - warpx_format: Passed to .format - - warpx_openpmd_backend: Passed to .openpmd_backend - - warpx_file_prefix: Passed to .file_prefix - - warpx_file_min_digits: Passed to .file_min_digits - - warpx_buffer_size: Passed to .buffer_size - - warpx_lower_bound: Passed to .lower_bound - - warpx_upper_bound: Passed to .upper_bound + See `Input Parameters `_ for more information. + + Parameters + ---------- + warpx_new_BTD: bool, optional + Use the new BTD diagnostics + + warpx_format: string, optional + Passed to .format + + warpx_openpmd_backend: string, optional + Passed to .openpmd_backend + + warpx_file_prefix: string, optional + Passed to .file_prefix + + warpx_file_min_digits: integer, optional + Passed to .file_min_digits + + warpx_buffer_size: integer, optional + Passed to .buffer_size + + warpx_lower_bound: vector of floats, optional + Passed to .lower_bound + + warpx_upper_bound: vector of floats, optional + Passed to .upper_bound """ - __doc__ = picmistandard.PICMI_LabFrameFieldDiagnostic.__doc__ + __doc__ def init(self, kw): self.use_new_BTD = kw.pop('warpx_new_BTD', False) if self.use_new_BTD: diff --git a/Python/setup.py b/Python/setup.py index f662a436777..04b956e10c4 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -59,7 +59,7 @@ package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", package_data = package_data, - install_requires = ['numpy', 'picmistandard==0.0.19', 'periodictable'], + install_requires = ['numpy', 'picmistandard==0.0.20', 'periodictable'], python_requires = '>=3.7', zip_safe=False ) diff --git a/Regression/Checksum/benchmark.py b/Regression/Checksum/benchmark.py index 771aa06e1d2..fbbe44f98b0 100644 --- a/Regression/Checksum/benchmark.py +++ b/Regression/Checksum/benchmark.py @@ -13,20 +13,25 @@ class Benchmark: - '''Holds data and functions for referenc benchmark of one checksum test. - ''' + """ + Holds data and functions for referenc benchmark of one checksum test. + """ def __init__(self, test_name, data=None): - '''Constructor - + """ + Benchmark constructor. Store test name and reference checksum value, either from benchmark (used for comparison) or from a plotfile (used to reset a benchmark). 
- @param self The object pointer. - @param test_name Name of test, as found between [] in .ini file. - @param data checksum value (dictionary). - If None, it is read from benchmark. - ''' + Parameters + ---------- + test_name: string + Name of test, as found between [] in .ini file. + + data: dictionary, optional + Checksum value. + If None, it is read from benchmark. + """ self.test_name = test_name self.json_file = os.path.join(config.benchmark_location, @@ -37,18 +42,16 @@ def __init__(self, test_name, data=None): self.data = data def reset(self): - '''Update the benchmark (overwrites reference json file). - - @param self The object pointer. - ''' + """ + Update the benchmark (overwrites reference json file). + """ with open(self.json_file, 'w') as outfile: json.dump(self.data, outfile, sort_keys=True, indent=2) def get(self): - '''Read benchmark from reference json file. - - @param self The object pointer. - ''' + """ + Read benchmark from reference json file. + """ with open(self.json_file) as infile: data = json.load(infile) diff --git a/Regression/Checksum/checksum.py b/Regression/Checksum/checksum.py index 472be941f8d..3e7ac78aa00 100644 --- a/Regression/Checksum/checksum.py +++ b/Regression/Checksum/checksum.py @@ -16,21 +16,29 @@ class Checksum: - '''Class for checksum comparison of one test. - ''' + """Class for checksum comparison of one test. + """ def __init__(self, test_name, plotfile, do_fields=True, do_particles=True): - '''Constructor - + """ + Checksum constructor. Store test_name and plotfile name, and compute checksum from plotfile and store it in self.data. - @param self The object pointer. - @param test_name Name of test, as found between [] in .ini file. - @param plotfile Plotfile from which the checksum is computed. - @param do_fields Whether to compare fields in the checksum. - @param do_particles Whether to compare particles in the checksum. - ''' + Parameters + ---------- + test_name: string + Name of test, as found between [] in .ini file. + + plotfile: string + Plotfile from which the checksum is computed. + + do_fields: bool, default=True + Whether to compare fields in the checksum. + + do_particles: bool, default=True + Whether to compare particles in the checksum. + """ self.test_name = test_name self.plotfile = plotfile @@ -38,16 +46,20 @@ def __init__(self, test_name, plotfile, do_fields=True, do_particles=True): do_particles=do_particles) def read_plotfile(self, do_fields=True, do_particles=True): - '''Get checksum from plotfile. - + """ + Get checksum from plotfile. Read an AMReX plotfile with yt, compute 1 checksum per field and return all checksums in a dictionary. The checksum of quantity Q is max(abs(Q)). - @param self The object pointer. - @param do_fields Whether to read fields from the plotfile. - @param do_particles Whether to read particles from the plotfile. - ''' + Parameters + ---------- + do_fields: bool, default=True + Whether to read fields from the plotfile. + + do_particles: bool, default=True + Whether to read particles from the plotfile. + """ ds = yt.load(self.plotfile) # yt 4.0+ has rounding issues with our domain data: @@ -103,17 +115,21 @@ def read_plotfile(self, do_fields=True, do_particles=True): return data def evaluate(self, rtol=1.e-9, atol=1.e-40): - '''Compare plotfile checksum with benchmark. - + """ + Compare plotfile checksum with benchmark. Read checksum from input plotfile, read benchmark corresponding to test_name, and assert that they are equal. 
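A short usage sketch for the two helpers touched here. It assumes it is run from the Regression/Checksum directory (where benchmark.py and checksum.py live) and that a test named my_test with plotfile my_test_plt00010 exists; both names are placeholders.

    from benchmark import Benchmark
    from checksum import Checksum

    # compute the checksums of a finished run and compare them to the stored reference
    test = Checksum('my_test', 'my_test_plt00010', do_fields=True, do_particles=True)
    test.evaluate(rtol=1.e-9, atol=1.e-40)

    # or overwrite the reference json with the freshly computed values
    Benchmark('my_test', test.data).reset()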
Almost all the body of this functions is for user-readable print statements. - @param self The object pointer. - @param test_name Name of test, as found between [] in .ini file. - @param plotfile Plotfile from which the checksum is computed. - ''' + Parameters + ---------- + rtol: float, default=1.e-9 + Relative tolerance on the benchmark + + atol: float, default=1.e-40 + Absolute tolerance on the benchmark + """ ref_benchmark = Benchmark(self.test_name) diff --git a/Regression/Checksum/checksumAPI.py b/Regression/Checksum/checksumAPI.py index 3d25413bd41..c45b6e233f4 100755 --- a/Regression/Checksum/checksumAPI.py +++ b/Regression/Checksum/checksumAPI.py @@ -16,7 +16,7 @@ from benchmark import Benchmark from checksum import Checksum -''' +""" API for WarpX checksum tests. It can be used in two ways: - Directly use functions below to make a checksum test from a python script. @@ -32,39 +32,61 @@ * Reset a benchmark. From a bash terminal: $ ./checksumAPI.py --reset-benchmark --plotfile \ --test-name -''' +""" def evaluate_checksum(test_name, plotfile, rtol=1.e-9, atol=1.e-40, do_fields=True, do_particles=True): - '''Compare plotfile checksum with benchmark. - + """ + Compare plotfile checksum with benchmark. Read checksum from input plotfile, read benchmark corresponding to test_name, and assert their equality. - @param test_name Name of test, as found between [] in .ini file. - @param plotfile Plotfile from which the checksum is computed. - @param rtol Relative tolerance for the comparison. - @param atol Absolute tolerance for the comparison. - @param do_fields Whether to compare fields in the checksum. - @param do_particles Whether to compare particles in the checksum. - ''' + Parameters + ---------- + test_name: string + Name of test, as found between [] in .ini file. + + plotfile : string + Plotfile from which the checksum is computed. + + rtol: float, default=1.e-9 + Relative tolerance for the comparison. + + atol: float, default=1.e-40 + Absolute tolerance for the comparison. + + do_fields: bool, default=True + Whether to compare fields in the checksum. + + do_particles: bool, default=True + Whether to compare particles in the checksum. + """ test_checksum = Checksum(test_name, plotfile, do_fields=do_fields, do_particles=do_particles) test_checksum.evaluate(rtol=rtol, atol=atol) def reset_benchmark(test_name, plotfile, do_fields=True, do_particles=True): - '''Update the benchmark (overwrites reference json file). - + """ + Update the benchmark (overwrites reference json file). Overwrite value of benchmark corresponding to test_name with checksum read from input plotfile. - @param test_name Name of test, as found between [] in .ini file. - @param plotfile Plotfile from which the checksum is computed. - @param do_fields Whether to write field checksums in the benchmark. - @param do_particles Whether to write particles checksums in the benchmark. - ''' + Parameters + ---------- + test_name: string + Name of test, as found between [] in .ini file. + + plotfile: string + Plotfile from which the checksum is computed. + + do_fields: bool, default=True + Whether to write field checksums in the benchmark. + + do_particles: bool, default=True + Whether to write particles checksums in the benchmark. 
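From a CI analysis script, the module-level functions documented above are typically called as below. The sys.path entry and the test/plotfile names are placeholders, not taken from this patch.

    import sys

    sys.path.insert(1, '../../../Regression/Checksum/')  # adjust to the script's location
    import checksumAPI

    # compare a finished run against its stored benchmark
    checksumAPI.evaluate_checksum('my_test', 'my_test_plt00010',
                                  rtol=1.e-9, atol=1.e-40,
                                  do_fields=True, do_particles=True)

    # to regenerate the reference instead (e.g. after an intended physics change):
    # checksumAPI.reset_benchmark('my_test', 'my_test_plt00010')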
+ """ ref_checksum = Checksum(test_name, plotfile, do_fields=do_fields, do_particles=do_particles) ref_benchmark = Benchmark(test_name, ref_checksum.data) @@ -72,13 +94,17 @@ def reset_benchmark(test_name, plotfile, do_fields=True, do_particles=True): def reset_all_benchmarks(path_to_all_plotfiles): - '''Update all benchmarks (overwrites reference json files) + """ + Update all benchmarks (overwrites reference json files) found in path_to_all_plotfiles - @param path_to_all_plotfiles Path to all plotfiles for which the benchmarks - are to be reset. The plotfiles should be named _plt, which is - what regression_testing.regtests.py does, provided we're careful enough. - ''' + Parameters + ---------- + path_to_all_plotfiles: string + Path to all plotfiles for which the benchmarks + are to be reset. The plotfiles should be named _plt, which is + what regression_testing.regtests.py does, provided we're careful enough. + """ # Get list of plotfiles in path_to_all_plotfiles plotfile_list = glob.glob(path_to_all_plotfiles + '*_plt*[0-9]', diff --git a/requirements.txt b/requirements.txt index d3500be184f..9d6f5bc8840 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ periodictable~=1.5 # PICMI # note: don't forget to update the version in Docs/requirements.txt, too -picmistandard==0.0.19 +picmistandard==0.0.20 # for development against an unreleased PICMI version, use: #picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python From a1ade2b37db409b3c71147bf5bd27a40ded7ff11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Sun, 2 Oct 2022 23:25:44 +0200 Subject: [PATCH 0092/1346] Use parser for input parameters of type long (#2506) * Use parser for input parameters of type long * Revert "Use parser for input parameters of type long" This reverts commit 9573bb3f693f1247e77faa433fd96dc294e68361. 
* Use parser for inputs of type long * add safeCasttoLong function * Fix typo in comment --- .../Diagnostics/ReducedDiags/FieldProbe.cpp | 2 +- Source/Initialization/PlasmaInjector.cpp | 2 +- Source/Particles/LaserParticleContainer.cpp | 2 +- Source/Utils/WarpXUtil.H | 30 ++++++++++ Source/Utils/WarpXUtil.cpp | 55 ++++++++++++------- 5 files changed, 67 insertions(+), 24 deletions(-) diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index 993a8fa9e1c..bd5d5361106 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -151,7 +151,7 @@ FieldProbe::FieldProbe (std::string rd_name) } pp_rd_name.query("integrate", m_field_probe_integrate); pp_rd_name.query("raw_fields", raw_fields); - pp_rd_name.query("interp_order", interp_order); + queryWithParser(pp_rd_name, "interp_order", interp_order); pp_rd_name.query("do_moving_window_FP", do_moving_window_FP); if (WarpX::gamma_boost > 1.0_rt) diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index cad2bbbe2d3..07526340f2e 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -220,7 +220,7 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) queryWithParser(pp_species_name, "y_cut", y_cut); queryWithParser(pp_species_name, "z_cut", z_cut); getWithParser(pp_species_name, "q_tot", q_tot); - pp_species_name.get("npart", npart); + getWithParser(pp_species_name, "npart", npart); pp_species_name.query("do_symmetrize", do_symmetrize); gaussian_beam = true; parseMomentum(pp_species_name); diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index 94e67065d9f..b56129dc0b7 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -122,7 +122,7 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, ); pp_laser_name.query("do_continuous_injection", do_continuous_injection); - pp_laser_name.query("min_particles_per_mode", m_min_particles_per_mode); + queryWithParser(pp_laser_name, "min_particles_per_mode", m_min_particles_per_mode); if (m_e_max == amrex::Real(0.)){ ablastr::warn_manager::WMRecordWarning("Laser", diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 9360c14d459..275ce7e3143 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -197,6 +197,17 @@ void getCellCoordinates (int i, int j, int k, int safeCastToInt(amrex::Real x, const std::string& real_name); +/** +* \brief Do a safe cast of a real to a long +* This ensures that the float value is within the range of longs and if not, +* raises an exception. 
+* +* \param x Real value to cast +* \param real_name String, the name of the variable being casted to use in the error message +*/ +long +safeCastToLong(amrex::Real x, const std::string& real_name); + /** * \brief Initialize an amrex::Parser object from a string containing a math expression * @@ -265,6 +276,10 @@ int queryWithParser (const amrex::ParmParse& a_pp, char const * const str, T& va val = safeCastToInt(std::round(parser.compileHost<0>()()), str); } + else if (std::is_same::value) { + + val = safeCastToLong(std::round(parser.compileHost<0>()()), str); + } else { val = static_cast(parser.compileHost<0>()()); } @@ -289,6 +304,9 @@ int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, st if (std::is_same::value) { val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); } + else if (std::is_same::value) { + val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); + } else { val[i] = static_cast(parser.compileHost<0>()()); } @@ -330,6 +348,9 @@ int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, st if (std::is_same::value) { val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); } + else if (std::is_same::value) { + val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); + } else { val[i] = static_cast(parser.compileHost<0>()()); } @@ -361,6 +382,9 @@ void getWithParser (const amrex::ParmParse& a_pp, char const * const str, T& val if (std::is_same::value) { val = safeCastToInt(std::round(parser.compileHost<0>()()), str); } + else if (std::is_same::value) { + val = safeCastToLong(std::round(parser.compileHost<0>()()), str); + } else { val = static_cast(parser.compileHost<0>()()); } @@ -380,6 +404,9 @@ void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std if (std::is_same::value) { val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); } + else if (std::is_same::value) { + val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); + } else { val[i] = static_cast(parser.compileHost<0>()()); } @@ -416,6 +443,9 @@ void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std if (std::is_same::value) { val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); } + else if (std::is_same::value) { + val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); + } else { val[i] = static_cast(parser.compileHost<0>()()); } diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index afe8b7daa07..eecc04b7802 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -276,30 +276,43 @@ void Store_parserString(const amrex::ParmParse& pp, std::string query_string, f.clear(); } -int safeCastToInt(const amrex::Real x, const std::string& real_name) { - int result = 0; - bool error_detected = false; - std::string assert_msg; - // (2.0*(numeric_limits::max()/2+1)) converts numeric_limits::max()+1 to a real ensuring accuracy to all digits - // This accepts x = 2**31-1 but rejects 2**31. 
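As an illustration only (not part of the patch and not the WarpX implementation), the range checks described in these comments can be written out in a few lines of Python; the C++ template additionally handles both int and long and asserts instead of raising.

    import math

    def safe_cast_to_int(x, name, bits=32):
        # illustrative sketch of the overflow checks described above
        lo, hi = -2**(bits - 1), 2**(bits - 1)   # hi plays the role of max_range
        if math.isnan(x):
            raise OverflowError(f"NaN detected when casting {name} to integer type")
        if x >= hi:
            raise OverflowError(f"Overflow detected when casting {name} = {x}")
        if math.ceil(x) < lo:
            raise OverflowError(f"Negative overflow detected when casting {name} = {x}")
        return int(x)

    assert safe_cast_to_int(2**31 - 1, "npart") == 2**31 - 1   # accepted
    # safe_cast_to_int(float(2**31), "npart") raises, matching the comment above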
- using namespace amrex::literals; - constexpr amrex::Real max_range = (2.0_rt*static_cast(std::numeric_limits::max()/2+1)); - if (x < max_range) { - if (std::ceil(x) >= std::numeric_limits::min()) { - result = static_cast(x); +namespace WarpXUtilSafeCast { + template< typename int_type > + AMREX_FORCE_INLINE + int_type safeCastTo(const amrex::Real x, const std::string& real_name) { + int_type result = int_type(0); + bool error_detected = false; + std::string assert_msg; + // (2.0*(numeric_limits::max()/2+1)) converts numeric_limits::max()+1 to a real ensuring accuracy to all digits + // This accepts x = 2**31-1 but rejects 2**31. + using namespace amrex::literals; + constexpr amrex::Real max_range = (2.0_rt*static_cast(std::numeric_limits::max()/2+1)); + if (x < max_range) { + if (std::ceil(x) >= std::numeric_limits::min()) { + result = static_cast(x); + } else { + error_detected = true; + assert_msg = "Negative overflow detected when casting " + real_name + " = " + + std::to_string(x) + " to integer type"; + } + } else if (x > 0) { + error_detected = true; + assert_msg = "Overflow detected when casting " + real_name + " = " + std::to_string(x) + " to integer type"; } else { error_detected = true; - assert_msg = "Negative overflow detected when casting " + real_name + " = " + std::to_string(x) + " to int"; + assert_msg = "NaN detected when casting " + real_name + " to integer type"; } - } else if (x > 0) { - error_detected = true; - assert_msg = "Overflow detected when casting " + real_name + " = " + std::to_string(x) + " to int"; - } else { - error_detected = true; - assert_msg = "NaN detected when casting " + real_name + " to int"; - } - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!error_detected, assert_msg); - return result; + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!error_detected, assert_msg); + return result; + } +} + +int safeCastToInt(const amrex::Real x, const std::string& real_name) { + return WarpXUtilSafeCast::safeCastTo (x, real_name); +} + +long safeCastToLong(const amrex::Real x, const std::string& real_name) { + return WarpXUtilSafeCast::safeCastTo (x, real_name); } Parser makeParser (std::string const& parse_function, amrex::Vector const& varnames) From 7d3dab494a605caa8edf775bf71984d8e062e4c5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 2 Oct 2022 18:23:53 -0700 Subject: [PATCH 0093/1346] Release 22.10 (#3444) * AMReX: Weekly Update One commit after 22.10 release, to fix oneAPI DPC++ 2022.02: 22.10-1-g13aa4df0f5 * PICSAR: 22.10 * WarpX: 22.10 --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- LICENSE.txt | 2 +- Python/setup.py | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/PICSAR.cmake | 2 +- run_test.sh | 2 +- setup.py | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 3902034c672..3a14aa81b44 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach b84d7c069cef7470f195b250926ca0e84ec46fb2 && cd - + cd amrex && git checkout --detach 13aa4df0f5a4af40270963ad5b42ac7ce662e045 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/CMakeLists.txt b/CMakeLists.txt index 8afc3834e83..8f3ae506e49 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.20.0) -project(WarpX VERSION 22.09) +project(WarpX VERSION 22.10) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index a726152064a..ac933290595 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -73,9 +73,9 @@ # built documents. # # The short X.Y version. -version = u'22.09' +version = u'22.10' # The full version, including alpha/beta/rc tags. -release = u'22.09' +release = u'22.10' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/LICENSE.txt b/LICENSE.txt index 6b92a5631be..3ccd4f4875a 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -WarpX v22.09 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +WarpX v22.10 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/Python/setup.py b/Python/setup.py index 04b956e10c4..83abf888e5c 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -54,7 +54,7 @@ package_data = {} setup(name = 'pywarpx', - version = '22.09', + version = '22.10', packages = ['pywarpx'], package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index e16e49a7883..63b9f09dcbd 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = b84d7c069cef7470f195b250926ca0e84ec46fb2 +branch = 13aa4df0f5a4af40270963ad5b42ac7ce662e045 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 43c9c8f88c7..ba31ef94f5d 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = b84d7c069cef7470f195b250926ca0e84ec46fb2 +branch = 13aa4df0f5a4af40270963ad5b42ac7ce662e045 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 8598c48b4b0..55cce8ce15e 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -221,7 +221,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 22.09 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) + find_package(AMReX 22.10 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) message(STATUS "AMReX: Found version '${AMReX_VERSION}'") endif() endmacro() @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "b84d7c069cef7470f195b250926ca0e84ec46fb2" +set(WarpX_amrex_branch "13aa4df0f5a4af40270963ad5b42ac7ce662e045" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index bd1ab53aa56..3ecee014139 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -82,7 +82,7 @@ function(find_picsar) #message(STATUS "PICSAR: Using version '${PICSAR_VERSION}'") else() # not supported by PICSAR (yet) - #find_package(PICSAR 22.09 CONFIG REQUIRED QED) + #find_package(PICSAR 22.10 CONFIG REQUIRED QED) #message(STATUS "PICSAR: Found version '${PICSAR_VERSION}'") message(FATAL_ERROR "PICSAR: Cannot be used as externally installed " "library yet. 
" diff --git a/run_test.sh b/run_test.sh index ae5411469a5..b4e76b8a414 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach b84d7c069cef7470f195b250926ca0e84ec46fb2 && cd - +cd amrex && git checkout --detach 13aa4df0f5a4af40270963ad5b42ac7ce662e045 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git diff --git a/setup.py b/setup.py index 2ddbf286dd4..695e7328d6a 100644 --- a/setup.py +++ b/setup.py @@ -272,7 +272,7 @@ def build_extension(self, ext): setup( name='pywarpx', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version = '22.09', + version = '22.10', packages = ['pywarpx'], package_dir = {'pywarpx': 'Python/pywarpx'}, author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.', From a0eea6d50989edd00a5810dcce275354f7dbee75 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 3 Oct 2022 12:49:55 -0700 Subject: [PATCH 0094/1346] Doc: Dev FAQ Pinned Memory (#3437) * Doc: Dev FAQ Pinned Memory Since I spoke to two devs in 24hrs about the topic, let us document it. Co-authored-by: Ryan Sandberg Co-authored-by: David Grote --- Docs/source/developers/faq.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/Docs/source/developers/faq.rst b/Docs/source/developers/faq.rst index 273deed4c3a..d539c99da28 100644 --- a/Docs/source/developers/faq.rst +++ b/Docs/source/developers/faq.rst @@ -70,3 +70,20 @@ What does const int ``/*i_buffer*/`` mean in argument list? This is often seen in a derived class, overwriting an interface method. It means we do not name the parameter because we do not use it when we overwrite the interface. But we add the name as a comment ``/* ... */`` so that we know what we ignored when looking at the definition of the overwritten method. + + +What is Pinned Memory? +---------------------- + +We need pinned aka "page locked" host memory when we: + +- do asynchronous copies between the host and device +- want to write to CPU memory from a GPU kernel + +A typical use case is initialization of our (filtered/processed) output routines. +AMReX provides pinned memory via the ``amrex::PinnedArenaAllocator`` , which is the last argument passed to constructors of ``ParticleContainer`` and ``MultiFab``. + +Read more on this here: `How to Optimize Data Transfers in CUDA C/C++ `__ (note that pinned memory is a host memory feature and works with all GPU vendors we support) + +Bonus: underneath the hood, asynchronous MPI communications also pin and unpin memory. +One of the benefits of GPU-aware MPI implementations is, besides the possibility to use direct device-device transfers, that MPI and GPU API calls `are aware of each others' pinning ambitions `__ and do not create `data races to unpin the same memory `__. From be97e0d49a18978885174b69a2dabfe27dc64a53 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 3 Oct 2022 13:43:57 -0700 Subject: [PATCH 0095/1346] Docs: Fix PICMI Builds (#3447) Unbreak the RTD build. We need to do this all relative to the root directory. 
--- Docs/README.md | 2 +- Docs/requirements.txt | 2 +- Docs/source/developers/documentation.rst | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Docs/README.md b/Docs/README.md index d6a0f62d6c1..e6fac921b04 100644 --- a/Docs/README.md +++ b/Docs/README.md @@ -14,7 +14,7 @@ python3 -m pip install -r Docs/requirements.txt ### Compiling the documentation -`cd` into this directory and type +`cd` into the `Docs/` directory and type ``` make html ``` diff --git a/Docs/requirements.txt b/Docs/requirements.txt index 3da427f88b4..ffd547be4e2 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -5,7 +5,7 @@ # License: BSD-3-Clause-LBNL # WarpX PICMI bindings w/o C++ component (used for autoclass docs) --e ../Python +-e Python breathe # docutils 0.17 breaks HTML tags & RTD theme # https://github.com/sphinx-doc/sphinx/issues/9001 diff --git a/Docs/source/developers/documentation.rst b/Docs/source/developers/documentation.rst index f7929eb9dd1..0f3ca9dd9cd 100644 --- a/Docs/source/developers/documentation.rst +++ b/Docs/source/developers/documentation.rst @@ -52,12 +52,11 @@ Building the documentation -------------------------- To build the documentation on your local computer, you will need to install Doxygen as well as the Python module `breathe`. -First, change into ``Docs/`` and install the Python requirements: +First, make sure you are in the root directory of WarpX's source and install the Python requirements: .. code-block:: sh - cd Docs/ - python3 -m pip install -r requirements.txt + python3 -m pip install -r Docs/requirements.txt You will also need Doxygen (macOS: ``brew install doxygen``; Ubuntu: ``sudo apt install doxygen``). @@ -65,6 +64,8 @@ Then, to compile the documentation, use .. code-block:: sh + cd Docs/ + make html # This will first compile the Doxygen documentation (execute doxygen) # and then build html pages from rst files using sphinx and breathe. From 43c110c3c3e2e5a1279fe6612c1540713744911f Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 3 Oct 2022 16:12:15 -0700 Subject: [PATCH 0096/1346] BTD: Improve Z-Slice Message (#3440) Add values that are checked to the msg, e.g., I see: ``` z-slice in lab-frame (0.049998) is outside the buffer domain physical extent (0.049998 to 0.050000). ``` --- Source/Diagnostics/BTDiagnostics.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index beb367309b6..77cc9a30df4 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -39,6 +39,7 @@ #include #include #include +#include #include using namespace amrex::literals; @@ -579,7 +580,14 @@ BTDiagnostics::PrepareFieldDataForOutput () WARPX_ALWAYS_ASSERT_WITH_MESSAGE( m_current_z_lab[i_buffer] >= m_buffer_domain_lab[i_buffer].lo(m_moving_window_dir) and m_current_z_lab[i_buffer] <= m_buffer_domain_lab[i_buffer].hi(m_moving_window_dir), - "z-slice in lab-frame is outside the buffer domain physical extent. "); + "z-slice in lab-frame (" + + std::to_string(m_current_z_lab[i_buffer]) + + ") is outside the buffer domain physical extent (" + + std::to_string(m_buffer_domain_lab[i_buffer].lo(m_moving_window_dir)) + + " to " + + std::to_string(m_buffer_domain_lab[i_buffer].hi(m_moving_window_dir)) + + ")." 
+ ); } m_all_field_functors[lev][i]->PrepareFunctorData ( i_buffer, ZSliceInDomain, From b13b27ea90d8741185ff8f0bef5af59e33470fb2 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 5 Oct 2022 16:01:57 -0700 Subject: [PATCH 0097/1346] openPMD: Shape of Constant Particle Records (#3451) We forgot to set the shape (particle number) of constant particle records. This violates the openPMD standard and complicates post-processing. --- Source/Diagnostics/WarpXOpenPMD.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 1432a982011..6a2817e8841 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -1004,15 +1004,18 @@ WarpXOpenPMDPlot::SetConstParticleRecordsEDPIC ( amrex::ParticleReal const mass) { auto realType = openPMD::Dataset(openPMD::determineDatatype(), {np}); + auto const scalar = openPMD::RecordComponent::SCALAR; + // define record shape to be number of particles auto const positionComponents = detail::getParticlePositionComponentLabels(); for( auto const& comp : positionComponents ) { currSpecies["positionOffset"][comp].resetDataset( realType ); } + currSpecies["charge"][scalar].resetDataset( realType ); + currSpecies["mass"][scalar].resetDataset( realType ); // make constant using namespace amrex::literals; - auto const scalar = openPMD::RecordComponent::SCALAR; for( auto const& comp : positionComponents ) { currSpecies["positionOffset"][comp].makeConstant( 0._prt ); } From 89ecf5ff46758401adb0122eaa5317877211a582 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 5 Oct 2022 16:59:32 -0700 Subject: [PATCH 0098/1346] Doc: BLAS++/LAPACK++ on GitHub (#3449) * Doc: BLAS++/LAPACK++ on GitHub BLAS++ and LAPACK++ development has moved to GitHub :tada: Welcome! 
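To see the effect of the WarpXOpenPMD.cpp change above from the post-processing side, a small openpmd-api sketch; the output path, iteration number and species name are assumptions.

    import openpmd_api as io

    series = io.Series("diags/diag1/openpmd_%T.h5", io.Access.read_only)
    beam = series.iterations[100].particles["beam"]

    charge = beam["charge"][io.Record_Component.SCALAR]
    print(charge.shape)                   # now reports the number of particles
    print(charge.get_attribute("value"))  # constant value is stored as an attribute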
:) * Update bitbucket mention to GH * SLATE: Rename Missing Links Co-authored-by: Mark Gates --- .azure-pipelines.yml | 4 ++-- .github/workflows/dependencies/pyfull.sh | 4 ++-- Docs/source/developers/gnumake/rzgeometry.rst | 6 +++--- Docs/source/install/dependencies.rst | 6 +++--- Docs/source/install/hpc/cori.rst | 8 ++++---- Docs/source/install/hpc/crusher.rst | 4 ++-- Docs/source/install/hpc/lassen.rst | 4 ++-- Docs/source/install/hpc/perlmutter.rst | 4 ++-- Docs/source/install/hpc/summit.rst | 4 ++-- 9 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 02ea68b3f4a..3efaa1886af 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -99,11 +99,11 @@ jobs: python3 -m pip install --upgrade openpmd-api fi if [[ "${WARPX_CI_RZ_OR_NOMPI:-FALSE}" == "TRUE" ]]; then - cmake-easyinstall --prefix=/usr/local git+https://bitbucket.org/icl/blaspp.git \ + cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/blaspp.git \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ -DCMAKE_CXX_STANDARD=17 \ -Duse_openmp=OFF -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON - cmake-easyinstall --prefix=/usr/local git+https://bitbucket.org/icl/lapackpp.git \ + cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/lapackpp.git \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ -DCMAKE_CXX_STANDARD=17 \ -Duse_cmake_find_lapack=ON -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON diff --git a/.github/workflows/dependencies/pyfull.sh b/.github/workflows/dependencies/pyfull.sh index fce778b9ca5..8bd78f80230 100755 --- a/.github/workflows/dependencies/pyfull.sh +++ b/.github/workflows/dependencies/pyfull.sh @@ -40,7 +40,7 @@ export CEI_TMP="/tmp/cei" # BLAS++ & LAPACK++ cmake-easyinstall \ --prefix=/usr/local \ - git+https://bitbucket.org/icl/blaspp.git \ + git+https://github.com/icl-utk-edu/blaspp.git \ -Duse_openmp=OFF \ -Dbuild_tests=OFF \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ @@ -48,7 +48,7 @@ cmake-easyinstall \ cmake-easyinstall \ --prefix=/usr/local \ - git+https://bitbucket.org/icl/lapackpp.git \ + git+https://github.com/icl-utk-edu/lapackpp.git \ -Duse_cmake_find_lapack=ON \ -Dbuild_tests=OFF \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ diff --git a/Docs/source/developers/gnumake/rzgeometry.rst b/Docs/source/developers/gnumake/rzgeometry.rst index 8922d920892..ac90fb754f2 100644 --- a/Docs/source/developers/gnumake/rzgeometry.rst +++ b/Docs/source/developers/gnumake/rzgeometry.rst @@ -31,11 +31,11 @@ package and setting ``USE_PSATD=TRUE``. export FFTW_HOME=/usr/ - - Download and build the blaspp and lapackpp packages. These can be obtained from bitbucket. + - Download and build the blaspp and lapackpp packages. These can be obtained from GitHub. :: - git clone https://bitbucket.org/icl/blaspp.git - git clone https://bitbucket.org/icl/lapackpp.git + git clone https://github.com/icl-utk-edu/blaspp.git + git clone https://github.com/icl-utk-edu/lapackpp.git The two packages can be built in multiple ways. A recommended method is to follow the cmake instructions provided in the INSTALL.md that comes with the packages. They can also be installed using spack. 
diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index aa9086c3de1..553827cb240 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -20,7 +20,7 @@ Optional dependencies include: - `FFTW3 `_: for spectral solver (PSATD) support - also needs the ``pkg-config`` tool on Unix -- `BLAS++ `_ and `LAPACK++ `_: for spectral solver (PSATD) support in RZ geometry +- `BLAS++ `_ and `LAPACK++ `_: for spectral solver (PSATD) support in RZ geometry - `Boost 1.66.0+ `__: for QED lookup tables generation support - `openPMD-api 0.14.2+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support @@ -152,9 +152,9 @@ If you also want to compile with PSATD in RZ, you need to manually install BLAS+ sudo curl -L -o /usr/local/bin/cmake-easyinstall https://git.io/JvLxY sudo chmod a+x /usr/local/bin/cmake-easyinstall - cmake-easyinstall --prefix=/usr/local git+https://bitbucket.org/icl/blaspp.git \ + cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/blaspp.git \ -Duse_openmp=OFF -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON - cmake-easyinstall --prefix=/usr/local git+https://bitbucket.org/icl/lapackpp.git \ + cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/lapackpp.git \ -Duse_cmake_find_lapack=ON -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON diff --git a/Docs/source/install/hpc/cori.rst b/Docs/source/install/hpc/cori.rst index 1abdab0f377..15675c674d7 100644 --- a/Docs/source/install/hpc/cori.rst +++ b/Docs/source/install/hpc/cori.rst @@ -59,13 +59,13 @@ And install ADIOS2, BLAS++ and LAPACK++: cmake --build src/adios2-knl-build --target install --parallel 16 # BLAS++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/blaspp.git src/blaspp + git clone https://github.com/icl-utk-edu/blaspp.git src/blaspp rm -rf src/blaspp-knl-build cmake -S src/blaspp -B src/blaspp-knl-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/knl/blaspp-master-install cmake --build src/blaspp-knl-build --target install --parallel 16 # LAPACK++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + git clone https://github.com/icl-utk-edu/lapackpp.git src/lapackpp rm -rf src/lapackpp-knl-build CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-knl-build -Duse_cmake_find_lapack=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DLAPACK_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/knl/lapackpp-master-install cmake --build src/lapackpp-knl-build --target install --parallel 16 @@ -122,13 +122,13 @@ And install ADIOS2, BLAS++ and LAPACK++: cmake --build src/adios2-haswell-build --target install --parallel 16 # BLAS++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/blaspp.git src/blaspp + git clone https://github.com/icl-utk-edu/blaspp.git src/blaspp rm -rf src/blaspp-haswell-build cmake -S src/blaspp -B src/blaspp-haswell-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/blaspp-master-haswell-install cmake --build src/blaspp-haswell-build --target install --parallel 16 # LAPACK++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + git clone https://github.com/icl-utk-edu/blaspp.git src/lapackpp rm -rf 
src/lapackpp-haswell-build CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-haswell-build -Duse_cmake_find_lapack=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DLAPACK_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/haswell/lapackpp-master-install cmake --build src/lapackpp-haswell-build --target install --parallel 16 diff --git a/Docs/source/install/hpc/crusher.rst b/Docs/source/install/hpc/crusher.rst index a33ae0acf11..733cf2d1135 100644 --- a/Docs/source/install/hpc/crusher.rst +++ b/Docs/source/install/hpc/crusher.rst @@ -50,13 +50,13 @@ And since Crusher does not yet provide a module for them, install BLAS++ and LAP .. code-block:: bash # BLAS++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/blaspp.git src/blaspp + git clone https://github.com/icl-utk-edu/blaspp.git src/blaspp rm -rf src/blaspp-crusher-build cmake -S src/blaspp -B src/blaspp-crusher-build -Duse_openmp=OFF -Dgpu_backend=hip -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/crusher/blaspp-master cmake --build src/blaspp-crusher-build --target install --parallel 10 # LAPACK++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + git clone https://github.com/icl-utk-edu/lapackpp.git src/lapackpp rm -rf src/lapackpp-crusher-build cmake -S src/lapackpp -B src/lapackpp-crusher-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/crusher/lapackpp-master cmake --build src/lapackpp-crusher-build --target install --parallel 10 diff --git a/Docs/source/install/hpc/lassen.rst b/Docs/source/install/hpc/lassen.rst index a589016af11..bf4ca5e3201 100644 --- a/Docs/source/install/hpc/lassen.rst +++ b/Docs/source/install/hpc/lassen.rst @@ -58,13 +58,13 @@ And since Lassen does not yet provide a module for them, install ADIOS2, BLAS++ cmake --build src/adios2-lassen-build --target install -j 16 # BLAS++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/blaspp.git src/blaspp + git clone https://github.com/icl-utk-edu/blaspp.git src/blaspp rm -rf src/blaspp-lassen-build cmake -S src/blaspp -B src/blaspp-lassen-build -Duse_openmp=ON -Dgpu_backend=CUDA -Duse_cmake_find_blas=ON -DBLA_VENDOR=IBMESSL -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/lassen/blaspp-master cmake --build src/blaspp-lassen-build --target install --parallel 16 # LAPACK++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + git clone https://github.com/icl-utk-edu/lapackpp.git src/lapackpp rm -rf src/lapackpp-lassen-build CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-lassen-build -Duse_cmake_find_lapack=ON -DBLA_VENDOR=IBMESSL -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/lassen/lapackpp-master -DLAPACK_LIBRARIES=/usr/lib64/liblapack.so cmake --build src/lapackpp-lassen-build --target install --parallel 16 diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index 43c047c62ad..6b0c67b6a44 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -64,13 +64,13 @@ And since Perlmutter does not yet provide a module for them, install ADIOS2, BLA cmake --build src/adios2-pm-build --target install -j 16 # BLAS++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/blaspp.git src/blaspp + git clone https://github.com/icl-utk-edu/blaspp.git src/blaspp rm -rf 
src/blaspp-pm-build CXX=$(which CC) cmake -S src/blaspp -B src/blaspp-pm-build -Duse_openmp=OFF -Dgpu_backend=cuda -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/perlmutter/blaspp-master cmake --build src/blaspp-pm-build --target install --parallel 16 # LAPACK++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + git clone https://github.com/icl-utk-edu/lapackpp.git src/lapackpp rm -rf src/lapackpp-pm-build CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-pm-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/perlmutter/lapackpp-master cmake --build src/lapackpp-pm-build --target install --parallel 16 diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst index 0b2cff56b6e..a4dbc16a45f 100644 --- a/Docs/source/install/hpc/summit.rst +++ b/Docs/source/install/hpc/summit.rst @@ -50,13 +50,13 @@ For PSATD+RZ simulations, you will need to build BLAS++ and LAPACK++: .. code-block:: bash # BLAS++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/blaspp.git src/blaspp + git clone https://github.com/icl-utk-edu/blaspp.git src/blaspp rm -rf src/blaspp-summit-build cmake -S src/blaspp -B src/blaspp-summit-build -Duse_openmp=OFF -Dgpu_backend=cuda -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/summit/blaspp-master cmake --build src/blaspp-summit-build --target install --parallel 10 # LAPACK++ (for PSATD+RZ) - git clone https://bitbucket.org/icl/lapackpp.git src/lapackpp + git clone https://github.com/icl-utk-edu/lapackpp.git src/lapackpp rm -rf src/lapackpp-summit-build cmake -S src/lapackpp -B src/lapackpp-summit-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/summit/lapackpp-master cmake --build src/lapackpp-summit-build --target install --parallel 10 From 69236a8149488ad68fc7ff9eb84419838bcd7782 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 6 Oct 2022 13:42:31 -0700 Subject: [PATCH 0099/1346] Spack Desktop: Simpler CUDA Visualization (#3337) * Spack Desktop: Simpler Visualization Reduce the dependencies for in situ vis, avoiding to build DevilRay in Ascent, which takes very long right now. We mostly use Ascent with VTK-m for now, so that works. 
* CUDA Ascent & VTK-M: static * ADIOS2: w/o CUDA See https://github.com/ornladios/ADIOS2/issues/3332 --- Tools/machines/desktop/spack-ubuntu-cuda.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Tools/machines/desktop/spack-ubuntu-cuda.yaml b/Tools/machines/desktop/spack-ubuntu-cuda.yaml index 51a0ea88fea..08d0c95ee4b 100644 --- a/Tools/machines/desktop/spack-ubuntu-cuda.yaml +++ b/Tools/machines/desktop/spack-ubuntu-cuda.yaml @@ -14,12 +14,14 @@ # spack: specs: - - adios2 + # https://github.com/ornladios/ADIOS2/issues/3332 + - adios2 ~cuda + - ascent +adios2 +python ~fortran ~shared - blaspp - boost - ccache - cmake - - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei + - conduit ~fortran - cuda - fftw - hdf5 @@ -35,6 +37,10 @@ spack: - py-pip - py-setuptools - py-wheel + - sensei +ascent ~catalyst +python + - vtk-m ~shared +# This always enables DevilRay, which builds too long on CUDA and we mainly use VTK-m +# - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei # skipped to save time: 3D post-processing # - paraview +adios2 +python3 +qt # skipped to save time, because they are faster installed via pip afterwards From acb7f1d78b9ef2644dd8cd08a7eca04e17df06fa Mon Sep 17 00:00:00 2001 From: David Grote Date: Sat, 8 Oct 2022 08:19:06 -0700 Subject: [PATCH 0100/1346] Updated 1D CI tests with particles because of update of yt (#3458) * Updated 1D CI tests with particles * Updated required version of yt to 4.1.0 * Fixed CI analysis of collisionXYZ Fixed the fetching of the particle data --- Examples/Tests/collision/analysis_collision_3d.py | 7 ++++--- .../benchmarks_json/Langmuir_multi_1d.json | 14 +++++++------- .../benchmarks_json/LaserAcceleration_1d.json | 14 +++++++------- .../Python_LaserAcceleration_1d.json | 10 +++++----- .../Python_PlasmaAcceleration1d.json | 6 +++--- setup.py | 2 +- 6 files changed, 27 insertions(+), 26 deletions(-) diff --git a/Examples/Tests/collision/analysis_collision_3d.py b/Examples/Tests/collision/analysis_collision_3d.py index 3e4bda2f819..ee0c81d8f75 100755 --- a/Examples/Tests/collision/analysis_collision_3d.py +++ b/Examples/Tests/collision/analysis_collision_3d.py @@ -70,12 +70,13 @@ # load file ds = yt.load( fn ) ad = ds.all_data() - px = ad['particle_momentum_x'].to_ndarray() + pxe = ad['electron', 'particle_momentum_x'].to_ndarray() + pxi = ad['ion', 'particle_momentum_x'].to_ndarray() # get time index j j = int(fn[-5:]) # compute error - vxe = numpy.mean(px[ 0:ne])/me/c - vxi = numpy.mean(px[ne:np])/mi/c + vxe = numpy.mean(pxe)/me/c + vxi = numpy.mean(pxi)/mi/c vxd = vxe - vxi fit = a*math.exp(b*j) error = error + abs(fit-vxd) diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_1d.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_1d.json index 0114f75c7ab..7586cca0a3b 100644 --- a/Regression/Checksum/benchmarks_json/Langmuir_multi_1d.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_1d.json @@ -2,9 +2,9 @@ "electrons": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, - "particle_momentum_z": 0.0, - "particle_position_x": 0.0, - "particle_weight": 0.0 + "particle_momentum_z": 1.183644630083563e-22, + "particle_position_x": 2.560000000000000e-03, + "particle_weight": 8.000000000000000e+19 }, "lev=0": { "Bx": 0.0, @@ -20,8 +20,8 @@ "positrons": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, - "particle_momentum_z": 0.0, - "particle_position_x": 0.0, - "particle_weight": 0.0 + "particle_momentum_z": 1.183644630083563e-22, + "particle_position_x": 2.560000000000001e-03, + 
"particle_weight": 8.000000000000000e+19 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json b/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json index 243eaba414f..83c305438fb 100644 --- a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json +++ b/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json @@ -1,12 +1,12 @@ { "electrons": { "particle_momentum_x": 0.0, - "particle_momentum_y": 0.0, - "particle_momentum_z": 0.0, - "particle_orig_z": 0.0, - "particle_position_x": 0.0, - "particle_regionofinterest": 0.0, - "particle_weight": 0.0 + "particle_momentum_y": 1.2426858089556802e-20, + "particle_momentum_z": 1.4187765007430268e-21, + "particle_orig_z": 0.022432812500000038, + "particle_position_x": 0.02243266637270741, + "particle_regionofinterest": 40.0, + "particle_weight": 5.20625e+18 }, "lev=0": { "Bx": 178016.7504669478, @@ -20,4 +20,4 @@ "jz": 1108530282155.6707, "rho": 3127749.1976868743 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json b/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json index c0ac0141c88..56eb715590d 100644 --- a/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json +++ b/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json @@ -1,10 +1,10 @@ { "electrons": { "particle_momentum_x": 0.0, - "particle_momentum_y": 0.0, - "particle_momentum_z": 0.0, - "particle_position_x": 0.0, - "particle_weight": 0.0 + "particle_momentum_y": 1.2426858089556802e-20, + "particle_momentum_z": 1.4187765007430268e-21, + "particle_position_x": 0.02243266637270741, + "particle_weight": 5.20625e+18 }, "lev=0": { "Bx": 178016.7504669478, @@ -18,4 +18,4 @@ "jz": 1108530282155.6707, "rho": 3127749.1976868743 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration1d.json b/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration1d.json index f0cd3571642..1dc134fd233 100644 --- a/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration1d.json +++ b/Regression/Checksum/benchmarks_json/Python_PlasmaAcceleration1d.json @@ -12,7 +12,7 @@ "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, - "particle_position_x": 0.0, - "particle_weight": 0.0 + "particle_position_x": 3.9920000000000724, + "particle_weight": 4.0000000000000456e+18 } -} \ No newline at end of file +} diff --git a/setup.py b/setup.py index 695e7328d6a..1dec15a38db 100644 --- a/setup.py +++ b/setup.py @@ -307,7 +307,7 @@ def build_extension(self, ext): # ] #}, extras_require={ - 'all': ['openPMD-api~=0.14.2', 'openPMD-viewer~=1.1', 'yt>=4.0.1', 'matplotlib'], + 'all': ['openPMD-api~=0.14.2', 'openPMD-viewer~=1.1', 'yt>=4.1.0', 'matplotlib'], }, # cmdclass={'test': PyTest}, # platforms='any', From 56e04c1b911f9399662c4ff9ecf6630d686cc220 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Sat, 8 Oct 2022 17:28:44 -0700 Subject: [PATCH 0101/1346] Fix synchronization of nodal points in subcycling (#3455) * Fix synchronization of nodal points in subcycling * Update checksum --- .../benchmarks_json/nci_correctorMR.json | 50 +++++++++---------- Source/Evolve/WarpXEvolve.cpp | 16 +++--- 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/nci_correctorMR.json b/Regression/Checksum/benchmarks_json/nci_correctorMR.json index 719d193bc9e..114271a5559 100644 --- 
a/Regression/Checksum/benchmarks_json/nci_correctorMR.json +++ b/Regression/Checksum/benchmarks_json/nci_correctorMR.json @@ -1,40 +1,40 @@ { "electrons": { - "particle_momentum_x": 5.153966216377413e-22, - "particle_momentum_y": 3.2961227482981436e-24, - "particle_momentum_z": 5.592933539655906e-16, - "particle_position_x": 0.010239999985157226, + "particle_momentum_x": 5.153966216052706e-22, + "particle_momentum_y": 3.296122748303013e-24, + "particle_momentum_z": 5.592933539664134e-16, + "particle_position_x": 0.010239999985156744, "particle_position_y": 0.020479999999998985, "particle_weight": 8e+19 }, "ions": { - "particle_momentum_x": 5.153840768875646e-22, - "particle_momentum_y": 4.310806557560915e-24, - "particle_momentum_z": 1.0269479686192394e-12, + "particle_momentum_x": 5.153840768943853e-22, + "particle_momentum_y": 4.310806557567533e-24, + "particle_momentum_z": 1.0269479686192384e-12, "particle_position_x": 0.010239999999972996, "particle_position_y": 0.020479999999999998, "particle_weight": 8e+19 }, "lev=0": { - "Bx": 81.24109299539893, - "By": 65262.291375341345, - "Bz": 23.39862161563775, - "Ex": 11488110939706.75, - "Ey": 21609945108.24054, - "Ez": 10278696147794.3, - "jx": 229570742416268.2, - "jy": 13422303579038.584, - "jz": 3.827274250501233e+16 + "Bx": 81.24109299523752, + "By": 65262.29130160169, + "Bz": 23.398621614698882, + "Ex": 11488110796255.766, + "Ey": 21609945108.23542, + "Ez": 10278696121340.258, + "jx": 229570713299222.88, + "jy": 13422303579214.39, + "jz": 3.827274444464578e+16 }, "lev=1": { - "Bx": 62.270586553203685, - "By": 350716.5648397624, - "Bz": 25.70488624548334, - "Ex": 78385496879602.66, - "Ey": 19209810918.66286, - "Ez": 40391720951752.62, - "jx": 1787711533020648.2, - "jy": 27938659499596.023, - "jz": 2.5158045046993677e+17 + "Bx": 62.27058655254749, + "By": 350716.56832399766, + "Bz": 25.70488624404379, + "Ex": 78385498011625.31, + "Ey": 19209810918.48542, + "Ez": 40391720894465.53, + "jx": 1787711547069326.5, + "jy": 27938659499985.89, + "jz": 2.5158044745335216e+17 } } \ No newline at end of file diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 202e91643bc..717adbedd85 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -733,8 +733,10 @@ WarpX::OneStep_sub1 (Real curtime) EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); - FillBoundaryB(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver); - FillBoundaryF(fine_lev, PatchType::fine, guard_cells.ng_alloc_F); + FillBoundaryB(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver, + WarpX::sync_nodal_points); + FillBoundaryF(fine_lev, PatchType::fine, guard_cells.ng_alloc_F, + WarpX::sync_nodal_points); EvolveE(fine_lev, PatchType::fine, dt[fine_lev]); FillBoundaryE(fine_lev, PatchType::fine, guard_cells.ng_FieldGather); @@ -768,8 +770,10 @@ WarpX::OneStep_sub1 (Real curtime) EvolveB(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::FirstHalf); EvolveF(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::FirstHalf); - FillBoundaryB(coarse_lev, PatchType::fine, guard_cells.ng_FieldGather); - FillBoundaryF(coarse_lev, PatchType::fine, guard_cells.ng_FieldSolverF); + FillBoundaryB(coarse_lev, PatchType::fine, guard_cells.ng_FieldGather, + WarpX::sync_nodal_points); + FillBoundaryF(coarse_lev, PatchType::fine, guard_cells.ng_FieldSolverF, + WarpX::sync_nodal_points); EvolveE(coarse_lev, PatchType::fine, 
0.5_rt*dt[coarse_lev]); FillBoundaryE(coarse_lev, PatchType::fine, guard_cells.ng_FieldGather); @@ -794,7 +798,8 @@ WarpX::OneStep_sub1 (Real curtime) FillBoundaryF(fine_lev, PatchType::fine, guard_cells.ng_FieldSolverF); EvolveE(fine_lev, PatchType::fine, dt[fine_lev]); - FillBoundaryE(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver); + FillBoundaryE(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver, + WarpX::sync_nodal_points); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf); @@ -830,7 +835,6 @@ WarpX::OneStep_sub1 (Real curtime) FillBoundaryB(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); - FillBoundaryF(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolverF, WarpX::sync_nodal_points); From e9cc65ffeb0684a97618b67c2164d95ea497226c Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Mon, 10 Oct 2022 20:36:14 +0200 Subject: [PATCH 0102/1346] Partial refactoring of the utils directory (#3404) * initial work to clean WarpX Utils * remove AMRCore from Ionization tables * progress * refactoring of a part of the utils directory * fix bug * fixed bug * fixed bug * remove debug line accidentally slipped into the code * remove debug line accidentally slipped into the code * remove debug line accidentally slipped into the code * cleaning * fixed bug --- Source/BoundaryConditions/PML.cpp | 12 +- Source/Diagnostics/BTDiagnostics.H | 4 +- Source/Diagnostics/BTDiagnostics.cpp | 24 +- .../Diagnostics/BackTransformedDiagnostic.cpp | 3 +- .../Diagnostics/BoundaryScrapingDiagnostics.H | 4 +- .../BoundaryScrapingDiagnostics.cpp | 2 +- .../ParticleReductionFunctor.cpp | 10 +- Source/Diagnostics/Diagnostics.cpp | 39 +- .../FlushFormats/FlushFormatAscent.cpp | 1 - .../FlushFormats/FlushFormatPlotfile.cpp | 3 +- Source/Diagnostics/FullDiagnostics.H | 4 +- Source/Diagnostics/FullDiagnostics.cpp | 5 +- .../Diagnostics/ParticleDiag/ParticleDiag.cpp | 16 +- .../Diagnostics/ReducedDiags/BeamRelevant.cpp | 1 - .../Diagnostics/ReducedDiags/FieldEnergy.cpp | 1 - .../Diagnostics/ReducedDiags/FieldMaximum.cpp | 1 - .../ReducedDiags/FieldMomentum.cpp | 1 - .../Diagnostics/ReducedDiags/FieldProbe.cpp | 50 ++- .../ReducedDiags/FieldReduction.cpp | 7 +- .../ReducedDiags/LoadBalanceCosts.cpp | 1 - .../ReducedDiags/LoadBalanceEfficiency.cpp | 1 - .../ReducedDiags/MultiReducedDiags.cpp | 1 - .../ReducedDiags/ParticleEnergy.cpp | 1 - .../ReducedDiags/ParticleExtrema.cpp | 1 - .../ReducedDiags/ParticleHistogram.cpp | 24 +- .../ReducedDiags/ParticleMomentum.cpp | 1 - .../ReducedDiags/ParticleNumber.cpp | 1 - .../Diagnostics/ReducedDiags/ReducedDiags.H | 4 +- .../Diagnostics/ReducedDiags/ReducedDiags.cpp | 3 +- .../Diagnostics/ReducedDiags/RhoMaximum.cpp | 1 - Source/Diagnostics/WarpXOpenPMD.cpp | 4 +- Source/EmbeddedBoundary/WarpXInitEB.cpp | 4 +- Source/Evolve/WarpXEvolve.cpp | 3 +- Source/FieldSolver/ElectrostaticSolver.cpp | 18 +- .../FieldAccessorFunctors.H | 1 - .../MacroscopicEvolveE.cpp | 1 - .../MacroscopicProperties.cpp | 23 +- .../SpectralSolver/SpectralFieldDataRZ.cpp | 2 +- Source/Initialization/CustomDensityProb.H | 5 +- Source/Initialization/InjectorDensity.cpp | 5 +- Source/Initialization/PlasmaInjector.cpp | 168 +++---- Source/Initialization/TemperatureProperties.H | 2 - .../Initialization/TemperatureProperties.cpp | 9 +- Source/Initialization/VelocityProperties.H | 2 - Source/Initialization/VelocityProperties.cpp | 8 +- Source/Initialization/WarpXInitData.cpp | 42 
+- .../LaserProfileFieldFunction.cpp | 4 +- .../LaserProfileFromTXYEFile.cpp | 20 +- .../LaserProfileGaussian.cpp | 20 +- .../LaserProfilesImpl/LaserProfileHarris.cpp | 11 +- Source/Parallelization/GuardCellManager.cpp | 8 +- Source/Parallelization/WarpXComm.cpp | 1 - .../BackgroundMCC/BackgroundMCCCollision.cpp | 33 +- .../BackgroundStopping/BackgroundStopping.cpp | 28 +- .../Coulomb/PairWiseCoulombCollisionFunc.H | 5 +- .../NuclearFusion/NuclearFusionFunc.H | 14 +- .../BinaryCollision/ParticleCreationFunc.H | 1 - Source/Particles/Collision/CollisionBase.cpp | 5 +- Source/Particles/LaserParticleContainer.cpp | 25 +- Source/Particles/MultiParticleContainer.H | 1 - Source/Particles/MultiParticleContainer.cpp | 152 ++++--- Source/Particles/ParticleBoundaries.H | 1 - Source/Particles/ParticleBoundaries.cpp | 20 +- .../Particles/PhysicalParticleContainer.cpp | 45 +- .../Particles/Resampling/LevelingThinning.cpp | 8 +- .../Particles/Resampling/ResamplingTrigger.H | 4 +- .../Resampling/ResamplingTrigger.cpp | 7 +- .../RigidInjectedParticleContainer.cpp | 5 +- Source/Python/WarpXWrappers.cpp | 2 +- Source/Utils/Algorithms/IsIn.H | 58 +++ Source/Utils/Algorithms/LinearInterpolation.H | 59 +++ Source/Utils/Algorithms/Make.package | 1 + Source/Utils/Algorithms/UpperBound.H | 49 +++ Source/Utils/CMakeLists.txt | 3 +- Source/Utils/IntervalsParser.H | 207 --------- Source/Utils/IonizationEnergiesTable.H | 228 ---------- Source/Utils/Make.package | 4 + Source/Utils/Parser/CMakeLists.txt | 5 + Source/Utils/Parser/IntervalsParser.H | 220 ++++++++++ Source/Utils/{ => Parser}/IntervalsParser.cpp | 84 +++- Source/Utils/Parser/Make.package | 4 + Source/Utils/Parser/ParserUtils.H | 298 +++++++++++++ Source/Utils/Parser/ParserUtils.cpp | 165 +++++++ .../Utils/Physics/IonizationEnergiesTable.H | 226 ++++++++++ Source/Utils/Physics/Make.package | 1 + Source/Utils/{ => Physics}/atomic_data.txt | 0 .../{ => Physics}/write_atomic_data_cpp.py | 37 +- Source/Utils/Strings/CMakeLists.txt | 4 + Source/Utils/Strings/Make.package | 3 + Source/Utils/Strings/StringUtils.H | 68 +++ Source/Utils/Strings/StringUtils.cpp | 51 +++ Source/Utils/WarpXUtil.H | 413 ------------------ Source/Utils/WarpXUtil.cpp | 221 +--------- Source/WarpX.H | 13 +- Source/WarpX.cpp | 230 ++++++---- 95 files changed, 1997 insertions(+), 1599 deletions(-) create mode 100644 Source/Utils/Algorithms/IsIn.H create mode 100644 Source/Utils/Algorithms/LinearInterpolation.H create mode 100644 Source/Utils/Algorithms/Make.package create mode 100644 Source/Utils/Algorithms/UpperBound.H delete mode 100644 Source/Utils/IntervalsParser.H delete mode 100644 Source/Utils/IonizationEnergiesTable.H create mode 100644 Source/Utils/Parser/CMakeLists.txt create mode 100644 Source/Utils/Parser/IntervalsParser.H rename Source/Utils/{ => Parser}/IntervalsParser.cpp (74%) create mode 100644 Source/Utils/Parser/Make.package create mode 100644 Source/Utils/Parser/ParserUtils.H create mode 100644 Source/Utils/Parser/ParserUtils.cpp create mode 100644 Source/Utils/Physics/IonizationEnergiesTable.H create mode 100644 Source/Utils/Physics/Make.package rename Source/Utils/{ => Physics}/atomic_data.txt (100%) rename Source/Utils/{ => Physics}/write_atomic_data_cpp.py (69%) create mode 100644 Source/Utils/Strings/CMakeLists.txt create mode 100644 Source/Utils/Strings/Make.package create mode 100644 Source/Utils/Strings/StringUtils.H create mode 100644 Source/Utils/Strings/StringUtils.cpp diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 
712e287f0a7..76dbd3fde55 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -1,5 +1,5 @@ -/* Copyright 2019 Andrew Myers, Aurore Blelly, Axel Huebl - * Maxence Thevenet, Remi Lehe, Weiqun Zhang +/* Copyright 2019-2022 Andrew Myers, Aurore Blelly, Axel Huebl, + * Luca Fedeli, Maxence Thevenet, Remi Lehe, Weiqun Zhang * * * This file is part of WarpX. @@ -17,7 +17,7 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" +#include "Utils/Parser/ParserUtils.H" #include "WarpX.H" #include @@ -618,9 +618,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri int ngFFt_z = do_nodal ? noz_fft : noz_fft/2; ParmParse pp_psatd("psatd"); - queryWithParser(pp_psatd, "nx_guard", ngFFt_x); - queryWithParser(pp_psatd, "ny_guard", ngFFt_y); - queryWithParser(pp_psatd, "nz_guard", ngFFt_z); + utils::parser::queryWithParser(pp_psatd, "nx_guard", ngFFt_x); + utils::parser::queryWithParser(pp_psatd, "ny_guard", ngFFt_y); + utils::parser::queryWithParser(pp_psatd, "nz_guard", ngFFt_z); #if defined(WARPX_DIM_3D) IntVect ngFFT = IntVect(ngFFt_x, ngFFt_y, ngFFt_z); diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index fcfc8f13b17..ed8cd237bc6 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -10,7 +10,7 @@ #include "Diagnostics.H" #include "Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor.H" #include "Utils/WarpXConst.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/IntervalsParser.H" #include #include @@ -39,7 +39,7 @@ private: /** Read relevant parameters for BTD */ void ReadParameters (); /** Determines timesteps at which BTD diagnostics are written to file */ - BTDIntervalsParser m_intervals; + utils::parser::BTDIntervalsParser m_intervals; /** \brief Flush m_mf_output and particles to file. * Currently, a temporary customized output format for the buffer * data is implemented and called in this function. 
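[Editorial note, not part of the patch] The diffstat above creates Source/Utils/Algorithms/IsIn.H, and the hunks that follow (BTDiagnostics.cpp, Diagnostics.cpp, FullDiagnostics.cpp) switch call sites from WarpXUtilStr::is_in to utils::algorithms::is_in. The contents of the new header are not shown in this excerpt; the sketch below is a minimal, hypothetical illustration of such a helper, assuming it is a thin wrapper around std::find, and is not the actual WarpX implementation.

    // Illustrative sketch only -- not the actual Source/Utils/Algorithms/IsIn.H,
    // whose contents are not shown in this patch excerpt.
    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    namespace utils::algorithms
    {
        // Returns true if `elem` occurs in the container `vect` (assumed behavior).
        template <typename TC, typename TE>
        bool is_in (const TC& vect, const TE& elem)
        {
            return std::find(vect.begin(), vect.end(), elem) != vect.end();
        }
    }

    int main ()
    {
        // Usage mirroring the call sites visible in the hunks below, e.g.
        // utils::algorithms::is_in(BTD_varnames_supported, var)
        const std::vector<std::string> supported{"Ex", "Ey", "Ez", "jx", "jy", "jz", "rho"};
        std::cout << std::boolalpha
                  << utils::algorithms::is_in(supported, std::string{"rho"}) << "\n";  // true
    }

The interest of such a helper is purely readability: the namespaced free function replaces repeated std::find boilerplate at the call sites touched by this patch.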
diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 77cc9a30df4..dd82bcf0e85 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -14,10 +14,11 @@ #include "Diagnostics/Diagnostics.H" #include "Diagnostics/FlushFormats/FlushFormat.H" #include "ComputeDiagFunctors/BackTransformParticleFunctor.H" +#include "Utils/Algorithms/IsIn.H" #include "Utils/CoarsenIO.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -164,7 +165,8 @@ BTDiagnostics::ReadParameters () std::vector intervals_string_vec = {"0"}; - bool const num_snapshots_specified = queryWithParser(pp_diag_name, "num_snapshots_lab", m_num_snapshots_lab); + bool const num_snapshots_specified = utils::parser::queryWithParser( + pp_diag_name, "num_snapshots_lab", m_num_snapshots_lab); bool const intervals_specified = pp_diag_name.queryarr("intervals", intervals_string_vec); if (num_snapshots_specified) { @@ -172,20 +174,20 @@ BTDiagnostics::ReadParameters () "For back-transformed diagnostics, user should specify either num_snapshots_lab or intervals, not both"); intervals_string_vec = {":" + std::to_string(m_num_snapshots_lab-1)}; } - m_intervals = BTDIntervalsParser(intervals_string_vec); + m_intervals = utils::parser::BTDIntervalsParser(intervals_string_vec); m_num_buffers = m_intervals.NumSnapshots(); // Read either dz_snapshots_lab or dt_snapshots_lab - bool snapshot_interval_is_specified = false; - snapshot_interval_is_specified = queryWithParser(pp_diag_name, "dt_snapshots_lab", m_dt_snapshots_lab); - if ( queryWithParser(pp_diag_name, "dz_snapshots_lab", m_dz_snapshots_lab) ) { + bool snapshot_interval_is_specified = utils::parser::queryWithParser( + pp_diag_name, "dt_snapshots_lab", m_dt_snapshots_lab); + if ( utils::parser::queryWithParser(pp_diag_name, "dz_snapshots_lab", m_dz_snapshots_lab) ) { m_dt_snapshots_lab = m_dz_snapshots_lab/PhysConst::c; snapshot_interval_is_specified = true; } WARPX_ALWAYS_ASSERT_WITH_MESSAGE(snapshot_interval_is_specified, "For back-transformed diagnostics, user should specify either dz_snapshots_lab or dt_snapshots_lab"); - if (queryWithParser(pp_diag_name, "buffer_size", m_buffer_size)) { + if (utils::parser::queryWithParser(pp_diag_name, "buffer_size", m_buffer_size)) { if(m_max_box_size < m_buffer_size) m_max_box_size = m_buffer_size; } @@ -194,8 +196,12 @@ BTDiagnostics::ReadParameters () "jx", "jy", "jz", "rho"}; for (const auto& var : m_varnames) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (WarpXUtilStr::is_in(BTD_varnames_supported, var )), "Input error: field variable " + var + " in " + m_diag_name - + ".fields_to_plot is not supported for BackTransformed diagnostics. Currently supported field variables for BackTransformed diagnostics include Ex, Ey, Ez, Bx, By, Bz, jx, jy, jz, and rho"); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (utils::algorithms::is_in(BTD_varnames_supported, var )), + "Input error: field variable " + var + " in " + m_diag_name + + ".fields_to_plot is not supported for BackTransformed diagnostics." 
+ + " Currently supported field variables for BackTransformed diagnostics " + + "include Ex, Ey, Ez, Bx, By, Bz, jx, jy, jz, and rho"); } bool particle_fields_to_plot_specified = pp_diag_name.queryarr("particle_fields_to_plot", m_pfield_varnames); diff --git a/Source/Diagnostics/BackTransformedDiagnostic.cpp b/Source/Diagnostics/BackTransformedDiagnostic.cpp index 5b28d61000e..7d148abf15e 100644 --- a/Source/Diagnostics/BackTransformedDiagnostic.cpp +++ b/Source/Diagnostics/BackTransformedDiagnostic.cpp @@ -7,6 +7,7 @@ */ #include "BackTransformedDiagnostic.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" @@ -625,7 +626,7 @@ BackTransformedDiagnostic (Real zmin_lab, Real zmax_lab, Real v_window_lab, ParmParse pp_warpx("warpx"); bool do_user_fields = false; do_user_fields = pp_warpx.queryarr("back_transformed_diag_fields", user_fields_to_dump); - if (queryWithParser(pp_warpx, "buffer_size", m_num_buffer_)) { + if (utils::parser::queryWithParser(pp_warpx, "buffer_size", m_num_buffer_)) { if (m_max_box_size_ < m_num_buffer_) m_max_box_size_ = m_num_buffer_; } // If user specifies fields to dump, overwrite ncomp_to_dump, diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.H b/Source/Diagnostics/BoundaryScrapingDiagnostics.H index 3428f8a4c3d..01b0e8305ae 100644 --- a/Source/Diagnostics/BoundaryScrapingDiagnostics.H +++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.H @@ -8,7 +8,7 @@ #define WARPX_BOUNDARYSCRAPINGDIAGNOSTICS_H_ #include "Diagnostics.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/IntervalsParser.H" #include @@ -31,7 +31,7 @@ private: void ReadParameters (); /** Determines timesteps at which the particles are written out */ - IntervalsParser m_intervals; + utils::parser::IntervalsParser m_intervals; /** \brief Flush data to file. 
*/ void Flush (int i_buffer) override; diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp index dcaad9129c9..a0fad7c7f80 100644 --- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp +++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp @@ -61,7 +61,7 @@ BoundaryScrapingDiagnostics::ReadParameters () amrex::ParmParse pp_diag_name(m_diag_name); std::vector intervals_string_vec = {"0"}; pp_diag_name.queryarr("intervals", intervals_string_vec); - m_intervals = IntervalsParser(intervals_string_vec); + m_intervals = utils::parser::IntervalsParser(intervals_string_vec); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp index 0b50e2d79f2..52952a37339 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp @@ -27,13 +27,15 @@ ParticleReductionFunctor::ParticleReductionFunctor (const amrex::MultiFab* mf_sr AMREX_ALWAYS_ASSERT(ncomp == 1); // Allocate and compile a parser based on the input string fn_str - m_map_fn_parser = std::make_unique(makeParser( - fn_str, {"x", "y", "z", "ux", "uy", "uz"})); + m_map_fn_parser = std::make_unique( + utils::parser::makeParser( + fn_str, {"x", "y", "z", "ux", "uy", "uz"})); m_map_fn = m_map_fn_parser->compile<6>(); // Do the same for filter function, if it exists if (m_do_filter) { - m_filter_fn_parser = std::make_unique(makeParser( - filter_str, {"x", "y", "z", "ux", "uy", "uz"})); + m_filter_fn_parser = std::make_unique( + utils::parser::makeParser( + filter_str, {"x", "y", "z", "ux", "uy", "uz"})); m_filter_fn = m_filter_fn_parser->compile<6>(); } } diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index 3259994bd07..236985de484 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -12,10 +12,11 @@ #include "FlushFormats/FlushFormatPlotfile.H" #include "FlushFormats/FlushFormatSensei.H" #include "Particles/MultiParticleContainer.H" +#include "Utils/Algorithms/IsIn.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -51,7 +52,8 @@ Diagnostics::BaseReadParameters () amrex::ParmParse pp_diag_name(m_diag_name); m_file_prefix = "diags/" + m_diag_name; pp_diag_name.query("file_prefix", m_file_prefix); - queryWithParser(pp_diag_name, "file_min_digits", m_file_min_digits); + utils::parser::queryWithParser( + pp_diag_name, "file_min_digits", m_file_min_digits); pp_diag_name.query("format", m_format); pp_diag_name.query("dump_last_timestep", m_dump_last_timestep); @@ -71,24 +73,25 @@ Diagnostics::BaseReadParameters () } // Sanity check if user requests to plot phi - if (WarpXUtilStr::is_in(m_varnames_fields, "phi")){ + if (utils::algorithms::is_in(m_varnames_fields, "phi")){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( warpx.do_electrostatic==ElectrostaticSolverAlgo::LabFrame, "plot phi only works if do_electrostatic = labframe"); } // Sanity check if user requests to plot F - if (WarpXUtilStr::is_in(m_varnames_fields, "F")){ + if (utils::algorithms::is_in(m_varnames_fields, "F")){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( warpx.do_dive_cleaning, "plot F only works if warpx.do_dive_cleaning = 1"); } // G can be written to file only if WarpX::do_divb_cleaning = 1 - if 
(WarpXUtilStr::is_in(m_varnames_fields, "G")) + if (utils::algorithms::is_in(m_varnames_fields, "G")) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - warpx.do_divb_cleaning, "G can be written to file only if warpx.do_divb_cleaning = 1"); + warpx.do_divb_cleaning, + "G can be written to file only if warpx.do_divb_cleaning = 1"); } // If user requests to plot proc_number for a serial run, @@ -120,7 +123,8 @@ Diagnostics::BaseReadParameters () bool do_average = true; pp_diag_pfield.query((var + ".do_average").c_str(), do_average); m_pfield_do_average.push_back(do_average); - Store_parserString(pp_diag_pfield, (var + "(x,y,z,ux,uy,uz)").c_str(), parser_str); + utils::parser::Store_parserString( + pp_diag_pfield, (var + "(x,y,z,ux,uy,uz)").c_str(), parser_str); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( parser_str != "", @@ -169,7 +173,7 @@ Diagnostics::BaseReadParameters () ); } - if (WarpXUtilStr::is_in(m_varnames_fields, "none")){ + if (utils::algorithms::is_in(m_varnames_fields, "none")){ m_varnames_fields.clear(); } @@ -185,14 +189,16 @@ Diagnostics::BaseReadParameters () m_lo.resize(AMREX_SPACEDIM); m_hi.resize(AMREX_SPACEDIM); - bool lo_specified = queryArrWithParser(pp_diag_name, "diag_lo", m_lo, 0, AMREX_SPACEDIM); + const bool lo_specified = utils::parser::queryArrWithParser( + pp_diag_name, "diag_lo", m_lo, 0, AMREX_SPACEDIM); if (!lo_specified) { for (int idim=0; idim < AMREX_SPACEDIM; ++idim) { m_lo[idim] = warpx.Geom(0).ProbLo(idim); } } - bool hi_specified = queryArrWithParser(pp_diag_name, "diag_hi", m_hi, 0, AMREX_SPACEDIM); + const bool hi_specified = utils::parser::queryArrWithParser( + pp_diag_name, "diag_hi", m_hi, 0, AMREX_SPACEDIM); if (!hi_specified) { for (int idim =0; idim < AMREX_SPACEDIM; ++idim) { m_hi[idim] = warpx.Geom(0).ProbHi(idim); @@ -220,7 +226,9 @@ Diagnostics::BaseReadParameters () // Initialize cr_ratio with default value of 1 for each dimension. amrex::Vector cr_ratio(AMREX_SPACEDIM, 1); // Read user-defined coarsening ratio for the output MultiFab. - bool cr_specified = queryArrWithParser(pp_diag_name, "coarsening_ratio", cr_ratio, 0, AMREX_SPACEDIM); + const bool cr_specified = + utils::parser::queryArrWithParser( + pp_diag_name, "coarsening_ratio", cr_ratio, 0, AMREX_SPACEDIM); if (cr_specified) { for (int idim =0; idim < AMREX_SPACEDIM; ++idim) { m_crse_ratio[idim] = cr_ratio[idim]; @@ -228,7 +236,8 @@ Diagnostics::BaseReadParameters () } // Names of species to write to output - bool species_specified = pp_diag_name.queryarr("species", m_output_species_names); + const bool species_specified = + pp_diag_name.queryarr("species", m_output_species_names); // Auxiliary variables @@ -326,8 +335,10 @@ Diagnostics::InitData () m_output_species_names.clear(); } else { amrex::Vector dummy_val(AMREX_SPACEDIM); - if ( queryArrWithParser(pp_diag_name, "diag_lo", dummy_val, 0, AMREX_SPACEDIM) || - queryArrWithParser(pp_diag_name, "diag_hi", dummy_val, 0, AMREX_SPACEDIM) ) { + if ( utils::parser::queryArrWithParser( + pp_diag_name, "diag_lo", dummy_val, 0, AMREX_SPACEDIM) || + utils::parser::queryArrWithParser( + pp_diag_name, "diag_hi", dummy_val, 0, AMREX_SPACEDIM) ) { // set geometry filter for particle-diags to true when the diagnostic domain-extent // is specified by the user. 
// Note that the filter is set for every ith snapshot, and the number of snapshots diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp index 9e325591e94..39631afc8e8 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp @@ -24,7 +24,6 @@ FlushFormatAscent::WriteToFile ( { #ifdef AMREX_USE_ASCENT WARPX_PROFILE("FlushFormatAscent::WriteToFile()"); - auto & warpx = WarpX::GetInstance(); // wrap mesh data diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index c85c8cab25d..03154983632 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -6,6 +6,7 @@ #include "Particles/WarpXParticleContainer.H" #include "Particles/PinnedMemoryParticleContainer.H" #include "Utils/Interpolate.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" @@ -348,7 +349,7 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir, UniformFilter const uniform_filter(particle_diags[i].m_do_uniform_filter, particle_diags[i].m_uniform_stride); ParserFilter parser_filter(particle_diags[i].m_do_parser_filter, - compileParser + utils::parser::compileParser (particle_diags[i].m_particle_filter_parser.get()), pc->getMass()); parser_filter.m_units = InputUnits::SI; diff --git a/Source/Diagnostics/FullDiagnostics.H b/Source/Diagnostics/FullDiagnostics.H index 5af5b4ea05a..bf0b8b6a5de 100644 --- a/Source/Diagnostics/FullDiagnostics.H +++ b/Source/Diagnostics/FullDiagnostics.H @@ -2,7 +2,7 @@ #define WARPX_FULLDIAGNOSTICS_H_ #include "Diagnostics.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/IntervalsParser.H" #include @@ -15,7 +15,7 @@ private: /** Read user-requested parameters for full diagnostics */ void ReadParameters (); /** Determines timesteps at which full diagnostics are written to file */ - IntervalsParser m_intervals; + utils::parser::IntervalsParser m_intervals; /** Whether to plot raw (i.e., NOT cell-centered) fields */ bool m_plot_raw_fields = false; /** Whether to plot guard cells of raw fields */ diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index 7bdc2f171c4..cf71ce9f69e 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -11,6 +11,7 @@ #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "FlushFormats/FlushFormat.H" #include "Particles/MultiParticleContainer.H" +#include "Utils/Algorithms/IsIn.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" @@ -85,7 +86,7 @@ FullDiagnostics::ReadParameters () ".format must be plotfile or openpmd or checkpoint or ascent or sensei"); std::vector intervals_string_vec = {"0"}; pp_diag_name.getarr("intervals", intervals_string_vec); - m_intervals = IntervalsParser(intervals_string_vec); + m_intervals = utils::parser::IntervalsParser(intervals_string_vec); bool plot_raw_fields_specified = pp_diag_name.query("plot_raw_fields", m_plot_raw_fields); bool plot_raw_fields_guards_specified = pp_diag_name.query("plot_raw_fields_guards", m_plot_raw_fields_guards); bool raw_specified = plot_raw_fields_specified || plot_raw_fields_guards_specified; @@ -353,7 +354,7 @@ FullDiagnostics::AddRZModesToDiags (int lev) } // If rho is requested, all components will be written 
out - bool rho_requested = WarpXUtilStr::is_in( m_varnames, "rho" ); + bool rho_requested = utils::algorithms::is_in( m_varnames, "rho" ); // First index of m_all_field_functors[lev] where RZ modes are stored int icomp = m_all_field_functors[0].size(); diff --git a/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp b/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp index deab2ba0b9e..f8c0f7b3764 100644 --- a/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp +++ b/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp @@ -2,8 +2,8 @@ #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "Particles/WarpXParticleContainer.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -51,19 +51,19 @@ ParticleDiag::ParticleDiag(std::string diag_name, std::string name, WarpXParticl #endif // build filter functors - m_do_random_filter = queryWithParser(pp_diag_name_species_name, "random_fraction", - m_random_fraction); - m_do_uniform_filter = queryWithParser(pp_diag_name_species_name, "uniform_stride", - m_uniform_stride); + m_do_random_filter = utils::parser::queryWithParser( + pp_diag_name_species_name, "random_fraction", m_random_fraction); + m_do_uniform_filter = utils::parser::queryWithParser( + pp_diag_name_species_name, "uniform_stride",m_uniform_stride); std::string buf; m_do_parser_filter = pp_diag_name_species_name.query("plot_filter_function(t,x,y,z,ux,uy,uz)", buf); if (m_do_parser_filter) { std::string function_string = ""; - Store_parserString(pp_diag_name_species_name,"plot_filter_function(t,x,y,z,ux,uy,uz)", - function_string); + utils::parser::Store_parserString( + pp_diag_name_species_name,"plot_filter_function(t,x,y,z,ux,uy,uz)", function_string); m_particle_filter_parser = std::make_unique( - makeParser(function_string,{"t","x","y","z","ux","uy","uz"})); + utils::parser::makeParser(function_string,{"t","x","y","z","ux","uy","uz"})); } } diff --git a/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp b/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp index 4b4d404ba8c..f1ebd39fc3b 100644 --- a/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp +++ b/Source/Diagnostics/ReducedDiags/BeamRelevant.cpp @@ -9,7 +9,6 @@ #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "Utils/WarpXConst.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp index 5370077b116..6bedaee1b70 100644 --- a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp @@ -8,7 +8,6 @@ #include "FieldEnergy.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp index 2778d58ee29..804d1641abc 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp @@ -8,7 +8,6 @@ #include "FieldMaximum.H" #include "Utils/CoarsenIO.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp index 8053dc1b463..45a5cc6cb7a 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp +++ 
b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp @@ -8,7 +8,6 @@ #include "FieldMomentum.H" #include "Utils/CoarsenIO.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index bd5d5361106..17afcf62af2 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -11,10 +11,9 @@ #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/Pusher/UpdatePosition.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -90,12 +89,15 @@ FieldProbe::FieldProbe (std::string rd_name) x_probe = 0._rt; y_probe = 0._rt; #if !defined(WARPX_DIM_1D_Z) - getWithParser(pp_rd_name, "x_probe", x_probe); + utils::parser::getWithParser( + pp_rd_name, "x_probe", x_probe); #endif #if defined(WARPX_DIM_3D) - getWithParser(pp_rd_name, "y_probe", y_probe); + utils::parser::getWithParser( + pp_rd_name, "y_probe", y_probe); #endif - getWithParser(pp_rd_name, "z_probe", z_probe); + utils::parser::getWithParser( + pp_rd_name, "z_probe", z_probe); } else if (m_probe_geometry_str == "Line") { @@ -105,16 +107,16 @@ FieldProbe::FieldProbe (std::string rd_name) y_probe = 0._rt; y1_probe = 0._rt; #if !defined(WARPX_DIM_1D_Z) - getWithParser(pp_rd_name, "x_probe", x_probe); - getWithParser(pp_rd_name, "x1_probe", x1_probe); + utils::parser::getWithParser(pp_rd_name, "x_probe", x_probe); + utils::parser::getWithParser(pp_rd_name, "x1_probe", x1_probe); #endif #if defined(WARPX_DIM_3D) - getWithParser(pp_rd_name, "y_probe", y_probe); - getWithParser(pp_rd_name, "y1_probe", y1_probe); + utils::parser::getWithParser(pp_rd_name, "y_probe", y_probe); + utils::parser::getWithParser(pp_rd_name, "y1_probe", y1_probe); #endif - getWithParser(pp_rd_name, "z_probe", z_probe); - getWithParser(pp_rd_name, "z1_probe", z1_probe); - getWithParser(pp_rd_name, "resolution", m_resolution); + utils::parser::getWithParser(pp_rd_name, "z_probe", z_probe); + utils::parser::getWithParser(pp_rd_name, "z1_probe", z1_probe); + utils::parser::getWithParser(pp_rd_name, "resolution", m_resolution); } else if (m_probe_geometry_str == "Plane") { @@ -129,18 +131,18 @@ FieldProbe::FieldProbe (std::string rd_name) target_normal_z = 0._rt; target_up_y = 0._rt; #if defined(WARPX_DIM_3D) - getWithParser(pp_rd_name, "y_probe", y_probe); - getWithParser(pp_rd_name, "target_normal_x", target_normal_x); - getWithParser(pp_rd_name, "target_normal_y", target_normal_y); - getWithParser(pp_rd_name, "target_normal_z", target_normal_z); - getWithParser(pp_rd_name, "target_up_y", target_up_y); + utils::parser::getWithParser(pp_rd_name, "y_probe", y_probe); + utils::parser::getWithParser(pp_rd_name, "target_normal_x", target_normal_x); + utils::parser::getWithParser(pp_rd_name, "target_normal_y", target_normal_y); + utils::parser::getWithParser(pp_rd_name, "target_normal_z", target_normal_z); + utils::parser::getWithParser(pp_rd_name, "target_up_y", target_up_y); #endif - getWithParser(pp_rd_name, "x_probe", x_probe); - getWithParser(pp_rd_name, "z_probe", z_probe); - getWithParser(pp_rd_name, "target_up_x", target_up_x); - getWithParser(pp_rd_name, "target_up_z", target_up_z); - getWithParser(pp_rd_name, "detector_radius", detector_radius); - getWithParser(pp_rd_name, "resolution", m_resolution); + 
utils::parser::getWithParser(pp_rd_name, "x_probe", x_probe); + utils::parser::getWithParser(pp_rd_name, "z_probe", z_probe); + utils::parser::getWithParser(pp_rd_name, "target_up_x", target_up_x); + utils::parser::getWithParser(pp_rd_name, "target_up_z", target_up_z); + utils::parser::getWithParser(pp_rd_name, "detector_radius", detector_radius); + utils::parser::getWithParser(pp_rd_name, "resolution", m_resolution); } else { @@ -151,7 +153,7 @@ FieldProbe::FieldProbe (std::string rd_name) } pp_rd_name.query("integrate", m_field_probe_integrate); pp_rd_name.query("raw_fields", raw_fields); - queryWithParser(pp_rd_name, "interp_order", interp_order); + utils::parser::queryWithParser(pp_rd_name, "interp_order", interp_order); pp_rd_name.query("do_moving_window_FP", do_moving_window_FP); if (WarpX::gamma_boost > 1.0_rt) diff --git a/Source/Diagnostics/ReducedDiags/FieldReduction.cpp b/Source/Diagnostics/ReducedDiags/FieldReduction.cpp index 1f98dca8cb6..23d1e76a8dc 100644 --- a/Source/Diagnostics/ReducedDiags/FieldReduction.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldReduction.cpp @@ -7,10 +7,9 @@ #include "FieldReduction.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" -#include "Utils/WarpXUtil.H" #include #include @@ -49,10 +48,10 @@ FieldReduction::FieldReduction (std::string rd_name) // read reduced function with parser std::string parser_string = ""; - Store_parserString(pp_rd_name,"reduced_function(x,y,z,Ex,Ey,Ez,Bx,By,Bz)", + utils::parser::Store_parserString(pp_rd_name,"reduced_function(x,y,z,Ex,Ey,Ez,Bx,By,Bz)", parser_string); m_parser = std::make_unique( - makeParser(parser_string,{"x","y","z","Ex","Ey","Ez","Bx","By","Bz"})); + utils::parser::makeParser(parser_string,{"x","y","z","Ex","Ey","Ez","Bx","By","Bz"})); // Replace all newlines and possible following whitespaces with a single whitespace. This // should avoid weird formatting when the string is written in the header of the output file. 
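[Editorial note, not part of the patch] The FieldReduction.cpp hunk just above ends with a comment stating that newlines and any following whitespace in the parsed expression are collapsed into single spaces before the string is written to the output-file header. The code performing that replacement is not shown in this excerpt; the following is a small, self-contained sketch of one plausible way to do it with std::regex, offered only as an illustration of the described behavior.

    // Illustrative sketch only -- not the actual FieldReduction.cpp code,
    // which is outside this excerpt. Assumes a single std::regex pass suffices.
    #include <iostream>
    #include <regex>
    #include <string>

    int main ()
    {
        std::string parser_string = "x + y\n   + z\n- Ex";
        // Collapse each newline plus any trailing whitespace into one space,
        // so the reduced_function expression prints on one line in the header.
        parser_string = std::regex_replace(parser_string, std::regex{"\n\\s*"}, " ");
        std::cout << parser_string << "\n";  // prints: x + y + z - Ex
    }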
diff --git a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp index 5f3da09c280..ec01b4fc0c6 100644 --- a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp +++ b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp @@ -8,7 +8,6 @@ #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "Particles/MultiParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/LoadBalanceEfficiency.cpp b/Source/Diagnostics/ReducedDiags/LoadBalanceEfficiency.cpp index 6f599653722..e3b82ed2a25 100644 --- a/Source/Diagnostics/ReducedDiags/LoadBalanceEfficiency.cpp +++ b/Source/Diagnostics/ReducedDiags/LoadBalanceEfficiency.cpp @@ -7,7 +7,6 @@ #include "LoadBalanceEfficiency.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" -#include "Utils/IntervalsParser.H" #include "WarpX.H" #include diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp index 50b0dede00d..9199d823164 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp @@ -20,7 +20,6 @@ #include "ParticleMomentum.H" #include "ParticleNumber.H" #include "RhoMaximum.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" diff --git a/Source/Diagnostics/ReducedDiags/ParticleEnergy.cpp b/Source/Diagnostics/ReducedDiags/ParticleEnergy.cpp index 9d3a83eb27b..b345e04bda6 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleEnergy.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleEnergy.cpp @@ -12,7 +12,6 @@ #include "Particles/MultiParticleContainer.H" #include "Particles/SpeciesPhysicalProperties.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "WarpX.H" #include diff --git a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp index ed2d2ea1389..0eb9b978684 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp @@ -17,7 +17,6 @@ #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/SpeciesPhysicalProperties.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "Utils/WarpXConst.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/ParticleHistogram.cpp b/Source/Diagnostics/ReducedDiags/ParticleHistogram.cpp index f54f9bf37d1..019ad6d3cc2 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleHistogram.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleHistogram.cpp @@ -11,10 +11,9 @@ #include "Particles/MultiParticleContainer.H" #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -61,17 +60,17 @@ ParticleHistogram::ParticleHistogram (std::string rd_name) pp_rd_name.get("species",selected_species_name); // read bin parameters - getWithParser(pp_rd_name, "bin_number",m_bin_num); - getWithParser(pp_rd_name, "bin_max", m_bin_max); - getWithParser(pp_rd_name, "bin_min", m_bin_min); + utils::parser::getWithParser(pp_rd_name, "bin_number",m_bin_num); + utils::parser::getWithParser(pp_rd_name, "bin_max", m_bin_max); + 
utils::parser::getWithParser(pp_rd_name, "bin_min", m_bin_min); m_bin_size = (m_bin_max - m_bin_min) / m_bin_num; // read histogram function std::string function_string = ""; - Store_parserString(pp_rd_name,"histogram_function(t,x,y,z,ux,uy,uz)", + utils::parser::Store_parserString(pp_rd_name,"histogram_function(t,x,y,z,ux,uy,uz)", function_string); m_parser = std::make_unique( - makeParser(function_string,{"t","x","y","z","ux","uy","uz"})); + utils::parser::makeParser(function_string,{"t","x","y","z","ux","uy","uz"})); // read normalization type std::string norm_string = "default"; @@ -113,9 +112,10 @@ ParticleHistogram::ParticleHistogram (std::string rd_name) m_do_parser_filter = pp_rd_name.query("filter_function(t,x,y,z,ux,uy,uz)", buf); if (m_do_parser_filter) { std::string filter_string = ""; - Store_parserString(pp_rd_name,"filter_function(t,x,y,z,ux,uy,uz)", filter_string); + utils::parser::Store_parserString( + pp_rd_name,"filter_function(t,x,y,z,ux,uy,uz)", filter_string); m_parser_filter = std::make_unique( - makeParser(filter_string,{"t","x","y","z","ux","uy","uz"})); + utils::parser::makeParser(filter_string,{"t","x","y","z","ux","uy","uz"})); } // resize data array @@ -168,10 +168,12 @@ void ParticleHistogram::ComputeDiags (int step) auto & myspc = mypc.GetParticleContainer(m_selected_species_id); // get parser - auto fun_partparser = compileParser(m_parser.get()); + auto fun_partparser = + utils::parser::compileParser(m_parser.get()); // get filter parser - auto fun_filterparser = compileParser(m_parser_filter.get()); + auto fun_filterparser = + utils::parser::compileParser(m_parser_filter.get()); // declare local variables auto const num_bins = m_bin_num; diff --git a/Source/Diagnostics/ReducedDiags/ParticleMomentum.cpp b/Source/Diagnostics/ReducedDiags/ParticleMomentum.cpp index 56487820e86..e45577c36ff 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleMomentum.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleMomentum.cpp @@ -10,7 +10,6 @@ #include "Particles/MultiParticleContainer.H" #include "Particles/SpeciesPhysicalProperties.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "Utils/WarpXConst.H" #include "WarpX.H" diff --git a/Source/Diagnostics/ReducedDiags/ParticleNumber.cpp b/Source/Diagnostics/ReducedDiags/ParticleNumber.cpp index 9a2a3c79703..b1f140e21ed 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleNumber.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleNumber.cpp @@ -10,7 +10,6 @@ #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "WarpX.H" #include diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.H b/Source/Diagnostics/ReducedDiags/ReducedDiags.H index ae7ea7d8c7e..43fb1b76b06 100644 --- a/Source/Diagnostics/ReducedDiags/ReducedDiags.H +++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.H @@ -8,7 +8,7 @@ #ifndef WARPX_DIAGNOSTICS_REDUCEDDIAGS_REDUCEDDIAGS_H_ #define WARPX_DIAGNOSTICS_REDUCEDDIAGS_REDUCEDDIAGS_H_ -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/IntervalsParser.H" #include @@ -34,7 +34,7 @@ public: std::string m_rd_name; /// output intervals - IntervalsParser m_intervals; + utils::parser::IntervalsParser m_intervals; /// check if it is a restart run int m_IsNotRestart = 1; diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp index 68111df72b8..aa6341dd9f5 100644 --- 
a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp @@ -8,6 +8,7 @@ #include "ReducedDiags.H" #include "WarpX.H" +#include "Utils/Parser/IntervalsParser.H" #include "Utils/TextMsg.H" #include @@ -59,7 +60,7 @@ ReducedDiags::ReducedDiags (std::string rd_name) // read reduced diags intervals std::vector intervals_string_vec = {"1"}; pp_rd_name.getarr("intervals", intervals_string_vec); - m_intervals = IntervalsParser(intervals_string_vec); + m_intervals = utils::parser::IntervalsParser(intervals_string_vec); // read separator pp_rd_name.query("separator", m_sep); diff --git a/Source/Diagnostics/ReducedDiags/RhoMaximum.cpp b/Source/Diagnostics/ReducedDiags/RhoMaximum.cpp index 0d6cd43cbc7..43c5efbd69d 100644 --- a/Source/Diagnostics/ReducedDiags/RhoMaximum.cpp +++ b/Source/Diagnostics/ReducedDiags/RhoMaximum.cpp @@ -11,7 +11,6 @@ #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "WarpX.H" diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 6a2817e8841..1a31db64002 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -11,10 +11,10 @@ #include "FieldIO.H" #include "Particles/Filter/FilterFunctors.H" #include "Utils/TextMsg.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/RelativeCellPosition.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -611,7 +611,7 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part UniformFilter const uniform_filter(particle_diags[i].m_do_uniform_filter, particle_diags[i].m_uniform_stride); ParserFilter parser_filter(particle_diags[i].m_do_parser_filter, - compileParser + utils::parser::compileParser (particle_diags[i].m_particle_filter_parser.get()), pc->getMass()); parser_filter.m_units = InputUnits::SI; diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index b3bb9699bf4..a3a315ae055 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -8,8 +8,8 @@ #include "WarpX.H" #ifdef AMREX_USE_EB +# include "Utils/Parser/ParserUtils.H" # include "Utils/TextMsg.H" -# include "Utils/WarpXUtil.H" # include # include @@ -87,7 +87,7 @@ WarpX::InitEB () std::string impf; pp_warpx.query("eb_implicit_function", impf); if (! 
impf.empty()) { - auto eb_if_parser = makeParser(impf, {"x", "y", "z"}); + auto eb_if_parser = utils::parser::makeParser(impf, {"x", "y", "z"}); ParserIF pif(eb_if_parser.compile<3>()); auto gshop = amrex::EB2::makeShop(pif, eb_if_parser); // The last argument of amrex::EB2::Build is the maximum coarsening level diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 717adbedd85..7300b1e32c0 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -26,12 +26,11 @@ #include "Particles/MultiParticleContainer.H" #include "Particles/ParticleBoundaryBuffer.H" #include "Python/WarpX_py.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" +#include "Utils/WarpXUtil.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include #include diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 3a16b8b70f8..64158d3a158 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -11,10 +11,10 @@ #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" #include "Python/WarpX_py.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpXProfilerWrapper.H" #include @@ -937,13 +937,13 @@ void ElectrostaticSolver::PoissonBoundaryHandler::definePhiBCs ( ) void ElectrostaticSolver::PoissonBoundaryHandler::buildParsers () { - potential_xlo_parser = makeParser(potential_xlo_str, {"t"}); - potential_xhi_parser = makeParser(potential_xhi_str, {"t"}); - potential_ylo_parser = makeParser(potential_ylo_str, {"t"}); - potential_yhi_parser = makeParser(potential_yhi_str, {"t"}); - potential_zlo_parser = makeParser(potential_zlo_str, {"t"}); - potential_zhi_parser = makeParser(potential_zhi_str, {"t"}); - potential_eb_parser = makeParser(potential_eb_str, {"x", "y", "z", "t"}); + potential_xlo_parser = utils::parser::makeParser(potential_xlo_str, {"t"}); + potential_xhi_parser = utils::parser::makeParser(potential_xhi_str, {"t"}); + potential_ylo_parser = utils::parser::makeParser(potential_ylo_str, {"t"}); + potential_yhi_parser = utils::parser::makeParser(potential_yhi_str, {"t"}); + potential_zlo_parser = utils::parser::makeParser(potential_zlo_str, {"t"}); + potential_zhi_parser = utils::parser::makeParser(potential_zhi_str, {"t"}); + potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"x", "y", "z", "t"}); potential_xlo = potential_xlo_parser.compile<1>(); potential_xhi = potential_xhi_parser.compile<1>(); @@ -960,7 +960,7 @@ void ElectrostaticSolver::PoissonBoundaryHandler::buildParsers () phi_EB_only_t = false; } else { - potential_eb_parser = makeParser(potential_eb_str, {"t"}); + potential_eb_parser = utils::parser::makeParser(potential_eb_str, {"t"}); potential_eb_t = potential_eb_parser.compile<1>(); } } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H index 8b39296f9ea..58c0837a89b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H @@ -10,7 +10,6 @@ #include "WarpX.H" #include 
"Utils/CoarsenIO.H" -#include "Utils/WarpXUtil.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 4b117aad34a..7f0f995569a 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -11,7 +11,6 @@ #include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp index 438e237fb39..0bd063bebdc 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp @@ -1,7 +1,7 @@ #include "MacroscopicProperties.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -43,7 +43,7 @@ MacroscopicProperties::ReadParameters () // Query input for material conductivity, sigma. bool sigma_specified = false; - if (queryWithParser(pp_macroscopic, "sigma", m_sigma)) { + if (utils::parser::queryWithParser(pp_macroscopic, "sigma", m_sigma)) { m_sigma_s = "constant"; sigma_specified = true; } @@ -60,13 +60,14 @@ MacroscopicProperties::ReadParameters () } // initialization of sigma (conductivity) with parser if (m_sigma_s == "parse_sigma_function") { - Store_parserString(pp_macroscopic, "sigma_function(x,y,z)", m_str_sigma_function); + utils::parser::Store_parserString( + pp_macroscopic, "sigma_function(x,y,z)", m_str_sigma_function); m_sigma_parser = std::make_unique( - makeParser(m_str_sigma_function,{"x","y","z"})); + utils::parser::makeParser(m_str_sigma_function,{"x","y","z"})); } bool epsilon_specified = false; - if (queryWithParser(pp_macroscopic, "epsilon", m_epsilon)) { + if (utils::parser::queryWithParser(pp_macroscopic, "epsilon", m_epsilon)) { m_epsilon_s = "constant"; epsilon_specified = true; } @@ -84,14 +85,15 @@ MacroscopicProperties::ReadParameters () // initialization of epsilon (permittivity) with parser if (m_epsilon_s == "parse_epsilon_function") { - Store_parserString(pp_macroscopic, "epsilon_function(x,y,z)", m_str_epsilon_function); + utils::parser::Store_parserString( + pp_macroscopic, "epsilon_function(x,y,z)", m_str_epsilon_function); m_epsilon_parser = std::make_unique( - makeParser(m_str_epsilon_function,{"x","y","z"})); + utils::parser::makeParser(m_str_epsilon_function,{"x","y","z"})); } // Query input for material permittivity, epsilon. 
bool mu_specified = false; - if (queryWithParser(pp_macroscopic, "mu", m_mu)) { + if (utils::parser::queryWithParser(pp_macroscopic, "mu", m_mu)) { m_mu_s = "constant"; mu_specified = true; } @@ -109,9 +111,10 @@ MacroscopicProperties::ReadParameters () // initialization of mu (permeability) with parser if (m_mu_s == "parse_mu_function") { - Store_parserString(pp_macroscopic, "mu_function(x,y,z)", m_str_mu_function); + utils::parser::Store_parserString( + pp_macroscopic, "mu_function(x,y,z)", m_str_mu_function); m_mu_parser = std::make_unique( - makeParser(m_str_mu_function,{"x","y","z"})); + utils::parser::makeParser(m_str_mu_function,{"x","y","z"})); } } diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldDataRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralFieldDataRZ.cpp index 5a2f1d643ff..59d7fb70ec1 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldDataRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldDataRZ.cpp @@ -6,8 +6,8 @@ */ #include "SpectralFieldDataRZ.H" -#include "WarpX.H" #include "Utils/WarpXUtil.H" +#include "WarpX.H" #include diff --git a/Source/Initialization/CustomDensityProb.H b/Source/Initialization/CustomDensityProb.H index fef514d954b..bcd37970753 100644 --- a/Source/Initialization/CustomDensityProb.H +++ b/Source/Initialization/CustomDensityProb.H @@ -8,7 +8,7 @@ #define CUSTOM_DENSITY_PROB_H_ #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" +#include "Utils/Parser/ParserUtils.H" #include #include @@ -28,7 +28,8 @@ struct InjectorDensityCustom std::vector v; WARPX_ALWAYS_ASSERT_WITH_MESSAGE(v.size() <= 6, "Too many parameters for InjectorDensityCustom"); - getArrWithParser(pp_species_name, "custom_profile_params", v); + utils::parser::getArrWithParser( + pp_species_name, "custom_profile_params", v); for (int i = 0; i < static_cast(v.size()); ++i) { p[i] = v[i]; } diff --git a/Source/Initialization/InjectorDensity.cpp b/Source/Initialization/InjectorDensity.cpp index e277ad1d574..e4b3523ec01 100644 --- a/Source/Initialization/InjectorDensity.cpp +++ b/Source/Initialization/InjectorDensity.cpp @@ -8,8 +8,8 @@ #include "InjectorDensity.H" #include "Initialization/CustomDensityProb.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" #include #include @@ -51,7 +51,8 @@ InjectorDensityPredefined::InjectorDensityPredefined ( std::vector v; // Read parameters for the predefined plasma profile. 
- getArrWithParser(pp_species_name, "predefined_profile_params", v); + utils::parser::getArrWithParser( + pp_species_name, "predefined_profile_params", v); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(v.size() <= 6, "Too many parameters for InjectorDensityPredefined"); for (int i = 0; i < static_cast(v.size()); ++i) { diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index 07526340f2e..b6f9ae9ade6 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -15,9 +15,9 @@ #include "Initialization/InjectorMomentum.H" #include "Initialization/InjectorPosition.H" #include "Particles/SpeciesPhysicalProperties.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -112,15 +112,15 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) } # endif - queryWithParser(pp_species_name, "xmin", xmin); - queryWithParser(pp_species_name, "ymin", ymin); - queryWithParser(pp_species_name, "zmin", zmin); - queryWithParser(pp_species_name, "xmax", xmax); - queryWithParser(pp_species_name, "ymax", ymax); - queryWithParser(pp_species_name, "zmax", zmax); + utils::parser::queryWithParser(pp_species_name, "xmin", xmin); + utils::parser::queryWithParser(pp_species_name, "ymin", ymin); + utils::parser::queryWithParser(pp_species_name, "zmin", zmin); + utils::parser::queryWithParser(pp_species_name, "xmax", xmax); + utils::parser::queryWithParser(pp_species_name, "ymax", ymax); + utils::parser::queryWithParser(pp_species_name, "zmax", zmax); - queryWithParser(pp_species_name, "density_min", density_min); - queryWithParser(pp_species_name, "density_max", density_max); + utils::parser::queryWithParser(pp_species_name, "density_min", density_min); + utils::parser::queryWithParser(pp_species_name, "density_max", density_max); std::string physical_species_s; bool species_is_specified = pp_species_name.query("species_type", physical_species_s); @@ -142,8 +142,10 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) ::tolower); // parse charge and mass - bool charge_is_specified = queryWithParser(pp_species_name, "charge", charge); - bool mass_is_specified = queryWithParser(pp_species_name, "mass", mass); + const bool charge_is_specified = + utils::parser::queryWithParser(pp_species_name, "charge", charge); + const bool mass_is_specified = + utils::parser::queryWithParser(pp_species_name, "mass", mass); if ( charge_is_specified && species_is_specified ){ ablastr::warn_manager::WMRecordWarning("Species", @@ -180,22 +182,32 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) if (injection_style == "none") { return; } else if (injection_style == "singleparticle") { - getArrWithParser(pp_species_name, "single_particle_pos", single_particle_pos, 0, 3); - getArrWithParser(pp_species_name, "single_particle_vel", single_particle_vel, 0, 3); + utils::parser::getArrWithParser( + pp_species_name, "single_particle_pos", single_particle_pos, 0, 3); + utils::parser::getArrWithParser( + pp_species_name, "single_particle_vel", single_particle_vel, 0, 3); for (auto& x : single_particle_vel) { x *= PhysConst::c; } - getWithParser(pp_species_name, "single_particle_weight", single_particle_weight); + utils::parser::getWithParser( + pp_species_name, "single_particle_weight", single_particle_weight); add_single_particle = true; return; } else if (injection_style == "multipleparticles") { - 
getArrWithParser(pp_species_name, "multiple_particles_pos_x", multiple_particles_pos_x); - getArrWithParser(pp_species_name, "multiple_particles_pos_y", multiple_particles_pos_y); - getArrWithParser(pp_species_name, "multiple_particles_pos_z", multiple_particles_pos_z); - getArrWithParser(pp_species_name, "multiple_particles_vel_x", multiple_particles_vel_x); - getArrWithParser(pp_species_name, "multiple_particles_vel_y", multiple_particles_vel_y); - getArrWithParser(pp_species_name, "multiple_particles_vel_z", multiple_particles_vel_z); - getArrWithParser(pp_species_name, "multiple_particles_weight", multiple_particles_weight); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_pos_x", multiple_particles_pos_x); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_pos_y", multiple_particles_pos_y); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_pos_z", multiple_particles_pos_z); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_vel_x", multiple_particles_vel_x); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_vel_y", multiple_particles_vel_y); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_vel_z", multiple_particles_vel_z); + utils::parser::getArrWithParser( + pp_species_name, "multiple_particles_weight", multiple_particles_weight); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ((multiple_particles_pos_x.size() == multiple_particles_pos_y.size()) && (multiple_particles_pos_x.size() == multiple_particles_pos_z.size()) && @@ -210,17 +222,18 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) add_multiple_particles = true; return; } else if (injection_style == "gaussian_beam") { - getWithParser(pp_species_name, "x_m", x_m); - getWithParser(pp_species_name, "y_m", y_m); - getWithParser(pp_species_name, "z_m", z_m); - getWithParser(pp_species_name, "x_rms", x_rms); - getWithParser(pp_species_name, "y_rms", y_rms); - getWithParser(pp_species_name, "z_rms", z_rms); - queryWithParser(pp_species_name, "x_cut", x_cut); - queryWithParser(pp_species_name, "y_cut", y_cut); - queryWithParser(pp_species_name, "z_cut", z_cut); - getWithParser(pp_species_name, "q_tot", q_tot); - getWithParser(pp_species_name, "npart", npart); + + utils::parser::getWithParser(pp_species_name, "x_m", x_m); + utils::parser::getWithParser(pp_species_name, "y_m", y_m); + utils::parser::getWithParser(pp_species_name, "z_m", z_m); + utils::parser::getWithParser(pp_species_name, "x_rms", x_rms); + utils::parser::getWithParser(pp_species_name, "y_rms", y_rms); + utils::parser::getWithParser(pp_species_name, "z_rms", z_rms); + utils::parser::queryWithParser(pp_species_name, "x_cut", x_cut); + utils::parser::queryWithParser(pp_species_name, "y_cut", y_cut); + utils::parser::queryWithParser(pp_species_name, "z_cut", z_cut); + utils::parser::getWithParser(pp_species_name, "q_tot", q_tot); + utils::parser::getWithParser(pp_species_name, "npart", npart); pp_species_name.query("do_symmetrize", do_symmetrize); gaussian_beam = true; parseMomentum(pp_species_name); @@ -242,7 +255,8 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) // so that inj_pos->getPositionUnitBox calls // InjectorPosition[Random or Regular].getPositionUnitBox. 
else if (injection_style == "nrandompercell") { - getWithParser(pp_species_name, "num_particles_per_cell", num_particles_per_cell); + utils::parser::getWithParser( + pp_species_name, "num_particles_per_cell", num_particles_per_cell); #if WARPX_DIM_RZ if (WarpX::n_rz_azimuthal_modes > 1) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -260,7 +274,8 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) parseMomentum(pp_species_name); } else if (injection_style == "nfluxpercell") { surface_flux = true; - getWithParser(pp_species_name, "num_particles_per_cell", num_particles_per_cell_real); + utils::parser::getWithParser( + pp_species_name, "num_particles_per_cell", num_particles_per_cell_real); #ifdef WARPX_DIM_RZ if (WarpX::n_rz_azimuthal_modes > 1) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -270,9 +285,12 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) "(Please visit PR#765 for more information.)"); } #endif - getWithParser(pp_species_name, "surface_flux_pos", surface_flux_pos); - queryWithParser(pp_species_name, "flux_tmin", flux_tmin); - queryWithParser(pp_species_name, "flux_tmax", flux_tmax); + utils::parser::getWithParser( + pp_species_name, "surface_flux_pos", surface_flux_pos); + utils::parser::queryWithParser( + pp_species_name, "flux_tmin", flux_tmin); + utils::parser::queryWithParser( + pp_species_name, "flux_tmax", flux_tmax); std::string flux_normal_axis_string; pp_species_name.get("flux_normal_axis", flux_normal_axis_string); flux_normal_axis = -1; @@ -332,8 +350,9 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) #else constexpr int num_required_ppc_each_dim = 3; #endif - getArrWithParser(pp_species_name, "num_particles_per_cell_each_dim", - num_particles_per_cell_each_dim, 0, num_required_ppc_each_dim); + utils::parser::getArrWithParser( + pp_species_name, "num_particles_per_cell_each_dim", + num_particles_per_cell_each_dim, 0, num_required_ppc_each_dim); #if WARPX_DIM_XZ num_particles_per_cell_each_dim.push_back(1); #endif @@ -372,8 +391,8 @@ PlasmaInjector::PlasmaInjector (int ispecies, const std::string& name) std::string str_injection_file; pp_species_name.get("injection_file", str_injection_file); // optional parameters - queryWithParser(pp_species_name, "q_tot", q_tot); - queryWithParser(pp_species_name, "z_shift",z_shift); + utils::parser::queryWithParser(pp_species_name, "q_tot", q_tot); + utils::parser::queryWithParser(pp_species_name, "z_shift",z_shift); #ifdef WARPX_USE_OPENPMD if (amrex::ParallelDescriptor::IOProcessor()) { @@ -519,7 +538,7 @@ void PlasmaInjector::parseDensity (amrex::ParmParse& pp) std::transform(rho_prof_s.begin(), rho_prof_s.end(), rho_prof_s.begin(), ::tolower); if (rho_prof_s == "constant") { - getWithParser(pp, "density", density); + utils::parser::getWithParser(pp, "density", density); // Construct InjectorDensity with InjectorDensityConstant. h_inj_rho.reset(new InjectorDensity((InjectorDensityConstant*)nullptr, density)); } else if (rho_prof_s == "custom") { @@ -529,12 +548,13 @@ void PlasmaInjector::parseDensity (amrex::ParmParse& pp) // Construct InjectorDensity with InjectorDensityPredefined. h_inj_rho.reset(new InjectorDensity((InjectorDensityPredefined*)nullptr,species_name)); } else if (rho_prof_s == "parse_density_function") { - Store_parserString(pp, "density_function(x,y,z)", str_density_function); + utils::parser::Store_parserString( + pp, "density_function(x,y,z)", str_density_function); // Construct InjectorDensity with InjectorDensityParser. 
- density_parser = std::make_unique(makeParser( - str_density_function,{"x","y","z"})); + density_parser = std::make_unique( + utils::parser::makeParser(str_density_function,{"x","y","z"})); h_inj_rho.reset(new InjectorDensity((InjectorDensityParser*)nullptr, - density_parser->compile<3>())); + density_parser->compile<3>())); } else { //No need for profile definition if external file is used std::string injection_style = "none"; @@ -569,9 +589,9 @@ void PlasmaInjector::parseMomentum (amrex::ParmParse& pp) amrex::Real ux = 0._rt; amrex::Real uy = 0._rt; amrex::Real uz = 0._rt; - queryWithParser(pp, "ux", ux); - queryWithParser(pp, "uy", uy); - queryWithParser(pp, "uz", uz); + utils::parser::queryWithParser(pp, "ux", ux); + utils::parser::queryWithParser(pp, "uy", uy); + utils::parser::queryWithParser(pp, "uz", uz); // Construct InjectorMomentum with InjectorMomentumConstant. h_inj_mom.reset(new InjectorMomentum((InjectorMomentumConstant*)nullptr, ux, uy, uz)); } else if (mom_dist_s == "custom") { @@ -584,12 +604,12 @@ void PlasmaInjector::parseMomentum (amrex::ParmParse& pp) amrex::Real ux_th = 0._rt; amrex::Real uy_th = 0._rt; amrex::Real uz_th = 0._rt; - queryWithParser(pp, "ux_m", ux_m); - queryWithParser(pp, "uy_m", uy_m); - queryWithParser(pp, "uz_m", uz_m); - queryWithParser(pp, "ux_th", ux_th); - queryWithParser(pp, "uy_th", uy_th); - queryWithParser(pp, "uz_th", uz_th); + utils::parser::queryWithParser(pp, "ux_m", ux_m); + utils::parser::queryWithParser(pp, "uy_m", uy_m); + utils::parser::queryWithParser(pp, "uz_m", uz_m); + utils::parser::queryWithParser(pp, "ux_th", ux_th); + utils::parser::queryWithParser(pp, "uy_th", uy_th); + utils::parser::queryWithParser(pp, "uz_th", uz_th); // Construct InjectorMomentum with InjectorMomentumGaussian. h_inj_mom.reset(new InjectorMomentum((InjectorMomentumGaussian*)nullptr, ux_m, uy_m, uz_m, ux_th, uy_th, uz_th)); @@ -602,12 +622,12 @@ void PlasmaInjector::parseMomentum (amrex::ParmParse& pp) amrex::Real ux_th = 0._rt; amrex::Real uy_th = 0._rt; amrex::Real uz_th = 0._rt; - queryWithParser(pp, "ux_m", ux_m); - queryWithParser(pp, "uy_m", uy_m); - queryWithParser(pp, "uz_m", uz_m); - queryWithParser(pp, "ux_th", ux_th); - queryWithParser(pp, "uy_th", uy_th); - queryWithParser(pp, "uz_th", uz_th); + utils::parser::queryWithParser(pp, "ux_m", ux_m); + utils::parser::queryWithParser(pp, "uy_m", uy_m); + utils::parser::queryWithParser(pp, "uz_m", uz_m); + utils::parser::queryWithParser(pp, "ux_th", ux_th); + utils::parser::queryWithParser(pp, "uy_th", uy_th); + utils::parser::queryWithParser(pp, "uz_th", uz_th); // Construct InjectorMomentum with InjectorMomentumGaussianFlux. h_inj_mom.reset(new InjectorMomentum((InjectorMomentumGaussianFlux*)nullptr, ux_m, uy_m, uz_m, ux_th, uy_th, uz_th, @@ -628,24 +648,24 @@ void PlasmaInjector::parseMomentum (amrex::ParmParse& pp) h_inj_mom.reset(new InjectorMomentum((InjectorMomentumJuttner*)nullptr, getTemp, getVel)); } else if (mom_dist_s == "radial_expansion") { amrex::Real u_over_r = 0._rt; - queryWithParser(pp, "u_over_r", u_over_r); + utils::parser::queryWithParser(pp, "u_over_r", u_over_r); // Construct InjectorMomentum with InjectorMomentumRadialExpansion. 
h_inj_mom.reset(new InjectorMomentum ((InjectorMomentumRadialExpansion*)nullptr, u_over_r)); } else if (mom_dist_s == "parse_momentum_function") { - Store_parserString(pp, "momentum_function_ux(x,y,z)", - str_momentum_function_ux); - Store_parserString(pp, "momentum_function_uy(x,y,z)", - str_momentum_function_uy); - Store_parserString(pp, "momentum_function_uz(x,y,z)", - str_momentum_function_uz); + utils::parser::Store_parserString(pp, "momentum_function_ux(x,y,z)", + str_momentum_function_ux); + utils::parser::Store_parserString(pp, "momentum_function_uy(x,y,z)", + str_momentum_function_uy); + utils::parser::Store_parserString(pp, "momentum_function_uz(x,y,z)", + str_momentum_function_uz); // Construct InjectorMomentum with InjectorMomentumParser. - ux_parser = std::make_unique(makeParser(str_momentum_function_ux, - {"x","y","z"})); - uy_parser = std::make_unique(makeParser(str_momentum_function_uy, - {"x","y","z"})); - uz_parser = std::make_unique(makeParser(str_momentum_function_uz, - {"x","y","z"})); + ux_parser = std::make_unique( + utils::parser::makeParser(str_momentum_function_ux, {"x","y","z"})); + uy_parser = std::make_unique( + utils::parser::makeParser(str_momentum_function_uy, {"x","y","z"})); + uz_parser = std::make_unique( + utils::parser::makeParser(str_momentum_function_uz, {"x","y","z"})); h_inj_mom.reset(new InjectorMomentum((InjectorMomentumParser*)nullptr, ux_parser->compile<3>(), uy_parser->compile<3>(), diff --git a/Source/Initialization/TemperatureProperties.H b/Source/Initialization/TemperatureProperties.H index 18e75be60ae..c62aec83e4c 100644 --- a/Source/Initialization/TemperatureProperties.H +++ b/Source/Initialization/TemperatureProperties.H @@ -9,8 +9,6 @@ #ifndef TEMPERATURE_PROPERTIES_H_ #define TEMPERATURE_PROPERTIES_H_ -#include - #include #include #include diff --git a/Source/Initialization/TemperatureProperties.cpp b/Source/Initialization/TemperatureProperties.cpp index 901275b0426..6414fd11826 100644 --- a/Source/Initialization/TemperatureProperties.cpp +++ b/Source/Initialization/TemperatureProperties.cpp @@ -7,6 +7,7 @@ */ #include "TemperatureProperties.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include @@ -25,7 +26,8 @@ TemperatureProperties::TemperatureProperties (amrex::ParmParse& pp) { pp.query("theta_distribution_type", temp_dist_s); pp.query("momentum_distribution_type", mom_dist_s); if (temp_dist_s == "constant") { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(queryWithParser(pp, "theta", theta), + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + utils::parser::queryWithParser(pp, "theta", theta), "Temperature parameter theta not specified"); // Do validation on theta value @@ -56,9 +58,10 @@ TemperatureProperties::TemperatureProperties (amrex::ParmParse& pp) { } else if (temp_dist_s == "parser") { std::string str_theta_function; - Store_parserString(pp, "theta_function(x,y,z)", str_theta_function); + utils::parser::Store_parserString(pp, "theta_function(x,y,z)", str_theta_function); m_ptr_temperature_parser = - std::make_unique(makeParser(str_theta_function,{"x","y","z"})); + std::make_unique( + utils::parser::makeParser(str_theta_function,{"x","y","z"})); m_type = TempParserFunction; } else { diff --git a/Source/Initialization/VelocityProperties.H b/Source/Initialization/VelocityProperties.H index e9e6362bf35..c8744fcfb63 100644 --- a/Source/Initialization/VelocityProperties.H +++ b/Source/Initialization/VelocityProperties.H @@ -8,8 +8,6 @@ #ifndef VELOCITY_PROPERTIES_H_ #define VELOCITY_PROPERTIES_H_ -#include - #include #include 
#include diff --git a/Source/Initialization/VelocityProperties.cpp b/Source/Initialization/VelocityProperties.cpp index 59788fc7e72..95f41d403d0 100644 --- a/Source/Initialization/VelocityProperties.cpp +++ b/Source/Initialization/VelocityProperties.cpp @@ -8,6 +8,7 @@ #include "VelocityProperties.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" VelocityProperties::VelocityProperties (amrex::ParmParse& pp) { @@ -45,7 +46,7 @@ VelocityProperties::VelocityProperties (amrex::ParmParse& pp) { pp.query("beta_distribution_type", vel_dist_s); if (vel_dist_s == "constant") { - queryWithParser(pp, "beta", m_velocity); + utils::parser::queryWithParser(pp, "beta", m_velocity); m_type = VelConstantValue; WARPX_ALWAYS_ASSERT_WITH_MESSAGE( m_velocity > -1 && m_velocity < 1, @@ -55,9 +56,10 @@ VelocityProperties::VelocityProperties (amrex::ParmParse& pp) { } else if (vel_dist_s == "parser") { std::string str_beta_function; - Store_parserString(pp, "beta_function(x,y,z)", str_beta_function); + utils::parser::Store_parserString(pp, "beta_function(x,y,z)", str_beta_function); m_ptr_velocity_parser = - std::make_unique(makeParser(str_beta_function,{"x","y","z"})); + std::make_unique( + utils::parser::makeParser(str_beta_function,{"x","y","z"})); m_type = VelParserFunction; } else { diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index b28c9bd5957..a6d959cafa5 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -23,11 +23,11 @@ #include "Particles/MultiParticleContainer.H" #include "Utils/Logo/GetLogo.H" #include "Utils/MPIInitHelpers.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include #include @@ -699,12 +699,12 @@ WarpX::InitLevelData (int lev, Real /*time*/) // if the input string is "constant", the values for the // external grid must be provided in the input. if (B_ext_grid_s == "constant") - getArrWithParser(pp_warpx, "B_external_grid", B_external_grid); + utils::parser::getArrWithParser(pp_warpx, "B_external_grid", B_external_grid); // if the input string is "constant", the values for the // external grid must be provided in the input. 
if (E_ext_grid_s == "constant") - getArrWithParser(pp_warpx, "E_external_grid", E_external_grid); + utils::parser::getArrWithParser(pp_warpx, "E_external_grid", E_external_grid); // initialize the averaged fields only if the averaged algorithm // is activated ('psatd.do_time_averaging=1') @@ -775,18 +775,18 @@ WarpX::InitLevelData (int lev, Real /*time*/) amrex::Abort(Utils::TextMsg::Err( "E and B parser for external fields does not work with RZ -- TO DO")); #endif - Store_parserString(pp_warpx, "Bx_external_grid_function(x,y,z)", - str_Bx_ext_grid_function); - Store_parserString(pp_warpx, "By_external_grid_function(x,y,z)", - str_By_ext_grid_function); - Store_parserString(pp_warpx, "Bz_external_grid_function(x,y,z)", - str_Bz_ext_grid_function); + utils::parser::Store_parserString(pp_warpx, "Bx_external_grid_function(x,y,z)", + str_Bx_ext_grid_function); + utils::parser::Store_parserString(pp_warpx, "By_external_grid_function(x,y,z)", + str_By_ext_grid_function); + utils::parser::Store_parserString(pp_warpx, "Bz_external_grid_function(x,y,z)", + str_Bz_ext_grid_function); Bxfield_parser = std::make_unique( - makeParser(str_Bx_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Bx_ext_grid_function,{"x","y","z"})); Byfield_parser = std::make_unique( - makeParser(str_By_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_By_ext_grid_function,{"x","y","z"})); Bzfield_parser = std::make_unique( - makeParser(str_Bz_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Bz_ext_grid_function,{"x","y","z"})); // Initialize Bfield_fp with external function InitializeExternalFieldsOnGridUsingParser(Bfield_fp[lev][0].get(), @@ -833,19 +833,19 @@ WarpX::InitLevelData (int lev, Real /*time*/) amrex::Abort(Utils::TextMsg::Err( "E and B parser for external fields does not work with RZ -- TO DO")); #endif - Store_parserString(pp_warpx, "Ex_external_grid_function(x,y,z)", - str_Ex_ext_grid_function); - Store_parserString(pp_warpx, "Ey_external_grid_function(x,y,z)", - str_Ey_ext_grid_function); - Store_parserString(pp_warpx, "Ez_external_grid_function(x,y,z)", - str_Ez_ext_grid_function); + utils::parser::Store_parserString(pp_warpx, "Ex_external_grid_function(x,y,z)", + str_Ex_ext_grid_function); + utils::parser::Store_parserString(pp_warpx, "Ey_external_grid_function(x,y,z)", + str_Ey_ext_grid_function); + utils::parser::Store_parserString(pp_warpx, "Ez_external_grid_function(x,y,z)", + str_Ez_ext_grid_function); Exfield_parser = std::make_unique( - makeParser(str_Ex_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ex_ext_grid_function,{"x","y","z"})); Eyfield_parser = std::make_unique( - makeParser(str_Ey_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ey_ext_grid_function,{"x","y","z"})); Ezfield_parser = std::make_unique( - makeParser(str_Ez_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ez_ext_grid_function,{"x","y","z"})); // Initialize Efield_fp with external function InitializeExternalFieldsOnGridUsingParser(Efield_fp[lev][0].get(), diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp index c69221c0d28..bc0b4381df8 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp @@ -6,9 +6,9 @@ */ #include "Laser/LaserProfiles.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpX_Complex.H" -#include 
"Utils/WarpXUtil.H" #include #include @@ -41,7 +41,7 @@ WarpXLaserProfiles::FieldFunctionLaserProfile::init ( symbols.erase("t"); // after removing variables, we are left with constants for (auto it = symbols.begin(); it != symbols.end(); ) { Real v; - if (queryWithParser(ppc, it->c_str(), v)) { + if (utils::parser::queryWithParser(ppc, it->c_str(), v)) { m_parser.setConstant(*it, v); it = symbols.erase(it); } else { diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp index ef795057565..376ec949504 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp @@ -6,8 +6,10 @@ */ #include "Laser/LaserProfiles.H" +#include "Utils/Algorithms/LinearInterpolation.H" +#include "Utils/Algorithms/UpperBound.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpX_Complex.H" #include @@ -65,7 +67,7 @@ WarpXLaserProfiles::FromTXYEFileLaserProfile::init ( //Set time_chunk_size m_params.time_chunk_size = m_params.nt; int temp = 1; - if(queryWithParser(ppl ,"time_chunk_size", temp)){ + if(utils::parser::queryWithParser(ppl ,"time_chunk_size", temp)){ m_params.time_chunk_size = min( temp, m_params.time_chunk_size); } @@ -74,7 +76,7 @@ WarpXLaserProfiles::FromTXYEFileLaserProfile::init ( } //Reads the (optional) delay - queryWithParser(ppl, "delay", m_params.t_delay); + utils::parser::queryWithParser(ppl, "delay", m_params.t_delay); //Allocate memory for E_data Vector const int data_size = m_params.time_chunk_size* @@ -402,7 +404,7 @@ WarpXLaserProfiles::FromTXYEFileLaserProfile::internal_fill_amplitude_uniform( (i_interp-tmp_idx_first_time)*tmp_nx*tmp_ny+ j_interp*tmp_ny + k_interp; }; - amplitude[i] = WarpXUtilAlgo::trilinear_interp( + amplitude[i] = utils::algorithms::trilinear_interp( t_left, t_right, x_0, x_1, y_0, y_1, @@ -421,7 +423,7 @@ WarpXLaserProfiles::FromTXYEFileLaserProfile::internal_fill_amplitude_uniform( const auto idx = [=](int i_interp, int j_interp){ return (i_interp-tmp_idx_first_time) * tmp_nx + j_interp; }; - amplitude[i] = WarpXUtilAlgo::bilinear_interp( + amplitude[i] = utils::algorithms::bilinear_interp( t_left, t_right, x_0, x_1, p_E_data[idx(idx_t_left, idx_x_left)], @@ -491,14 +493,14 @@ WarpXLaserProfiles::FromTXYEFileLaserProfile::internal_fill_amplitude_nonuniform #endif //Find indices along x - auto const p_x_right = WarpXUtilAlgo::upper_bound( + auto const p_x_right = utils::algorithms::upper_bound( p_x_coords, p_x_coords+tmp_x_coords_size, Xp[ip]); const int idx_x_right = p_x_right - p_x_coords; const int idx_x_left = idx_x_right - 1; #if (defined(WARPX_DIM_3D) || (defined WARPX_DIM_RZ)) //Find indices along y - auto const p_y_right = WarpXUtilAlgo::upper_bound( + auto const p_y_right = utils::algorithms::upper_bound( p_y_coords, p_y_coords+tmp_y_coords_size, Yp[ip]); const int idx_y_right = p_y_right - p_y_coords; const int idx_y_left = idx_y_right - 1; @@ -509,7 +511,7 @@ WarpXLaserProfiles::FromTXYEFileLaserProfile::internal_fill_amplitude_nonuniform (i-tmp_idx_first_time)*tmp_x_coords_size*tmp_y_coords_size+ j*tmp_y_coords_size + k; }; - amplitude[ip] = WarpXUtilAlgo::trilinear_interp( + amplitude[ip] = utils::algorithms::trilinear_interp( t_left, t_right, p_x_coords[idx_x_left], p_x_coords[idx_x_right], p_y_coords[idx_y_left], p_y_coords[idx_y_right], @@ -528,7 +530,7 @@ 
WarpXLaserProfiles::FromTXYEFileLaserProfile::internal_fill_amplitude_nonuniform const auto idx = [=](int i, int j){ return (i-tmp_idx_first_time) * tmp_x_coords_size + j; }; - amplitude[ip] = WarpXUtilAlgo::bilinear_interp( + amplitude[ip] = utils::algorithms::bilinear_interp( t_left, t_right, p_x_coords[idx_x_left], p_x_coords[idx_x_right], p_E_data[idx(idx_t_left, idx_x_left)], diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp index 33de9c1dabb..c1dca774a11 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp @@ -7,9 +7,9 @@ */ #include "Laser/LaserProfiles.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpX_Complex.H" #include @@ -39,17 +39,17 @@ WarpXLaserProfiles::GaussianLaserProfile::init ( m_common_params = params; // Parse the properties of the Gaussian profile - getWithParser(ppl, "profile_waist", m_params.waist); - getWithParser(ppl, "profile_duration", m_params.duration); - getWithParser(ppl, "profile_t_peak", m_params.t_peak); - getWithParser(ppl, "profile_focal_distance", m_params.focal_distance); - queryWithParser(ppl, "zeta", m_params.zeta); - queryWithParser(ppl, "beta", m_params.beta); - queryWithParser(ppl, "phi2", m_params.phi2); - queryWithParser(ppl, "phi0", m_params.phi0); + utils::parser::getWithParser(ppl, "profile_waist", m_params.waist); + utils::parser::getWithParser(ppl, "profile_duration", m_params.duration); + utils::parser::getWithParser(ppl, "profile_t_peak", m_params.t_peak); + utils::parser::getWithParser(ppl, "profile_focal_distance", m_params.focal_distance); + utils::parser::queryWithParser(ppl, "zeta", m_params.zeta); + utils::parser::queryWithParser(ppl, "beta", m_params.beta); + utils::parser::queryWithParser(ppl, "phi2", m_params.phi2); + utils::parser::queryWithParser(ppl, "phi0", m_params.phi0); m_params.stc_direction = m_common_params.p_X; - queryArrWithParser(ppl, "stc_direction", m_params.stc_direction); + utils::parser::queryArrWithParser(ppl, "stc_direction", m_params.stc_direction); auto const s = 1.0_rt / std::sqrt( m_params.stc_direction[0]*m_params.stc_direction[0] + m_params.stc_direction[1]*m_params.stc_direction[1] + diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp index 6ea8070dbd9..2fe1a2341dc 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp @@ -6,8 +6,8 @@ */ #include "Laser/LaserProfiles.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpX_Complex.H" #include @@ -26,9 +26,12 @@ WarpXLaserProfiles::HarrisLaserProfile::init ( CommonLaserParameters params) { // Parse the properties of the Harris profile - getWithParser(ppl, "profile_waist", m_params.waist); - getWithParser(ppl, "profile_duration", m_params.duration); - getWithParser(ppl, "profile_focal_distance", m_params.focal_distance); + utils::parser::getWithParser( + ppl, "profile_waist", m_params.waist); + utils::parser::getWithParser( + ppl, "profile_duration", m_params.duration); + utils::parser::getWithParser( + ppl, "profile_focal_distance", m_params.focal_distance); //Copy common params m_common_params = params; } diff --git a/Source/Parallelization/GuardCellManager.cpp 
b/Source/Parallelization/GuardCellManager.cpp index d32d696190e..d01230e4b5e 100644 --- a/Source/Parallelization/GuardCellManager.cpp +++ b/Source/Parallelization/GuardCellManager.cpp @@ -15,10 +15,10 @@ # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif #include "Filter/NCIGodfreyFilter.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include #include @@ -201,9 +201,9 @@ guardCellManager::Init ( int ngFFt_z = (do_nodal || galilean) ? noz_fft : noz_fft / 2; ParmParse pp_psatd("psatd"); - queryWithParser(pp_psatd, "nx_guard", ngFFt_x); - queryWithParser(pp_psatd, "ny_guard", ngFFt_y); - queryWithParser(pp_psatd, "nz_guard", ngFFt_z); + utils::parser::queryWithParser(pp_psatd, "nx_guard", ngFFt_x); + utils::parser::queryWithParser(pp_psatd, "ny_guard", ngFFt_y); + utils::parser::queryWithParser(pp_psatd, "nz_guard", ngFFt_z); #if defined(WARPX_DIM_3D) IntVect ngFFT = IntVect(ngFFt_x, ngFFt_y, ngFFt_z); diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 373cb281aa5..cee53031c07 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -14,7 +14,6 @@ #endif #include "Filter/BilinearFilter.H" #include "Utils/CoarsenMR.H" -#include "Utils/IntervalsParser.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" diff --git a/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp b/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp index 1f45062b781..8bc0d6dbab4 100644 --- a/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp +++ b/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp @@ -5,12 +5,13 @@ * License: BSD-3-Clause-LBNL */ #include "BackgroundMCCCollision.H" + #include "ImpactIonization.H" #include "Particles/ParticleCreation/FilterCopyTransform.H" #include "Particles/ParticleCreation/SmartCopy.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/ParticleUtils.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" @@ -29,36 +30,42 @@ BackgroundMCCCollision::BackgroundMCCCollision (std::string const collision_name amrex::ParmParse pp_collision_name(collision_name); amrex::ParticleReal background_density = 0; - if (queryWithParser(pp_collision_name, "background_density", background_density)) { + if (utils::parser::queryWithParser(pp_collision_name, "background_density", background_density)) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - (background_density > 0), "The background density must be greater than 0." 
- ); - m_background_density_parser = makeParser(std::to_string(background_density), {"x", "y", "z", "t"}); + (background_density > 0), + "The background density must be greater than 0."); + m_background_density_parser = + utils::parser::makeParser( + std::to_string(background_density), {"x", "y", "z", "t"}); } else { std::string background_density_str; pp_collision_name.get("background_density(x,y,z,t)", background_density_str); - m_background_density_parser = makeParser(background_density_str, {"x", "y", "z", "t"}); + m_background_density_parser = + utils::parser::makeParser(background_density_str, {"x", "y", "z", "t"}); } amrex::ParticleReal background_temperature; - if (queryWithParser(pp_collision_name, "background_temperature", background_temperature)) { + if (utils::parser::queryWithParser(pp_collision_name, "background_temperature", background_temperature)) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (background_temperature >= 0), "The background temperature must be positive." ); - m_background_temperature_parser = makeParser(std::to_string(background_temperature), {"x", "y", "z", "t"}); + m_background_temperature_parser = + utils::parser::makeParser(std::to_string(background_temperature), {"x", "y", "z", "t"}); } else { std::string background_temperature_str; pp_collision_name.get("background_temperature(x,y,z,t)", background_temperature_str); - m_background_temperature_parser = makeParser(background_temperature_str, {"x", "y", "z", "t"}); + m_background_temperature_parser = + utils::parser::makeParser(background_temperature_str, {"x", "y", "z", "t"}); } // compile parsers for background density and temperature m_background_density_func = m_background_density_parser.compile<4>(); m_background_temperature_func = m_background_temperature_parser.compile<4>(); - queryWithParser(pp_collision_name, "max_background_density", m_max_background_density); + utils::parser::queryWithParser( + pp_collision_name, "max_background_density", m_max_background_density); // if the background density is constant we can use that number to calculate // the maximum collision probability, if `max_background_density` was not // specified @@ -75,7 +82,8 @@ BackgroundMCCCollision::BackgroundMCCCollision (std::string const collision_name // will be used. If no neutral mass is specified and ionization is not // included the mass of the colliding species will be used m_background_mass = -1; - queryWithParser(pp_collision_name, "background_mass", m_background_mass); + utils::parser::queryWithParser( + pp_collision_name, "background_mass", m_background_mass); // query for a list of collision processes // these could be elastic, excitation, charge_exchange, back, etc. 
@@ -95,7 +103,8 @@ BackgroundMCCCollision::BackgroundMCCCollision (std::string const collision_name if (scattering_process.find("excitation") != std::string::npos || scattering_process.find("ionization") != std::string::npos) { std::string kw_energy = scattering_process + "_energy"; - getWithParser(pp_collision_name, kw_energy.c_str(), energy); + utils::parser::getWithParser( + pp_collision_name, kw_energy.c_str(), energy); } MCCProcess process(scattering_process, cross_section_file, energy); diff --git a/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp b/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp index dbfc1b1d40d..2f438c8d650 100644 --- a/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp +++ b/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp @@ -5,8 +5,9 @@ * License: BSD-3-Clause-LBNL */ #include "BackgroundStopping.H" + +#include "Utils/Parser/ParserUtils.H" #include "Utils/ParticleUtils.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" @@ -35,12 +36,14 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) amrex::ParticleReal background_density; std::string background_density_str; - if (queryWithParser(pp_collision_name, "background_density", background_density)) { + if (utils::parser::queryWithParser(pp_collision_name, "background_density", background_density)) { AMREX_ALWAYS_ASSERT_WITH_MESSAGE(background_density > 0, "For background stopping, the background density must be greater than 0"); - m_background_density_parser = makeParser(std::to_string(background_density), {"x", "y", "z", "t"}); + m_background_density_parser = + utils::parser::makeParser(std::to_string(background_density), {"x", "y", "z", "t"}); } else if (pp_collision_name.query("background_density(x,y,z,t)", background_density_str)) { - m_background_density_parser = makeParser(background_density_str, {"x", "y", "z", "t"}); + m_background_density_parser = + utils::parser::makeParser(background_density_str, {"x", "y", "z", "t"}); } else { AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, "For background stopping, the background density must be specified."); @@ -48,12 +51,14 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) amrex::ParticleReal background_temperature; std::string background_temperature_str; - if (queryWithParser(pp_collision_name, "background_temperature", background_temperature)) { + if (utils::parser::queryWithParser(pp_collision_name, "background_temperature", background_temperature)) { AMREX_ALWAYS_ASSERT_WITH_MESSAGE(background_temperature > 0, "For background stopping, the background temperature must be greater than 0"); - m_background_temperature_parser = makeParser(std::to_string(background_temperature), {"x", "y", "z", "t"}); + m_background_temperature_parser = + utils::parser::makeParser(std::to_string(background_temperature), {"x", "y", "z", "t"}); } else if (pp_collision_name.query("background_temperature(x,y,z,t)", background_temperature_str)) { - m_background_temperature_parser = makeParser(background_temperature_str, {"x", "y", "z", "t"}); + m_background_temperature_parser = + utils::parser::makeParser(background_temperature_str, {"x", "y", "z", "t"}); } else { AMREX_ALWAYS_ASSERT_WITH_MESSAGE(false, "For background stopping, the background temperature must be specified."); @@ -65,10 +70,13 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) if (m_background_type == BackgroundStoppingType::ELECTRONS) { 
m_background_mass = PhysConst::m_e; - queryWithParser(pp_collision_name, "background_mass", m_background_mass); + utils::parser::queryWithParser( + pp_collision_name, "background_mass", m_background_mass); } else if (m_background_type == BackgroundStoppingType::IONS) { - getWithParser(pp_collision_name, "background_mass", m_background_mass); - getWithParser(pp_collision_name, "background_charge_state", m_background_charge_state); + utils::parser::getWithParser( + pp_collision_name, "background_mass", m_background_mass); + utils::parser::getWithParser( + pp_collision_name, "background_charge_state", m_background_charge_state); } AMREX_ALWAYS_ASSERT_WITH_MESSAGE(m_background_mass > 0, "For background stopping, the background mass must be greater than 0"); diff --git a/Source/Particles/Collision/BinaryCollision/Coulomb/PairWiseCoulombCollisionFunc.H b/Source/Particles/Collision/BinaryCollision/Coulomb/PairWiseCoulombCollisionFunc.H index 0acdc9af86d..a1b8979e80c 100644 --- a/Source/Particles/Collision/BinaryCollision/Coulomb/PairWiseCoulombCollisionFunc.H +++ b/Source/Particles/Collision/BinaryCollision/Coulomb/PairWiseCoulombCollisionFunc.H @@ -11,7 +11,7 @@ #include "ElasticCollisionPerez.H" #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/WarpXUtil.H" +#include "Utils/Parser/ParserUtils.H" #include #include @@ -49,7 +49,8 @@ public: amrex::ParmParse pp_collision_name(collision_name); // default Coulomb log, if < 0, will be computed automatically m_CoulombLog = -1.0_prt; - queryWithParser(pp_collision_name, "CoulombLog", m_CoulombLog); + utils::parser::queryWithParser( + pp_collision_name, "CoulombLog", m_CoulombLog); } /** diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H index 391682b469e..25624469d71 100644 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H @@ -14,8 +14,8 @@ #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -94,15 +94,17 @@ public: // default fusion multiplier m_fusion_multiplier = 1.0_prt; - queryWithParser(pp_collision_name, "fusion_multiplier", m_fusion_multiplier); + utils::parser::queryWithParser( + pp_collision_name, "fusion_multiplier", m_fusion_multiplier); // default fusion probability threshold m_probability_threshold = 0.02_prt; - queryWithParser(pp_collision_name, "fusion_probability_threshold", - m_probability_threshold); + utils::parser::queryWithParser( + pp_collision_name, "fusion_probability_threshold", m_probability_threshold); // default fusion probability target_value m_probability_target_value = 0.002_prt; - queryWithParser(pp_collision_name, "fusion_probability_target_value", - m_probability_target_value); + utils::parser::queryWithParser( + pp_collision_name, "fusion_probability_target_value", + m_probability_target_value); } /** diff --git a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H index 510c59094c9..be77ae9e075 100644 --- a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H +++ 
b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H @@ -15,7 +15,6 @@ #include "Particles/ParticleCreation/SmartCopy.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include diff --git a/Source/Particles/Collision/CollisionBase.cpp b/Source/Particles/Collision/CollisionBase.cpp index 948f277c3ff..08376d6fa41 100644 --- a/Source/Particles/Collision/CollisionBase.cpp +++ b/Source/Particles/Collision/CollisionBase.cpp @@ -6,7 +6,7 @@ */ #include "CollisionBase.H" -#include "Utils/WarpXUtil.H" +#include "Utils/Parser/ParserUtils.H" #include @@ -19,6 +19,7 @@ CollisionBase::CollisionBase (std::string collision_name) // number of time steps between collisions m_ndt = 1; - queryWithParser(pp_collision_name, "ndt", m_ndt); + utils::parser::queryWithParser( + pp_collision_name, "ndt", m_ndt); } diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index b56129dc0b7..b810723fe6f 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -13,11 +13,11 @@ #include "Particles/LaserParticleContainer.H" #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/WarpXParticleContainer.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include @@ -95,9 +95,9 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, std::transform(laser_type_s.begin(), laser_type_s.end(), laser_type_s.begin(), ::tolower); // Parse the properties of the antenna - getArrWithParser(pp_laser_name, "position", m_position); - getArrWithParser(pp_laser_name, "direction", m_nvec); - getArrWithParser(pp_laser_name, "polarization", m_p_X); + utils::parser::getArrWithParser(pp_laser_name, "position", m_position); + utils::parser::getArrWithParser(pp_laser_name, "direction", m_nvec); + utils::parser::getArrWithParser(pp_laser_name, "polarization", m_p_X); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_position.size() == 3, m_laser_name + ".position must have three components."); @@ -106,12 +106,14 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_p_X.size() == 3, m_laser_name + ".polarization must have three components."); - getWithParser(pp_laser_name, "wavelength", m_wavelength); + utils::parser::getWithParser(pp_laser_name, "wavelength", m_wavelength); AMREX_ALWAYS_ASSERT_WITH_MESSAGE( m_wavelength > 0, "The laser wavelength must be >0."); - const bool e_max_is_specified = queryWithParser(pp_laser_name, "e_max", m_e_max); + const bool e_max_is_specified = + utils::parser::queryWithParser(pp_laser_name, "e_max", m_e_max); Real a0; - const bool a0_is_specified = queryWithParser(pp_laser_name, "a0", a0); + const bool a0_is_specified = + utils::parser::queryWithParser(pp_laser_name, "a0", a0); if (a0_is_specified){ Real omega = 2._rt*MathConst::pi*PhysConst::c/m_wavelength; m_e_max = PhysConst::m_e * omega * PhysConst::c * a0 / PhysConst::q_e; @@ -122,7 +124,8 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, ); pp_laser_name.query("do_continuous_injection", do_continuous_injection); - queryWithParser(pp_laser_name, "min_particles_per_mode", m_min_particles_per_mode); + utils::parser::queryWithParser(pp_laser_name, + "min_particles_per_mode", 
m_min_particles_per_mode); if (m_e_max == amrex::Real(0.)){ ablastr::warn_manager::WMRecordWarning("Laser", @@ -194,10 +197,12 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, m_laser_injection_box= Geom(0).ProbDomain(); { Vector lo, hi; - if (queryArrWithParser(pp_laser_name, "prob_lo", lo, 0, AMREX_SPACEDIM)) { + if (utils::parser::queryArrWithParser( + pp_laser_name, "prob_lo", lo, 0, AMREX_SPACEDIM)) { m_laser_injection_box.setLo(lo); } - if (queryArrWithParser(pp_laser_name, "prob_hi", hi, 0, AMREX_SPACEDIM)) { + if (utils::parser::queryArrWithParser( + pp_laser_name, "prob_hi", hi, 0, AMREX_SPACEDIM)) { m_laser_injection_box.setHi(hi); } } diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index e2edc4dd71e..7f355810e78 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -22,7 +22,6 @@ #include "PhysicalParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" -#include "Utils/WarpXUtil.H" #include "WarpXParticleContainer.H" #include "ParticleBoundaries.H" diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 662e0b45f6d..9c7563a3f16 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -32,8 +32,10 @@ #include "Particles/RigidInjectedParticleContainer.H" #include "Particles/WarpXParticleContainer.H" #include "SpeciesPhysicalProperties.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" +#include "Utils/WarpXUtil.H" #ifdef AMREX_USE_EB # include "EmbeddedBoundary/ParticleScraper.H" # include "EmbeddedBoundary/ParticleBoundaryProcess.H" @@ -169,13 +171,15 @@ MultiParticleContainer::ReadParameters () // then the values for the external B on particles must // be provided in the input file. if (m_B_ext_particle_s == "constant") - getArrWithParser(pp_particles, "B_external_particle", m_B_external_particle); + utils::parser::getArrWithParser( + pp_particles, "B_external_particle", m_B_external_particle); // if the input string for E_external on particles is "constant" // then the values for the external E on particles must // be provided in the input file. 
if (m_E_ext_particle_s == "constant") - getArrWithParser(pp_particles, "E_external_particle", m_E_external_particle); + utils::parser::getArrWithParser( + pp_particles, "E_external_particle", m_E_external_particle); // if the input string for B_ext_particle_s is // "parse_b_ext_particle_function" then the mathematical expression @@ -186,20 +190,23 @@ MultiParticleContainer::ReadParameters () std::string str_Bx_ext_particle_function; std::string str_By_ext_particle_function; std::string str_Bz_ext_particle_function; - Store_parserString(pp_particles, "Bx_external_particle_function(x,y,z,t)", - str_Bx_ext_particle_function); - Store_parserString(pp_particles, "By_external_particle_function(x,y,z,t)", - str_By_ext_particle_function); - Store_parserString(pp_particles, "Bz_external_particle_function(x,y,z,t)", - str_Bz_ext_particle_function); + utils::parser::Store_parserString( + pp_particles, "Bx_external_particle_function(x,y,z,t)", + str_Bx_ext_particle_function); + utils::parser::Store_parserString( + pp_particles, "By_external_particle_function(x,y,z,t)", + str_By_ext_particle_function); + utils::parser::Store_parserString( + pp_particles, "Bz_external_particle_function(x,y,z,t)", + str_Bz_ext_particle_function); // Parser for B_external on the particle m_Bx_particle_parser = std::make_unique( - makeParser(str_Bx_ext_particle_function,{"x","y","z","t"})); + utils::parser::makeParser(str_Bx_ext_particle_function,{"x","y","z","t"})); m_By_particle_parser = std::make_unique( - makeParser(str_By_ext_particle_function,{"x","y","z","t"})); + utils::parser::makeParser(str_By_ext_particle_function,{"x","y","z","t"})); m_Bz_particle_parser = std::make_unique( - makeParser(str_Bz_ext_particle_function,{"x","y","z","t"})); + utils::parser::makeParser(str_Bz_ext_particle_function,{"x","y","z","t"})); } @@ -212,19 +219,22 @@ MultiParticleContainer::ReadParameters () std::string str_Ex_ext_particle_function; std::string str_Ey_ext_particle_function; std::string str_Ez_ext_particle_function; - Store_parserString(pp_particles, "Ex_external_particle_function(x,y,z,t)", - str_Ex_ext_particle_function); - Store_parserString(pp_particles, "Ey_external_particle_function(x,y,z,t)", - str_Ey_ext_particle_function); - Store_parserString(pp_particles, "Ez_external_particle_function(x,y,z,t)", - str_Ez_ext_particle_function); + utils::parser::Store_parserString( + pp_particles, "Ex_external_particle_function(x,y,z,t)", + str_Ex_ext_particle_function); + utils::parser::Store_parserString( + pp_particles, "Ey_external_particle_function(x,y,z,t)", + str_Ey_ext_particle_function); + utils::parser::Store_parserString( + pp_particles, "Ez_external_particle_function(x,y,z,t)", + str_Ez_ext_particle_function); // Parser for E_external on the particle m_Ex_particle_parser = std::make_unique( - makeParser(str_Ex_ext_particle_function,{"x","y","z","t"})); + utils::parser::makeParser(str_Ex_ext_particle_function,{"x","y","z","t"})); m_Ey_particle_parser = std::make_unique( - makeParser(str_Ey_ext_particle_function,{"x","y","z","t"})); + utils::parser::makeParser(str_Ey_ext_particle_function,{"x","y","z","t"})); m_Ez_particle_parser = std::make_unique( - makeParser(str_Ez_ext_particle_function,{"x","y","z","t"})); + utils::parser::makeParser(str_Ez_ext_particle_function,{"x","y","z","t"})); } @@ -233,11 +243,17 @@ MultiParticleContainer::ReadParameters () // must be provided in the input file. 
if (m_E_ext_particle_s == "repeated_plasma_lens" || m_B_ext_particle_s == "repeated_plasma_lens") { - getWithParser(pp_particles, "repeated_plasma_lens_period", m_repeated_plasma_lens_period); + utils::parser::getWithParser( + pp_particles, "repeated_plasma_lens_period", + m_repeated_plasma_lens_period); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_repeated_plasma_lens_period > 0._rt, "The period of the repeated plasma lens must be greater than zero"); - getArrWithParser(pp_particles, "repeated_plasma_lens_starts", h_repeated_plasma_lens_starts); - getArrWithParser(pp_particles, "repeated_plasma_lens_lengths", h_repeated_plasma_lens_lengths); + utils::parser::getArrWithParser( + pp_particles, "repeated_plasma_lens_starts", + h_repeated_plasma_lens_starts); + utils::parser::getArrWithParser( + pp_particles, "repeated_plasma_lens_lengths", + h_repeated_plasma_lens_lengths); int n_lenses = static_cast(h_repeated_plasma_lens_starts.size()); d_repeated_plasma_lens_starts.resize(n_lenses); @@ -253,10 +269,14 @@ MultiParticleContainer::ReadParameters () h_repeated_plasma_lens_strengths_B.resize(n_lenses); if (m_E_ext_particle_s == "repeated_plasma_lens") { - getArrWithParser(pp_particles, "repeated_plasma_lens_strengths_E", h_repeated_plasma_lens_strengths_E); + utils::parser::getArrWithParser( + pp_particles, "repeated_plasma_lens_strengths_E", + h_repeated_plasma_lens_strengths_E); } if (m_B_ext_particle_s == "repeated_plasma_lens") { - getArrWithParser(pp_particles, "repeated_plasma_lens_strengths_B", h_repeated_plasma_lens_strengths_B); + utils::parser::getArrWithParser( + pp_particles, "repeated_plasma_lens_strengths_B", + h_repeated_plasma_lens_strengths_B); } d_repeated_plasma_lens_strengths_E.resize(n_lenses); @@ -373,18 +393,26 @@ MultiParticleContainer::ReadParameters () pp_qed_schwinger.get("ele_product_species", m_qed_schwinger_ele_product_name); pp_qed_schwinger.get("pos_product_species", m_qed_schwinger_pos_product_name); #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - getWithParser(pp_qed_schwinger, "y_size",m_qed_schwinger_y_size); + utils::parser::getWithParser( + pp_qed_schwinger, "y_size",m_qed_schwinger_y_size); #endif - queryWithParser(pp_qed_schwinger, "threshold_poisson_gaussian", - m_qed_schwinger_threshold_poisson_gaussian); - queryWithParser(pp_qed_schwinger, "xmin", m_qed_schwinger_xmin); - queryWithParser(pp_qed_schwinger, "xmax", m_qed_schwinger_xmax); + utils::parser::queryWithParser( + pp_qed_schwinger, "threshold_poisson_gaussian", + m_qed_schwinger_threshold_poisson_gaussian); + utils::parser::queryWithParser( + pp_qed_schwinger, "xmin", m_qed_schwinger_xmin); + utils::parser::queryWithParser( + pp_qed_schwinger, "xmax", m_qed_schwinger_xmax); #if defined(WARPX_DIM_3D) - queryWithParser(pp_qed_schwinger, "ymin", m_qed_schwinger_ymin); - queryWithParser(pp_qed_schwinger, "ymax", m_qed_schwinger_ymax); + utils::parser::queryWithParser( + pp_qed_schwinger, "ymin", m_qed_schwinger_ymin); + utils::parser::queryWithParser( + pp_qed_schwinger, "ymax", m_qed_schwinger_ymax); #endif - queryWithParser(pp_qed_schwinger, "zmin", m_qed_schwinger_zmin); - queryWithParser(pp_qed_schwinger, "zmax", m_qed_schwinger_zmax); + utils::parser::queryWithParser( + pp_qed_schwinger, "zmin", m_qed_schwinger_zmin); + utils::parser::queryWithParser( + pp_qed_schwinger, "zmax", m_qed_schwinger_zmax); } #endif initialized = true; @@ -1055,7 +1083,8 @@ void MultiParticleContainer::InitQuantumSync () //If specified, use a user-defined energy threshold for photon creation ParticleReal temp; constexpr 
auto mec2 = PhysConst::c * PhysConst::c * PhysConst::m_e; - if(queryWithParser(pp_qed_qs, "photon_creation_energy_threshold", temp)){ + if(utils::parser::queryWithParser( + pp_qed_qs, "photon_creation_energy_threshold", temp)){ temp *= mec2; m_quantum_sync_photon_creation_energy_threshold = temp; } @@ -1069,7 +1098,7 @@ void MultiParticleContainer::InitQuantumSync () // considered for Synchrotron emission. If a lepton has chi < chi_min, // the optical depth is not evolved and photon generation is ignored amrex::Real qs_minimum_chi_part; - getWithParser(pp_qed_qs, "chi_min", qs_minimum_chi_part); + utils::parser::getWithParser(pp_qed_qs, "chi_min", qs_minimum_chi_part); pp_qed_qs.query("lookup_table_mode", lookup_table_mode); @@ -1127,7 +1156,7 @@ void MultiParticleContainer::InitBreitWheeler () // considered for pair production. If a photon has chi < chi_min, // the optical depth is not evolved and photon generation is ignored amrex::Real bw_minimum_chi_part; - if(!queryWithParser(pp_qed_bw, "chi_min", bw_minimum_chi_part)) + if(!utils::parser::queryWithParser(pp_qed_bw, "chi_min", bw_minimum_chi_part)) amrex::Abort("qed_bw.chi_min should be provided!"); pp_qed_bw.query("lookup_table_mode", lookup_table_mode); @@ -1189,7 +1218,7 @@ MultiParticleContainer::QuantumSyncGenerateTable () // considered for Synchrotron emission. If a lepton has chi < chi_min, // the optical depth is not evolved and photon generation is ignored amrex::Real qs_minimum_chi_part; - getWithParser(pp_qed_qs, "chi_min", qs_minimum_chi_part); + utils::parser::getWithParser(pp_qed_qs, "chi_min", qs_minimum_chi_part); if(ParallelDescriptor::IOProcessor()){ PicsarQuantumSyncCtrl ctrl; @@ -1202,14 +1231,17 @@ MultiParticleContainer::QuantumSyncGenerateTable () //Minimun chi for the table. If a lepton has chi < tab_dndt_chi_min, //chi is considered as if it were equal to tab_dndt_chi_min - getWithParser(pp_qed_qs, "tab_dndt_chi_min", ctrl.dndt_params.chi_part_min); + utils::parser::getWithParser( + pp_qed_qs, "tab_dndt_chi_min", ctrl.dndt_params.chi_part_min); //Maximum chi for the table. If a lepton has chi > tab_dndt_chi_max, //chi is considered as if it were equal to tab_dndt_chi_max - getWithParser(pp_qed_qs, "tab_dndt_chi_max", ctrl.dndt_params.chi_part_max); + utils::parser::getWithParser( + pp_qed_qs, "tab_dndt_chi_max", ctrl.dndt_params.chi_part_max); //How many points should be used for chi in the table - getWithParser(pp_qed_qs, "tab_dndt_how_many", ctrl.dndt_params.chi_part_how_many); + utils::parser::getWithParser( + pp_qed_qs, "tab_dndt_how_many", ctrl.dndt_params.chi_part_how_many); //------ //--- sub-table 2 (2D) @@ -1219,23 +1251,28 @@ MultiParticleContainer::QuantumSyncGenerateTable () //Minimun chi for the table. If a lepton has chi < tab_em_chi_min, //chi is considered as if it were equal to tab_em_chi_min - getWithParser(pp_qed_qs, "tab_em_chi_min", ctrl.phot_em_params.chi_part_min); + utils::parser::getWithParser( + pp_qed_qs, "tab_em_chi_min", ctrl.phot_em_params.chi_part_min); //Maximum chi for the table. 
If a lepton has chi > tab_em_chi_max, //chi is considered as if it were equal to tab_em_chi_max - getWithParser(pp_qed_qs, "tab_em_chi_max", ctrl.phot_em_params.chi_part_max); + utils::parser::getWithParser( + pp_qed_qs, "tab_em_chi_max", ctrl.phot_em_params.chi_part_max); //How many points should be used for chi in the table - getWithParser(pp_qed_qs, "tab_em_chi_how_many", ctrl.phot_em_params.chi_part_how_many); + utils::parser::getWithParser( + pp_qed_qs, "tab_em_chi_how_many", ctrl.phot_em_params.chi_part_how_many); //The other axis of the table is the ratio between the quantum //parameter of the emitted photon and the quantum parameter of the //lepton. This parameter is the minimum ratio to consider for the table. - getWithParser(pp_qed_qs, "tab_em_frac_min", ctrl.phot_em_params.frac_min); + utils::parser::getWithParser( + pp_qed_qs, "tab_em_frac_min", ctrl.phot_em_params.frac_min); //This parameter is the number of different points to consider for the second //axis - getWithParser(pp_qed_qs, "tab_em_frac_how_many", ctrl.phot_em_params.frac_how_many); + utils::parser::getWithParser( + pp_qed_qs, "tab_em_frac_how_many", ctrl.phot_em_params.frac_how_many); //==================== m_shr_p_qs_engine->compute_lookup_tables(ctrl, qs_minimum_chi_part); @@ -1270,7 +1307,7 @@ MultiParticleContainer::BreitWheelerGenerateTable () // considered for pair production. If a photon has chi < chi_min, // the optical depth is not evolved and photon generation is ignored amrex::Real bw_minimum_chi_part; - getWithParser(pp_qed_bw, "chi_min", bw_minimum_chi_part); + utils::parser::getWithParser(pp_qed_bw, "chi_min", bw_minimum_chi_part); if(ParallelDescriptor::IOProcessor()){ PicsarBreitWheelerCtrl ctrl; @@ -1283,14 +1320,17 @@ MultiParticleContainer::BreitWheelerGenerateTable () //Minimun chi for the table. If a photon has chi < tab_dndt_chi_min, //an analytical approximation is used. - getWithParser(pp_qed_bw, "tab_dndt_chi_min", ctrl.dndt_params.chi_phot_min); + utils::parser::getWithParser( + pp_qed_bw, "tab_dndt_chi_min", ctrl.dndt_params.chi_phot_min); //Maximum chi for the table. If a photon has chi > tab_dndt_chi_max, //an analytical approximation is used. - getWithParser(pp_qed_bw, "tab_dndt_chi_max", ctrl.dndt_params.chi_phot_max); + utils::parser::getWithParser( + pp_qed_bw, "tab_dndt_chi_max", ctrl.dndt_params.chi_phot_max); //How many points should be used for chi in the table - getWithParser(pp_qed_bw, "tab_dndt_how_many", ctrl.dndt_params.chi_phot_how_many); + utils::parser::getWithParser( + pp_qed_bw, "tab_dndt_how_many", ctrl.dndt_params.chi_phot_how_many); //------ //--- sub-table 2 (2D) @@ -1300,19 +1340,23 @@ MultiParticleContainer::BreitWheelerGenerateTable () //Minimun chi for the table. If a photon has chi < tab_pair_chi_min //chi is considered as it were equal to chi_phot_tpair_min - getWithParser(pp_qed_bw, "tab_pair_chi_min", ctrl.pair_prod_params.chi_phot_min); + utils::parser::getWithParser( + pp_qed_bw, "tab_pair_chi_min", ctrl.pair_prod_params.chi_phot_min); //Maximum chi for the table. 
If a photon has chi > tab_pair_chi_max //chi is considered as it were equal to chi_phot_tpair_max - getWithParser(pp_qed_bw, "tab_pair_chi_max", ctrl.pair_prod_params.chi_phot_max); + utils::parser::getWithParser( + pp_qed_bw, "tab_pair_chi_max", ctrl.pair_prod_params.chi_phot_max); //How many points should be used for chi in the table - getWithParser(pp_qed_bw, "tab_pair_chi_how_many", ctrl.pair_prod_params.chi_phot_how_many); + utils::parser::getWithParser( + pp_qed_bw, "tab_pair_chi_how_many", ctrl.pair_prod_params.chi_phot_how_many); //The other axis of the table is the fraction of the initial energy //'taken away' by the most energetic particle of the pair. //This parameter is the number of different fractions to consider - getWithParser(pp_qed_bw, "tab_pair_frac_how_many", ctrl.pair_prod_params.frac_how_many); + utils::parser::getWithParser( + pp_qed_bw, "tab_pair_frac_how_many", ctrl.pair_prod_params.frac_how_many); //==================== m_shr_p_bw_engine->compute_lookup_tables(ctrl, bw_minimum_chi_part); diff --git a/Source/Particles/ParticleBoundaries.H b/Source/Particles/ParticleBoundaries.H index 00b6368a5ea..1f4fb0372eb 100644 --- a/Source/Particles/ParticleBoundaries.H +++ b/Source/Particles/ParticleBoundaries.H @@ -8,7 +8,6 @@ #define PARTICLEBOUNDARIES_H_ #include "Utils/WarpXAlgorithmSelection.H" -#include "Utils/WarpXUtil.H" #include #include diff --git a/Source/Particles/ParticleBoundaries.cpp b/Source/Particles/ParticleBoundaries.cpp index 2bdbd0d1405..a6e80717e81 100644 --- a/Source/Particles/ParticleBoundaries.cpp +++ b/Source/Particles/ParticleBoundaries.cpp @@ -7,6 +7,8 @@ #include "ParticleBoundaries.H" +#include "Utils/Parser/ParserUtils.H" + ParticleBoundaries::ParticleBoundaries () noexcept { SetAll(ParticleBoundaryType::Absorbing); @@ -64,18 +66,24 @@ ParticleBoundaries::CheckAll (ParticleBoundaryType bc) void ParticleBoundaries::BuildReflectionModelParsers () { - reflection_model_xlo_parser = std::make_unique(makeParser(reflection_model_xlo_str, {"v"})); + reflection_model_xlo_parser = std::make_unique( + utils::parser::makeParser(reflection_model_xlo_str, {"v"})); data.reflection_model_xlo = reflection_model_xlo_parser->compile<1>(); - reflection_model_xhi_parser = std::make_unique(makeParser(reflection_model_xhi_str, {"v"})); + reflection_model_xhi_parser = std::make_unique( + utils::parser::makeParser(reflection_model_xhi_str, {"v"})); data.reflection_model_xhi = reflection_model_xhi_parser->compile<1>(); #ifdef WARPX_DIM_3D - reflection_model_ylo_parser = std::make_unique(makeParser(reflection_model_ylo_str, {"v"})); + reflection_model_ylo_parser = std::make_unique( + utils::parser::makeParser(reflection_model_ylo_str, {"v"})); data.reflection_model_ylo = reflection_model_ylo_parser->compile<1>(); - reflection_model_yhi_parser = std::make_unique(makeParser(reflection_model_yhi_str, {"v"})); + reflection_model_yhi_parser = std::make_unique( + utils::parser::makeParser(reflection_model_yhi_str, {"v"})); data.reflection_model_yhi = reflection_model_yhi_parser->compile<1>(); #endif - reflection_model_zlo_parser = std::make_unique(makeParser(reflection_model_zlo_str, {"v"})); + reflection_model_zlo_parser = std::make_unique( + utils::parser::makeParser(reflection_model_zlo_str, {"v"})); data.reflection_model_zlo = reflection_model_zlo_parser->compile<1>(); - reflection_model_zhi_parser = std::make_unique(makeParser(reflection_model_zhi_str, {"v"})); + reflection_model_zhi_parser = std::make_unique( + utils::parser::makeParser(reflection_model_zhi_str, {"v"})); 
data.reflection_model_zhi = reflection_model_zhi_parser->compile<1>(); } diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 97f890f33d2..fa9b0c6472e 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -31,12 +31,12 @@ #include "Particles/Pusher/UpdatePosition.H" #include "Particles/SpeciesPhysicalProperties.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/IonizationEnergiesTable.H" +#include "Utils/Parser/ParserUtils.H" +#include "Utils/Physics/IonizationEnergiesTable.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -257,9 +257,12 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp pp_species_name.query("do_continuous_injection", do_continuous_injection); pp_species_name.query("initialize_self_fields", initialize_self_fields); - queryWithParser(pp_species_name, "self_fields_required_precision", self_fields_required_precision); - queryWithParser(pp_species_name, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); - queryWithParser(pp_species_name, "self_fields_max_iters", self_fields_max_iters); + utils::parser::queryWithParser( + pp_species_name, "self_fields_required_precision", self_fields_required_precision); + utils::parser::queryWithParser( + pp_species_name, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); + utils::parser::queryWithParser( + pp_species_name, "self_fields_max_iters", self_fields_max_iters); pp_species_name.query("self_fields_verbosity", self_fields_verbosity); // Whether to plot back-transformed (lab-frame) diagnostics // for this species. 
@@ -310,9 +313,11 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp str_int_attrib_function.resize(n_user_int_attribs); m_user_int_attrib_parser.resize(n_user_int_attribs); for (int i = 0; i < n_user_int_attribs; ++i) { - Store_parserString(pp_species_name, "attribute."+m_user_int_attribs.at(i)+"(x,y,z,ux,uy,uz,t)", str_int_attrib_function.at(i)); + utils::parser::Store_parserString( + pp_species_name, "attribute."+m_user_int_attribs.at(i)+"(x,y,z,ux,uy,uz,t)", + str_int_attrib_function.at(i)); m_user_int_attrib_parser.at(i) = std::make_unique( - makeParser(str_int_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); + utils::parser::makeParser(str_int_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); AddIntComp(m_user_int_attribs.at(i)); } @@ -323,9 +328,11 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp str_real_attrib_function.resize(n_user_real_attribs); m_user_real_attrib_parser.resize(n_user_real_attribs); for (int i = 0; i < n_user_real_attribs; ++i) { - Store_parserString(pp_species_name, "attribute."+m_user_real_attribs.at(i)+"(x,y,z,ux,uy,uz,t)", str_real_attrib_function.at(i)); + utils::parser::Store_parserString( + pp_species_name, "attribute."+m_user_real_attribs.at(i)+"(x,y,z,ux,uy,uz,t)", + str_real_attrib_function.at(i)); m_user_real_attrib_parser.at(i) = std::make_unique( - makeParser(str_real_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); + utils::parser::makeParser(str_real_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); AddRealComp(m_user_real_attribs.at(i)); } @@ -2873,18 +2880,20 @@ PhysicalParticleContainer::InitIonizationModule () "overriding user value and setting charge = q_e."); charge = PhysConst::q_e; } - queryWithParser(pp_species_name, "ionization_initial_level", ionization_initial_level); + utils::parser::queryWithParser( + pp_species_name, "ionization_initial_level", ionization_initial_level); pp_species_name.get("ionization_product_species", ionization_product_name); pp_species_name.get("physical_element", physical_element); // Add runtime integer component for ionization level AddIntComp("ionizationLevel"); // Get atomic number and ionization energies from file - int const ion_element_id = ion_map_ids.at(physical_element); - ion_atomic_number = ion_atomic_numbers[ion_element_id]; + const int ion_element_id = utils::physics::ion_map_ids.at(physical_element); + ion_atomic_number = utils::physics::ion_atomic_numbers[ion_element_id]; Vector h_ionization_energies(ion_atomic_number); - int offset = ion_energy_offsets[ion_element_id]; + const int offset = utils::physics::ion_energy_offsets[ion_element_id]; for(int i=0; i @@ -34,7 +34,8 @@ LevelingThinning::LevelingThinning (const std::string species_name) using namespace amrex::literals; amrex::ParmParse pp_species_name(species_name); - queryWithParser(pp_species_name, "resampling_algorithm_target_ratio", m_target_ratio); + utils::parser::queryWithParser( + pp_species_name, "resampling_algorithm_target_ratio", m_target_ratio); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( m_target_ratio > 0._rt, "Resampling target ratio should be strictly greater than 0"); if (m_target_ratio <= 1._rt) @@ -45,7 +46,8 @@ LevelingThinning::LevelingThinning (const std::string species_name) "It is possible that no particle will be removed during resampling"); } - queryWithParser(pp_species_name, "resampling_algorithm_min_ppc", m_min_ppc); + utils::parser::queryWithParser( + pp_species_name, "resampling_algorithm_min_ppc", m_min_ppc); 
WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_min_ppc >= 1, "Resampling min_ppc should be greater than or equal to 1"); } diff --git a/Source/Particles/Resampling/ResamplingTrigger.H b/Source/Particles/Resampling/ResamplingTrigger.H index 03850cf510f..ce89adab89c 100644 --- a/Source/Particles/Resampling/ResamplingTrigger.H +++ b/Source/Particles/Resampling/ResamplingTrigger.H @@ -7,7 +7,7 @@ #ifndef WARPX_RESAMPLING_TRIGGER_H_ #define WARPX_RESAMPLING_TRIGGER_H_ -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/IntervalsParser.H" #include @@ -54,7 +54,7 @@ public: private: // Intervals that define predetermined timesteps at which resampling is performed for all // species. - IntervalsParser m_resampling_intervals; + utils::parser::IntervalsParser m_resampling_intervals; // Average number of particles per cell above which resampling is performed for a given species amrex::Real m_max_avg_ppc = std::numeric_limits::max(); diff --git a/Source/Particles/Resampling/ResamplingTrigger.cpp b/Source/Particles/Resampling/ResamplingTrigger.cpp index 220b65f84ce..53883f1e963 100644 --- a/Source/Particles/Resampling/ResamplingTrigger.cpp +++ b/Source/Particles/Resampling/ResamplingTrigger.cpp @@ -6,7 +6,7 @@ */ #include "ResamplingTrigger.H" -#include "Utils/WarpXUtil.H" +#include "Utils/Parser/ParserUtils.H" #include "WarpX.H" #include @@ -20,9 +20,10 @@ ResamplingTrigger::ResamplingTrigger (const std::string species_name) std::vector resampling_trigger_int_string_vec = {"0"}; pp_species_name.queryarr("resampling_trigger_intervals", resampling_trigger_int_string_vec); - m_resampling_intervals = IntervalsParser(resampling_trigger_int_string_vec); + m_resampling_intervals = utils::parser::IntervalsParser(resampling_trigger_int_string_vec); - queryWithParser(pp_species_name, "resampling_trigger_max_avg_ppc", m_max_avg_ppc); + utils::parser::queryWithParser( + pp_species_name, "resampling_trigger_max_avg_ppc", m_max_avg_ppc); } bool ResamplingTrigger::triggered (const int timestep, const amrex::Real global_numparts) const diff --git a/Source/Particles/RigidInjectedParticleContainer.cpp b/Source/Particles/RigidInjectedParticleContainer.cpp index 489f286b577..cc064538b97 100644 --- a/Source/Particles/RigidInjectedParticleContainer.cpp +++ b/Source/Particles/RigidInjectedParticleContainer.cpp @@ -18,10 +18,10 @@ #include "Pusher/UpdateMomentumHigueraCary.H" #include "Pusher/UpdateMomentumVay.H" #include "RigidInjectedParticleContainer.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" -#include "Utils/WarpXUtil.H" #include "WarpX.H" #include @@ -63,7 +63,8 @@ RigidInjectedParticleContainer::RigidInjectedParticleContainer (AmrCore* amr_cor ParmParse pp_species_name(species_name); - getWithParser(pp_species_name, "zinject_plane", zinject_plane); + utils::parser::getWithParser( + pp_species_name, "zinject_plane", zinject_plane); pp_species_name.query("rigid_advance", rigid_advance); } diff --git a/Source/Python/WarpXWrappers.cpp b/Source/Python/WarpXWrappers.cpp index 2df989bba81..61909cff738 100644 --- a/Source/Python/WarpXWrappers.cpp +++ b/Source/Python/WarpXWrappers.cpp @@ -11,8 +11,8 @@ #include "Particles/MultiParticleContainer.H" #include "Particles/ParticleBoundaryBuffer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/WarpXUtil.H" #include "Utils/WarpXProfilerWrapper.H" +#include "Utils/WarpXUtil.H" #include "WarpX.H" #include "WarpXWrappers.H" #include "WarpX_py.H" diff --git 
a/Source/Utils/Algorithms/IsIn.H b/Source/Utils/Algorithms/IsIn.H new file mode 100644 index 00000000000..c9d2f477ef8 --- /dev/null +++ b/Source/Utils/Algorithms/IsIn.H @@ -0,0 +1,58 @@ +/* Copyright 2022 Andrew Myers, Luca Fedeli, Maxence Thevenet + * Revathi Jambunathan + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_UTILS_ALGORITHMS_ISIN_H_ +#define WARPX_UTILS_ALGORITHMS_ISIN_H_ + +#include +#include + +namespace utils::algorithms +{ + /** \brief Returns true if an item of type TE is in a vector + * of TV objects (provided that TE can be converted into TV), false otherwise + * + * @tparam TV the typename of the vector elements + * @tparam TE the typename of the item + * + * @param vect a vector of TV objects + * @param elem an object of type TE + * + * @return true if elem is in vect, false otherwise + */ + template ::value>::type> + bool is_in(const std::vector& vect, + const TE& elem) + { + return (std::find(vect.begin(), vect.end(), elem) != vect.end()); + } + + + /** \brief Returns true if any of the items of a vector is contained + * in another vector (provided that TE can be converted into TV) + * + * @tparam TV the typename of the first vector elements + * @tparam TV the typename of the second vector elements + * + * @param vect a vector of TV objects + * @param elems a vector of TE objects + * + * @return true if any element of elems is in vect, false otherwise + */ + template ::value>::type> + bool any_of_is_in(const std::vector& vect, + const std::vector& elems) + { + return std::any_of(elems.begin(), elems.end(), + [&](const auto elem){return is_in(vect, elem);}); + } +} + +#endif //WARPX_UTILS_ALGORITHMS_ISIN_H_ diff --git a/Source/Utils/Algorithms/LinearInterpolation.H b/Source/Utils/Algorithms/LinearInterpolation.H new file mode 100644 index 00000000000..32fdf7a6e81 --- /dev/null +++ b/Source/Utils/Algorithms/LinearInterpolation.H @@ -0,0 +1,59 @@ +/* Copyright 2022 Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_UTILS_ALGORITHMS_LINEAR_INTERPOLATION_H_ +#define WARPX_UTILS_ALGORITHMS_LINEAR_INTERPOLATION_H_ + +#include +#include + +namespace utils::algorithms +{ + /** \brief Performs a linear interpolation + * + * Performs a linear interpolation at x given the 2 points + * (x0, f0) and (x1, f1) + */ + template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + T linear_interp(T x0, T x1, T f0, T f1, T x) + { + return ((x1-x)*f0 + (x-x0)*f1)/(x1-x0); + } + + /** \brief Performs a bilinear interpolation + * + * Performs a bilinear interpolation at (x,y) given the 4 points + * (x0, y0, f00), (x0, y1, f01), (x1, y0, f10), (x1, y1, f11). 
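// Illustrative sketch of the utils::algorithms::is_in helpers declared in
// IsIn.H above; the attribute names below are made-up values, not WarpX API.
#include "Utils/Algorithms/IsIn.H"

#include <string>
#include <vector>

bool is_in_demo ()
{
    const std::vector<std::string> known  = {"orig_z", "center"};
    const std::vector<std::string> wanted = {"weight", "orig_z"};

    const bool has_center = utils::algorithms::is_in(known, std::string("center")); // true
    const bool overlap    = utils::algorithms::any_of_is_in(known, wanted);         // true ("orig_z")
    return has_center && overlap;
}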
+ */ + template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + T bilinear_interp(T x0, T x1, T y0, T y1, T f00, T f01, T f10, T f11, T x, T y) + { + const T fx0 = linear_interp(x0, x1, f00, f10, x); + const T fx1 = linear_interp(x0, x1, f01, f11, x); + return linear_interp(y0, y1, fx0, fx1, y); + } + + /** \brief Performs a trilinear interpolation + * + * Performs a trilinear interpolation at (x,y,z) given the 8 points + * (x0, y0, z0, f000), (x0, y0, z1, f001), (x0, y1, z0, f010), (x0, y1, z1, f011), + * (x1, y0, z0, f100), (x1, y0, z1, f101), (x1, y1, z0, f110), (x1, y1, z1, f111) + */ + template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + T trilinear_interp(T x0, T x1,T y0, T y1, T z0, T z1, + T f000, T f001, T f010, T f011, T f100, T f101, T f110, T f111, + T x, T y, T z) + { + const T fxy0 = bilinear_interp( + x0, x1, y0, y1, f000, f010, f100, f110, x, y); + const T fxy1 = bilinear_interp( + x0, x1, y0, y1, f001, f011, f101, f111, x, y); + return linear_interp(z0, z1, fxy0, fxy1, z); + } +} + +#endif //WARPX_UTILS_ALGORITHMS_LINEAR_INTERPOLATION_H_ diff --git a/Source/Utils/Algorithms/Make.package b/Source/Utils/Algorithms/Make.package new file mode 100644 index 00000000000..04638494d51 --- /dev/null +++ b/Source/Utils/Algorithms/Make.package @@ -0,0 +1 @@ +VPATH_LOCATIONS += $(WARPX_HOME)/Source/Utils/Algorithms diff --git a/Source/Utils/Algorithms/UpperBound.H b/Source/Utils/Algorithms/UpperBound.H new file mode 100644 index 00000000000..8a528971a7d --- /dev/null +++ b/Source/Utils/Algorithms/UpperBound.H @@ -0,0 +1,49 @@ +/* Copyright 2022 Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_UTILS_ALGORITHMS_UPPER_BOUND_H_ +#define WARPX_UTILS_ALGORITHMS_UPPER_BOUND_H_ + +#include +#include + +namespace utils::algorithms +{ + + /** \brief Returns a pointer to the first element in the range [first, last) that is greater than val + * + * A re-implementation of the upper_bound algorithm suitable for GPU kernels. + * + * @param first: pointer to left limit of the range to consider + * @param last: pointer to right limit of the range to consider + * @param val: value to compare the elements of [first, last) to + */ + template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + const T* upper_bound(const T* first, const T* last, const T& val) + { + const T* it; + size_t count, step; + count = last-first; + while(count>0){ + it = first; + step = count/2; + it += step; + if (!(val<*it)){ + first = ++it; + count -= step + 1; + } + else{ + count = step; + } + } + + return first; + } + +} + +#endif //WARPX_UTILS_ALGORITHMS_UPPER_BOUND_H_ diff --git a/Source/Utils/CMakeLists.txt b/Source/Utils/CMakeLists.txt index 9253f3d0d7f..19fe6bc3460 100644 --- a/Source/Utils/CMakeLists.txt +++ b/Source/Utils/CMakeLists.txt @@ -3,7 +3,6 @@ target_sources(WarpX CoarsenIO.cpp CoarsenMR.cpp Interpolate.cpp - IntervalsParser.cpp MPIInitHelpers.cpp ParticleUtils.cpp RelativeCellPosition.cpp @@ -15,3 +14,5 @@ target_sources(WarpX ) add_subdirectory(Logo) +add_subdirectory(Parser) +add_subdirectory(Strings) diff --git a/Source/Utils/IntervalsParser.H b/Source/Utils/IntervalsParser.H deleted file mode 100644 index 06258b10999..00000000000 --- a/Source/Utils/IntervalsParser.H +++ /dev/null @@ -1,207 +0,0 @@ -#ifndef WARPX_INTERVALSPARSER_H_ -#define WARPX_INTERVALSPARSER_H_ - -#include -#include -#include - -/** - * \brief This class is a parser for slices of the form i:j:k where i, j and k are integers - * representing respectively the starting point, the stopping point and the period. 
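// Illustrative sketch combining the LinearInterpolation.H and UpperBound.H
// helpers added above; the table values are invented. In GPU builds these
// helpers carry device qualifiers and are meant to be called inside kernels,
// so this host-side sketch assumes a CPU build.
#include "Utils/Algorithms/LinearInterpolation.H"
#include "Utils/Algorithms/UpperBound.H"

double interp_demo ()
{
    // Tabulated samples of some quantity f(x).
    const double xs[4] = {0.0, 0.1, 0.2, 0.3};
    const double fs[4] = {1.0, 2.0, 4.0, 8.0};
    const double x = 0.25;

    // First element greater than x is xs[3] = 0.3, so the bracketing
    // interval is [xs[2], xs[3]].
    const double* p = utils::algorithms::upper_bound(xs, xs + 4, x);
    const int i = static_cast<int>(p - xs) - 1;

    // Linear interpolation inside that interval: 6.0 for these values.
    return utils::algorithms::linear_interp(xs[i], xs[i+1], fs[i], fs[i+1], x);
}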
- */ -class SliceParser -{ -public: - /** - * \brief Constructor of the SliceParser class. - * - * @param[in] instr an input string of the form "i:j:k", "i:j" or "k" where i, j and k are - * integers representing respectively the starting point, the stopping point and the period. - * Any of these integers may be omitted in which case it will be equal to their default value - * (0 for the starting point, std::numeric_limits::max() for the stopping point and 1 for - * the period). For example SliceParser(":1000:") is equivalent to SliceParser("0:1000:1"). - */ - SliceParser (const std::string& instr, bool isBTD=false); - - /** - * \brief A method that returns true if the input integer is contained in the slice. (e.g. if - * the list is initialized with "300:500:100", this method returns true if and only if n is - * 300, 400 or 500). If the period is negative or 0, the function always returns false. - * - * @param[in] n the input integer - */ - bool contains (const int n) const; - - /** - * \brief A method that returns the smallest integer strictly greater than n such that - * contains(n) is true. Returns std::numeric_limits::max() if there is no such integer. - * - * @param[in] n the input integer - */ - int nextContains (const int n) const; - - /** - * \brief A method that returns the greatest integer strictly smaller than n such that - * contains(n) is true. Returns 0 if there is no such integer. - * - * @param[in] n the input integer - */ - int previousContains (const int n) const; - - /** - * \brief A method that returns the slice period. - * - */ - int getPeriod () const; - - /** - * \brief A method that returns the slice start. - * - */ - int getStart () const; - - /** - * \brief A method that returns the slice stop. - * - */ - int getStop () const; - - /** - * @brief A method that returns the number of integers contained by the slice. - * - */ - int numContained() const; - -private: - bool m_isBTD = false; - int m_start = 0; - int m_stop = std::numeric_limits::max(); - int m_period = 1; - std::string m_separator = ":"; - -}; - -/** - * \brief This class is a parser for multiple slices of the form x,y,z,... where x, y and z are - * slices of the form i:j:k, as defined in the SliceParser class. This class contains a vector of - * SliceParsers. - */ -class IntervalsParser -{ -public: - /** - * \brief Default constructor of the IntervalsParser class. - */ - IntervalsParser () = default; - - /** - * \brief Constructor of the IntervalsParser class. - * - * @param[in] instr_vec an input vector string, which when concatenated is of the form - * "x,y,z,...". This will call the constructor of SliceParser using x, y and z as input - * arguments. - */ - IntervalsParser (const std::vector& instr_vec); - - /** - * \brief A method that returns true if the input integer is contained in any of the slices - * contained by the IntervalsParser. - * - * @param[in] n the input integer - */ - bool contains (const int n) const; - - /** - * \brief A method that returns the smallest integer strictly greater than n such that - * contains(n) is true. Returns std::numeric_limits::max() if there is no such integer. - * - * @param[in] n the input integer - */ - int nextContains (const int n) const; - - /** - * \brief A method that returns the greatest integer strictly smaller than n such that - * contains(n) is true. Returns 0 if there is no such integer. 
- * - * @param[in] n the input integer - */ - int previousContains (const int n) const; - - /** - * \brief A method that returns the greatest integer smaller than or equal to n such that - * contains(n) is true. Returns 0 if there is no such integer. - * - * @param[in] n the input integer - */ - int previousContainsInclusive (const int n) const; - - /** - * \brief A method the local period (in timesteps) of the IntervalsParser at timestep n. - * The period is defined by nextContains(n) - previousContainsInclusive(n) - * - * @param[in] n the input integer - */ - int localPeriod (const int n) const; - -/** - * \brief A method that returns true if any of the slices contained by the IntervalsParser - * has a strictly positive period. - */ - bool isActivated () const; - -private: - std::vector m_slices; - std::string m_separator = ","; - bool m_activated = false; -}; - -/** - * \brief This class is a parser for multiple slices of the form x,y,z,... where x, y and z are - * slices of the form i:j:k, as defined in the SliceParser class. This class contains a vector of - * SliceParsers. The supported function set differs from the IntervalsParser - */ -class BTDIntervalsParser -{ -public: - /** - * \brief Default constructor of the BTDIntervalsParser class. - */ - BTDIntervalsParser () = default; - - /** - * \brief Constructor of the BTDIntervalsParser class. - * - * @param[in] instr_vec an input vector string, which when concatenated is of the form - * "x,y,z,...". This will call the constructor of SliceParser using x, y and z as input - * arguments. - */ - BTDIntervalsParser (const std::vector& instr_vec); - - /** - * @brief Return the total number of unique labframe snapshots - */ - int NumSnapshots (); - - /** - * @brief Return the iteration number stored at index i_buffer - * - * @param i_buffer buffer or iteration index, between 0 and NumSnapshots - */ - int GetBTDIteration(int i_buffer); - - /** - * \brief A method that returns true if any of the slices contained by the IntervalsParser - * has a strictly positive period. - */ - bool isActivated () const; - -private: - std::vector m_btd_iterations; - std::vector m_slices; - std::vector m_slice_starting_i_buffer; - int m_n_snapshots; - static constexpr char m_separator = ','; - bool m_activated = false; -}; - -#endif // WARPX_INTERVALSPARSER_H_ diff --git a/Source/Utils/IonizationEnergiesTable.H b/Source/Utils/IonizationEnergiesTable.H deleted file mode 100644 index bc2b6005a23..00000000000 --- a/Source/Utils/IonizationEnergiesTable.H +++ /dev/null @@ -1,228 +0,0 @@ -/* Copyright 2019-2021 Axel Huebl, Maxence Thevenet - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -// This script was automatically generated! -// Edit dev/Source/Utils/write_atomic_data_cpp.py instead! 
-#ifndef WARPX_IONIZATION_TABLE_H_ -#define WARPX_IONIZATION_TABLE_H_ - -#include -#include - -#include -#include - -static std::map const ion_map_ids = { - {"H", 0}, - {"He", 1}, - {"Li", 2}, - {"Be", 3}, - {"B", 4}, - {"C", 5}, - {"N", 6}, - {"O", 7}, - {"F", 8}, - {"Ne", 9}, - {"Na", 10}, - {"Mg", 11}, - {"Al", 12}, - {"Si", 13}, - {"P", 14}, - {"S", 15}, - {"Cl", 16}, - {"Ar", 17}, - {"Cu", 18}, - {"Kr", 19}, - {"Rb", 20}, - {"Xe", 21}, - {"Rn", 22} }; - -constexpr int nelements = 23; - -constexpr int ion_atomic_numbers[nelements] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, 29, 36, - 37, 54, 86}; - -constexpr int ion_energy_offsets[nelements] = { - 0, 1, 3, 6, 10, 15, 21, 28, 36, 45, - 55, 66, 78, 91, 105, 120, 136, 153, 171, 200, - 236, 273, 327}; - -constexpr int energies_tab_length = 413; - -constexpr amrex::Real table_ionization_energies[energies_tab_length]{ - // H - amrex::Real(13.59843449), - // He - amrex::Real(24.58738880), amrex::Real(54.4177650), - // Li - amrex::Real(5.39171495), amrex::Real(75.6400964), amrex::Real(122.4543581), - // Be - amrex::Real(9.322699), amrex::Real(18.21115), amrex::Real(153.896203), - amrex::Real(217.7185843), - // B - amrex::Real(8.298019), amrex::Real(25.15483), amrex::Real(37.93058), - amrex::Real(259.3715), amrex::Real(340.226020), - // C - amrex::Real(11.2602880), amrex::Real(24.383154), amrex::Real(47.88778), - amrex::Real(64.49352), amrex::Real(392.090515), amrex::Real(489.993194), - // N - amrex::Real(14.53413), amrex::Real(29.60125), amrex::Real(47.4453), - amrex::Real(77.4735), amrex::Real(97.8901), amrex::Real(552.06732), - amrex::Real(667.046116), - // O - amrex::Real(13.618055), amrex::Real(35.12112), amrex::Real(54.93554), - amrex::Real(77.41350), amrex::Real(113.8990), amrex::Real(138.1189), - amrex::Real(739.32682), amrex::Real(871.40988), - // F - amrex::Real(17.42282), amrex::Real(34.97081), amrex::Real(62.70798), - amrex::Real(87.175), amrex::Real(114.249), amrex::Real(157.16311), - amrex::Real(185.1868), amrex::Real(953.89804), amrex::Real(1103.11747), - // Ne - amrex::Real(21.564540), amrex::Real(40.96297), amrex::Real(63.4233), - amrex::Real(97.1900), amrex::Real(126.247), amrex::Real(157.934), - amrex::Real(207.271), amrex::Real(239.0970), amrex::Real(1195.80783), - amrex::Real(1362.19915), - // Na - amrex::Real(5.1390769), amrex::Real(47.28636), amrex::Real(71.6200), - amrex::Real(98.936), amrex::Real(138.404), amrex::Real(172.23), - amrex::Real(208.504), amrex::Real(264.192), amrex::Real(299.856), - amrex::Real(1465.13449), amrex::Real(1648.70218), - // Mg - amrex::Real(7.646236), amrex::Real(15.035271), amrex::Real(80.1436), - amrex::Real(109.2654), amrex::Real(141.33), amrex::Real(186.76), - amrex::Real(225.02), amrex::Real(265.924), amrex::Real(327.99), - amrex::Real(367.489), amrex::Real(1761.80487), amrex::Real(1962.66365), - // Al - amrex::Real(5.985769), amrex::Real(18.82855), amrex::Real(28.447642), - amrex::Real(119.9924), amrex::Real(153.8252), amrex::Real(190.49), - amrex::Real(241.76), amrex::Real(284.64), amrex::Real(330.21), - amrex::Real(398.65), amrex::Real(442.005), amrex::Real(2085.97700), - amrex::Real(2304.14005), - // Si - amrex::Real(8.15168), amrex::Real(16.34585), amrex::Real(33.49300), - amrex::Real(45.14179), amrex::Real(166.767), amrex::Real(205.279), - amrex::Real(246.57), amrex::Real(303.59), amrex::Real(351.28), - amrex::Real(401.38), amrex::Real(476.273), amrex::Real(523.415), - amrex::Real(2437.65813), amrex::Real(2673.17753), - // P - amrex::Real(10.486686), 
amrex::Real(19.76949), amrex::Real(30.20264), - amrex::Real(51.44387), amrex::Real(65.02511), amrex::Real(220.430), - amrex::Real(263.57), amrex::Real(309.60), amrex::Real(372.31), - amrex::Real(424.40), amrex::Real(479.44), amrex::Real(560.62), - amrex::Real(611.741), amrex::Real(2816.90876), amrex::Real(3069.8415), - // S - amrex::Real(10.36001), amrex::Real(23.33788), amrex::Real(34.86), - amrex::Real(47.222), amrex::Real(72.5945), amrex::Real(88.0529), - amrex::Real(280.954), amrex::Real(328.794), amrex::Real(379.84), - amrex::Real(447.7), amrex::Real(504.55), amrex::Real(564.41), - amrex::Real(651.96), amrex::Real(706.994), amrex::Real(3223.7807), - amrex::Real(3494.1879), - // Cl - amrex::Real(12.967632), amrex::Real(23.81364), amrex::Real(39.80), - amrex::Real(53.24), amrex::Real(67.68), amrex::Real(96.94), - amrex::Real(114.2013), amrex::Real(348.306), amrex::Real(400.851), - amrex::Real(456.7), amrex::Real(530.0), amrex::Real(591.58), - amrex::Real(656.30), amrex::Real(750.23), amrex::Real(809.198), - amrex::Real(3658.3437), amrex::Real(3946.2909), - // Ar - amrex::Real(15.7596117), amrex::Real(27.62967), amrex::Real(40.735), - amrex::Real(59.58), amrex::Real(74.84), amrex::Real(91.290), - amrex::Real(124.41), amrex::Real(143.4567), amrex::Real(422.60), - amrex::Real(479.76), amrex::Real(540.4), amrex::Real(619.0), - amrex::Real(685.5), amrex::Real(755.13), amrex::Real(855.5), - amrex::Real(918.375), amrex::Real(4120.6656), amrex::Real(4426.2228), - // Cu - amrex::Real(7.726380), amrex::Real(20.29239), amrex::Real(36.841), - amrex::Real(57.38), amrex::Real(79.8), amrex::Real(103.0), - amrex::Real(139.0), amrex::Real(166.0), amrex::Real(198.0), - amrex::Real(232.2), amrex::Real(265.33), amrex::Real(367.0), - amrex::Real(401.0), amrex::Real(436.0), amrex::Real(483.1), - amrex::Real(518.7), amrex::Real(552.8), amrex::Real(632.5), - amrex::Real(670.608), amrex::Real(1690.5), amrex::Real(1800), - amrex::Real(1918), amrex::Real(2044), amrex::Real(2179.4), - amrex::Real(2307.3), amrex::Real(2479.1), amrex::Real(2586.954), - amrex::Real(11062.4313), amrex::Real(11567.613), - // Kr - amrex::Real(13.9996053), amrex::Real(24.35984), amrex::Real(35.838), - amrex::Real(50.85), amrex::Real(64.69), amrex::Real(78.49), - amrex::Real(109.13), amrex::Real(125.802), amrex::Real(233.0), - amrex::Real(268), amrex::Real(308), amrex::Real(350), - amrex::Real(391), amrex::Real(446), amrex::Real(492), - amrex::Real(540), amrex::Real(591), amrex::Real(640), - amrex::Real(785), amrex::Real(831.6), amrex::Real(882.8), - amrex::Real(945), amrex::Real(999.0), amrex::Real(1042), - amrex::Real(1155.0), amrex::Real(1205.23), amrex::Real(2928.9), - amrex::Real(3072), amrex::Real(3228), amrex::Real(3380), - amrex::Real(3584), amrex::Real(3752.0), amrex::Real(3971), - amrex::Real(4109.083), amrex::Real(17296.420), amrex::Real(17936.209), - // Rb - amrex::Real(4.1771280), amrex::Real(27.28954), amrex::Real(39.247), - amrex::Real(52.20), amrex::Real(68.44), amrex::Real(82.9), - amrex::Real(98.67), amrex::Real(132.79), amrex::Real(150.628), - amrex::Real(277.12), amrex::Real(313.1), amrex::Real(356.0), - amrex::Real(400), amrex::Real(443), amrex::Real(502), - amrex::Real(550), amrex::Real(601), amrex::Real(654), - amrex::Real(706.0), amrex::Real(857), amrex::Real(905.3), - amrex::Real(958.9), amrex::Real(1024), amrex::Real(1080), - amrex::Real(1125), amrex::Real(1242.5), amrex::Real(1294.57), - amrex::Real(3133.3), amrex::Real(3281), amrex::Real(3443), - amrex::Real(3600), amrex::Real(3815), amrex::Real(3988), - 
amrex::Real(4214), amrex::Real(4356.865), amrex::Real(18305.884), - amrex::Real(18965.516), - // Xe - amrex::Real(12.1298436), amrex::Real(20.975), amrex::Real(31.05), - amrex::Real(42.20), amrex::Real(54.1), amrex::Real(66.703), - amrex::Real(91.6), amrex::Real(105.9778), amrex::Real(179.84), - amrex::Real(202.0), amrex::Real(229.02), amrex::Real(255.0), - amrex::Real(281), amrex::Real(314), amrex::Real(343), - amrex::Real(374), amrex::Real(404), amrex::Real(434), - amrex::Real(549), amrex::Real(582), amrex::Real(616), - amrex::Real(650), amrex::Real(700), amrex::Real(736), - amrex::Real(818), amrex::Real(857.0), amrex::Real(1493), - amrex::Real(1571), amrex::Real(1653), amrex::Real(1742), - amrex::Real(1826), amrex::Real(1919), amrex::Real(2023), - amrex::Real(2113), amrex::Real(2209), amrex::Real(2300), - amrex::Real(2556), amrex::Real(2637), amrex::Real(2726), - amrex::Real(2811), amrex::Real(2975), amrex::Real(3068), - amrex::Real(3243), amrex::Real(3333.8), amrex::Real(7660), - amrex::Real(7889), amrex::Real(8144), amrex::Real(8382), - amrex::Real(8971), amrex::Real(9243), amrex::Real(9581), - amrex::Real(9810.37), amrex::Real(40271.724), amrex::Real(41299.71), - // Rn - amrex::Real(10.74850), amrex::Real(21.4), amrex::Real(29.4), - amrex::Real(36.9), amrex::Real(52.9), amrex::Real(64.0), - amrex::Real(88.0), amrex::Real(102.0), amrex::Real(154.0), - amrex::Real(173.9), amrex::Real(195.0), amrex::Real(218.0), - amrex::Real(240), amrex::Real(264), amrex::Real(293), - amrex::Real(317), amrex::Real(342), amrex::Real(367), - amrex::Real(488), amrex::Real(520), amrex::Real(550), - amrex::Real(580), amrex::Real(640), amrex::Real(680), - amrex::Real(760), amrex::Real(800), amrex::Real(850), - amrex::Real(920), amrex::Real(980), amrex::Real(1050), - amrex::Real(1110), amrex::Real(1180), amrex::Real(1250), - amrex::Real(1310), amrex::Real(1390), amrex::Real(1460), - amrex::Real(1520), amrex::Real(1590), amrex::Real(1660), - amrex::Real(1720), amrex::Real(2033), amrex::Real(2094), - amrex::Real(2158), amrex::Real(2227), amrex::Real(2293), - amrex::Real(2357), amrex::Real(2467), amrex::Real(2535), - amrex::Real(2606), amrex::Real(2674), amrex::Real(2944), - amrex::Real(3010), amrex::Real(3082), amrex::Real(3149), - amrex::Real(3433), amrex::Real(3510), amrex::Real(3699), - amrex::Real(3777), amrex::Real(6169), amrex::Real(6318), - amrex::Real(6476), amrex::Real(6646), amrex::Real(6807), - amrex::Real(6964), amrex::Real(7283), amrex::Real(7450), - amrex::Real(7630), amrex::Real(7800), amrex::Real(8260), - amrex::Real(8410), amrex::Real(8570), amrex::Real(8710), - amrex::Real(9610), amrex::Real(9780), amrex::Real(10120), - amrex::Real(10290), amrex::Real(21770), amrex::Real(22160), - amrex::Real(22600), amrex::Real(22990), amrex::Real(26310), - amrex::Real(26830), amrex::Real(27490), amrex::Real(27903.1), - amrex::Real(110842.0), amrex::Real(112843.7) -}; - -#endif // #ifndef WARPX_IONIZATION_TABLE_H_ diff --git a/Source/Utils/Make.package b/Source/Utils/Make.package index b20a1abe291..f4395372057 100644 --- a/Source/Utils/Make.package +++ b/Source/Utils/Make.package @@ -11,6 +11,10 @@ CEXE_sources += MPIInitHelpers.cpp CEXE_sources += RelativeCellPosition.cpp CEXE_sources += ParticleUtils.cpp +include $(WARPX_HOME)/Source/Utils/Algorithms/Make.package include $(WARPX_HOME)/Source/Utils/Logo/Make.package +include $(WARPX_HOME)/Source/Utils/Parser/Make.package +include $(WARPX_HOME)/Source/Utils/Physics/Make.package +include $(WARPX_HOME)/Source/Utils/Strings/Make.package VPATH_LOCATIONS += 
$(WARPX_HOME)/Source/Utils diff --git a/Source/Utils/Parser/CMakeLists.txt b/Source/Utils/Parser/CMakeLists.txt new file mode 100644 index 00000000000..620ee8dcd37 --- /dev/null +++ b/Source/Utils/Parser/CMakeLists.txt @@ -0,0 +1,5 @@ +target_sources(WarpX + PRIVATE + IntervalsParser.cpp + ParserUtils.cpp +) diff --git a/Source/Utils/Parser/IntervalsParser.H b/Source/Utils/Parser/IntervalsParser.H new file mode 100644 index 00000000000..e9fffce9e62 --- /dev/null +++ b/Source/Utils/Parser/IntervalsParser.H @@ -0,0 +1,220 @@ +/* Copyright 2022 Andrew Myers, Burlen Loring, Luca Fedeli + * Maxence Thevenet, Remi Lehe, Revathi Jambunathan + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_UTILS_PARSER_INTERVALSPARSER_H_ +#define WARPX_UTILS_PARSER_INTERVALSPARSER_H_ + +#include +#include +#include + +namespace utils::parser +{ + + /** + * \brief This class is a parser for slices of the form i:j:k where i, j and k are integers + * representing respectively the starting point, the stopping point and the period. + */ + class SliceParser + { + public: + /** + * \brief Constructor of the SliceParser class. + * + * @param[in] instr an input string of the form "i:j:k", "i:j" or "k" where i, j and k are + * integers representing respectively the starting point, the stopping point and the period. + * Any of these integers may be omitted in which case it will be equal to their default value + * (0 for the starting point, std::numeric_limits::max() for the stopping point and 1 for + * the period). For example SliceParser(":1000:") is equivalent to SliceParser("0:1000:1"). + */ + SliceParser (const std::string& instr, bool isBTD=false); + + /** + * \brief A method that returns true if the input integer is contained in the slice. (e.g. if + * the list is initialized with "300:500:100", this method returns true if and only if n is + * 300, 400 or 500). If the period is negative or 0, the function always returns false. + * + * @param[in] n the input integer + */ + bool contains (const int n) const; + + /** + * \brief A method that returns the smallest integer strictly greater than n such that + * contains(n) is true. Returns std::numeric_limits::max() if there is no such integer. + * + * @param[in] n the input integer + */ + int nextContains (const int n) const; + + /** + * \brief A method that returns the greatest integer strictly smaller than n such that + * contains(n) is true. Returns 0 if there is no such integer. + * + * @param[in] n the input integer + */ + int previousContains (const int n) const; + + /** + * \brief A method that returns the slice period. + * + */ + int getPeriod () const; + + /** + * \brief A method that returns the slice start. + * + */ + int getStart () const; + + /** + * \brief A method that returns the slice stop. + * + */ + int getStop () const; + + /** + * @brief A method that returns the number of integers contained by the slice. + * + */ + int numContained() const; + + private: + bool m_isBTD = false; + int m_start = 0; + int m_stop = std::numeric_limits::max(); + int m_period = 1; + std::string m_separator = ":"; + + }; + + + /** + * \brief This class is a parser for multiple slices of the form x,y,z,... where x, y and z are + * slices of the form i:j:k, as defined in the SliceParser class. This class contains a vector of + * SliceParsers. + */ + class IntervalsParser + { + public: + /** + * \brief Default constructor of the IntervalsParser class. 
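// Illustrative sketch of the slice syntax documented above for
// utils::parser::SliceParser; the slice string is a made-up example.
#include "Utils/Parser/IntervalsParser.H"

int slice_demo ()
{
    const utils::parser::SliceParser s("300:500:100");  // steps 300, 400 and 500

    const bool in  = s.contains(400);                   // true
    const bool out = s.contains(450);                   // false
    return (in && !out) ? s.nextContains(400) : -1;     // 500
}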
+ */ + IntervalsParser () = default; + + /** + * \brief Constructor of the IntervalsParser class. + * + * @param[in] instr_vec an input vector string, which when concatenated is of the form + * "x,y,z,...". This will call the constructor of SliceParser using x, y and z as input + * arguments. + */ + IntervalsParser (const std::vector& instr_vec); + + /** + * \brief A method that returns true if the input integer is contained in any of the slices + * contained by the IntervalsParser. + * + * @param[in] n the input integer + */ + bool contains (const int n) const; + + /** + * \brief A method that returns the smallest integer strictly greater than n such that + * contains(n) is true. Returns std::numeric_limits::max() if there is no such integer. + * + * @param[in] n the input integer + */ + int nextContains (const int n) const; + + /** + * \brief A method that returns the greatest integer strictly smaller than n such that + * contains(n) is true. Returns 0 if there is no such integer. + * + * @param[in] n the input integer + */ + int previousContains (const int n) const; + + /** + * \brief A method that returns the greatest integer smaller than or equal to n such that + * contains(n) is true. Returns 0 if there is no such integer. + * + * @param[in] n the input integer + */ + int previousContainsInclusive (const int n) const; + + /** + * \brief A method the local period (in timesteps) of the IntervalsParser at timestep n. + * The period is defined by nextContains(n) - previousContainsInclusive(n) + * + * @param[in] n the input integer + */ + int localPeriod (const int n) const; + + /** + * \brief A method that returns true if any of the slices contained by the IntervalsParser + * has a strictly positive period. + */ + bool isActivated () const; + + private: + std::vector m_slices; + std::string m_separator = ","; + bool m_activated = false; + }; + + /** + * \brief This class is a parser for multiple slices of the form x,y,z,... where x, y and z are + * slices of the form i:j:k, as defined in the SliceParser class. This class contains a vector of + * SliceParsers. The supported function set differs from the IntervalsParser + */ + class BTDIntervalsParser + { + public: + /** + * \brief Default constructor of the BTDIntervalsParser class. + */ + BTDIntervalsParser () = default; + + /** + * \brief Constructor of the BTDIntervalsParser class. + * + * @param[in] instr_vec an input vector string, which when concatenated is of the form + * "x,y,z,...". This will call the constructor of SliceParser using x, y and z as input + * arguments. + */ + BTDIntervalsParser (const std::vector& instr_vec); + + /** + * @brief Return the total number of unique labframe snapshots + */ + int NumSnapshots (); + + /** + * @brief Return the iteration number stored at index i_buffer + * + * @param i_buffer buffer or iteration index, between 0 and NumSnapshots + */ + int GetBTDIteration(int i_buffer); + + /** + * \brief A method that returns true if any of the slices contained by the IntervalsParser + * has a strictly positive period. 
+ */ + bool isActivated () const; + + private: + std::vector m_btd_iterations; + std::vector m_slices; + std::vector m_slice_starting_i_buffer; + int m_n_snapshots; + static constexpr char m_separator = ','; + bool m_activated = false; + }; +} + +#endif // WARPX_UTILS_PARSER_INTERVALSPARSER_H_ diff --git a/Source/Utils/IntervalsParser.cpp b/Source/Utils/Parser/IntervalsParser.cpp similarity index 74% rename from Source/Utils/IntervalsParser.cpp rename to Source/Utils/Parser/IntervalsParser.cpp index 4da0142a03e..d535edeb8b9 100644 --- a/Source/Utils/IntervalsParser.cpp +++ b/Source/Utils/Parser/IntervalsParser.cpp @@ -1,17 +1,28 @@ +/* Copyright 2022 Andrew Myers, Burlen Loring, Luca Fedeli + * Maxence Thevenet, Remi Lehe, Revathi Jambunathan + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + #include "IntervalsParser.H" -#include "TextMsg.H" -#include "WarpXUtil.H" + +#include "ParserUtils.H" +#include "Utils/Strings/StringUtils.H" +#include "Utils/TextMsg.H" #include #include -#include -SliceParser::SliceParser (const std::string& instr, const bool isBTD) +utils::parser::SliceParser::SliceParser (const std::string& instr, const bool isBTD) { + namespace utils_str = utils::strings; + m_isBTD = isBTD; // split string and trim whitespaces - auto insplit = WarpXUtilStr::split>(instr, m_separator, true); + auto insplit = utils_str::split>(instr, m_separator, true); if(insplit.size() == 1){ // no colon in input string. The input is the period. WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_isBTD, "must specify interval stop for BTD"); @@ -39,13 +50,15 @@ SliceParser::SliceParser (const std::string& instr, const bool isBTD) } } -bool SliceParser::contains (const int n) const + +bool utils::parser::SliceParser::contains (const int n) const { if (m_period <= 0) {return false;} return (n - m_start) % m_period == 0 && n >= m_start && n <= m_stop; } -int SliceParser::nextContains (const int n) const + +int utils::parser::SliceParser::nextContains (const int n) const { if (m_period <= 0) {return std::numeric_limits::max();} int next = m_start; @@ -54,7 +67,8 @@ int SliceParser::nextContains (const int n) const return next; } -int SliceParser::previousContains (const int n) const + +int utils::parser::SliceParser::previousContains (const int n) const { if (m_period <= 0) {return false;} int previous = ((std::min(n-1,m_stop)-m_start)/m_period)*m_period+m_start; @@ -62,20 +76,28 @@ int SliceParser::previousContains (const int n) const return previous; } -int SliceParser::getPeriod () const {return m_period;} -int SliceParser::getStart () const {return m_start;} +int utils::parser::SliceParser::getPeriod () const {return m_period;} -int SliceParser::getStop () const {return m_stop;} -int SliceParser::numContained () const {return (m_stop - m_start) / m_period + 1;} +int utils::parser::SliceParser::getStart () const {return m_start;} -IntervalsParser::IntervalsParser (const std::vector& instr_vec) + +int utils::parser::SliceParser::getStop () const {return m_stop;} + + +int utils::parser::SliceParser::numContained () const { + return (m_stop - m_start) / m_period + 1;} + +utils::parser::IntervalsParser::IntervalsParser ( + const std::vector& instr_vec) { + namespace utils_str = utils::strings; + std::string inconcatenated; for (const auto& instr_element : instr_vec) inconcatenated +=instr_element; - auto insplit = WarpXUtilStr::split>(inconcatenated, m_separator); + auto insplit = utils_str::split>(inconcatenated, m_separator); for(const auto& inslc : insplit) { @@ -86,13 +108,15 @@ 
IntervalsParser::IntervalsParser (const std::vector& instr_vec) } } -bool IntervalsParser::contains (const int n) const + +bool utils::parser::IntervalsParser::contains (const int n) const { return std::any_of(m_slices.begin(), m_slices.end(), [&](const auto& slice){return slice.contains(n);}); } -int IntervalsParser::nextContains (const int n) const + +int utils::parser::IntervalsParser::nextContains (const int n) const { int next = std::numeric_limits::max(); for(const auto& slice: m_slices){ @@ -101,7 +125,8 @@ int IntervalsParser::nextContains (const int n) const return next; } -int IntervalsParser::previousContains (const int n) const + +int utils::parser::IntervalsParser::previousContains (const int n) const { int previous = 0; for(const auto& slice: m_slices){ @@ -110,25 +135,31 @@ int IntervalsParser::previousContains (const int n) const return previous; } -int IntervalsParser::previousContainsInclusive (const int n) const + +int utils::parser::IntervalsParser::previousContainsInclusive ( + const int n) const { if (contains(n)){return n;} else {return previousContains(n);} } -int IntervalsParser::localPeriod (const int n) const + +int utils::parser::IntervalsParser::localPeriod (const int n) const { return nextContains(n) - previousContainsInclusive(n); } -bool IntervalsParser::isActivated () const {return m_activated;} -BTDIntervalsParser::BTDIntervalsParser (const std::vector& instr_vec) +bool utils::parser::IntervalsParser::isActivated () const {return m_activated;} + + +utils::parser::BTDIntervalsParser::BTDIntervalsParser ( + const std::vector& instr_vec) { std::string inconcatenated; for (const auto& instr_element : instr_vec) inconcatenated +=instr_element; - auto const insplit = WarpXUtilStr::split>(inconcatenated, std::string(1,m_separator)); + auto const insplit = utils::strings::split>(inconcatenated, std::string(1,m_separator)); // parse the Intervals string into Slices and store each slice in m_slices, // in order of increasing Slice start value @@ -207,9 +238,14 @@ BTDIntervalsParser::BTDIntervalsParser (const std::vector& instr_ve } } -int BTDIntervalsParser::NumSnapshots () { return m_btd_iterations.size(); } -int BTDIntervalsParser::GetBTDIteration(int i_buffer) +int utils::parser::BTDIntervalsParser::NumSnapshots () +{ + return m_btd_iterations.size(); +} + + +int utils::parser::BTDIntervalsParser::GetBTDIteration (int i_buffer) { return m_btd_iterations[i_buffer]; } diff --git a/Source/Utils/Parser/Make.package b/Source/Utils/Parser/Make.package new file mode 100644 index 00000000000..f5447bc5055 --- /dev/null +++ b/Source/Utils/Parser/Make.package @@ -0,0 +1,4 @@ +CEXE_sources += IntervalsParser.cpp +CEXE_sources += ParserUtils.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/Utils/Parser diff --git a/Source/Utils/Parser/ParserUtils.H b/Source/Utils/Parser/ParserUtils.H new file mode 100644 index 00000000000..4195bed1a59 --- /dev/null +++ b/Source/Utils/Parser/ParserUtils.H @@ -0,0 +1,298 @@ +/* Copyright 2022 Andrew Myers, Burlen Loring, Luca Fedeli + * Maxence Thevenet, Remi Lehe, Revathi Jambunathan + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_UTILS_PARSER_PARSERUTILS_H_ +#define WARPX_UTILS_PARSER_PARSERUTILS_H_ + +#include +#include +#include +#include + +#include +#include +#include + +namespace utils::parser +{ + /** + * \brief Do a safe cast of a real to an int + * This ensures that the float value is within the range of ints and if not, + * raises an exception. 
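// Illustrative sketch of the comma-separated intervals handled by
// utils::parser::IntervalsParser, as implemented above; the interval string is
// a made-up example using the same syntax as diagnostic intervals.
#include "Utils/Parser/IntervalsParser.H"

#include <string>
#include <vector>

bool intervals_demo ()
{
    // "::10"        -> every 10th step starting at 0
    // "300:500:100" -> steps 300, 400 and 500
    const std::vector<std::string> in = {"::10,300:500:100"};
    const utils::parser::IntervalsParser intervals(in);

    const bool hit  = intervals.contains(400);      // true (both slices)
    const bool miss = intervals.contains(405);      // false
    const int  next = intervals.nextContains(401);  // 410, from the "::10" slice
    return hit && !miss && next == 410;
}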
+ * + * \param x Real value to cast + * \param real_name String, the name of the variable being casted to use in the error message + */ + int + safeCastToInt(amrex::Real x, const std::string& real_name); + + + /** + * \brief Do a safe cast of a real to a long + * This ensures that the float value is within the range of longs and if not, + * raises an exception. + * + * \param x Real value to cast + * \param real_name String, the name of the variable being casted to use in the error message + */ + long + safeCastToLong(amrex::Real x, const std::string& real_name); + + + /** + * \brief Initialize an amrex::Parser object from a string containing a math expression + * + * \param parse_function String to read to initialize the parser. + * \param varnames A list of predefined independent variables + */ + amrex::Parser makeParser ( + std::string const& parse_function, + amrex::Vector const& varnames); + + + /** + * \brief Parse a string (typically a mathematical expression) from the + * input file and store it into a variable. + * + * \param pp used to read the query_string `pp.=string` + * \param query_string ParmParse.query will look for this string + * \param stored_string variable in which the string to parse is stored + */ + void Store_parserString( + const amrex::ParmParse &pp, + std::string query_string, + std::string& stored_string); + + + /** Parse a string and return as a double precision floating point number + * + * In case the string cannot be interpreted as a double, + * this function ... + * + * \param str The string to be parsed + * \return representation as a double + */ + double parseStringtoDouble(const std::string& str); + + + /** Parse a string and return an int + * + * In case the string cannot be interpreted as Real, + * this function ... + * + * \param str The string to be parsed + * \param name For integers, the name, to be used in error messages + * \return rounded closest integer + */ + int parseStringtoInt(const std::string& str, const std::string& name); + + + template + amrex::ParserExecutor compileParser (amrex::Parser const* parser) + { + if (parser) { + return parser->compile(); + } else { + return amrex::ParserExecutor{}; + } + } + + + /** Similar to amrex::ParmParse::query, but also supports math expressions for the value. + * + * amrex::ParmParse::query reads a name and a value from the input file. This function does the + * same, and applies the amrex::Parser to the value, so the user has the choice to specify a value or + * a math expression (including user-defined constants). + * Works for amrex::Real numbers and integers. + * + * \param[in] a_pp amrex::ParmParse object + * \param[in] str name of the parameter to read + * \param[out] val where the value queried and parsed is stored, either a scalar or vector + */ + template + int queryWithParser (const amrex::ParmParse& a_pp, char const * const str, T& val) + { + // call amrex::ParmParse::query, check if the user specified str. + std::string tmp_str; + int is_specified = a_pp.query(str, tmp_str); + if (is_specified) + { + // If so, create a parser object and apply it to the value provided by the user. 
+ std::string str_val; + Store_parserString(a_pp, str, str_val); + + auto parser = makeParser(str_val, {}); + + if (std::is_same::value) { + + val = safeCastToInt(std::round(parser.compileHost<0>()()), str); + } + else { + val = static_cast(parser.compileHost<0>()()); + } + } + // return the same output as amrex::ParmParse::query + return is_specified; + } + + + template + int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val) + { + // call amrex::ParmParse::query, check if the user specified str. + std::vector tmp_str_arr; + int is_specified = a_pp.queryarr(str, tmp_str_arr); + if (is_specified) + { + // If so, create parser objects and apply them to the values provided by the user. + int const n = static_cast(tmp_str_arr.size()); + val.resize(n); + for (int i=0 ; i < n ; i++) { + auto parser = makeParser(tmp_str_arr[i], {}); + if (std::is_same::value) { + val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); + } + else { + val[i] = static_cast(parser.compileHost<0>()()); + } + } + } + // return the same output as amrex::ParmParse::query + return is_specified; + } + + + /** Similar to amrex::ParmParse::query, but also supports math expressions for the value. + * + * amrex::ParmParse::query reads a name and a value from the input file. This function does the + * same, and applies the amrex::Parser to the value, so the user has the choice to specify a value or + * a math expression (including user-defined constants). + * Works for amrex::Real numbers and integers. + * + * \param[in] a_pp amrex::ParmParse object + * \param[in] str name of the parameter to read + * \param[out] val where the value queried and parsed is stored, either a scalar or vector + * \param[in] start_ix start index in the list of inputs values (optional with arrays, default is + * amrex::ParmParse::FIRST for starting with the first input value) + * \param[in] num_val number of input values to use (optional with arrays, default is + * amrex::ParmParse::LAST for reading until the last input value) + */ + template + int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, + const int start_ix, const int num_val) + { + // call amrex::ParmParse::query, check if the user specified str. + std::vector tmp_str_arr; + int is_specified = a_pp.queryarr(str, tmp_str_arr, start_ix, num_val); + if (is_specified) + { + // If so, create parser objects and apply them to the values provided by the user. + int const n = static_cast(tmp_str_arr.size()); + val.resize(n); + for (int i=0 ; i < n ; i++) { + auto parser = makeParser(tmp_str_arr[i], {}); + if (std::is_same::value) { + val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); + } + else { + val[i] = static_cast(parser.compileHost<0>()()); + } + } + } + // return the same output as amrex::ParmParse::query + return is_specified; + } + + + /** Similar to amrex::ParmParse::get, but also supports math expressions for the value. + * + * amrex::ParmParse::get reads a name and a value from the input file. This function does the + * same, and applies the Parser to the value, so the user has the choice to specify a value or + * a math expression (including user-defined constants). + * Works for amrex::Real numbers and integers. 
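// Illustrative sketch of the array variant above: each entry of the input list
// may itself be an expression, e.g. "my_species.some_vector = 1.0 2*clight 4e-6".
// The prefix "my_species" and the key "some_vector" are hypothetical.
#include "Utils/Parser/ParserUtils.H"

#include <AMReX_ParmParse.H>

#include <vector>

std::vector<amrex::Real> query_vector_demo ()
{
    const amrex::ParmParse pp("my_species");
    std::vector<amrex::Real> v;  // resized to the number of entries found
    utils::parser::queryArrWithParser(pp, "some_vector", v);
    return v;                    // empty if the key was not specified
}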
+ * + * \param[in] a_pp amrex::ParmParse object + * \param[in] str name of the parameter to read + * \param[out] val where the value queried and parsed is stored + */ + template + void getWithParser (const amrex::ParmParse& a_pp, char const * const str, T& val) + { + // If so, create a parser object and apply it to the value provided by the user. + std::string str_val; + Store_parserString(a_pp, str, str_val); + + auto parser = makeParser(str_val, {}); + if (std::is_same::value) { + val = safeCastToInt(std::round(parser.compileHost<0>()()), str); + } + else { + val = static_cast(parser.compileHost<0>()()); + } + } + + template + void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val) + { + // Create parser objects and apply them to the values provided by the user. + std::vector tmp_str_arr; + a_pp.getarr(str, tmp_str_arr); + + int const n = static_cast(tmp_str_arr.size()); + val.resize(n); + for (int i=0 ; i < n ; i++) { + auto parser = makeParser(tmp_str_arr[i], {}); + if (std::is_same::value) { + val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); + } + else { + val[i] = static_cast(parser.compileHost<0>()()); + } + } + } + + + /** Similar to amrex::ParmParse::get, but also supports math expressions for the value. + * + * amrex::ParmParse::get reads a name and a value from the input file. This function does the + * same, and applies the Parser to the value, so the user has the choice to specify a value or + * a math expression (including user-defined constants). + * Works for amrex::Real numbers and integers. + * + * \param[in] a_pp amrex::ParmParse object + * \param[in] str name of the parameter to read + * \param[out] val where the value queried and parsed is stored + * \param[in] start_ix start index in the list of inputs values (optional with arrays, default is + * amrex::ParmParse::FIRST for starting with the first input value) + * \param[in] num_val number of input values to use (optional with arrays, default is + * amrex::ParmParse::LAST for reading until the last input value) + */ + template + void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, + const int start_ix, const int num_val) + { + // Create parser objects and apply them to the values provided by the user. + std::vector tmp_str_arr; + a_pp.getarr(str, tmp_str_arr, start_ix, num_val); + + int const n = static_cast(tmp_str_arr.size()); + val.resize(n); + for (int i=0 ; i < n ; i++) { + auto parser = makeParser(tmp_str_arr[i], {}); + if (std::is_same::value) { + val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); + } + else { + val[i] = static_cast(parser.compileHost<0>()()); + } + } + } + +} + +#endif // WARPX_UTILS_PARSER_PARSERUTILS_H_ diff --git a/Source/Utils/Parser/ParserUtils.cpp b/Source/Utils/Parser/ParserUtils.cpp new file mode 100644 index 00000000000..48cc9b24876 --- /dev/null +++ b/Source/Utils/Parser/ParserUtils.cpp @@ -0,0 +1,165 @@ +/* Copyright 2022 Andrew Myers, Burlen Loring, Luca Fedeli + * Maxence Thevenet, Remi Lehe, Revathi Jambunathan + * + * This file is part of WarpX. 
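// Illustrative sketch of evaluating a constant expression with makeParser,
// declared in ParserUtils.H above: the WarpX constants (clight, pi, q_e, ...)
// are pre-registered, and any remaining symbol is looked up in my_constants.
// The wavelength 0.8e-6 below is a made-up value.
#include "Utils/Parser/ParserUtils.H"

double omega_demo ()
{
    const auto parser = utils::parser::makeParser("2*pi*clight/0.8e-6", {});
    return parser.compileHost<0>()();  // ~2.35e15 rad/s
}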
+ * + * License: BSD-3-Clause-LBNL + */ + +#include "ParserUtils.H" + +#include "Utils/TextMsg.H" +#include "Utils/WarpXConst.H" + +#include +#include + +#include +#include +#include + +void utils::parser::Store_parserString( + const amrex::ParmParse& pp, + std::string query_string, + std::string& stored_string) +{ + std::vector f; + pp.getarr(query_string.c_str(), f); + stored_string.clear(); + for (auto const& s : f) { + stored_string += s; + } + f.clear(); +} + + +namespace { + template< typename int_type > + AMREX_FORCE_INLINE + int_type safeCastTo(const amrex::Real x, const std::string& real_name) { + int_type result = int_type(0); + bool error_detected = false; + std::string assert_msg; + // (2.0*(numeric_limits::max()/2+1)) converts numeric_limits::max()+1 to a real ensuring accuracy to all digits + // This accepts x = 2**31-1 but rejects 2**31. + using namespace amrex::literals; + constexpr amrex::Real max_range = (2.0_rt*static_cast(std::numeric_limits::max()/2+1)); + if (x < max_range) { + if (std::ceil(x) >= std::numeric_limits::min()) { + result = static_cast(x); + } else { + error_detected = true; + assert_msg = "Negative overflow detected when casting " + real_name + " = " + + std::to_string(x) + " to integer type"; + } + } else if (x > 0) { + error_detected = true; + assert_msg = "Overflow detected when casting " + real_name + " = " + std::to_string(x) + " to integer type"; + } else { + error_detected = true; + assert_msg = "NaN detected when casting " + real_name + " to integer type"; + } + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!error_detected, assert_msg); + return result; + } +} + + +int utils::parser::safeCastToInt(const amrex::Real x, const std::string& real_name) { + return ::safeCastTo (x, real_name); +} + + +long utils::parser::safeCastToLong(const amrex::Real x, const std::string& real_name) { + return ::safeCastTo (x, real_name); +} + + +amrex::Parser utils::parser::makeParser ( + std::string const& parse_function, amrex::Vector const& varnames) +{ + // Since queryWithParser recursively calls this routine, keep track of symbols + // in case an infinite recursion is found (a symbol's value depending on itself). + static std::set recursive_symbols; + + amrex::Parser parser(parse_function); + parser.registerVariables(varnames); + + std::set symbols = parser.symbols(); + for (auto const& v : varnames) symbols.erase(v.c_str()); + + // User can provide inputs under this name, through which expressions + // can be provided for arbitrary variables. PICMI inputs are aware of + // this convention and use the same prefix as well. This potentially + // includes variable names that match physical or mathematical + // constants, in case the user wishes to enforce a different + // system of units or some form of quasi-physical behavior in the + // simulation. Thus, this needs to override any built-in + // constants. 
+ amrex::ParmParse pp_my_constants("my_constants"); + + // Physical / Numerical Constants available to parsed expressions + static std::map warpx_constants = + { + {"clight", PhysConst::c}, + {"epsilon0", PhysConst::ep0}, + {"mu0", PhysConst::mu0}, + {"q_e", PhysConst::q_e}, + {"m_e", PhysConst::m_e}, + {"m_p", PhysConst::m_p}, + {"m_u", PhysConst::m_u}, + {"kb", PhysConst::kb}, + {"pi", MathConst::pi}, + }; + + for (auto it = symbols.begin(); it != symbols.end(); ) { + // Always parsing in double precision avoids potential overflows that may occur when parsing + // user's expressions because of the limited range of exponentials in single precision + double v; + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + recursive_symbols.count(*it)==0, + "Expressions contains recursive symbol "+*it); + recursive_symbols.insert(*it); + const bool is_input = queryWithParser(pp_my_constants, it->c_str(), v); + recursive_symbols.erase(*it); + + if (is_input) { + parser.setConstant(*it, v); + it = symbols.erase(it); + continue; + } + + const auto constant = warpx_constants.find(*it); + if (constant != warpx_constants.end()) { + parser.setConstant(*it, constant->second); + it = symbols.erase(it); + continue; + } + + ++it; + } + for (auto const& s : symbols) { + amrex::Abort(Utils::TextMsg::Err("makeParser::Unknown symbol "+s)); + } + return parser; +} + + +double +utils::parser::parseStringtoDouble(const std::string& str) +{ + const auto parser = makeParser(str, {}); + const auto exe = parser.compileHost<0>(); + const auto result = exe(); + return result; +} + + +int +utils::parser::parseStringtoInt(const std::string& str, const std::string& name) +{ + const auto rval = static_cast(parseStringtoDouble(str)); + const auto ival = safeCastToInt(std::round(rval), name); + return ival; +} diff --git a/Source/Utils/Physics/IonizationEnergiesTable.H b/Source/Utils/Physics/IonizationEnergiesTable.H new file mode 100644 index 00000000000..b806340cad1 --- /dev/null +++ b/Source/Utils/Physics/IonizationEnergiesTable.H @@ -0,0 +1,226 @@ +// This script was automatically generated! +// Edit dev/Source/Utils/Physics/write_atomic_data_cpp.py instead! 
+ +#ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_ +#define WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_ + +#include + +#include +#include + +namespace utils::physics +{ + static std::map const ion_map_ids = { + {"H", 0}, + {"He", 1}, + {"Li", 2}, + {"Be", 3}, + {"B", 4}, + {"C", 5}, + {"N", 6}, + {"O", 7}, + {"F", 8}, + {"Ne", 9}, + {"Na", 10}, + {"Mg", 11}, + {"Al", 12}, + {"Si", 13}, + {"P", 14}, + {"S", 15}, + {"Cl", 16}, + {"Ar", 17}, + {"Cu", 18}, + {"Kr", 19}, + {"Rb", 20}, + {"Xe", 21}, + {"Rn", 22} }; + + constexpr int nelements = 23; + + constexpr int ion_atomic_numbers[nelements] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 29, 36, + 37, 54, 86}; + + constexpr int ion_energy_offsets[nelements] = { + 0, 1, 3, 6, 10, 15, 21, 28, 36, 45, + 55, 66, 78, 91, 105, 120, 136, 153, 171, 200, + 236, 273, 327}; + + constexpr int energies_tab_length = 413; + + constexpr amrex::Real table_ionization_energies[energies_tab_length]{ + // H + amrex::Real(13.59843449), + // He + amrex::Real(24.58738880), amrex::Real(54.4177650), + // Li + amrex::Real(5.39171495), amrex::Real(75.6400964), amrex::Real(122.4543581), + // Be + amrex::Real(9.322699), amrex::Real(18.21115), amrex::Real(153.896203), + amrex::Real(217.7185843), + // B + amrex::Real(8.298019), amrex::Real(25.15483), amrex::Real(37.93058), + amrex::Real(259.3715), amrex::Real(340.226020), + // C + amrex::Real(11.2602880), amrex::Real(24.383154), amrex::Real(47.88778), + amrex::Real(64.49352), amrex::Real(392.090515), amrex::Real(489.993194), + // N + amrex::Real(14.53413), amrex::Real(29.60125), amrex::Real(47.4453), + amrex::Real(77.4735), amrex::Real(97.8901), amrex::Real(552.06732), + amrex::Real(667.046116), + // O + amrex::Real(13.618055), amrex::Real(35.12112), amrex::Real(54.93554), + amrex::Real(77.41350), amrex::Real(113.8990), amrex::Real(138.1189), + amrex::Real(739.32682), amrex::Real(871.40988), + // F + amrex::Real(17.42282), amrex::Real(34.97081), amrex::Real(62.70798), + amrex::Real(87.175), amrex::Real(114.249), amrex::Real(157.16311), + amrex::Real(185.1868), amrex::Real(953.89804), amrex::Real(1103.11747), + // Ne + amrex::Real(21.564540), amrex::Real(40.96297), amrex::Real(63.4233), + amrex::Real(97.1900), amrex::Real(126.247), amrex::Real(157.934), + amrex::Real(207.271), amrex::Real(239.0970), amrex::Real(1195.80783), + amrex::Real(1362.19915), + // Na + amrex::Real(5.1390769), amrex::Real(47.28636), amrex::Real(71.6200), + amrex::Real(98.936), amrex::Real(138.404), amrex::Real(172.23), + amrex::Real(208.504), amrex::Real(264.192), amrex::Real(299.856), + amrex::Real(1465.13449), amrex::Real(1648.70218), + // Mg + amrex::Real(7.646236), amrex::Real(15.035271), amrex::Real(80.1436), + amrex::Real(109.2654), amrex::Real(141.33), amrex::Real(186.76), + amrex::Real(225.02), amrex::Real(265.924), amrex::Real(327.99), + amrex::Real(367.489), amrex::Real(1761.80487), amrex::Real(1962.66365), + // Al + amrex::Real(5.985769), amrex::Real(18.82855), amrex::Real(28.447642), + amrex::Real(119.9924), amrex::Real(153.8252), amrex::Real(190.49), + amrex::Real(241.76), amrex::Real(284.64), amrex::Real(330.21), + amrex::Real(398.65), amrex::Real(442.005), amrex::Real(2085.97700), + amrex::Real(2304.14005), + // Si + amrex::Real(8.15168), amrex::Real(16.34585), amrex::Real(33.49300), + amrex::Real(45.14179), amrex::Real(166.767), amrex::Real(205.279), + amrex::Real(246.57), amrex::Real(303.59), amrex::Real(351.28), + amrex::Real(401.38), amrex::Real(476.273), amrex::Real(523.415), + amrex::Real(2437.65813), 
amrex::Real(2673.17753), + // P + amrex::Real(10.486686), amrex::Real(19.76949), amrex::Real(30.20264), + amrex::Real(51.44387), amrex::Real(65.02511), amrex::Real(220.430), + amrex::Real(263.57), amrex::Real(309.60), amrex::Real(372.31), + amrex::Real(424.40), amrex::Real(479.44), amrex::Real(560.62), + amrex::Real(611.741), amrex::Real(2816.90876), amrex::Real(3069.8415), + // S + amrex::Real(10.36001), amrex::Real(23.33788), amrex::Real(34.86), + amrex::Real(47.222), amrex::Real(72.5945), amrex::Real(88.0529), + amrex::Real(280.954), amrex::Real(328.794), amrex::Real(379.84), + amrex::Real(447.7), amrex::Real(504.55), amrex::Real(564.41), + amrex::Real(651.96), amrex::Real(706.994), amrex::Real(3223.7807), + amrex::Real(3494.1879), + // Cl + amrex::Real(12.967632), amrex::Real(23.81364), amrex::Real(39.80), + amrex::Real(53.24), amrex::Real(67.68), amrex::Real(96.94), + amrex::Real(114.2013), amrex::Real(348.306), amrex::Real(400.851), + amrex::Real(456.7), amrex::Real(530.0), amrex::Real(591.58), + amrex::Real(656.30), amrex::Real(750.23), amrex::Real(809.198), + amrex::Real(3658.3437), amrex::Real(3946.2909), + // Ar + amrex::Real(15.7596117), amrex::Real(27.62967), amrex::Real(40.735), + amrex::Real(59.58), amrex::Real(74.84), amrex::Real(91.290), + amrex::Real(124.41), amrex::Real(143.4567), amrex::Real(422.60), + amrex::Real(479.76), amrex::Real(540.4), amrex::Real(619.0), + amrex::Real(685.5), amrex::Real(755.13), amrex::Real(855.5), + amrex::Real(918.375), amrex::Real(4120.6656), amrex::Real(4426.2228), + // Cu + amrex::Real(7.726380), amrex::Real(20.29239), amrex::Real(36.841), + amrex::Real(57.38), amrex::Real(79.8), amrex::Real(103.0), + amrex::Real(139.0), amrex::Real(166.0), amrex::Real(198.0), + amrex::Real(232.2), amrex::Real(265.33), amrex::Real(367.0), + amrex::Real(401.0), amrex::Real(436.0), amrex::Real(483.1), + amrex::Real(518.7), amrex::Real(552.8), amrex::Real(632.5), + amrex::Real(670.608), amrex::Real(1690.5), amrex::Real(1800), + amrex::Real(1918), amrex::Real(2044), amrex::Real(2179.4), + amrex::Real(2307.3), amrex::Real(2479.1), amrex::Real(2586.954), + amrex::Real(11062.4313), amrex::Real(11567.613), + // Kr + amrex::Real(13.9996053), amrex::Real(24.35984), amrex::Real(35.838), + amrex::Real(50.85), amrex::Real(64.69), amrex::Real(78.49), + amrex::Real(109.13), amrex::Real(125.802), amrex::Real(233.0), + amrex::Real(268), amrex::Real(308), amrex::Real(350), + amrex::Real(391), amrex::Real(446), amrex::Real(492), + amrex::Real(540), amrex::Real(591), amrex::Real(640), + amrex::Real(785), amrex::Real(831.6), amrex::Real(882.8), + amrex::Real(945), amrex::Real(999.0), amrex::Real(1042), + amrex::Real(1155.0), amrex::Real(1205.23), amrex::Real(2928.9), + amrex::Real(3072), amrex::Real(3228), amrex::Real(3380), + amrex::Real(3584), amrex::Real(3752.0), amrex::Real(3971), + amrex::Real(4109.083), amrex::Real(17296.420), amrex::Real(17936.209), + // Rb + amrex::Real(4.1771280), amrex::Real(27.28954), amrex::Real(39.247), + amrex::Real(52.20), amrex::Real(68.44), amrex::Real(82.9), + amrex::Real(98.67), amrex::Real(132.79), amrex::Real(150.628), + amrex::Real(277.12), amrex::Real(313.1), amrex::Real(356.0), + amrex::Real(400), amrex::Real(443), amrex::Real(502), + amrex::Real(550), amrex::Real(601), amrex::Real(654), + amrex::Real(706.0), amrex::Real(857), amrex::Real(905.3), + amrex::Real(958.9), amrex::Real(1024), amrex::Real(1080), + amrex::Real(1125), amrex::Real(1242.5), amrex::Real(1294.57), + amrex::Real(3133.3), amrex::Real(3281), amrex::Real(3443), + 
amrex::Real(3600), amrex::Real(3815), amrex::Real(3988), + amrex::Real(4214), amrex::Real(4356.865), amrex::Real(18305.884), + amrex::Real(18965.516), + // Xe + amrex::Real(12.1298436), amrex::Real(20.975), amrex::Real(31.05), + amrex::Real(42.20), amrex::Real(54.1), amrex::Real(66.703), + amrex::Real(91.6), amrex::Real(105.9778), amrex::Real(179.84), + amrex::Real(202.0), amrex::Real(229.02), amrex::Real(255.0), + amrex::Real(281), amrex::Real(314), amrex::Real(343), + amrex::Real(374), amrex::Real(404), amrex::Real(434), + amrex::Real(549), amrex::Real(582), amrex::Real(616), + amrex::Real(650), amrex::Real(700), amrex::Real(736), + amrex::Real(818), amrex::Real(857.0), amrex::Real(1493), + amrex::Real(1571), amrex::Real(1653), amrex::Real(1742), + amrex::Real(1826), amrex::Real(1919), amrex::Real(2023), + amrex::Real(2113), amrex::Real(2209), amrex::Real(2300), + amrex::Real(2556), amrex::Real(2637), amrex::Real(2726), + amrex::Real(2811), amrex::Real(2975), amrex::Real(3068), + amrex::Real(3243), amrex::Real(3333.8), amrex::Real(7660), + amrex::Real(7889), amrex::Real(8144), amrex::Real(8382), + amrex::Real(8971), amrex::Real(9243), amrex::Real(9581), + amrex::Real(9810.37), amrex::Real(40271.724), amrex::Real(41299.71), + // Rn + amrex::Real(10.74850), amrex::Real(21.4), amrex::Real(29.4), + amrex::Real(36.9), amrex::Real(52.9), amrex::Real(64.0), + amrex::Real(88.0), amrex::Real(102.0), amrex::Real(154.0), + amrex::Real(173.9), amrex::Real(195.0), amrex::Real(218.0), + amrex::Real(240), amrex::Real(264), amrex::Real(293), + amrex::Real(317), amrex::Real(342), amrex::Real(367), + amrex::Real(488), amrex::Real(520), amrex::Real(550), + amrex::Real(580), amrex::Real(640), amrex::Real(680), + amrex::Real(760), amrex::Real(800), amrex::Real(850), + amrex::Real(920), amrex::Real(980), amrex::Real(1050), + amrex::Real(1110), amrex::Real(1180), amrex::Real(1250), + amrex::Real(1310), amrex::Real(1390), amrex::Real(1460), + amrex::Real(1520), amrex::Real(1590), amrex::Real(1660), + amrex::Real(1720), amrex::Real(2033), amrex::Real(2094), + amrex::Real(2158), amrex::Real(2227), amrex::Real(2293), + amrex::Real(2357), amrex::Real(2467), amrex::Real(2535), + amrex::Real(2606), amrex::Real(2674), amrex::Real(2944), + amrex::Real(3010), amrex::Real(3082), amrex::Real(3149), + amrex::Real(3433), amrex::Real(3510), amrex::Real(3699), + amrex::Real(3777), amrex::Real(6169), amrex::Real(6318), + amrex::Real(6476), amrex::Real(6646), amrex::Real(6807), + amrex::Real(6964), amrex::Real(7283), amrex::Real(7450), + amrex::Real(7630), amrex::Real(7800), amrex::Real(8260), + amrex::Real(8410), amrex::Real(8570), amrex::Real(8710), + amrex::Real(9610), amrex::Real(9780), amrex::Real(10120), + amrex::Real(10290), amrex::Real(21770), amrex::Real(22160), + amrex::Real(22600), amrex::Real(22990), amrex::Real(26310), + amrex::Real(26830), amrex::Real(27490), amrex::Real(27903.1), + amrex::Real(110842.0), amrex::Real(112843.7) + }; + +} + +#endif // #ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_ diff --git a/Source/Utils/Physics/Make.package b/Source/Utils/Physics/Make.package new file mode 100644 index 00000000000..d8f7bab3216 --- /dev/null +++ b/Source/Utils/Physics/Make.package @@ -0,0 +1 @@ +VPATH_LOCATIONS += $(WARPX_HOME)/Source/Utils/Physics diff --git a/Source/Utils/atomic_data.txt b/Source/Utils/Physics/atomic_data.txt similarity index 100% rename from Source/Utils/atomic_data.txt rename to Source/Utils/Physics/atomic_data.txt diff --git a/Source/Utils/write_atomic_data_cpp.py 
b/Source/Utils/Physics/write_atomic_data_cpp.py similarity index 69% rename from Source/Utils/write_atomic_data_cpp.py rename to Source/Utils/Physics/write_atomic_data_cpp.py index 3b0538aa193..11cd3b2c0c5 100644 --- a/Source/Utils/write_atomic_data_cpp.py +++ b/Source/Utils/Physics/write_atomic_data_cpp.py @@ -32,56 +32,59 @@ # Head of CPP file cpp_string = '// This script was automatically generated!\n' -cpp_string += '// Edit dev/Source/Utils/write_atomic_data_cpp.py instead!\n' -cpp_string += '#ifndef WARPX_IONIZATION_TABLE_H_\n' -cpp_string += '#define WARPX_IONIZATION_TABLE_H_\n\n' -cpp_string += '#include \n' +cpp_string += '// Edit dev/Source/Utils/Physics/write_atomic_data_cpp.py instead!\n\n' +cpp_string += '#ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n' +cpp_string += '#define WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n\n' cpp_string += '#include \n\n' cpp_string += '#include \n' cpp_string += '#include \n\n' +cpp_string += 'namespace utils::physics\n' +cpp_string += '{\n' # Map each element to ID in table -cpp_string += 'static std::map const ion_map_ids = {' +cpp_string += ' static std::map const ion_map_ids = {' for count, name in enumerate(ion_names): - cpp_string += '\n {"' + name + '", ' + str(count) + '},' + cpp_string += '\n {"' + name + '", ' + str(count) + '},' cpp_string = cpp_string[:-1] cpp_string += ' };\n\n' # Atomic number of each species -cpp_string += 'constexpr int nelements = ' + str(len(ion_names)) + ';\n\n' -cpp_string += 'constexpr int ion_atomic_numbers[nelements] = {\n ' +cpp_string += ' constexpr int nelements = ' + str(len(ion_names)) + ';\n\n' +cpp_string += ' constexpr int ion_atomic_numbers[nelements] = {\n ' for count, atom_num in enumerate(ion_atom_numbers): - if count%10==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' + if count%10==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' cpp_string += str(atom_num) + ', ' cpp_string = cpp_string[:-2] cpp_string += '};\n\n' # Offset of each element in table of ionization energies -cpp_string += 'constexpr int ion_energy_offsets[nelements] = {\n ' +cpp_string += ' constexpr int ion_energy_offsets[nelements] = {\n ' for count, offset in enumerate(ion_offsets): - if count%10==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' + if count%10==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' cpp_string += str(offset) + ', ' cpp_string = cpp_string[:-2] cpp_string += '};\n\n' # Table of ionization energies -cpp_string += 'constexpr int energies_tab_length = ' + str(len(list_of_tuples)) + ';\n\n' -cpp_string += 'constexpr amrex::Real table_ionization_energies[energies_tab_length]{' +cpp_string += ' constexpr int energies_tab_length = ' + str(len(list_of_tuples)) + ';\n\n' +cpp_string += ' constexpr amrex::Real table_ionization_energies[energies_tab_length]{' for element in ion_names: - cpp_string += '\n // ' + element + '\n ' + cpp_string += '\n // ' + element + '\n ' regex_command = \ '\n\s+(\d+)\s+\|\s+%s\s+\w+\s+\|\s+\+*(\d+)\s+\|\s+\(*\[*(\d+\.*\d*)' \ %element list_of_tuples = re.findall( regex_command, text_data ) for count, energy in enumerate([x[2] for x in list_of_tuples]): - if count%3==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' + if count%3==0 and count>0: cpp_string = cpp_string[:-2] + ',\n ' cpp_string += "amrex::Real(" + energy + '), ' cpp_string = cpp_string[:-1] cpp_string = cpp_string[:-1] -cpp_string += '\n};\n\n' +cpp_string += '\n };\n\n' + +cpp_string += '}\n\n' # Write the string to file -cpp_string += '#endif // #ifndef WARPX_IONIZATION_TABLE_H_\n' +cpp_string 
+= '#endif // #ifndef WARPX_UTILS_PHYSICS_IONIZATION_TABLE_H_\n' f= open("IonizationEnergiesTable.H","w") f.write(cpp_string) f.close() diff --git a/Source/Utils/Strings/CMakeLists.txt b/Source/Utils/Strings/CMakeLists.txt new file mode 100644 index 00000000000..918384f7a8b --- /dev/null +++ b/Source/Utils/Strings/CMakeLists.txt @@ -0,0 +1,4 @@ +target_sources(WarpX + PRIVATE + StringUtils.cpp +) diff --git a/Source/Utils/Strings/Make.package b/Source/Utils/Strings/Make.package new file mode 100644 index 00000000000..d6f578b8a3c --- /dev/null +++ b/Source/Utils/Strings/Make.package @@ -0,0 +1,3 @@ +CEXE_sources += StringUtils.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/Utils/Strings diff --git a/Source/Utils/Strings/StringUtils.H b/Source/Utils/Strings/StringUtils.H new file mode 100644 index 00000000000..5c21d1be1ea --- /dev/null +++ b/Source/Utils/Strings/StringUtils.H @@ -0,0 +1,68 @@ +/* Copyright 2022 Andrew Myers, Luca Fedeli, Maxence Thevenet + * Revathi Jambunathan + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_UTILS_STRINGS_STRINGUTILS_H_ +#define WARPX_UTILS_STRINGS_STRINGUTILS_H_ + +#include + +#include +#include + +namespace utils::strings +{ + /** \brief Splits a string using a string separator. This is somewhat similar to + * amrex::Tokenize. The main difference is that, if the separator ":" is used, + * amrex::Tokenize will split ":3::2" into ["3","2"] while this functio will + * split ":3::2" into ["","3","","2"]. This function can also perform a trimming to + * remove whitespaces (or any other arbitrary string) from the split string. + * + * @tparam Container the type of the split string. + * + * @param[in] instr the input string + * @param[in] separator the separator string + * @param[in] trim true to trim the split string, false otherwise. + * @param[in] trim_space the string to trim if trim is true. + * @return cont the split string + */ + template + auto split (std::string const& instr, std::string const& separator, + bool const trim = false, std::string const& trim_space = " \t") + { + Container cont; + std::size_t current = instr.find(separator); + std::size_t previous = 0; + while (current != std::string::npos) { + if (trim){ + cont.push_back(amrex::trim(instr.substr(previous, current - previous),trim_space));} + else{ + cont.push_back(instr.substr(previous, current - previous));} + previous = current + separator.size(); + current = instr.find(separator, previous); + } + if (trim){ + cont.push_back(amrex::trim(instr.substr(previous, current - previous),trim_space));} + else{ + cont.push_back(instr.substr(previous, current - previous));} + return cont; + } + + /** \brief This function performs automatic text wrapping on a string, + * returning an array of strings each not exceeding the maximum line length + * (unless the text contains a word exceeding the maximum line length). 
+ * + * @param[in] text the string containing the text to be wrapped + * @param[in] max_line_length the maximum line length + * @return an std::vector containing the lines of the wrapped text + */ + std::vector automatic_text_wrap( + const std::string& text, const int max_line_length); + +} + +#endif //WARPX_UTILS_STRINGS_STRINGUTILS_H_ diff --git a/Source/Utils/Strings/StringUtils.cpp b/Source/Utils/Strings/StringUtils.cpp new file mode 100644 index 00000000000..f095794c978 --- /dev/null +++ b/Source/Utils/Strings/StringUtils.cpp @@ -0,0 +1,51 @@ +/* Copyright 2022 Andrew Myers, Luca Fedeli, Maxence Thevenet + * Revathi Jambunathan + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#include "StringUtils.H" + +#include + +std::vector automatic_text_wrap( + const std::string& text, const int max_line_length){ + + auto ss_text = std::stringstream{text}; + auto wrapped_text_lines = std::vector{}; + + std::string line; + while(std::getline(ss_text, line,'\n')){ + + auto ss_line = std::stringstream{line}; + int counter = 0; + std::stringstream ss_line_out; + std::string word; + + while (ss_line >> word){ + const auto wlen = static_cast(word.length()); + + if(counter == 0){ + ss_line_out << word; + counter += wlen; + } + else{ + if (counter + wlen < max_line_length){ + ss_line_out << " " << word; + counter += (wlen+1); + } + else{ + wrapped_text_lines.push_back(ss_line_out.str()); + ss_line_out = std::stringstream{word}; + counter = wlen; + } + } + } + + wrapped_text_lines.push_back(ss_line_out.str()); + } + + return wrapped_text_lines; +} diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 275ce7e3143..c306a5a648a 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -56,16 +56,6 @@ void CheckGriddingForRZSpectral (); void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax); -/** - * \brief Parse a string (typically a mathematical expression) from the - * input file and store it into a variable. - * - * \param pp used to read the query_string `pp.=string` - * \param query_string ParmParse.query will look for this string - * \param stored_string variable in which the string to parse is stored - */ -void Store_parserString(const amrex::ParmParse &pp, std::string query_string, - std::string& stored_string); namespace WarpXUtilIO{ /** @@ -80,77 +70,6 @@ bool WriteBinaryDataOnFile(std::string filename, const amrex::Vector& data namespace WarpXUtilAlgo{ -/** \brief Returns a pointer to the first element in the range [first, last) that is greater than val - * - * A re-implementation of the upper_bound algorithm suitable for GPU kernels. 
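The utils::strings helpers added above consist of the header-only split and the compiled automatic_text_wrap. A short usage sketch, assuming only the include path introduced by this patch; the function body and example strings are illustrative:

    // Illustrative usage of the utils::strings helpers introduced above.
    #include "Utils/Strings/StringUtils.H"

    #include <string>
    #include <vector>

    void strings_example ()
    {
        // Unlike amrex::Tokenize, empty tokens are preserved: ":3::2" -> ["", "3", "", "2"].
        const auto tokens =
            utils::strings::split<std::vector<std::string>>(":3::2", ":");

        // Split on commas and trim the surrounding whitespace from each token.
        const auto names =
            utils::strings::split<std::vector<std::string>>(" beam , electrons ", ",", true);

        // Wrap a long message into lines of at most 40 characters.
        const auto lines = utils::strings::automatic_text_wrap(
            "this warning message is too long to fit on a single output line", 40);
    }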
- * - * @param first: pointer to left limit of the range to consider - * @param last: pointer to right limit of the range to consider - * @param val: value to compare the elements of [first, last) to - */ -template AMREX_GPU_DEVICE AMREX_FORCE_INLINE -const T* upper_bound(const T* first, const T* last, const T& val) -{ - const T* it; - size_t count, step; - count = last-first; - while(count>0){ - it = first; - step = count/2; - it += step; - if (!(val<*it)){ - first = ++it; - count -= step + 1; - } - else{ - count = step; - } - } - return first; -} - -/** \brief Performs a linear interpolation - * - * Performs a linear interpolation at x given the 2 points - * (x0, f0) and (x1, f1) - */ -template AMREX_GPU_DEVICE AMREX_FORCE_INLINE -T linear_interp(T x0, T x1, T f0, T f1, T x) -{ - return ((x1-x)*f0 + (x-x0)*f1)/(x1-x0); -} - -/** \brief Performs a bilinear interpolation - * - * Performs a bilinear interpolation at (x,y) given the 4 points - * (x0, y0, f00), (x0, y1, f01), (x1, y0, f10), (x1, y1, f11). - */ -template AMREX_GPU_DEVICE AMREX_FORCE_INLINE -T bilinear_interp(T x0, T x1, T y0, T y1, T f00, T f01, T f10, T f11, T x, T y) -{ - const T fx0 = linear_interp(x0, x1, f00, f10, x); - const T fx1 = linear_interp(x0, x1, f01, f11, x); - return linear_interp(y0, y1, fx0, fx1, y); -} - -/** \brief Performs a trilinear interpolation - * - * Performs a trilinear interpolation at (x,y,z) given the 8 points - * (x0, y0, z0, f000), (x0, y0, z1, f001), (x0, y1, z0, f010), (x0, y1, z1, f011), - * (x1, y0, z0, f100), (x1, y0, z1, f101), (x1, y1, z0, f110), (x1, y1, z1, f111) - */ -template AMREX_GPU_DEVICE AMREX_FORCE_INLINE -T trilinear_interp(T x0, T x1,T y0, T y1, T z0, T z1, - T f000, T f001, T f010, T f011, T f100, T f101, T f110, T f111, - T x, T y, T z) -{ - const T fxy0 = bilinear_interp( - x0, x1, y0, y1, f000, f010, f100, f110, x, y); - const T fxy1 = bilinear_interp( - x0, x1, y0, y1, f001, f011, f101, f111, x, y); - return linear_interp(z0, z1, fxy0, fxy1, z); -} - /** \brief Compute physical coordinates (x,y,z) that correspond to a given (i,j,k) and * the corresponding staggering, mf_type. * @@ -186,338 +105,6 @@ void getCellCoordinates (int i, int j, int k, } -/** -* \brief Do a safe cast of a real to an int -* This ensures that the float value is within the range of ints and if not, -* raises an exception. -* -* \param x Real value to cast -* \param real_name String, the name of the variable being casted to use in the error message -*/ -int -safeCastToInt(amrex::Real x, const std::string& real_name); - -/** -* \brief Do a safe cast of a real to a long -* This ensures that the float value is within the range of longs and if not, -* raises an exception. -* -* \param x Real value to cast -* \param real_name String, the name of the variable being casted to use in the error message -*/ -long -safeCastToLong(amrex::Real x, const std::string& real_name); - -/** -* \brief Initialize an amrex::Parser object from a string containing a math expression -* -* \param parse_function String to read to initialize the parser. -* \param varnames A list of predefined independent variables -*/ -amrex::Parser makeParser (std::string const& parse_function, amrex::Vector const& varnames); - -/** Parse a string and return as a real - * - * In case the string cannot be interpreted as Real, - * this function ... 
- * - * \param str The string to be parsed - * \return representation as real - */ -double parseStringtoReal(std::string str); - -/** Parse a string and return an int - * - * In case the string cannot be interpreted as Real, - * this function ... - * - * \param str The string to be parsed - * \param name For integers, the name, to be used in error messages - * \return rounded closest integer - */ -int parseStringtoInt(std::string str, std::string name); - -template -amrex::ParserExecutor compileParser (amrex::Parser const* parser) -{ - if (parser) { - return parser->compile(); - } else { - return amrex::ParserExecutor{}; - } -} - -/** Similar to amrex::ParmParse::query, but also supports math expressions for the value. - * - * amrex::ParmParse::query reads a name and a value from the input file. This function does the - * same, and applies the amrex::Parser to the value, so the user has the choice to specify a value or - * a math expression (including user-defined constants). - * Works for amrex::Real numbers and integers. - * - * \param[in] a_pp amrex::ParmParse object - * \param[in] str name of the parameter to read - * \param[out] val where the value queried and parsed is stored, either a scalar or vector - */ -template -int queryWithParser (const amrex::ParmParse& a_pp, char const * const str, T& val) -{ - // call amrex::ParmParse::query, check if the user specified str. - std::string tmp_str; - int is_specified = a_pp.query(str, tmp_str); - if (is_specified) - { - // If so, create a parser object and apply it to the value provided by the user. - std::string str_val; - Store_parserString(a_pp, str, str_val); - - auto parser = makeParser(str_val, {}); - - if (std::is_same::value) { - - val = safeCastToInt(std::round(parser.compileHost<0>()()), str); - } - else if (std::is_same::value) { - - val = safeCastToLong(std::round(parser.compileHost<0>()()), str); - } - else { - val = static_cast(parser.compileHost<0>()()); - } - } - // return the same output as amrex::ParmParse::query - return is_specified; -} - -template -int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val) -{ - // call amrex::ParmParse::query, check if the user specified str. - std::vector tmp_str_arr; - int is_specified = a_pp.queryarr(str, tmp_str_arr); - if (is_specified) - { - // If so, create parser objects and apply them to the values provided by the user. - int const n = static_cast(tmp_str_arr.size()); - val.resize(n); - for (int i=0 ; i < n ; i++) { - auto parser = makeParser(tmp_str_arr[i], {}); - if (std::is_same::value) { - val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); - } - else if (std::is_same::value) { - val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); - } - else { - val[i] = static_cast(parser.compileHost<0>()()); - } - } - } - // return the same output as amrex::ParmParse::query - return is_specified; -} - -/** Similar to amrex::ParmParse::query, but also supports math expressions for the value. - * - * amrex::ParmParse::query reads a name and a value from the input file. This function does the - * same, and applies the amrex::Parser to the value, so the user has the choice to specify a value or - * a math expression (including user-defined constants). - * Works for amrex::Real numbers and integers. 
- * - * \param[in] a_pp amrex::ParmParse object - * \param[in] str name of the parameter to read - * \param[out] val where the value queried and parsed is stored, either a scalar or vector - * \param[in] start_ix start index in the list of inputs values (optional with arrays, default is - * amrex::ParmParse::FIRST for starting with the first input value) - * \param[in] num_val number of input values to use (optional with arrays, default is - * amrex::ParmParse::LAST for reading until the last input value) - */ -template -int queryArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, - const int start_ix, const int num_val) -{ - // call amrex::ParmParse::query, check if the user specified str. - std::vector tmp_str_arr; - int is_specified = a_pp.queryarr(str, tmp_str_arr, start_ix, num_val); - if (is_specified) - { - // If so, create parser objects and apply them to the values provided by the user. - int const n = static_cast(tmp_str_arr.size()); - val.resize(n); - for (int i=0 ; i < n ; i++) { - auto parser = makeParser(tmp_str_arr[i], {}); - if (std::is_same::value) { - val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); - } - else if (std::is_same::value) { - val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); - } - else { - val[i] = static_cast(parser.compileHost<0>()()); - } - } - } - // return the same output as amrex::ParmParse::query - return is_specified; -} - -/** Similar to amrex::ParmParse::get, but also supports math expressions for the value. - * - * amrex::ParmParse::get reads a name and a value from the input file. This function does the - * same, and applies the Parser to the value, so the user has the choice to specify a value or - * a math expression (including user-defined constants). - * Works for amrex::Real numbers and integers. - * - * \param[in] a_pp amrex::ParmParse object - * \param[in] str name of the parameter to read - * \param[out] val where the value queried and parsed is stored - */ -template -void getWithParser (const amrex::ParmParse& a_pp, char const * const str, T& val) -{ - // If so, create a parser object and apply it to the value provided by the user. - std::string str_val; - Store_parserString(a_pp, str, str_val); - - auto parser = makeParser(str_val, {}); - if (std::is_same::value) { - val = safeCastToInt(std::round(parser.compileHost<0>()()), str); - } - else if (std::is_same::value) { - val = safeCastToLong(std::round(parser.compileHost<0>()()), str); - } - else { - val = static_cast(parser.compileHost<0>()()); - } -} - -template -void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val) -{ - // Create parser objects and apply them to the values provided by the user. - std::vector tmp_str_arr; - a_pp.getarr(str, tmp_str_arr); - - int const n = static_cast(tmp_str_arr.size()); - val.resize(n); - for (int i=0 ; i < n ; i++) { - auto parser = makeParser(tmp_str_arr[i], {}); - if (std::is_same::value) { - val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); - } - else if (std::is_same::value) { - val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); - } - else { - val[i] = static_cast(parser.compileHost<0>()()); - } - } -} - -/** Similar to amrex::ParmParse::get, but also supports math expressions for the value. - * - * amrex::ParmParse::get reads a name and a value from the input file. 
This function does the - * same, and applies the Parser to the value, so the user has the choice to specify a value or - * a math expression (including user-defined constants). - * Works for amrex::Real numbers and integers. - * - * \param[in] a_pp amrex::ParmParse object - * \param[in] str name of the parameter to read - * \param[out] val where the value queried and parsed is stored - * \param[in] start_ix start index in the list of inputs values (optional with arrays, default is - * amrex::ParmParse::FIRST for starting with the first input value) - * \param[in] num_val number of input values to use (optional with arrays, default is - * amrex::ParmParse::LAST for reading until the last input value) - */ -template -void getArrWithParser (const amrex::ParmParse& a_pp, char const * const str, std::vector& val, - const int start_ix, const int num_val) -{ - // Create parser objects and apply them to the values provided by the user. - std::vector tmp_str_arr; - a_pp.getarr(str, tmp_str_arr, start_ix, num_val); - - int const n = static_cast(tmp_str_arr.size()); - val.resize(n); - for (int i=0 ; i < n ; i++) { - auto parser = makeParser(tmp_str_arr[i], {}); - if (std::is_same::value) { - val[i] = safeCastToInt(std::round(parser.compileHost<0>()()), str); - } - else if (std::is_same::value) { - val[i] = safeCastToLong(std::round(parser.compileHost<0>()()), str); - } - else { - val[i] = static_cast(parser.compileHost<0>()()); - } - } -} - -namespace WarpXUtilStr -{ - /** Return true if elem is in vect, false otherwise - * @param[in] vect vector of strings, typically names - * @param[in] elem single string - * @return true if elem is in vect, false otherwise - */ - bool is_in(const std::vector& vect, - const std::string& elem); - - /** Return true if any element in elems is in vect, false otherwise - * @param[in] vect vector of strings, typically names - * @param[in] elems vector of string - * @return true if any element in elems is in vect, false otherwise - */ - bool is_in(const std::vector& vect, - const std::vector& elems); - - /** \brief Splits a string using a string separator. This is somewhat similar to - * amrex::Tokenize. The main difference is that, if the separator ":" is used, - * amrex::Tokenize will split ":3::2" into ["3","2"] while this functio will - * split ":3::2" into ["","3","","2"]. This function can also perform a trimming to - * remove whitespaces (or any other arbitrary string) from the split string. - * - * @tparam Container the type of the split string. - * - * @param[in] instr the input string - * @param[in] separator the separator string - * @param[in] trim true to trim the split string, false otherwise. - * @param[in] trim_space the string to trim if trim is true. 
- * @return cont the split string - */ - template - auto split (std::string const& instr, std::string const& separator, - bool const trim = false, std::string const& trim_space = " \t") - { - Container cont; - std::size_t current = instr.find(separator); - std::size_t previous = 0; - while (current != std::string::npos) { - if (trim){ - cont.push_back(amrex::trim(instr.substr(previous, current - previous),trim_space));} - else{ - cont.push_back(instr.substr(previous, current - previous));} - previous = current + separator.size(); - current = instr.find(separator, previous); - } - if (trim){ - cont.push_back(amrex::trim(instr.substr(previous, current - previous),trim_space));} - else{ - cont.push_back(instr.substr(previous, current - previous));} - return cont; - } - - /** \brief This function performs automatic text wrapping on a string, - * returning an array of strings each not exceeding the maximum line length - * (unless the text contains a word exceeding the maximum line length). - * - * @param[in] text the string containing the text to be wrapped - * @param[in] max_line_length the maximum line length - * @return an std::vector containing the lines of the wrapped text - */ - std::vector automatic_text_wrap( - const std::string& text, const int max_line_length); - -} namespace WarpXUtilLoadBalance { diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index eecc04b7802..a36d31e93a1 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -7,6 +7,7 @@ */ #include "WarpX.H" +#include "Utils/Parser/ParserUtils.H" #include "TextMsg.H" #include "WarpXAlgorithmSelection.H" #include "WarpXConst.H" @@ -44,7 +45,7 @@ void PreparseAMReXInputIntArray(amrex::ParmParse& a_pp, char const * const input const int cnt = a_pp.countval(input_str); if (cnt > 0) { Vector input_array; - getArrWithParser(a_pp, input_str, input_array); + utils::parser::getArrWithParser(a_pp, input_str, input_array); if (replace) { a_pp.remove(input_str); } @@ -64,9 +65,11 @@ void ParseGeometryInput() Vector prob_lo(AMREX_SPACEDIM); Vector prob_hi(AMREX_SPACEDIM); - getArrWithParser(pp_geometry, "prob_lo", prob_lo, 0, AMREX_SPACEDIM); + utils::parser::getArrWithParser( + pp_geometry, "prob_lo", prob_lo, 0, AMREX_SPACEDIM); AMREX_ALWAYS_ASSERT(prob_lo.size() == AMREX_SPACEDIM); - getArrWithParser(pp_geometry, "prob_hi", prob_hi, 0, AMREX_SPACEDIM); + utils::parser::getArrWithParser( + pp_geometry, "prob_hi", prob_hi, 0, AMREX_SPACEDIM); AMREX_ALWAYS_ASSERT(prob_hi.size() == AMREX_SPACEDIM); #ifdef WARPX_DIM_RZ @@ -108,7 +111,7 @@ void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost, Vector& boost_direction) { ParmParse pp_warpx("warpx"); - queryWithParser(pp_warpx, "gamma_boost", gamma_boost); + utils::parser::queryWithParser(pp_warpx, "gamma_boost", gamma_boost); if( gamma_boost > 1. 
) { beta_boost = std::sqrt(1._rt-1._rt/std::pow(gamma_boost,2._rt)); std::string s; @@ -155,19 +158,25 @@ void ConvertLabParamsToBoost() ParmParse pp_amr("amr"); ParmParse pp_slice("slice"); - getArrWithParser(pp_geometry, "prob_lo", prob_lo, 0, AMREX_SPACEDIM); - getArrWithParser(pp_geometry, "prob_hi", prob_hi, 0, AMREX_SPACEDIM); + utils::parser::getArrWithParser( + pp_geometry, "prob_lo", prob_lo, 0, AMREX_SPACEDIM); + utils::parser::getArrWithParser( + pp_geometry, "prob_hi", prob_hi, 0, AMREX_SPACEDIM); - queryArrWithParser(pp_slice, "dom_lo", slice_lo, 0, AMREX_SPACEDIM); + utils::parser::queryArrWithParser( + pp_slice, "dom_lo", slice_lo, 0, AMREX_SPACEDIM); AMREX_ALWAYS_ASSERT(slice_lo.size() == AMREX_SPACEDIM); - queryArrWithParser(pp_slice, "dom_hi", slice_hi, 0, AMREX_SPACEDIM); + utils::parser::queryArrWithParser( + pp_slice, "dom_hi", slice_hi, 0, AMREX_SPACEDIM); AMREX_ALWAYS_ASSERT(slice_hi.size() == AMREX_SPACEDIM); pp_amr.query("max_level", max_level); if (max_level > 0){ - getArrWithParser(pp_warpx, "fine_tag_lo", fine_tag_lo); - getArrWithParser(pp_warpx, "fine_tag_hi", fine_tag_hi); + utils::parser::getArrWithParser( + pp_warpx, "fine_tag_lo", fine_tag_lo); + utils::parser::getArrWithParser( + pp_warpx, "fine_tag_hi", fine_tag_hi); } @@ -264,143 +273,6 @@ namespace WarpXUtilIO{ } } -void Store_parserString(const amrex::ParmParse& pp, std::string query_string, - std::string& stored_string) -{ - std::vector f; - pp.getarr(query_string.c_str(), f); - stored_string.clear(); - for (auto const& s : f) { - stored_string += s; - } - f.clear(); -} - -namespace WarpXUtilSafeCast { - template< typename int_type > - AMREX_FORCE_INLINE - int_type safeCastTo(const amrex::Real x, const std::string& real_name) { - int_type result = int_type(0); - bool error_detected = false; - std::string assert_msg; - // (2.0*(numeric_limits::max()/2+1)) converts numeric_limits::max()+1 to a real ensuring accuracy to all digits - // This accepts x = 2**31-1 but rejects 2**31. - using namespace amrex::literals; - constexpr amrex::Real max_range = (2.0_rt*static_cast(std::numeric_limits::max()/2+1)); - if (x < max_range) { - if (std::ceil(x) >= std::numeric_limits::min()) { - result = static_cast(x); - } else { - error_detected = true; - assert_msg = "Negative overflow detected when casting " + real_name + " = " + - std::to_string(x) + " to integer type"; - } - } else if (x > 0) { - error_detected = true; - assert_msg = "Overflow detected when casting " + real_name + " = " + std::to_string(x) + " to integer type"; - } else { - error_detected = true; - assert_msg = "NaN detected when casting " + real_name + " to integer type"; - } - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!error_detected, assert_msg); - return result; - } -} - -int safeCastToInt(const amrex::Real x, const std::string& real_name) { - return WarpXUtilSafeCast::safeCastTo (x, real_name); -} - -long safeCastToLong(const amrex::Real x, const std::string& real_name) { - return WarpXUtilSafeCast::safeCastTo (x, real_name); -} - -Parser makeParser (std::string const& parse_function, amrex::Vector const& varnames) -{ - // Since queryWithParser recursively calls this routine, keep track of symbols - // in case an infinite recursion is found (a symbol's value depending on itself). 
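The safe-cast helpers removed above guard every conversion from a parsed floating-point value to an integer type; the call to safeCastToInt inside the new utils::parser::parseStringtoInt suggests they are re-declared alongside the other parser utilities. Their behaviour, sketched for reference (values and the parameter name are illustrative):

    // Behaviour sketch only, not part of the patch: the value must fit the
    // target integer range, otherwise the run aborts with a message that
    // names the offending input parameter.
    //   safeCastToInt(std::round(3.7), "nsteps");  // -> 4 (callers round first)
    //   safeCastToInt( 1.0e12, "nsteps");          // aborts: overflow for 32-bit int
    //   safeCastToInt(-1.0e12, "nsteps");          // aborts: negative overflow
    //   safeCastToLong(1.0e12, "nsteps");          // fine where long is 64 bits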
- static std::set recursive_symbols; - - Parser parser(parse_function); - parser.registerVariables(varnames); - - std::set symbols = parser.symbols(); - for (auto const& v : varnames) symbols.erase(v.c_str()); - - // User can provide inputs under this name, through which expressions - // can be provided for arbitrary variables. PICMI inputs are aware of - // this convention and use the same prefix as well. This potentially - // includes variable names that match physical or mathematical - // constants, in case the user wishes to enforce a different - // system of units or some form of quasi-physical behavior in the - // simulation. Thus, this needs to override any built-in - // constants. - ParmParse pp_my_constants("my_constants"); - - // Physical / Numerical Constants available to parsed expressions - static std::map warpx_constants = - { - {"clight", PhysConst::c}, - {"epsilon0", PhysConst::ep0}, - {"mu0", PhysConst::mu0}, - {"q_e", PhysConst::q_e}, - {"m_e", PhysConst::m_e}, - {"m_p", PhysConst::m_p}, - {"m_u", PhysConst::m_u}, - {"kb", PhysConst::kb}, - {"pi", MathConst::pi}, - }; - - for (auto it = symbols.begin(); it != symbols.end(); ) { - // Always parsing in double precision avoids potential overflows that may occur when parsing - // user's expressions because of the limited range of exponentials in single precision - double v; - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - recursive_symbols.count(*it)==0, - "Expressions contains recursive symbol "+*it); - recursive_symbols.insert(*it); - const bool is_input = queryWithParser(pp_my_constants, it->c_str(), v); - recursive_symbols.erase(*it); - - if (is_input) { - parser.setConstant(*it, v); - it = symbols.erase(it); - continue; - } - - auto constant = warpx_constants.find(*it); - if (constant != warpx_constants.end()) { - parser.setConstant(*it, constant->second); - it = symbols.erase(it); - continue; - } - - ++it; - } - for (auto const& s : symbols) { - amrex::Abort(Utils::TextMsg::Err("makeParser::Unknown symbol "+s)); - } - return parser; -} - -double -parseStringtoReal(std::string str) -{ - auto parser = makeParser(str, {}); - auto exe = parser.compileHost<0>(); - double result = exe(); - return result; -} - -int -parseStringtoInt(std::string str, std::string name) -{ - auto const rval = static_cast(parseStringtoReal(str)); - int ival = safeCastToInt(std::round(rval), name); - return ival; -} - void CheckDims () { // Ensure that geometry.dims is set properly. 
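The free functions removed in the hunk above now live in the utils::parser namespace (ParserUtils.cpp, shown earlier in this patch), with parseStringtoReal renamed to parseStringtoDouble. A minimal sketch of the new call sites, assuming the declarations in Utils/Parser/ParserUtils.H; the expression strings are illustrative:

    // Illustrative only; mirrors the utils::parser helpers added by this patch.
    #include "Utils/Parser/ParserUtils.H"

    double parser_example ()
    {
        // Built-in constants such as pi and clight are resolved automatically,
        // unless the user overrides them through my_constants in the inputs file.
        const double k0 = utils::parser::parseStringtoDouble("2*pi/800.e-9");
        const int    n  = utils::parser::parseStringtoInt("4*16", "n_cells");

        // Expressions with runtime variables go through makeParser and a compiled executor.
        auto parser = utils::parser::makeParser("x + clight*t", {"x", "t"});
        auto f = parser.compileHost<2>();

        return k0 * n * static_cast<double>(f(1.0, 0.0));
    }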
@@ -592,61 +464,6 @@ void ReadBCParams () pp_geometry.addarr("is_periodic", geom_periodicity); } -namespace WarpXUtilStr -{ - bool is_in(const std::vector& vect, - const std::string& elem) - { - return (std::find(vect.begin(), vect.end(), elem) != vect.end()); - } - - bool is_in(const std::vector& vect, - const std::vector& elems) - { - return std::any_of(elems.begin(), elems.end(), - [&](const auto elem){return is_in(vect, elem);}); - } - - std::vector automatic_text_wrap( - const std::string& text, const int max_line_length){ - - auto ss_text = std::stringstream{text}; - auto wrapped_text_lines = std::vector{}; - - std::string line; - while(std::getline(ss_text, line,'\n')){ - - auto ss_line = std::stringstream{line}; - int counter = 0; - std::stringstream ss_line_out; - std::string word; - - while (ss_line >> word){ - const auto wlen = static_cast(word.length()); - - if(counter == 0){ - ss_line_out << word; - counter += wlen; - } - else{ - if (counter + wlen < max_line_length){ - ss_line_out << " " << word; - counter += (wlen+1); - } - else{ - wrapped_text_lines.push_back(ss_line_out.str()); - ss_line_out = std::stringstream{word}; - counter = wlen; - } - } - } - - wrapped_text_lines.push_back(ss_line_out.str()); - } - - return wrapped_text_lines; - } -} namespace WarpXUtilLoadBalance { diff --git a/Source/WarpX.H b/Source/WarpX.H index d4acf916e39..86f7b57c5a3 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -35,7 +35,7 @@ #include "FieldSolver/ElectrostaticSolver.H" #include "Filter/BilinearFilter.H" #include "Parallelization/GuardCellManager.H" -#include "Utils/IntervalsParser.H" +#include "Utils/Parser/IntervalsParser.H" #include "Utils/WarpXAlgorithmSelection.H" #include @@ -306,7 +306,7 @@ public: static bool do_dynamic_scheduling; static bool refine_plasma; - static IntervalsParser sort_intervals; + static utils::parser::IntervalsParser sort_intervals; static amrex::IntVect sort_bin_size; static bool do_subcycling; @@ -530,7 +530,10 @@ public: /** \brief returns the load balance interval */ - IntervalsParser get_load_balance_intervals () const {return load_balance_intervals;} + utils::parser::IntervalsParser get_load_balance_intervals () const + { + return load_balance_intervals; + } /** * \brief Private function for spectral solver @@ -1270,7 +1273,7 @@ private: // Load balancing /** Load balancing intervals that reads the "load_balance_intervals" string int the input file * for getting steps at which load balancing is performed */ - IntervalsParser load_balance_intervals; + utils::parser::IntervalsParser load_balance_intervals; /** Collection of LayoutData to keep track of weights used in load balancing * routines. 
Contains timer-based or heuristic-based costs depending on input option */ amrex::Vector > > costs; @@ -1303,7 +1306,7 @@ private: amrex::Real costs_heuristic_particles_wt = amrex::Real(0); // Determines timesteps for override sync - IntervalsParser override_sync_intervals; + utils::parser::IntervalsParser override_sync_intervals; // Other runtime parameters int verbose = 1; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index aa6301a6e4b..7f697f66810 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -166,7 +166,7 @@ bool WarpX::refine_plasma = false; int WarpX::num_mirrors = 0; -IntervalsParser WarpX::sort_intervals; +utils::parser::IntervalsParser WarpX::sort_intervals; amrex::IntVect WarpX::sort_bin_size(AMREX_D_DECL(1,1,1)); bool WarpX::do_back_transformed_diagnostics = false; @@ -454,8 +454,8 @@ WarpX::ReadParameters () { ParmParse pp;// Traditionally, max_step and stop_time do not have prefix. - queryWithParser(pp, "max_step", max_step); - queryWithParser(pp, "stop_time", stop_time); + utils::parser::queryWithParser(pp, "max_step", max_step); + utils::parser::queryWithParser(pp, "stop_time", stop_time); pp.query("authors", authors); } @@ -500,7 +500,9 @@ WarpX::ReadParameters () } std::vector numprocs_in; - queryArrWithParser(pp_warpx, "numprocs", numprocs_in, 0, AMREX_SPACEDIM); + utils::parser::queryArrWithParser( + pp_warpx, "numprocs", numprocs_in, 0, AMREX_SPACEDIM); + if (not numprocs_in.empty()) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE (numprocs_in.size() == AMREX_SPACEDIM, @@ -578,20 +580,22 @@ WarpX::ReadParameters () } } - queryWithParser(pp_warpx, "cfl", cfl); + utils::parser::queryWithParser(pp_warpx, "cfl", cfl); pp_warpx.query("verbose", verbose); - queryWithParser(pp_warpx, "regrid_int", regrid_int); + utils::parser::queryWithParser(pp_warpx, "regrid_int", regrid_int); pp_warpx.query("do_subcycling", do_subcycling); pp_warpx.query("do_multi_J", do_multi_J); if (do_multi_J) { - getWithParser(pp_warpx, "do_multi_J_n_depositions", do_multi_J_n_depositions); + utils::parser::getWithParser( + pp_warpx, "do_multi_J_n_depositions", do_multi_J_n_depositions); } pp_warpx.query("use_hybrid_QED", use_hybrid_QED); pp_warpx.query("safe_guard_cells", safe_guard_cells); std::vector override_sync_intervals_string_vec = {"1"}; pp_warpx.queryarr("override_sync_intervals", override_sync_intervals_string_vec); - override_sync_intervals = IntervalsParser(override_sync_intervals_string_vec); + override_sync_intervals = + utils::parser::IntervalsParser(override_sync_intervals_string_vec); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(do_subcycling != 1 || max_level <= 1, "Subcycling method 1 only works for 2 levels."); @@ -602,15 +606,17 @@ WarpX::ReadParameters () // queryWithParser returns 1 if argument zmax_plasma_to_compute_max_step is // specified by the user, 0 otherwise. 
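That comment describes the pattern used for every optional input in this file: queryWithParser returns 0 and leaves the variable untouched when the key is absent, whereas getWithParser aborts if the key is missing; both accept math expressions in the inputs file. A hedged sketch of the call forms (parameter names and defaults are illustrative):

    // Illustrative only; mirrors the utils::parser calls introduced in this diff.
    #include "Utils/Parser/ParserUtils.H"

    #include <AMReX_ParmParse.H>
    #include <AMReX_REAL.H>

    #include <vector>

    void read_example_inputs ()
    {
        amrex::ParmParse pp_warpx("warpx");

        // Optional scalar: keeps its default when warpx.cfl is not in the inputs.
        amrex::Real cfl = amrex::Real(0.999);
        const int cfl_specified = utils::parser::queryWithParser(pp_warpx, "cfl", cfl);

        // Required scalar: aborts when warpx.moving_window_v is missing; the
        // value may be an expression such as "1./3." or "2*pi/10".
        amrex::Real moving_window_v = amrex::Real(0.);
        utils::parser::getWithParser(pp_warpx, "moving_window_v", moving_window_v);

        // Optional array, reading exactly AMREX_SPACEDIM entries.
        std::vector<int> numprocs;
        utils::parser::queryArrWithParser(pp_warpx, "numprocs", numprocs, 0, AMREX_SPACEDIM);

        // (a real caller would store these on the WarpX object, as in the hunks below)
        static_cast<void>(cfl_specified);
    }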
- do_compute_max_step_from_zmax = - queryWithParser(pp_warpx, "zmax_plasma_to_compute_max_step", - zmax_plasma_to_compute_max_step); + do_compute_max_step_from_zmax = utils::parser::queryWithParser( + pp_warpx, "zmax_plasma_to_compute_max_step", + zmax_plasma_to_compute_max_step); pp_warpx.query("do_moving_window", do_moving_window); if (do_moving_window) { - queryWithParser(pp_warpx, "start_moving_window_step", start_moving_window_step); - queryWithParser(pp_warpx, "end_moving_window_step", end_moving_window_step); + utils::parser::queryWithParser( + pp_warpx, "start_moving_window_step", start_moving_window_step); + utils::parser::queryWithParser( + pp_warpx, "end_moving_window_step", end_moving_window_step); std::string s; pp_warpx.get("moving_window_dir", s); if (s == "x" || s == "X") { @@ -633,7 +639,8 @@ WarpX::ReadParameters () moving_window_x = geom[0].ProbLo(moving_window_dir); - getWithParser(pp_warpx, "moving_window_v", moving_window_v); + utils::parser::getWithParser( + pp_warpx, "moving_window_v", moving_window_v); moving_window_v *= PhysConst::c; } @@ -650,20 +657,25 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (s == "z" || s == "Z"), "The boosted frame diagnostic currently only works if the boost is in the z direction."); - queryWithParser(pp_warpx, "num_snapshots_lab", num_snapshots_lab); + utils::parser::queryWithParser( + pp_warpx, "num_snapshots_lab", num_snapshots_lab); // Read either dz_snapshots_lab or dt_snapshots_lab Real dz_snapshots_lab = 0; - bool snapshot_interval_is_specified = queryWithParser(pp_warpx, "dt_snapshots_lab", dt_snapshots_lab); - if ( queryWithParser(pp_warpx, "dz_snapshots_lab", dz_snapshots_lab) ){ - dt_snapshots_lab = dz_snapshots_lab/PhysConst::c; - snapshot_interval_is_specified = true; - } + const bool dt_snapshots_specified = + utils::parser::queryWithParser(pp_warpx, "dt_snapshots_lab", dt_snapshots_lab); + const bool dz_snapshots_specified = + utils::parser::queryWithParser(pp_warpx, "dz_snapshots_lab", dz_snapshots_lab); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - snapshot_interval_is_specified, + dt_snapshots_specified || dz_snapshots_specified, "When using back-transformed diagnostics, user should specify either dz_snapshots_lab or dt_snapshots_lab."); - getWithParser(pp_warpx, "gamma_boost", gamma_boost); + if (dz_snapshots_specified){ + dt_snapshots_lab = dz_snapshots_lab/PhysConst::c; + } + + utils::parser::getWithParser(pp_warpx, "gamma_boost", gamma_boost); pp_warpx.query("do_back_transformed_fields", do_back_transformed_fields); @@ -685,9 +697,12 @@ WarpX::ReadParameters () if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) { // Note that with the relativistic version, these parameters would be // input for each species. 
- queryWithParser(pp_warpx, "self_fields_required_precision", self_fields_required_precision); - queryWithParser(pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); - queryWithParser(pp_warpx, "self_fields_max_iters", self_fields_max_iters); + utils::parser::queryWithParser( + pp_warpx, "self_fields_required_precision", self_fields_required_precision); + utils::parser::queryWithParser( + pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); + utils::parser::queryWithParser( + pp_warpx, "self_fields_max_iters", self_fields_max_iters); pp_warpx.query("self_fields_verbosity", self_fields_verbosity); } // Parse the input file for domain boundary potentials @@ -701,7 +716,7 @@ WarpX::ReadParameters () pp_warpx.query("eb_potential(x,y,z,t)", m_poisson_boundary_handler.potential_eb_str); m_poisson_boundary_handler.buildParsers(); - queryWithParser(pp_warpx, "const_dt", const_dt); + utils::parser::queryWithParser(pp_warpx, "const_dt", const_dt); // Filter currently not working with FDTD solver in RZ geometry: turn OFF by default // (see https://github.com/ECP-WarpX/WarpX/issues/1943) @@ -714,7 +729,8 @@ WarpX::ReadParameters () pp_warpx.query("use_filter", use_filter); pp_warpx.query("use_filter_compensation", use_filter_compensation); Vector parse_filter_npass_each_dir(AMREX_SPACEDIM,1); - queryArrWithParser(pp_warpx, "filter_npass_each_dir", parse_filter_npass_each_dir, 0, AMREX_SPACEDIM); + utils::parser::queryArrWithParser( + pp_warpx, "filter_npass_each_dir", parse_filter_npass_each_dir, 0, AMREX_SPACEDIM); filter_npass_each_dir[0] = parse_filter_npass_each_dir[0]; #if (AMREX_SPACEDIM >= 2) filter_npass_each_dir[1] = parse_filter_npass_each_dir[1]; @@ -740,14 +756,18 @@ WarpX::ReadParameters () } #endif - queryWithParser(pp_warpx, "num_mirrors", num_mirrors); + utils::parser::queryWithParser( + pp_warpx, "num_mirrors", num_mirrors); if (num_mirrors>0){ mirror_z.resize(num_mirrors); - getArrWithParser(pp_warpx, "mirror_z", mirror_z, 0, num_mirrors); + utils::parser::getArrWithParser( + pp_warpx, "mirror_z", mirror_z, 0, num_mirrors); mirror_z_width.resize(num_mirrors); - getArrWithParser(pp_warpx, "mirror_z_width", mirror_z_width, 0, num_mirrors); + utils::parser::getArrWithParser( + pp_warpx, "mirror_z_width", mirror_z_width, 0, num_mirrors); mirror_z_npoints.resize(num_mirrors); - getArrWithParser(pp_warpx, "mirror_z_npoints", mirror_z_npoints, 0, num_mirrors); + utils::parser::getArrWithParser( + pp_warpx, "mirror_z_npoints", mirror_z_npoints, 0, num_mirrors); } pp_warpx.query("do_single_precision_comms", do_single_precision_comms); @@ -765,11 +785,14 @@ WarpX::ReadParameters () pp_warpx.query("refine_plasma", refine_plasma); pp_warpx.query("do_dive_cleaning", do_dive_cleaning); pp_warpx.query("do_divb_cleaning", do_divb_cleaning); - queryWithParser(pp_warpx, "n_field_gather_buffer", n_field_gather_buffer); - queryWithParser(pp_warpx, "n_current_deposition_buffer", n_current_deposition_buffer); + utils::parser::queryWithParser( + pp_warpx, "n_field_gather_buffer", n_field_gather_buffer); + utils::parser::queryWithParser( + pp_warpx, "n_current_deposition_buffer", n_current_deposition_buffer); amrex::Real quantum_xi_tmp; - int quantum_xi_is_specified = queryWithParser(pp_warpx, "quantum_xi", quantum_xi_tmp); + const auto quantum_xi_is_specified = + utils::parser::queryWithParser(pp_warpx, "quantum_xi", quantum_xi_tmp); if (quantum_xi_is_specified) { double const quantum_xi = quantum_xi_tmp; quantum_xi_c2 = static_cast(quantum_xi * PhysConst::c 
* PhysConst::c); @@ -796,15 +819,15 @@ WarpX::ReadParameters () } } - queryWithParser(pp_warpx, "pml_ncell", pml_ncell); - queryWithParser(pp_warpx, "pml_delta", pml_delta); + utils::parser::queryWithParser(pp_warpx, "pml_ncell", pml_ncell); + utils::parser::queryWithParser(pp_warpx, "pml_delta", pml_delta); pp_warpx.query("pml_has_particles", pml_has_particles); pp_warpx.query("do_pml_j_damping", do_pml_j_damping); pp_warpx.query("do_pml_in_domain", do_pml_in_domain); pp_warpx.query("do_similar_dm_pml", do_similar_dm_pml); // Read `v_particle_pml` in units of the speed of light v_particle_pml = 1._rt; - queryWithParser(pp_warpx, "v_particle_pml", v_particle_pml); + utils::parser::queryWithParser(pp_warpx, "v_particle_pml", v_particle_pml); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(0._rt < v_particle_pml && v_particle_pml <= 1._rt, "Input value for the velocity warpx.v_particle_pml of the macroparticle must be in (0,1] (in units of c)."); // Scale by the speed of light @@ -885,19 +908,19 @@ WarpX::ReadParameters () ParmParse pp_vismf("vismf"); pp_vismf.add("usesingleread", use_single_read); pp_vismf.add("usesinglewrite", use_single_write); - queryWithParser(pp_warpx, "mffile_nstreams", mffile_nstreams); + utils::parser::queryWithParser(pp_warpx, "mffile_nstreams", mffile_nstreams); VisMF::SetMFFileInStreams(mffile_nstreams); - queryWithParser(pp_warpx, "field_io_nfiles", field_io_nfiles); + utils::parser::queryWithParser(pp_warpx, "field_io_nfiles", field_io_nfiles); VisMF::SetNOutFiles(field_io_nfiles); - queryWithParser(pp_warpx, "particle_io_nfiles", particle_io_nfiles); + utils::parser::queryWithParser(pp_warpx, "particle_io_nfiles", particle_io_nfiles); ParmParse pp_particles("particles"); pp_particles.add("particles_nfiles", particle_io_nfiles); } if (maxLevel() > 0) { Vector lo, hi; - getArrWithParser(pp_warpx, "fine_tag_lo", lo); - getArrWithParser(pp_warpx, "fine_tag_hi", hi); + utils::parser::getArrWithParser(pp_warpx, "fine_tag_lo", lo); + utils::parser::getArrWithParser(pp_warpx, "fine_tag_hi", hi); fine_tag_lo = RealVect{lo}; fine_tag_hi = RealVect{hi}; } @@ -910,7 +933,7 @@ WarpX::ReadParameters () #ifdef WARPX_DIM_RZ // Only needs to be set with WARPX_DIM_RZ, otherwise defaults to 1 - queryWithParser(pp_warpx, "n_rz_azimuthal_modes", n_rz_azimuthal_modes); + utils::parser::queryWithParser(pp_warpx, "n_rz_azimuthal_modes", n_rz_azimuthal_modes); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( n_rz_azimuthal_modes > 0, "The number of azimuthal modes (n_rz_azimuthal_modes) must be at least 1"); #endif @@ -996,14 +1019,17 @@ WarpX::ReadParameters () // Load balancing parameters std::vector load_balance_intervals_string_vec = {"0"}; pp_algo.queryarr("load_balance_intervals", load_balance_intervals_string_vec); - load_balance_intervals = IntervalsParser(load_balance_intervals_string_vec); + load_balance_intervals = utils::parser::IntervalsParser( + load_balance_intervals_string_vec); pp_algo.query("load_balance_with_sfc", load_balance_with_sfc); pp_algo.query("load_balance_knapsack_factor", load_balance_knapsack_factor); - queryWithParser(pp_algo, "load_balance_efficiency_ratio_threshold", + utils::parser::queryWithParser(pp_algo, "load_balance_efficiency_ratio_threshold", load_balance_efficiency_ratio_threshold); load_balance_costs_update_algo = GetAlgorithmInteger(pp_algo, "load_balance_costs_update"); - queryWithParser(pp_algo, "costs_heuristic_cells_wt", costs_heuristic_cells_wt); - queryWithParser(pp_algo, "costs_heuristic_particles_wt", costs_heuristic_particles_wt); + 
utils::parser::queryWithParser( + pp_algo, "costs_heuristic_cells_wt", costs_heuristic_cells_wt); + utils::parser::queryWithParser( + pp_algo, "costs_heuristic_particles_wt", costs_heuristic_particles_wt); // Parse algo.particle_shape and check that input is acceptable // (do this only if there is at least one particle or laser species) @@ -1018,7 +1044,7 @@ WarpX::ReadParameters () std::vector sort_intervals_string_vec = {"-1"}; int particle_shape; if (!species_names.empty() || !lasers_names.empty()) { - if (queryWithParser(pp_algo, "particle_shape", particle_shape)){ + if (utils::parser::queryWithParser(pp_algo, "particle_shape", particle_shape)){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (particle_shape >= 1) && (particle_shape <=3), @@ -1053,11 +1079,13 @@ WarpX::ReadParameters () amrex::ParmParse pp_warpx("warpx"); pp_warpx.queryarr("sort_intervals", sort_intervals_string_vec); - sort_intervals = IntervalsParser(sort_intervals_string_vec); + sort_intervals = utils::parser::IntervalsParser(sort_intervals_string_vec); Vector vect_sort_bin_size(AMREX_SPACEDIM,1); - bool sort_bin_size_is_specified = queryArrWithParser(pp_warpx, "sort_bin_size", - vect_sort_bin_size, 0, AMREX_SPACEDIM); + bool sort_bin_size_is_specified = + utils::parser::queryArrWithParser( + pp_warpx, "sort_bin_size", + vect_sort_bin_size, 0, AMREX_SPACEDIM); if (sort_bin_size_is_specified){ for (int i=0; i slice_lo(AMREX_SPACEDIM); - amrex::Vector slice_hi(AMREX_SPACEDIM); - Vector slice_crse_ratio(AMREX_SPACEDIM); - // set default slice_crse_ratio // - for (int idim=0; idim < AMREX_SPACEDIM; ++idim ) - { - slice_crse_ratio[idim] = 1; - } - queryArrWithParser(pp_slice, "dom_lo", slice_lo, 0, AMREX_SPACEDIM); - queryArrWithParser(pp_slice, "dom_hi", slice_hi, 0, AMREX_SPACEDIM); - queryArrWithParser(pp_slice, "coarsening_ratio",slice_crse_ratio,0,AMREX_SPACEDIM); - queryWithParser(pp_slice, "plot_int",slice_plot_int); - slice_realbox.setLo(slice_lo); - slice_realbox.setHi(slice_hi); - slice_cr_ratio = IntVect(AMREX_D_DECL(1,1,1)); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) - { - if (slice_crse_ratio[idim] > 1 ) { - slice_cr_ratio[idim] = slice_crse_ratio[idim]; - } - } - - if (do_back_transformed_diagnostics) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(gamma_boost > 1.0, - "gamma_boost must be > 1 to use the boost frame diagnostic"); - queryWithParser(pp_slice, "num_slice_snapshots_lab", num_slice_snapshots_lab); - if (num_slice_snapshots_lab > 0) { - getWithParser(pp_slice, "dt_slice_snapshots_lab", dt_slice_snapshots_lab ); - getWithParser(pp_slice, "particle_slice_width_lab",particle_slice_width_lab); - } - } + ParmParse pp_slice("slice"); + amrex::Vector slice_lo(AMREX_SPACEDIM); + amrex::Vector slice_hi(AMREX_SPACEDIM); + Vector slice_crse_ratio(AMREX_SPACEDIM); + // set default slice_crse_ratio // + for (int idim=0; idim < AMREX_SPACEDIM; ++idim ) + { + slice_crse_ratio[idim] = 1; + } + utils::parser::queryArrWithParser( + pp_slice, "dom_lo", slice_lo, 0, AMREX_SPACEDIM); + utils::parser::queryArrWithParser( + pp_slice, "dom_hi", slice_hi, 0, AMREX_SPACEDIM); + utils::parser::queryArrWithParser( + pp_slice, "coarsening_ratio",slice_crse_ratio,0,AMREX_SPACEDIM); + utils::parser::queryWithParser( + pp_slice, "plot_int",slice_plot_int); + slice_realbox.setLo(slice_lo); + slice_realbox.setHi(slice_hi); + slice_cr_ratio = IntVect(AMREX_D_DECL(1,1,1)); + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) + { + if (slice_crse_ratio[idim] > 1 ) { + slice_cr_ratio[idim] = slice_crse_ratio[idim]; + } + } + if 
(do_back_transformed_diagnostics) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(gamma_boost > 1.0, + "gamma_boost must be > 1 to use the boost frame diagnostic"); + utils::parser::queryWithParser( + pp_slice, "num_slice_snapshots_lab", num_slice_snapshots_lab); + if (num_slice_snapshots_lab > 0) { + utils::parser::getWithParser( + pp_slice, "dt_slice_snapshots_lab", dt_slice_snapshots_lab ); + utils::parser::getWithParser( + pp_slice, "particle_slice_width_lab",particle_slice_width_lab); + } + } } } From 23fa23209879cbdf5ef829530def162c2b343c72 Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Mon, 10 Oct 2022 13:50:56 -0700 Subject: [PATCH 0103/1346] Add functions for reading particle id and cpu numbers correctly (#3457) --- .../Tests/restart/PICMI_inputs_id_cpu_read.py | 151 ++++++++++++++++++ Python/pywarpx/_libwarpx.py | 33 ++-- Regression/WarpX-tests.ini | 19 +++ Source/Python/WarpXWrappers.H | 4 + Source/Python/WarpXWrappers.cpp | 36 +++++ 5 files changed, 234 insertions(+), 9 deletions(-) create mode 100755 Examples/Tests/restart/PICMI_inputs_id_cpu_read.py diff --git a/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py b/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py new file mode 100755 index 00000000000..495f7426b3a --- /dev/null +++ b/Examples/Tests/restart/PICMI_inputs_id_cpu_read.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +# +# This is a script that makes sure particle ids and cpus can be read correctly +# + +import sys + +import numpy as np + +from pywarpx import callbacks, picmi + +########################## +# physics parameters +########################## + +dt = 7.5e-10 + +########################## +# numerics parameters +########################## + +max_steps = 10 + +nx = 64 +ny = 64 + +xmin = 0 +xmax = 0.03 +ymin = 0 +ymax = 0.03 + +########################## +# numerics components +########################## + +grid = picmi.Cartesian2DGrid( + number_of_cells = [nx, ny], + lower_bound = [xmin, ymin], + upper_bound = [xmax, ymax], + lower_boundary_conditions = ['dirichlet', 'periodic'], + upper_boundary_conditions = ['dirichlet', 'periodic'], + lower_boundary_conditions_particles = ['absorbing', 'periodic'], + upper_boundary_conditions_particles = ['absorbing', 'periodic'], + moving_window_velocity = None, + warpx_max_grid_size = 32 +) + +solver = picmi.ElectrostaticSolver( + grid=grid, method='Multigrid', required_precision=1e-6, + warpx_self_fields_verbosity=0 +) + +########################## +# physics components +########################## + +electrons = picmi.Species( + particle_type='electron', name='electrons' +) + +########################## +# diagnostics +########################## + +field_diag = picmi.FieldDiagnostic( + name = 'diag1', + grid = grid, + period = 10, + data_list = ['phi'], + write_dir = '.', + warpx_file_prefix = f'Python_restart_runtime_components_plt' +) + +checkpoint = picmi.Checkpoint( + name = 'chkpoint', + period = 5, + write_dir = '.', + warpx_file_min_digits = 5, + warpx_file_prefix = f'Python_restart_runtime_components_chk' +) + +########################## +# simulation setup +########################## + +sim = picmi.Simulation( + solver = solver, + time_step_size = dt, + max_steps = max_steps, + verbose = 1 +) + +sim.add_species( + electrons, + layout = picmi.GriddedLayout( + n_macroparticle_per_cell=[0, 0], grid=grid + ) +) + +for arg in sys.argv: + if arg.startswith("amr.restart"): + restart_file_name = arg.split("=")[1] + sim.amr_restart = restart_file_name + sys.argv.remove(arg) + +sim.add_diagnostic(field_diag) 
+sim.add_diagnostic(checkpoint) +sim.initialize_inputs() +sim.initialize_warpx() + +########################## +# python particle data access +########################## + +# set numpy random seed so that the particle properties generated +# below will be reproducible from run to run +np.random.seed(30025025) + +sim.extension.add_real_comp('electrons', 'newPid') + +def add_particles(): + + nps = 10 + x = np.linspace(0.005, 0.025, nps) + y = np.zeros(nps) + z = np.linspace(0.005, 0.025, nps) + ux = np.random.normal(loc=0, scale=1e3, size=nps) + uy = np.random.normal(loc=0, scale=1e3, size=nps) + uz = np.random.normal(loc=0, scale=1e3, size=nps) + w = np.ones(nps) * 2.0 + newPid = 5.0 + + sim.extension.add_particles( + species_name='electrons', x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, + w=w, newPid=newPid + ) + +callbacks.installbeforestep(add_particles) + +########################## +# simulation run +########################## + +step_number = sim.extension.getistep(0) +sim.step(max_steps) + +############################################### +# check that the ids and cpus are read properly +############################################### +assert(np.sum(np.concatenate(sim.extension.get_particle_id('electrons'))) == 5050) +assert(np.sum(np.concatenate(sim.extension.get_particle_cpu('electrons'))) == 0) diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index d8f7f077f7c..44426d624b3 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -143,13 +143,13 @@ def load_library(self): # our particle data type, depends on _ParticleReal_size _p_struct = ( [(d, self._numpy_particlereal_dtype) for d in 'xyz'[:self.dim]] - + [('id', 'i4'), ('cpu', 'i4')] + + [('idcpu', 'u8')] ) self._p_dtype = np.dtype(_p_struct, align=True) _numpy_to_ctypes = {} _numpy_to_ctypes[self._numpy_particlereal_dtype] = c_particlereal - _numpy_to_ctypes['i4'] = ctypes.c_int + _numpy_to_ctypes['u8'] = ctypes.c_uint64 class Particle(ctypes.Structure): _fields_ = [(v[0], _numpy_to_ctypes[v[1]]) for v in _p_struct] @@ -271,7 +271,12 @@ class Particle(ctypes.Structure): ctypes.c_int, _ndpointer(ctypes.c_int, flags="C_CONTIGUOUS"), ctypes.c_int) - + self.libwarpx_so.warpx_convert_id_to_long.argtypes = (_ndpointer(ctypes.c_int64, flags="C_CONTIGUOUS"), + _ndpointer(Particle, flags="C_CONTIGUOUS"), + ctypes.c_int) + self.libwarpx_so.warpx_convert_cpu_to_int.argtypes = (_ndpointer(ctypes.c_int32, flags="C_CONTIGUOUS"), + _ndpointer(Particle, flags="C_CONTIGUOUS"), + ctypes.c_int) self.libwarpx_so.warpx_getProbLo.restype = c_real self.libwarpx_so.warpx_getProbHi.restype = c_real self.libwarpx_so.warpx_getCellSize.restype = c_real @@ -727,7 +732,7 @@ def get_particle_structs(self, species_name, level): ''' This returns a list of numpy arrays containing the particle struct data on each tile for this process. The particle data is represented as a structured - numpy array and contains the particle 'x', 'y', 'z', 'id', and 'cpu'. + numpy array and contains the particle 'x', 'y', 'z', and 'idcpu'. The data for the numpy arrays are not copied, but share the underlying memory buffer with WarpX. The numpy arrays are fully writeable. @@ -883,21 +888,31 @@ def get_particle_id(self, species_name, level=0): ''' Return a list of numpy arrays containing the particle 'id' - positions on each tile. + numbers on each tile. 
''' + ids = [] structs = self.get_particle_structs(species_name, level) - return [struct['id'] for struct in structs] + for ptile_of_structs in structs: + arr = np.empty(ptile_of_structs.shape, np.int64) + self.libwarpx_so.warpx_convert_id_to_long(arr, ptile_of_structs, arr.size) + ids.append(arr) + return ids def get_particle_cpu(self, species_name, level=0): ''' Return a list of numpy arrays containing the particle 'cpu' - positions on each tile. + numbers on each tile. ''' + cpus = [] structs = self.get_particle_structs(species_name, level) - return [struct['cpu'] for struct in structs] + for ptile_of_structs in structs: + arr = np.empty(ptile_of_structs.shape, np.int32) + self.libwarpx_so.warpx_convert_cpu_to_int(arr, ptile_of_structs, arr.size) + cpus.append(arr) + return cpus def get_particle_weight(self, species_name, level=0): ''' @@ -1048,7 +1063,7 @@ def get_particle_boundary_buffer_structs(self, species_name, boundary, level): This returns a list of numpy arrays containing the particle struct data for a species that has been scraped by a specific simulation boundary. The particle data is represented as a structured numpy array and contains the - particle 'x', 'y', 'z', 'id', and 'cpu'. + particle 'x', 'y', 'z', and 'idcpu'. The data for the numpy arrays are not copied, but share the underlying memory buffer with WarpX. The numpy arrays are fully writeable. diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index ba31ef94f5d..a276b07d103 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -985,6 +985,25 @@ doVis = 0 compareParticles = 0 particleTypes = electrons +[Python_id_cpu_read] +buildDir = . +inputFile = Examples/Tests/restart/PICMI_inputs_id_cpu_read.py +runtime_params = +customRunCmd = python3 PICMI_inputs_id_cpu_read.py +dim = 2 +addToCompileString = USE_PYTHON_MAIN=TRUE +cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_APP=OFF +target = pip_install +restartTest = 0 +useMPI = 1 +numprocs = 1 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 0 +particleTypes = electrons + [Python_restart_eb] buildDir = . 
inputFile = Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py diff --git a/Source/Python/WarpXWrappers.H b/Source/Python/WarpXWrappers.H index 7cded8a891b..9dbdb02db87 100644 --- a/Source/Python/WarpXWrappers.H +++ b/Source/Python/WarpXWrappers.H @@ -82,6 +82,10 @@ extern "C" { long warpx_getNumParticles(const char* char_species_name, const bool local); + void warpx_convert_id_to_long (amrex::Long* ids, const WarpXParticleContainer::ParticleType* pstructs, int size); + + void warpx_convert_cpu_to_int (int* cpus, const WarpXParticleContainer::ParticleType* pstructs, int size); + amrex::ParticleReal** warpx_getParticleStructs( const char* char_species_name, int lev, int* num_tiles, int** particles_per_tile); diff --git a/Source/Python/WarpXWrappers.cpp b/Source/Python/WarpXWrappers.cpp index 61909cff738..bbcc26daf0b 100644 --- a/Source/Python/WarpXWrappers.cpp +++ b/Source/Python/WarpXWrappers.cpp @@ -488,6 +488,42 @@ namespace return data; } + void warpx_convert_id_to_long (amrex::Long* ids, const WarpXParticleContainer::ParticleType* pstructs, int size) + { + amrex::Long* d_ptr = nullptr; +#ifdef AMREX_USE_GPU + amrex::Gpu::DeviceVector d_ids(size); + d_ptr = d_ids.data(); +#else + d_ptr = ids; +#endif + amrex::ParallelFor(size, [=] AMREX_GPU_DEVICE (int i) noexcept + { + d_ptr[i] = pstructs[i].id(); + }); +#ifdef AMREX_USE_GPU + amrex::Gpu::dtoh_memcpy(ids, d_ptr, size*sizeof(amrex::Long)); +#endif + } + + void warpx_convert_cpu_to_int (int* cpus, const WarpXParticleContainer::ParticleType* pstructs, int size) + { + int* d_ptr = nullptr; +#ifdef AMREX_USE_GPU + amrex::Gpu::DeviceVector d_cpus(size); + d_ptr = d_cpus.data(); +#else + d_ptr = cpus; +#endif + amrex::ParallelFor(size, [=] AMREX_GPU_DEVICE (int i) noexcept + { + d_ptr[i] = pstructs[i].cpu(); + }); +#ifdef AMREX_USE_GPU + amrex::Gpu::dtoh_memcpy(cpus, d_ptr, size*sizeof(int)); +#endif + } + int warpx_getParticleCompIndex ( const char* char_species_name, const char* char_comp_name ) { From 2bdd20c449c985ff5785c2c404061744fb100059 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 10 Oct 2022 16:03:56 -0700 Subject: [PATCH 0104/1346] AMReX: Weekly Update (#3459) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 3a14aa81b44..554d8abcd14 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 13aa4df0f5a4af40270963ad5b42ac7ce662e045 && cd - + cd amrex && git checkout --detach 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 63b9f09dcbd..bb8111fe7bf 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 13aa4df0f5a4af40270963ad5b42ac7ce662e045 +branch = 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index a276b07d103..6f0ae79de78 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 13aa4df0f5a4af40270963ad5b42ac7ce662e045 +branch = 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 55cce8ce15e..0a1ddff6575 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "13aa4df0f5a4af40270963ad5b42ac7ce662e045" +set(WarpX_amrex_branch "2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index b4e76b8a414..e92af233f88 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 13aa4df0f5a4af40270963ad5b42ac7ce662e045 && cd - +cd amrex && git checkout --detach 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 1acebbc1973dd771880715751a7d0d39bed4d8be Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 10 Oct 2022 17:25:14 -0700 Subject: [PATCH 0105/1346] Docs: Expand Debugging Workflow (#3461) Add more instructions how to debug errors seen in *Release* mode. --- Docs/source/usage/workflows/debugging.rst | 30 +++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/Docs/source/usage/workflows/debugging.rst b/Docs/source/usage/workflows/debugging.rst index ff8b018dde9..2b85523b852 100644 --- a/Docs/source/usage/workflows/debugging.rst +++ b/Docs/source/usage/workflows/debugging.rst @@ -9,6 +9,7 @@ You might also want to debug your code as you implement new features in WarpX du This section gives a step-by-step guidance on how to systematically check what might be going wrong. + Debugging Workflow ------------------ @@ -22,6 +23,9 @@ Try the following steps to debug a simulation: Do you spot numerical artifacts or instabilities that could point to missing resolution or unexpected/incompatible numerical parameters? #. Did the job output files indicate a crash? 
Check the ``Backtrace.`` files for the location of the code that triggered the crash. Backtraces are read from bottom (high-level) to top (most specific line that crashed). +#. Try to make the reproducible scenario as small as possible by modifying the inputs file. + Reduce number of cells, particles and MPI processes to something as small and as quick to execute as possible. + The next steps in debugging will increase runtime, so you will benefit from a fast reproducer. #. In case of a crash, Backtraces can be more detailed if you :ref:`re-compile ` with debug flags: for example, try compiling with ``-DCMAKE_BUILD_TYPE=RelWithDebInfo`` (some slowdown) or even ``-DCMAKE_BUILD_TYPE=Debug`` (this will make the simulation way slower) and rerun. #. If debug builds are too costly, try instead compiling with ``-DAMReX_ASSERTIONS=ON`` to activate more checks and rerun. #. If the problem looks like a memory violation, this could be from an invalid field or particle index access. @@ -38,6 +42,7 @@ Collect your above findings, describe where and what you are running and how you Can you reproduce the problem with a smaller setup (less parallelism and/or less resolution)? Report these details in a :ref:`WarpX GitHub issue `. + Debuggers --------- @@ -48,3 +53,28 @@ See the `AMReX debugger section `). +That means, code errors will likely show up as symptoms of earlier errors in the code instead of directly showing the underlying line that caused the error. + +For instance, we have `these `__ `checks `__ in release mode + +.. code-block:: + + Particles shape does not fit within tile (CPU) or guard cells (GPU) used for charge deposition + +.. code-block:: + + Particles shape does not fit within tile (CPU) or guard cells (GPU) used for current deposition + +which prevent that particles with positions that violate the local definitions of guard cells cause confusing errors in charge/current deposition. + +In such a case, as described above, rebuild and rerun in *Debug* mode before searching further for the bug. +Usually, the bug is from ``NaN`` or ``infinite`` numbers assigned to particles or fields earlier in the code or from ill-defined guard sizes. +Building in debug mode will likely move the first thrown error to an earlier location in the code, which is then closer to the underlying cause. + +Then, continue following the workflow above, adding more compilation guards and runtime flags that can trap array bound violations and invalid floating point values. From 1ceef7a8b3fd3caf7e2d8d964009d1bf2344fd93 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 11 Oct 2022 10:19:50 -0700 Subject: [PATCH 0106/1346] openPMD Engines: Allow Params w/o Type (#3460) The current implementation only accepted parameters for ADIOS2 engines if the engine type was also specified. That is not necessary, the default (bp4) will usually be taken and thus the documentation did not work. This fixes it, keeping the engine type optional in the inputs. 
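As a minimal illustration of the behavior this change enables: the options document WarpX assembles and hands to openPMD-api may now carry an ``engine`` block with ``parameters`` while omitting ``type``, in which case the default engine (bp4) is used. The sketch below builds such a document in Python; the parameter name and value are assumed examples only, and the exact surrounding keys follow the usual openPMD-api JSON layout rather than this patch verbatim.

```python
import json

# Sketch of an ADIOS2 options document with engine parameters but no engine
# type. "NumAggregators" and its value are illustrative assumptions; any valid
# ADIOS2 engine parameter would be passed through the same way.
options = {
    "adios2": {
        "engine": {
            "parameters": {
                "NumAggregators": "4"
            }
        }
    }
}

# Roughly the kind of string the C++ code below assembles for openPMD-api.
print(json.dumps(options, indent=2))
```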
--- Source/Diagnostics/WarpXOpenPMD.cpp | 30 +++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 1a31db64002..4320bc7ae1f 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -181,34 +181,44 @@ namespace detail op_block += R"END(, "parameters": { )END"; - op_block += op_parameters + "}"; + op_block += op_parameters + + "\n }"; } op_block += R"END( } ] })END"; - if (!engine_type.empty()) - op_block += ","; - + if (!engine_type.empty() || !en_parameters.empty()) + op_block += ","; } // end operator string block // add the engine string block - if (!engine_type.empty()) { + if (!engine_type.empty() || !en_parameters.empty()) + { en_block = R"END( - "engine": { + "engine": {)END"; + + // non-default engine type + if (!engine_type.empty()) { + en_block += R"END( "type": ")END"; - en_block += engine_type + "\""; + en_block += engine_type + "\""; + if(!en_parameters.empty()) + en_block += ","; + } + + // non-default engine parameters if (!en_parameters.empty()) { - en_block += R"END(, + en_block += R"END( "parameters": { )END"; - en_block += en_parameters + "}"; + en_block += en_parameters + + "\n }"; } en_block += R"END( })END"; - } // end engine string block options = top_block + op_block + en_block + end_block; From d2d2138349761c6d1598f8f7fb2093c502154ebb Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 12 Oct 2022 16:38:30 -0700 Subject: [PATCH 0107/1346] Correct particle positions outside refined injection (#3463) * Correct particle positions outside refined injection * Improve test * Update benchmark * Update Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py --- .../analysis_refined_injection.py | 13 ++++- .../benchmarks_json/RefinedInjection.json | 52 +++++++++---------- .../Particles/PhysicalParticleContainer.cpp | 15 ++++-- 3 files changed, 47 insertions(+), 33 deletions(-) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py index 88bbd422deb..a9527fa1f28 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py @@ -31,8 +31,7 @@ np = ad['electrons', 'particle_id'].size # the number of coarse particle streams -# (odd because one that exactly hits the fine patch is not refined) -n_coarse = 11 +n_coarse = 10 # the number of fine particle streams n_fine = 64 @@ -50,5 +49,15 @@ assert( np == np_expected ) +# Test uniformity of rho, by taking a slice of rho that +# crosses the edge of the refined injection region +# (but is ahead of the mesh refinement patch) +ds.force_periodicity() +ad = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +rho = ad['rho'].to_ndarray().squeeze() +rho_slice = rho[13:51, 475] +# Test uniformity up to 0.5% relative variation +assert( rho_slice.std() < 0.005*abs(rho_slice.mean()) ) + test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Regression/Checksum/benchmarks_json/RefinedInjection.json b/Regression/Checksum/benchmarks_json/RefinedInjection.json index 5645cdb81b6..7a3fb482857 100644 --- a/Regression/Checksum/benchmarks_json/RefinedInjection.json +++ b/Regression/Checksum/benchmarks_json/RefinedInjection.json @@ -8,35 +8,35 @@ "particle_weight": 
12483018148921.525 }, "electrons": { - "particle_momentum_x": 1.7019663108287842e-19, - "particle_momentum_y": 7.804841508613444e-19, - "particle_momentum_z": 3.6023951025729006e-19, - "particle_position_x": 0.13262784674591108, - "particle_position_y": 0.28121954338547195, - "particle_weight": 122071289062500.27 + "particle_momentum_x": 1.701962385014116e-19, + "particle_momentum_y": 7.804483053552035e-19, + "particle_momentum_z": 3.6023937355618695e-19, + "particle_position_x": 0.13035674941360564, + "particle_position_y": 0.2792572644186751, + "particle_weight": 119232421875000.25 }, "lev=0": { - "Bx": 24121596.196462017, - "By": 78274.29859712059, - "Bz": 601058.4634075701, - "Ex": 22866959486331.44, - "Ey": 6508550078474765.0, - "Ez": 32117037637504.49, - "jx": 1125098206038016.2, - "jy": 2.3251392468384067e+18, - "jz": 6796866375752497.0, - "rho": 156567078.90542063 + "Bx": 24121596.46539713, + "By": 78274.29859708338, + "Bz": 601059.1001251702, + "Ex": 22866961334698.695, + "Ey": 6508550266827824.0, + "Ez": 32117002621363.535, + "jx": 1125083620993366.5, + "jy": 2.325138134178515e+18, + "jz": 6796844511773390.0, + "rho": 152886703.9390533 }, "lev=1": { - "Bx": 53303287.754619256, - "By": 213834.35319447558, - "Bz": 1321619.3400202503, - "Ex": 47849984748795.125, - "Ey": 1.4551014572946544e+16, - "Ez": 65308104831571.74, - "jx": 43901994433620.09, - "jy": 9.274835473220729e+18, - "jz": 1.9263572931827268e+16, - "rho": 67240256.28814039 + "Bx": 53303287.79524416, + "By": 213834.37094616296, + "Bz": 1321619.3483672105, + "Ex": 47849992572011.23, + "Ey": 1.4551014574617832e+16, + "Ez": 65308101458272.28, + "jx": 43895764938937.25, + "jy": 9.274835323597818e+18, + "jz": 1.926356834571391e+16, + "rho": 67208930.86201674 } } \ No newline at end of file diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index fa9b0c6472e..7e04d690260 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1130,9 +1130,11 @@ PhysicalParticleContainer::AddPlasma (int lev, RealBox part_realbox) ParticleType& p = pp[ip]; p.id() = pid+ip; p.cpu() = cpuid; - - const XDim3 r = - inj_pos->getPositionUnitBox(i_part, lrrfac, engine); + const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? + // In the refined injection region: use refinement ratio `lrrfac` + inj_pos->getPositionUnitBox(i_part, lrrfac, engine) : + // Otherwise: use 1 as the refinement ratio + inj_pos->getPositionUnitBox(i_part, 1, engine); auto pos = getCellCoords(overlap_corner, dx, r, iv); #if defined(WARPX_DIM_3D) @@ -1645,8 +1647,11 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) p.cpu() = cpuid; // This assumes the inj_pos is of type InjectorPositionRandomPlane - const XDim3 r = - inj_pos->getPositionUnitBox(i_part, lrrfac, engine); + const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? + // In the refined injection region: use refinement ratio `lrrfac` + inj_pos->getPositionUnitBox(i_part, lrrfac, engine) : + // Otherwise: use 1 as the refinement ratio + inj_pos->getPositionUnitBox(i_part, 1, engine); auto pos = getCellCoords(overlap_corner, dx, r, iv); auto ppos = PDim3(pos); From 912af3a03b10360c8799593490ef9fcfe7af8270 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 13 Oct 2022 19:18:43 -0700 Subject: [PATCH 0108/1346] CI: Fix macOS OMP (#3465) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix: ``` $ cmake -S . 
-B build_dp ... -- The C compiler identification is AppleClang 13.0.0.13000029 -- The CXX compiler identification is AppleClang 13.0.0.13000029 ... CMake Error at /usr/local/Cellar/cmake/3.24.2/share/cmake/Modules/FindPackageHandleStandardArgs.cmake:230 (message): -- Configuring incomplete, errors occurred! Could NOT find OpenMP_CXX (missing: OpenMP_CXX_FLAGS OpenMP_CXX_LIB_NAMES) ``` ``` libomp 14.0.6 is already installed but outdated (so it will be upgraded). ==> Downloading https://ghcr.io/v2/homebrew/core/libomp/manifests/15.0.2 ==> Downloading https://ghcr.io/v2/homebrew/core/libomp/blobs/sha256:a4e0796616d09221e2a486c95f9aa7c12d3c617e594b1d463a8f479bd4fa45c2 ==> Downloading from https://pkg-containers.githubusercontent.com/ghcr1/blobs/sha256:a4e0796616d09221e2a486c95f9aa7c12d3c617e594b1d463a8f479bd4fa45c2?se=2022-10-13T20%3A50%3A00Z&sig=a68J671xfsvWip4TqYo5%2B2E0U5AswwCQDocHLNc6XXE%3D&sp=r&spr=https&sr=b&sv=2019-12-12 ==> Upgrading libomp 14.0.6 -> 15.0.2 ==> Pouring libomp--15.0.2.big_sur.bottle.tar.gz ==> Caveats libomp is keg-only, which means it was not symlinked into /usr/local, because it can override GCC headers and result in broken builds. For compilers to find libomp you may need to set: export LDFLAGS="-L/usr/local/opt/libomp/lib" export CPPFLAGS="-I/usr/local/opt/libomp/include" ==> Summary 🍺 /usr/local/Cellar/libomp/15.0.2: 7 files, 1.7MB ==> Running `brew cleanup libomp`... Disable this behaviour by setting HOMEBREW_NO_INSTALL_CLEANUP. Hide these hints with HOMEBREW_NO_ENV_HINTS (see `man brew`). Removing: /usr/local/Cellar/libomp/14.0.6... (7 files, 1.6MB) Removing: /Users/runner/Library/Caches/Homebrew/libomp--14.0.6... (548.8KB) ``` --- .github/workflows/macos.yml | 14 ++++++++++++++ Docs/source/install/dependencies.rst | 2 ++ 2 files changed, 16 insertions(+) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 24484e612c8..b50cfc8647e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -21,14 +21,28 @@ jobs: SETUPTOOLS_USE_DISTUTILS: stdlib steps: - uses: actions/checkout@v2 + - name: Brew Cache + uses: actions/cache@v2 + # - once stored under a key, they become immutable (even if local cache path content changes) + # - for a refresh the key has to change, e.g., hash of a tracked file in the key + with: + path: | + /usr/local + /Users/runner/Library/Caches/Homebrew + key: brew-macos-appleclang-${{ hashFiles('.github/workflows/macos.yml') }} + restore-keys: | + brew-macos-appleclang- - name: install dependencies run: | + brew --cache set +e rm -rf /usr/local/bin/2to3 + brew unlink gcc brew update brew install ccache brew install fftw brew install libomp + brew link --force libomp brew install ninja brew install open-mpi brew install pkg-config diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 553827cb240..cd1c08412d5 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -139,6 +139,8 @@ Brew (macOS/Linux) brew install git brew install hdf5-mpi # for openPMD brew install libomp + brew unlink gcc + brew link --force libomp brew install pkg-config # for fftw brew install open-mpi brew install openblas # for PSATD in RZ From dedd1a550d65156fc0d28a76eed9bb99151789f6 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 17 Oct 2022 12:10:01 -0700 Subject: [PATCH 0109/1346] Update GCC Requirement (#3470) * NVCC generates code that tends to trigger ICEs in GCC 7.x * CTAD erratum in GCC 8: https://gcc.gnu.org/projects/cxx-status.html#cxx17 
--- Docs/source/install/dependencies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index cd1c08412d5..7a8aa0bb55c 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -6,7 +6,7 @@ Dependencies WarpX depends on the following popular third party software. Please see installation instructions below. -- a mature `C++17 `__ compiler, e.g., GCC 7, Clang 7, NVCC 11.0, MSVC 19.15 or newer +- a mature `C++17 `__ compiler, e.g., GCC 8, Clang 7, NVCC 11.0, MSVC 19.15 or newer - `CMake 3.20.0+ `__ - `Git 2.18+ `__ - `AMReX `__: we automatically download and compile a copy of AMReX From 3bfe281d9d70c7f521953a8ade9d825565edbd2b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 18 Oct 2022 07:33:22 -0700 Subject: [PATCH 0110/1346] AMReX: Weekly Update (#3469) --- .github/workflows/cuda.yml | 2 +- .../ElectrostaticSphereEB.json | 8 ++--- .../ElectrostaticSphereEB_RZ.json | 4 +-- .../ElectrostaticSphereEB_RZ_MR.json | 4 +-- .../ElectrostaticSphereEB_mixedBCs.json | 8 ++--- .../benchmarks_json/embedded_circle.json | 30 +++++++++---------- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 10 files changed, 32 insertions(+), 32 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 554d8abcd14..2da4c68bfa3 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 && cd - + cd amrex && git checkout --detach 56b6402d238979fca6e7c57fdc644a54c4cf6fce && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB.json b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB.json index 830f2c5f96f..d67fa86ba6e 100644 --- a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB.json +++ b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB.json @@ -1,9 +1,9 @@ { "lev=0": { - "Ex": 277943.2629517972, - "Ey": 277943.2629517972, - "Ez": 277943.26295179717, - "phi": 56106.09774654215, + "Ex": 277943.2629548584, + "Ey": 277943.2629548584, + "Ez": 277943.2629548584, + "phi": 85529.09775065957, "rho": 0.0 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ.json b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ.json index 0599ba1868b..05e6102a8a1 100644 --- a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ.json +++ b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ.json @@ -1,6 +1,6 @@ { "lev=0": { "Ex": 8497.669853722688, - "phi": 1235.661848443515 + "phi": 2035.6618484435153 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ_MR.json b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ_MR.json index b9bf88f8701..05854b9ad41 100644 --- a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ_MR.json +++ b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_RZ_MR.json @@ -1,10 +1,10 @@ { "lev=0": { "Ex": 8497.669853733625, - "phi": 1235.6618484451053 + "phi": 2035.6618484451055 }, "lev=1": { "Ex": 19546.08010855407, - "phi": 1690.40853286864 
+ "phi": 3290.7266050322164 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_mixedBCs.json b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_mixedBCs.json index a72e2af87bb..2f0bce3566f 100644 --- a/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_mixedBCs.json +++ b/Regression/Checksum/benchmarks_json/ElectrostaticSphereEB_mixedBCs.json @@ -1,9 +1,9 @@ { "lev=0": { - "Ex": 377516.27292315144, - "Ey": 226043.3936195203, - "Ez": 135194.57566843418, - "phi": 91969.21822440247, + "Ex": 377516.27292092174, + "Ey": 226043.3936206682, + "Ez": 135194.57567110562, + "phi": 121392.21822320674, "rho": 0.0 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/embedded_circle.json b/Regression/Checksum/benchmarks_json/embedded_circle.json index 021f0918add..ace7d30f0ac 100644 --- a/Regression/Checksum/benchmarks_json/embedded_circle.json +++ b/Regression/Checksum/benchmarks_json/embedded_circle.json @@ -1,23 +1,23 @@ { "ar_ions": { - "particle_momentum_x": 2.656937628868437e-18, - "particle_momentum_y": 2.6462397149880108e-18, - "particle_momentum_z": 2.653197184723758e-18, - "particle_position_x": 3.174140583032092, - "particle_position_y": 3.174226960143397, - "particle_weight": 988047180.1757812 + "particle_momentum_x": 2.6571830401121494e-18, + "particle_momentum_y": 2.646539615146913e-18, + "particle_momentum_z": 2.653185496755454e-18, + "particle_position_x": 3.1741954177767076, + "particle_position_y": 3.1742863708239244, + "particle_weight": 988093872.0703125 }, "electrons": { - "particle_momentum_x": 3.017862149732503e-20, - "particle_momentum_y": 3.016284742627221e-20, - "particle_momentum_z": 3.026716864845998e-20, - "particle_position_x": 3.077259990879374, - "particle_position_y": 3.0760809372537063, - "particle_weight": 957635192.8710938 + "particle_momentum_x": 3.0229127320784506e-20, + "particle_momentum_y": 3.0092955344922534e-20, + "particle_momentum_z": 3.0309328048709314e-20, + "particle_position_x": 3.0780941230460233, + "particle_position_y": 3.0770045014981156, + "particle_weight": 957744140.625 }, "lev=0": { - "phi": 56891.444043840616, - "rho_ar_ions": 257.79395973399653, - "rho_electrons": 250.42393788893844 + "phi": 61982.03832581357, + "rho_ar_ions": 257.80622322208114, + "rho_electrons": 250.4644844217117 } } \ No newline at end of file diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index bb8111fe7bf..3efec7df509 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 +branch = 56b6402d238979fca6e7c57fdc644a54c4cf6fce [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 6f0ae79de78..b9edd257d16 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 +branch = 56b6402d238979fca6e7c57fdc644a54c4cf6fce [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 0a1ddff6575..2864d9cd8ac 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 
@@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21" +set(WarpX_amrex_branch "56b6402d238979fca6e7c57fdc644a54c4cf6fce" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index e92af233f88..546ab9c6384 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 2d87a4c8ad5d375008ee9b1c23a50404fe0dfa21 && cd - +cd amrex && git checkout --detach 56b6402d238979fca6e7c57fdc644a54c4cf6fce && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 72ab1bc9956a3ae34f628b47b918489a11f1fbdd Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 19 Oct 2022 15:33:59 -0700 Subject: [PATCH 0111/1346] OLCF Jupyter: Mamba is Pre-Installed (#3471) Mamba is now pre-installed on OLCF Jupyter base images! :) --- Docs/source/install/hpc/summit.rst | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst index a4dbc16a45f..2a22ab2e79b 100644 --- a/Docs/source/install/hpc/summit.rst +++ b/Docs/source/install/hpc/summit.rst @@ -344,10 +344,7 @@ When starting up a post-processing session, run this in your first cells: # work-around for OLCFHELP-4242 !jupyter serverextension enable --py --sys-prefix dask_labextension - # next Jupyter cell: install a faster & better conda package manager - !conda install -c conda-forge -y mamba - - # next cell: the software you want + # next Jupyter cell: the software you want !mamba install --quiet -c conda-forge -y openpmd-api openpmd-viewer ipympl ipywidgets fast-histogram yt # restart notebook From 602a1fab675f94b7b6b7f3326b60348933956c24 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 21 Oct 2022 08:58:14 -0700 Subject: [PATCH 0112/1346] User-Specified Used Inputs File (#3462) Allow users to overwrite the default for the "used inputs" file that we write. --- Docs/source/usage/how_to_run.rst | 2 +- Docs/source/usage/parameters.rst | 8 ++++++++ Source/Initialization/WarpXInitData.cpp | 6 +++++- Source/WarpX.H | 2 +- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/Docs/source/usage/how_to_run.rst b/Docs/source/usage/how_to_run.rst index e2c38bff673..26c56cb6007 100644 --- a/Docs/source/usage/how_to_run.rst +++ b/Docs/source/usage/how_to_run.rst @@ -99,7 +99,7 @@ On an :ref:`HPC system `, you would instead submit the :ref:`job sc By default, WarpX will write a status update to the terminal (``stdout``). On :ref:`HPC systems `, we usually store a copy of this in a file called ``outputs.txt``. -We also store by default an exact copy of all explicitly and implicitly used inputs parameters in a file called ``warpx_used_inputs``. +We also store by default an exact copy of all explicitly and implicitly used inputs parameters in a file called ``warpx_used_inputs`` (this file name can be changed). This is important for reproducibility, since as we wrote in the previous paragraph, the options in the input file can be extended and overwritten from the command line. :ref:`Further configured diagnostics ` are explained in the next sections. 
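A minimal sketch of the renaming knob introduced by this commit: the inputs-file line is ``warpx.used_inputs_file = <name>``, and from Python the same parameter can presumably be set through the generic ``warpx`` parameter bucket exposed by ``pywarpx`` (as done elsewhere in this series for other ``warpx.*`` options). The file name below is an arbitrary example, not a default.

```python
# Assumed illustration: pywarpx exposes a generic "warpx" bucket, so the new
# option should map onto it like any other warpx.* input parameter.
from pywarpx import warpx

warpx.used_inputs_file = "used_inputs_run_001"  # example name chosen here
```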
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index f87795a776a..354cc55743a 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -3,6 +3,10 @@ Input Parameters ================ +.. note:: + + WarpX input options are read via AMReX `ParmParse `__. + .. note:: The AMReX parser (see :ref:`running-cpp-parameters-parser`) is used for the right-hand-side of all input parameters that consist of one or more integers or floats, so expressions like ``.density_max = "2.+1."`` and/or using user-defined constants are accepted. @@ -24,6 +28,10 @@ Overall simulation parameters ``max_step`` and ``stop_time`` are provided, both criteria are used and the simulation stops when the first criterion is hit. +* ``warpx.used_inputs_file`` (`string`; default: ``warpx_used_inputs``) + Name of a file that WarpX writes to archive the used inputs. + The context of this file will contain an exact copy of all explicitly and implicitly used inputs parameters, including those :ref:`extended and overwritten from the command line `. + * ``warpx.gamma_boost`` (`float`) The Lorentz factor of the boosted frame in which the simulation is run. (The corresponding Lorentz transformation is assumed to be along ``warpx.boost_direction``.) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index a6d959cafa5..e25ba8afffe 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -349,8 +349,12 @@ WarpX::PrintMainPICparameters () } void -WarpX::WriteUsedInputsFile (std::string const & filename) const +WarpX::WriteUsedInputsFile () const { + std::string filename = "warpx_used_inputs"; + ParmParse pp_warpx("warpx"); + pp_warpx.queryAdd("used_inputs_file", filename); + ablastr::utils::write_used_inputs_file(filename); } diff --git a/Source/WarpX.H b/Source/WarpX.H index 86f7b57c5a3..4874313d538 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -453,7 +453,7 @@ public: void PrintMainPICparameters (); /** Write a file that record all inputs: inputs file + command line options */ - void WriteUsedInputsFile (std::string const & filename = "warpx_used_inputs") const; + void WriteUsedInputsFile () const; /** Print dt and dx,dy,dz */ void PrintDtDxDyDz (); From 137ec92644fe88b589ef014e03a96b6366605dc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Fri, 21 Oct 2022 18:02:50 +0200 Subject: [PATCH 0113/1346] Fix unused import warnings in python analysis scripts (#3472) * Fix unused import warnings in python analysis scripts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Examples/Tests/ion_stopping/analysis_ion_stopping.py | 2 +- .../particle_fields_diags/analysis_particle_diags_impl.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/Examples/Tests/ion_stopping/analysis_ion_stopping.py b/Examples/Tests/ion_stopping/analysis_ion_stopping.py index 1ddec4a6283..dd9ee0f8744 100755 --- a/Examples/Tests/ion_stopping/analysis_ion_stopping.py +++ b/Examples/Tests/ion_stopping/analysis_ion_stopping.py @@ -16,7 +16,7 @@ import sys import numpy as np -from scipy.constants import c, e, epsilon_0, k, m_e, m_p +from scipy.constants import e, epsilon_0, k, m_e, m_p import yt sys.path.insert(1, '../../../../warpx/Regression/Checksum/') diff --git 
a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py index 452e802e481..0ffeee8bb7f 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py @@ -16,10 +16,7 @@ import numpy as np import openpmd_api as io -from scipy.constants import c, e -from scipy.constants import epsilon_0 as eps0 -from scipy.constants import m_e, m_p -from scipy.constants import mu_0 as mu0 +from scipy.constants import c, e, m_e, m_p import yt sys.path.insert(1, '../../../../warpx/Regression/Checksum/') From c27b85db2b606f4474e5b331e4f9aeebaaf2da30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Mon, 24 Oct 2022 18:01:36 +0200 Subject: [PATCH 0114/1346] Do not query redundant `do_back_transformed_particles` (#3183) * Do not query redundant do_back_transformed_particles * Only set m_do_back_transformed_particles to true if write_species is true --- Source/Diagnostics/BTDiagnostics.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index dd82bcf0e85..c519cd248c0 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -106,7 +106,7 @@ void BTDiagnostics::DerivedInitData () if (m_output_species_names.size() == 0 and write_species == 1) m_output_species_names = mpc.GetSpeciesNames(); - if (m_output_species_names.size() > 0) { + if (m_output_species_names.size() > 0 and write_species == 1) { m_do_back_transformed_particles = true; } else { m_do_back_transformed_particles = false; @@ -158,8 +158,6 @@ BTDiagnostics::ReadParameters () m_file_prefix = "diags/" + m_diag_name; pp_diag_name.query("file_prefix", m_file_prefix); pp_diag_name.query("do_back_transformed_fields", m_do_back_transformed_fields); - pp_diag_name.query("do_back_transformed_particles", m_do_back_transformed_particles); - AMREX_ALWAYS_ASSERT(m_do_back_transformed_fields or m_do_back_transformed_particles); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_do_back_transformed_fields, " fields must be turned on for the new back-transformed diagnostics"); if (m_do_back_transformed_fields == false) m_varnames.clear(); From b96e6a95f83e095e364c06e4416e81c0fbcf0ec0 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Mon, 24 Oct 2022 09:42:58 -0700 Subject: [PATCH 0115/1346] Fix stair-case solver in 2D Cartesian (#2942) * Fix stair-case solver in 2D Cartesian * Generalization for the macroscropic solver * Update automated test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update benchmark Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../embedded_boundary_cube/analysis_fields_2d.py | 5 ++++- Examples/Modules/embedded_boundary_cube/inputs_2d | 5 +++++ .../benchmarks_json/embedded_boundary_cube_2d.json | 11 +++++------ Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp | 7 ++++++- .../FiniteDifferenceSolver/MacroscopicEvolveE.cpp | 7 ++++++- 5 files changed, 26 insertions(+), 9 deletions(-) diff --git a/Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py b/Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py index 227048be9d2..67e893b199a 100755 --- a/Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py +++ b/Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py 
@@ -48,7 +48,6 @@ (-Lx / 2 <= x < Lx / 2) * (-Lz / 2 <= z < Lz / 2) * np.cos(np.pi / Lx * c * t)) - rel_tol_err = 1e-3 # Compute relative l^2 error on By @@ -56,6 +55,10 @@ rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) assert (rel_err_y < rel_tol_err) +# Compute relative l^2 error on Ey +Ey_sim = data['Ey'].to_ndarray() +rel_err_y = np.sqrt(np.sum(np.square(Ey_sim/c - By_th)) / np.sum(np.square(By_th))) + test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Modules/embedded_boundary_cube/inputs_2d b/Examples/Modules/embedded_boundary_cube/inputs_2d index 2b334858249..476992b360d 100644 --- a/Examples/Modules/embedded_boundary_cube/inputs_2d +++ b/Examples/Modules/embedded_boundary_cube/inputs_2d @@ -20,6 +20,7 @@ my_constants.zmax = 0.5 warpx.eb_implicit_function = "max(max(x+xmin,-(x+xmax)), max(z+zmin,-(z+zmax)))" warpx.B_ext_grid_init_style = parse_B_ext_grid_function +warpx.E_ext_grid_init_style = parse_E_ext_grid_function my_constants.m = 0 my_constants.p = 1 @@ -30,6 +31,10 @@ warpx.Bz_external_grid_function(x,y,z) = 0 warpx.Bx_external_grid_function(x,y,z) = 0 warpx.By_external_grid_function(x,y,z) = cos(m * pi / Lx * (x - Lx / 2)) * cos(p * pi / Lz * (z - Lz / 2))*mu0 +warpx.Ez_external_grid_function(x,y,z) = 0 +warpx.Ex_external_grid_function(x,y,z) = 0 +warpx.Ey_external_grid_function(x,y,z) = cos(m * pi / Lx * (x - Lx / 2)) * cos(p * pi / Lz * (z - Lz / 2))*mu0*clight + diagnostics.diags_names = diag1 diag1.intervals = 1 diag1.diag_type = Full diff --git a/Regression/Checksum/benchmarks_json/embedded_boundary_cube_2d.json b/Regression/Checksum/benchmarks_json/embedded_boundary_cube_2d.json index e7c0d4828f3..37a82967a23 100644 --- a/Regression/Checksum/benchmarks_json/embedded_boundary_cube_2d.json +++ b/Regression/Checksum/benchmarks_json/embedded_boundary_cube_2d.json @@ -1,11 +1,10 @@ { "lev=0": { - "Bx": 0.0, + "Bx": 9.263694545408502e-05, "By": 0.00031905198933489135, - "Bz": 0.0, - "Ex": 8553.906698053022, - "Ey": 0.0, + "Bz": 7.328424783762596e-05, + "Ex": 8553.90669805307, + "Ey": 60867.04830538045, "Ez": 0.0 } -} - +} \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index 02f1d808772..da91eb73dc3 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -166,9 +166,14 @@ void FiniteDifferenceSolver::EvolveECartesian ( [=] AMREX_GPU_DEVICE (int i, int j, int k){ #ifdef AMREX_USE_EB // Skip field push if this cell is fully covered by embedded boundaries +#ifdef WARPX_DIM_3D if (ly(i,j,k) <= 0) return; +#elif defined(WARPX_DIM_XZ) + //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered + amrex::ignore_unused(ly); + if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0) return; +#endif #endif - Ey(i, j, k) += c2 * dt * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) + T_Algo::DownwardDz(Bx, coefs_z, n_coefs_z, i, j, k) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 7f0f995569a..d6bc9be9e8a 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -193,8 +193,13 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( [=] AMREX_GPU_DEVICE 
(int i, int j, int k){ #ifdef AMREX_USE_EB - // Skip field push if this cell is fully covered by embedded boundaries +#ifdef WARPX_DIM_3D if (ly(i,j,k) <= 0) return; +#elif defined(WARPX_DIM_XZ) + //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered + amrex::ignore_unused(ly); + if (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0) return; +#endif #endif // Interpolate conductivity, sigma, to Ey position on the grid amrex::Real const sigma_interp = CoarsenIO::Interp( sigma_arr, sigma_stag, From 2a743bf6a71f3a22bf7e3c2f401775bfba88b0a3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 24 Oct 2022 13:59:07 -0700 Subject: [PATCH 0116/1346] AMReX: Weekly Update (#3479) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 2da4c68bfa3..dc0a8732843 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 56b6402d238979fca6e7c57fdc644a54c4cf6fce && cd - + cd amrex && git checkout --detach 3082028e42870b1ed37f0d26160ef078580511e3 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 3efec7df509..568beffaf69 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 56b6402d238979fca6e7c57fdc644a54c4cf6fce +branch = 3082028e42870b1ed37f0d26160ef078580511e3 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index b9edd257d16..9d3b36b9e4c 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 56b6402d238979fca6e7c57fdc644a54c4cf6fce +branch = 3082028e42870b1ed37f0d26160ef078580511e3 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 2864d9cd8ac..126e8660bfa 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "56b6402d238979fca6e7c57fdc644a54c4cf6fce" +set(WarpX_amrex_branch "3082028e42870b1ed37f0d26160ef078580511e3" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 546ab9c6384..002d3e88228 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 56b6402d238979fca6e7c57fdc644a54c4cf6fce && cd - +cd amrex && git checkout --detach 3082028e42870b1ed37f0d26160ef078580511e3 && cd - # 
warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 6bf1a1f65fbf908ab9bc9227b40b90756ad7f334 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 24 Oct 2022 16:06:32 -0700 Subject: [PATCH 0117/1346] Add reduced diagnostics to `picmi.py` (#3475) * add reduced diagnostics to picmi * added test of picmi reduced diagnostics * changes requested during PR review --- .../PICMI_inputs_loadbalancecosts.py | 89 +++++++ ...analysis_reduced_diags_loadbalancecosts.py | 6 +- Python/pywarpx/Diagnostics.py | 1 + Python/pywarpx/WarpX.py | 10 +- Python/pywarpx/__init__.py | 2 +- Python/pywarpx/picmi.py | 228 +++++++++++++----- Regression/WarpX-tests.ini | 19 ++ 7 files changed, 295 insertions(+), 60 deletions(-) create mode 100644 Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py diff --git a/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py b/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py new file mode 100644 index 00000000000..569262b8304 --- /dev/null +++ b/Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 + +from pywarpx import picmi + +# Number of time steps +max_steps = 3 + +# Number of cells +nx = 128 +ny = 32 +nz = 128 + +# Physical domain +xmin = 0. +xmax = 4. +ymin = 0. +ymax = 4. +zmin = 0. +zmax = 4. + +# Create grid +grid = picmi.Cartesian3DGrid( + number_of_cells=[nx, ny, nz], + warpx_max_grid_size=32, + lower_bound=[xmin, ymin, zmin], + upper_bound=[xmax, ymax, zmax], + lower_boundary_conditions=['periodic', 'periodic', 'periodic'], + upper_boundary_conditions=['periodic', 'periodic', 'periodic'] +) + +# Electromagnetic solver +solver = picmi.ElectromagneticSolver( + grid=grid, + method='Yee', + cfl=0.99999 +) + +# Particles +electrons = picmi.Species( + particle_type='electron', + name='electrons', + initial_distribution=picmi.UniformDistribution( + density=1e14, + rms_velocity=[0]*3, + upper_bound=[xmax, ymax, 1.0] + ) +) +layout = picmi.GriddedLayout( + n_macroparticle_per_cell=[1, 1, 1], grid=grid +) + +# Reduced diagnostic +reduced_diag = picmi.ReducedDiagnostic( + diag_type='LoadBalanceCosts', + period=1, + name='LBC' +) + +# Diagnostic +diag = picmi.FieldDiagnostic( + grid=grid, + period=3, + write_dir='.', + warpx_file_prefix='Python_reduced_diags_loadbalancecosts_timers_plt' +) + +# Set up simulation +sim = picmi.Simulation( + solver=solver, + max_steps=max_steps, + verbose=1, + particle_shape=1, + warpx_current_deposition_algo='esirkepov', + warpx_field_gathering_algo='energy-conserving', + warpx_load_balance_intervals=2 +) + +# Add species +sim.add_species(electrons, layout=layout) + +# Add reduced diagnostics +sim.add_diagnostic(reduced_diag) + +# Add diagnostics +sim.add_diagnostic(diag) + +# Advance simulation until last time step +# sim.write_input_file("test_input") +sim.step(max_steps) diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py index 263e16bdfb8..a706aace1f6 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py @@ -17,7 +17,6 @@ # Possible running time: ~ 1 s -import os import sys import numpy as np @@ -68,8 +67,9 @@ def get_efficiency(i): print('load balance efficiency (before load balance): ', efficiency_before) print('load balance 
efficiency (after load balance): ', efficiency_after) -# The load balanced case is expcted to be more efficient then non-load balanced case +# The load balanced case is expected to be more efficient +# than non-load balanced case assert(efficiency_before < efficiency_after) -test_name = os.path.split(os.getcwd())[1] +test_name = 'reduced_diags_loadbalancecosts_timers' checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Python/pywarpx/Diagnostics.py b/Python/pywarpx/Diagnostics.py index 408ce6d1409..06562c28d77 100644 --- a/Python/pywarpx/Diagnostics.py +++ b/Python/pywarpx/Diagnostics.py @@ -7,6 +7,7 @@ from .Bucket import Bucket diagnostics = Bucket('diagnostics', _diagnostics_dict={}) +reduced_diagnostics = Bucket('warpx', _diagnostics_dict={}) class Diagnostic(Bucket): """ diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 175917ab64d..517c74d421d 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -14,7 +14,7 @@ from .Bucket import Bucket from .Collisions import collisions, collisions_list from .Constants import my_constants -from .Diagnostics import diagnostics +from .Diagnostics import diagnostics, reduced_diagnostics from .EB2 import eb2 from .Geometry import geometry from .Interpolation import interpolation @@ -76,6 +76,14 @@ def create_argv_list(self): for species_diagnostic in diagnostic._species_dict.values(): argv += species_diagnostic.attrlist() + reduced_diagnostics.reduced_diags_names = reduced_diagnostics._diagnostics_dict.keys() + argv += reduced_diagnostics.attrlist() + for diagnostic in reduced_diagnostics._diagnostics_dict.values(): + diagnostic.species = diagnostic._species_dict.keys() + argv += diagnostic.attrlist() + for species_diagnostic in diagnostic._species_dict.values(): + argv += species_diagnostic.attrlist() + return argv def init(self, mpi_comm=None): diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 8f49604563c..58d2143988b 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -9,7 +9,7 @@ from .Boundary import boundary from .Collisions import collisions from .Constants import my_constants -from .Diagnostics import diagnostics +from .Diagnostics import diagnostics, reduced_diagnostics from .EB2 import eb2 from .Geometry import geometry from .Interpolation import interpolation diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 4640a2e2e4d..3005b06bddb 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1595,8 +1595,43 @@ def finalize(self): # Simulation frame diagnostics # ---------------------------- +class WarpXDiagnosticBase(object): + """ + Base class for all WarpX diagnostic containing functionality shared by + all WarpX diagnostic installations. 
+ """ + def add_diagnostic(self): + # reduced diagnostics go in a different bucket than regular diagnostics + if isinstance(self, ReducedDiagnostic): + bucket = pywarpx.reduced_diagnostics + name_template = 'reduced_diag' + else: + bucket = pywarpx.diagnostics + name_template = 'diag' + + name = getattr(self, 'name', None) + if name is None: + diagnostics_number = (len(bucket._diagnostics_dict) + 1) + self.name = f'{name_template}{diagnostics_number}' + + try: + self.diagnostic = bucket._diagnostics_dict[self.name] + except KeyError: + self.diagnostic = pywarpx.Diagnostics.Diagnostic( + self.name, _species_dict={} + ) + bucket._diagnostics_dict[self.name] = self.diagnostic + + -class FieldDiagnostic(picmistandard.PICMI_FieldDiagnostic): + def set_write_dir(self): + if self.write_dir is not None or self.file_prefix is not None: + write_dir = (self.write_dir or 'diags') + file_prefix = (self.file_prefix or self.name) + self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) + + +class FieldDiagnostic(picmistandard.PICMI_FieldDiagnostic, WarpXDiagnosticBase): """ See `Input Parameters `_ for more information. @@ -1637,16 +1672,7 @@ def init(self, kw): def initialize_inputs(self): - name = getattr(self, 'name', None) - if name is None: - diagnostics_number = len(pywarpx.diagnostics._diagnostics_dict) + 1 - self.name = 'diag{}'.format(diagnostics_number) - - try: - self.diagnostic = pywarpx.diagnostics._diagnostics_dict[self.name] - except KeyError: - self.diagnostic = pywarpx.Diagnostics.Diagnostic(self.name, _species_dict={}) - pywarpx.diagnostics._diagnostics_dict[self.name] = self.diagnostic + self.add_diagnostic() self.diagnostic.diag_type = 'Full' self.diagnostic.format = self.format @@ -1709,16 +1735,13 @@ def initialize_inputs(self): self.diagnostic.plot_finepatch = self.plot_finepatch self.diagnostic.plot_crsepatch = self.plot_crsepatch - if self.write_dir is not None or self.file_prefix is not None: - write_dir = (self.write_dir or 'diags') - file_prefix = (self.file_prefix or self.name) - self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) + self.set_write_dir() ElectrostaticFieldDiagnostic = FieldDiagnostic -class Checkpoint(picmistandard.base._ClassWithInit): +class Checkpoint(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): """ Sets up checkpointing of the simulation, allowing for later restarts @@ -1749,23 +1772,17 @@ def __init__(self, period = 1, write_dir = None, name = None, **kw): def initialize_inputs(self): - try: - self.diagnostic = pywarpx.diagnostics._diagnostics_dict[self.name] - except KeyError: - self.diagnostic = pywarpx.Diagnostics.Diagnostic(self.name, _species_dict={}) - pywarpx.diagnostics._diagnostics_dict[self.name] = self.diagnostic + self.add_diagnostic() self.diagnostic.intervals = self.period self.diagnostic.diag_type = 'Full' self.diagnostic.format = 'checkpoint' self.diagnostic.file_min_digits = self.file_min_digits - if self.write_dir is not None or self.file_prefix is not None: - write_dir = (self.write_dir or 'diags') - file_prefix = (self.file_prefix or self.name) - self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) + self.set_write_dir() -class ParticleDiagnostic(picmistandard.PICMI_ParticleDiagnostic): + +class ParticleDiagnostic(picmistandard.PICMI_ParticleDiagnostic, WarpXDiagnosticBase): """ See `Input Parameters `_ for more information. 
@@ -1815,16 +1832,7 @@ def init(self, kw): def initialize_inputs(self): - name = getattr(self, 'name', None) - if name is None: - diagnostics_number = len(pywarpx.diagnostics._diagnostics_dict) + 1 - self.name = 'diag{}'.format(diagnostics_number) - - try: - self.diagnostic = pywarpx.diagnostics._diagnostics_dict[self.name] - except KeyError: - self.diagnostic = pywarpx.Diagnostics.Diagnostic(self.name, _species_dict={}) - pywarpx.diagnostics._diagnostics_dict[self.name] = self.diagnostic + self.add_diagnostic() self.diagnostic.diag_type = 'Full' self.diagnostic.format = self.format @@ -1832,10 +1840,7 @@ def initialize_inputs(self): self.diagnostic.file_min_digits = self.file_min_digits self.diagnostic.intervals = self.period - if self.write_dir is not None or self.file_prefix is not None: - write_dir = (self.write_dir or 'diags') - file_prefix = (self.file_prefix or self.name) - self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) + self.set_write_dir() # --- Use a set to ensure that fields don't get repeated. variables = set() @@ -1890,7 +1895,8 @@ def initialize_inputs(self): # ---------------------------- -class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic): +class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic, + WarpXDiagnosticBase): """ See `Input Parameters `_ for more information. @@ -1952,16 +1958,7 @@ def initialize_inputs_old(self): def initialize_inputs_new(self): - name = getattr(self, 'name', None) - if name is None: - diagnostics_number = len(pywarpx.diagnostics._diagnostics_dict) + 1 - self.name = 'diag{}'.format(diagnostics_number) - - try: - self.diagnostic = pywarpx.diagnostics._diagnostics_dict[self.name] - except KeyError: - self.diagnostic = pywarpx.Diagnostics.Diagnostic(self.name, _species_dict={}) - pywarpx.diagnostics._diagnostics_dict[self.name] = self.diagnostic + self.add_diagnostic() self.diagnostic.diag_type = 'BackTransformed' self.diagnostic.format = self.format @@ -2006,10 +2003,7 @@ def initialize_inputs_new(self): fields_to_plot.sort() self.diagnostic.fields_to_plot = fields_to_plot - if self.write_dir is not None or self.file_prefix is not None: - write_dir = (self.write_dir or 'diags') - file_prefix = (self.file_prefix or self.name) - self.diagnostic.file_prefix = os.path.join(write_dir, file_prefix) + self.set_write_dir() class LabFrameParticleDiagnostic(picmistandard.PICMI_LabFrameParticleDiagnostic): @@ -2033,3 +2027,127 @@ def initialize_inputs(self): pywarpx.warpx.num_snapshots_lab = self.num_snapshots pywarpx.warpx.dt_snapshots_lab = self.dt_snapshots pywarpx.warpx.lab_data_directory = self.write_dir + + +class ReducedDiagnostic(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): + """ + Sets up a reduced diagnostic in the simulation. + + See `Input Parameters `_ + for more information. + + Parameters + ---------- + diag_type: string + The type of reduced diagnostic. See the link above for all the different + types of reduced diagnostics available. + + name: string + The name of this diagnostic which will also be the name of the data + file written to disk. + + period: integer + The simulation step interval at which to output this diagnostic. + + path: string + The file path in which the diagnostic file should be written. + + extension: string + The file extension used for the diagnostic output. + + separator: string + The separator between row values in the output file. 
+ """ + + def __init__(self, diag_type, name=None, period=1, path=None, + extension=None, separator=None, **kw): + + self.name = name + self.type = diag_type + self.intervals = period + self.path = path + self.extension = extension + self.separator = separator + + self._species = kw.pop('species', None) + + # Now we need to handle all the specific inputs required for the + # different reduced diagnostic types. + # Note: only a limited number are presently supported. + + # The simple diagnostics do not require any additional arguments + self._simple_reduced_diagnostics = [ + 'ParticleEnergy', 'ParticleMomentum', 'FieldEnergy', + 'FieldMomentum', 'FieldMaximum', 'RhoMaximum', 'ParticleNumber', + 'LoadBalanceCosts', 'LoadBalanceEfficiency', + ] + # The species diagnostics require a species to be provided + self._species_reduced_diagnostics = [ + 'BeamRelevant', 'ParticleHistogram', 'ParticleExtrema', + ] + + if self.type in self._simple_reduced_diagnostics: + pass + elif self.type in self._species_reduced_diagnostics: + if self._species is None: + raise AttributeError( + f"{self.type} reduced diagnostic requires a species." + ) + if self.type == 'ParticleHistogram': + raise NotImplementedError( + f"{self.type} reduced diagnostic is not yet supported " + "in pywarpx." + ) + elif self.type == "FieldProbe": + kw = self._handle_field_probe(**kw) + else: + raise RuntimeError( + f"{self.type} reduced diagnostic is not yet supported " + "in pywarpx." + ) + + self.handle_init(kw) + + def _handle_field_probe(self, **kw): + """Utility function to grab required inputs for a field probe from kw""" + self.probe_geometry = kw.pop("probe_geometry") + self.x_probe = kw.pop("x_probe") + self.y_probe = kw.pop("y_probe") + self.z_probe = kw.pop("z_probe") + + self.interp_order = kw.pop("interp_order", None) + self.integrate = kw.pop("integrate", None) + self.do_moving_window_FP = kw.pop("do_moving_window_FP", None) + + if self.probe_geometry.lower() != 'point': + self.resolution = kw.pop("resolution") + + if self.probe_geometry.lower() == 'line': + self.x1_probe = kw.pop("x1_probe") + self.y1_probe = kw.pop("y1_probe") + self.z1_probe = kw.pop("z1_probe") + + if self.probe_geometry.lower() == 'plane': + self.detector_radius = kw.pop("detector_radius") + + self.target_normal_x = kw.pop("target_normal_x") + self.target_normal_y = kw.pop("target_normal_y") + self.target_normal_z = kw.pop("target_normal_z") + + self.target_up_x = kw.pop("target_up_x") + self.target_up_y = kw.pop("target_up_y") + self.target_up_z = kw.pop("target_up_z") + + return kw + + def initialize_inputs(self): + + self.add_diagnostic() + + for key in self.__dict__.keys(): + if not key.startswith('_') and key not in ['name', 'diagnostic']: + self.diagnostic.__setattr__(key, self.__dict__[key]) + + if self._species is not None: + diag = pywarpx.Bucket.Bucket(self.name + '.' + self._species.name) + self.diagnostic._species_dict[self._species.name] = diag diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 9d3b36b9e4c..9d711e088f9 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2395,6 +2395,25 @@ doVis = 0 compareParticles = 0 analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py +[Python_reduced_diags_loadbalancecosts_timers] +buildDir = . 
+inputFile = Examples/Tests/reduced_diags/PICMI_inputs_loadbalancecosts.py +runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 algo.load_balance_costs_update=Timers +customRunCmd = python3 PICMI_inputs_loadbalancecosts.py +dim = 3 +addToCompileString = USE_PYTHON_MAIN=TRUE +cmakeSetupOpts = -DWarpX_DIMS=3 +target = pip_install +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 0 +analysisRoutine = Examples/Tests/reduced_diags/analysis_reduced_diags_loadbalancecosts.py + [reduced_diags_loadbalancecosts_timers_psatd] buildDir = . inputFile = Examples/Tests/reduced_diags/inputs_loadbalancecosts From c4abb48f021276a61648802ea6e65f1688ceb770 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 24 Oct 2022 17:00:47 -0700 Subject: [PATCH 0118/1346] CI: oneAPI with `-O1` (#3478) oneAPI 2022.2.0 hangs for `-O2` and higher: #3442 --- .github/workflows/intel.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 8bb5e2c4552..6f320845053 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -65,6 +65,8 @@ jobs: name: oneAPI ICX SP runs-on: ubuntu-20.04 # Since 2021.4.0, AMReX_GpuUtility.H: error: comparison with NaN always evaluates to false in fast floating point modes + # oneAPI 2022.2.0 hangs for -O2 and higher: + # https://github.com/ECP-WarpX/WarpX/issues/3442 env: CXXFLAGS: "-Werror -Wno-error=pass-failed -Wno-tautological-constant-compare" # For oneAPI, Ninja is slower than the default: @@ -98,6 +100,7 @@ jobs: export CC=$(which icx) cmake -S . -B build_sp \ + -DCMAKE_CXX_FLAGS_RELEASE="-O1 -DNDEBUG" \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_EB=ON \ -DWarpX_LIB=ON \ @@ -123,6 +126,8 @@ jobs: name: oneAPI DPC++ SP runs-on: ubuntu-20.04 # Since 2021.4.0, AMReX_GpuUtility.H: error: comparison with NaN always evaluates to false in fast floating point modes + # oneAPI 2022.2.0 hangs for -O2 and higher: + # https://github.com/ECP-WarpX/WarpX/issues/3442 env: CXXFLAGS: "-Werror -Wno-tautological-constant-compare" # For oneAPI, Ninja is slower than the default: @@ -156,6 +161,7 @@ jobs: export CC=$(which clang) cmake -S . 
-B build_sp \ + -DCMAKE_CXX_FLAGS_RELEASE="-O1 -DNDEBUG" \ -DBUILD_SHARED_LIBS=ON \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_COMPUTE=SYCL \ From e4c735403311c48ad69edcabe91241ee1968938a Mon Sep 17 00:00:00 2001 From: Ryan Sandberg Date: Tue, 25 Oct 2022 12:18:30 -0700 Subject: [PATCH 0119/1346] Diagnostics: print when writing openPMD/Ascent/Sensei and BTD plotfile/openPMD (#3424) * print when writing openPMD, Ascent, Sensei * remove unnecesssary variables, include min_digits * print snapshot id when writing btd * fix ascent print * compile without warnings * still working on Ascent * still working Ascent * Print when BTD buffer is flushed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * print when writing openPMD, Ascent, Sensei * remove unnecesssary variables, include min_digits * print snapshot id when writing btd * fix ascent print * compile without warnings * still working on Ascent * still working Ascent * Print when BTD buffer is flushed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add warning message if BTD not full * adjust for CI, no in-situ vis BTD * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert to correct plotfile naming * change warning topic to BTD * nicer format for print statements * extend examples to fill BTD * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reset Checksums of BTD_ReducedSliceDiag * Upgrade CI Test LaserAccelerationBoost * Apply suggestions from code review Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> * improve BTD warning notes * edit final step notes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix formatting Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edoardo Zoni Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Axel Huebl --- .../RigidInjection/inputs_2d_BoostedFrame | 2 +- .../Modules/boosted_diags/inputs_3d_slice | 4 +- .../laser_acceleration/inputs_2d_boost | 13 +++-- .../benchmarks_json/BTD_ReducedSliceDiag.json | 8 +-- .../LaserAccelerationBoost.json | 54 +++++++++---------- Regression/WarpX-tests.ini | 2 +- .../Diagnostics/BTD_Plotfile_Header_Impl.cpp | 2 - Source/Diagnostics/BTDiagnostics.cpp | 48 ++++++++++++++++- .../BoundaryScrapingDiagnostics.cpp | 5 +- Source/Diagnostics/FlushFormats/FlushFormat.H | 1 + .../FlushFormats/FlushFormatAscent.H | 1 + .../FlushFormats/FlushFormatAscent.cpp | 14 +++-- .../FlushFormats/FlushFormatCheckpoint.H | 1 + .../FlushFormats/FlushFormatCheckpoint.cpp | 1 + .../FlushFormats/FlushFormatOpenPMD.H | 1 + .../FlushFormats/FlushFormatOpenPMD.cpp | 16 +++++- .../FlushFormats/FlushFormatPlotfile.H | 1 + .../FlushFormats/FlushFormatPlotfile.cpp | 18 +++++-- .../FlushFormats/FlushFormatSensei.H | 1 + .../FlushFormats/FlushFormatSensei.cpp | 17 ++++-- Source/Evolve/WarpXEvolve.cpp | 4 +- Source/Utils/Parser/IntervalsParser.H | 11 ++-- Source/Utils/Parser/IntervalsParser.cpp | 13 ++++- 23 files changed, 174 insertions(+), 64 deletions(-) diff --git a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame index bd8ce220acb..d5c4c2b3cec 100644 --- 
a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame +++ b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame @@ -1,6 +1,6 @@ # stop_time = 1.5e-13 -warpx.zmax_plasma_to_compute_max_step = 50.e-6 +warpx.zmax_plasma_to_compute_max_step = 56.e-6 warpx.gamma_boost = 5. warpx.boost_direction = z diff --git a/Examples/Modules/boosted_diags/inputs_3d_slice b/Examples/Modules/boosted_diags/inputs_3d_slice index c7938991935..ff68b590993 100644 --- a/Examples/Modules/boosted_diags/inputs_3d_slice +++ b/Examples/Modules/boosted_diags/inputs_3d_slice @@ -1,4 +1,4 @@ -warpx.zmax_plasma_to_compute_max_step = 0.0031 +warpx.zmax_plasma_to_compute_max_step = 0.0055 amr.n_cell = 32 32 64 amr.max_grid_size = 64 @@ -126,7 +126,7 @@ diag1.write_species = 1 diag2.diag_type = BackTransformed diag2.do_back_transformed_fields = 1 -diag2.intervals = 0:4:2, 1:3:2 +diag2.intervals = 0:3:2, 1:3:2 diag2.dz_snapshots_lab = 0.001 diag2.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho diag2.format = openpmd diff --git a/Examples/Physics_applications/laser_acceleration/inputs_2d_boost b/Examples/Physics_applications/laser_acceleration/inputs_2d_boost index f91596e9f50..c2aa92c3634 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_2d_boost +++ b/Examples/Physics_applications/laser_acceleration/inputs_2d_boost @@ -44,9 +44,6 @@ algo.particle_shape = 3 ################################# warpx.gamma_boost = 10. warpx.boost_direction = z -warpx.do_back_transformed_diagnostics = 1 -warpx.num_snapshots_lab = 7 -warpx.dt_snapshots_lab = 1.6678204759907604e-12 ################################# ############ PLASMA ############# @@ -123,6 +120,12 @@ laser1.wavelength = 0.81e-6 # The wavelength of the laser (in meters) # Diagnostics diagnostics.diags_names = diag1 -diag1.intervals = 100 -diag1.diag_type = Full + +diag1.diag_type = BackTransformed +diag1.do_back_transformed_fields = 1 +diag1.num_snapshots_lab = 3 +diag1.dt_snapshots_lab = 1.6678204759907604e-12 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho +diag1.format = plotfile +diag1.buffer_size = 32 +diag1.write_species = 1 diff --git a/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json b/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json index 43b609e30d8..1fd9e23a3e9 100644 --- a/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json +++ b/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json @@ -9,9 +9,9 @@ "particle_weight": 62415.090744607616 }, "electrons": { - "particle_momentum_x": 9.678379481090198e-20, - "particle_momentum_y": 2.5764224165967887e-19, - "particle_momentum_z": 3.0774186320322946e-19, + "particle_momentum_x": 8.238176902434188e-20, + "particle_momentum_y": 2.362174160683283e-19, + "particle_momentum_z": 2.776294933800578e-19, "particle_position_x": 0.0025853613792120563, "particle_position_y": 0.0037728464704368673, "particle_position_z": 0.1901073014926564, @@ -29,4 +29,4 @@ "jz": 2.3459512897800802e+19, "rho": 74967092858.04996 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json b/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json index 14a2b346c18..47a314ecbee 100644 --- a/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json +++ b/Regression/Checksum/benchmarks_json/LaserAccelerationBoost.json @@ -1,38 +1,38 @@ { "beam": { - "particle_momentum_x": 4.40462890925364e-19, - "particle_momentum_y": 4.375137239523109e-19, - "particle_momentum_z": 2.6808820042118736e-18, - "particle_position_x": 
0.0008126719835048633, - "particle_position_y": 0.3596147573048867, + "particle_momentum_x": 3.535284585563231e-19, + "particle_momentum_y": 4.403094613346061e-19, + "particle_momentum_z": 5.658013779496569e-17, + "particle_position_x": 0.008317876520240174, + "particle_position_y": 1.1704335094514386, "particle_weight": 62415090744.60765 }, "electrons": { - "particle_momentum_x": 6.2983486271535245e-27, - "particle_momentum_y": 2.561148643336908e-22, - "particle_momentum_z": 3.2606826949441258e-18, - "particle_position_x": 0.07200000000522772, - "particle_position_y": 0.040410946815682504, - "particle_weight": 7.205670325760674e+16 + "particle_momentum_x": 2.2135959939611405e-23, + "particle_momentum_y": 2.822519730011994e-22, + "particle_momentum_z": 5.260625039372931e-22, + "particle_position_x": 0.010800577787577741, + "particle_position_y": 0.21115060628258137, + "particle_weight": 4.121554826246186e+16 }, "ions": { - "particle_momentum_x": 7.729986759772206e-30, - "particle_momentum_y": 2.561148659606118e-22, - "particle_momentum_z": 5.987111293060833e-15, - "particle_position_x": 0.072, - "particle_position_y": 0.04041094673206954, - "particle_weight": 7.205670325760674e+16 + "particle_momentum_x": 6.248472008953412e-23, + "particle_momentum_y": 4.449200926395666e-22, + "particle_momentum_z": 5.768167708374496e-22, + "particle_position_x": 0.010800001678510793, + "particle_position_y": 0.21114947608115497, + "particle_weight": 4.121554826246186e+16 }, "lev=0": { - "Bx": 7184.641132345461, - "By": 16.4296392148614, - "Bz": 425.92136410328976, - "Ex": 4509329694.688026, - "Ey": 1984276461005.9053, - "Ez": 6310104620.90831, - "jx": 4752150599.756195, - "jy": 553920892529501.1, - "jz": 450362837764.43524, - "rho": 1569.6222852017581 + "Bx": 4818965.7728866, + "By": 1716.1766615204324, + "Bz": 14516.299533155969, + "Ex": 2355477198959.7603, + "Ey": 1446115193004757.5, + "Ez": 21864073851126.54, + "jx": 1995525322686390.0, + "jy": 5.31264244145399e+16, + "jz": 2.0300854881306136e+16, + "rho": 67807419.3491986 } } \ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 9d711e088f9..8de0b824582 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2156,7 +2156,7 @@ analysisRoutine = Examples/analysis_default_regression.py [LaserAccelerationBoost] buildDir = . 
inputFile = Examples/Physics_applications/laser_acceleration/inputs_2d_boost -runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 amr.n_cell=64 512 max_step=20 +runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 amr.n_cell=64 512 max_step=300 dim = 2 addToCompileString = cmakeSetupOpts = -DWarpX_DIMS=2 diff --git a/Source/Diagnostics/BTD_Plotfile_Header_Impl.cpp b/Source/Diagnostics/BTD_Plotfile_Header_Impl.cpp index 73d8b5ea0f3..9f858680e16 100644 --- a/Source/Diagnostics/BTD_Plotfile_Header_Impl.cpp +++ b/Source/Diagnostics/BTD_Plotfile_Header_Impl.cpp @@ -111,7 +111,6 @@ void BTDPlotfileHeaderImpl::WriteHeader () { if ( amrex::FileExists(m_Header_path) ) { - amrex::Print() << Utils::TextMsg::Info(" removing this file : " + m_Header_path); amrex::FileSystem::Remove(m_Header_path); } std::ofstream HeaderFile; @@ -262,7 +261,6 @@ void BTDMultiFabHeaderImpl::WriteMultiFabHeader () { if ( amrex::FileExists(m_Header_path) ) { - amrex::Print() << Utils::TextMsg::Info(" removing this file : " + m_Header_path); amrex::FileSystem::Remove(m_Header_path); } std::ofstream FabHeaderFile; diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index c519cd248c0..0e8a8424488 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -22,6 +22,8 @@ #include "WarpX.H" #include +#include +#include #include #include @@ -123,6 +125,48 @@ void BTDiagnostics::DerivedInitData () m_particles_buffer.resize(m_num_buffers); m_totalParticles_flushed_already.resize(m_num_buffers); m_totalParticles_in_buffer.resize(m_num_buffers); + + // check that simulation can fill all BTD snapshots + const int lev = 0; + const amrex::Real dt_boosted_frame = warpx.getdt(lev); + const int moving_dir = warpx.moving_window_dir; + const amrex::Real Lz_lab = warpx.Geom(lev).ProbLength(moving_dir) / warpx.gamma_boost / (1._rt+warpx.beta_boost); + const int ref_ratio = 1; + const amrex::Real dz_snapshot_grid = dz_lab(dt_boosted_frame, ref_ratio); + // Need enough buffers so the snapshot length is longer than the lab frame length + // num_buffers * m_buffer_size * dz_snapshot_grid >= Lz + const int num_buffers = ceil(Lz_lab / m_buffer_size / dz_snapshot_grid); + const int final_snapshot_iteration = m_intervals.GetFinalIteration(); + + // the final snapshot starts filling when the + // right edge of the moving window intersects the final snapshot + // time of final snapshot : t_sn = t0 + i*dt_snapshot + // where t0 is the time of first BTD snapshot, t0 = zmax / c * beta / (1-beta) + // + // the right edge of the moving window at the time of the final snapshot + // has space time coordinates + // time t_intersect = t_sn, position z_intersect=zmax + c*t_sn + // the boosted time of this space time pair is + // t_intersect_boost = gamma * (t_intersect - beta * z_intersect_boost/c) + // = gamma * (t_sn * (1 - beta) - beta * zmax / c) + // = gamma * (zmax*beta/c + i*dt_snapshot*(1-beta) - beta*zmax/c) + // = gamma * i * dt_snapshot * (1-beta) + // = i * dt_snapshot / gamma / (1+beta) + // + // if j = final snapshot starting step, then we want to solve + // j dt_boosted_frame >= t_intersect_boost = i * dt_snapshot / gamma / (1+beta) + // j >= i / gamma / (1+beta) * dt_snapshot / dt_boosted_frame + const int final_snapshot_starting_step = ceil(final_snapshot_iteration / warpx.gamma_boost / (1._rt+warpx.beta_boost) * m_dt_snapshots_lab / dt_boosted_frame); + const int final_snapshot_fill_iteration = 
final_snapshot_starting_step + num_buffers * m_buffer_size - 1; + if (final_snapshot_fill_iteration > warpx.maxStep()) { + std::string warn_string = + "\nSimulation might not run long enough to fill all BTD snapshots.\n" + "Final step: " + std::to_string(warpx.maxStep()) + "\n" + "Last BTD snapshot fills around step: " + std::to_string(final_snapshot_fill_iteration); + ablastr::warn_manager::WMRecordWarning( + "BTD", warn_string, + ablastr::warn_manager::WarnPriority::low); + } } void @@ -205,7 +249,6 @@ BTDiagnostics::ReadParameters () bool particle_fields_to_plot_specified = pp_diag_name.queryarr("particle_fields_to_plot", m_pfield_varnames); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!particle_fields_to_plot_specified, "particle_fields_to_plot is currently not supported for BackTransformed Diagnostics"); - } bool @@ -810,7 +853,8 @@ BTDiagnostics::Flush (int i_buffer) m_varnames, m_mf_output[i_buffer], m_geom_output[i_buffer], warpx.getistep(), labtime, m_output_species[i_buffer], nlev_output, file_name, m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards, - use_pinned_pc, isBTD, i_buffer, m_geom_snapshot[i_buffer][0], isLastBTDFlush, + use_pinned_pc, isBTD, i_buffer, m_buffer_flush_counter[i_buffer], + m_max_buffer_multifabs[i_buffer], m_geom_snapshot[i_buffer][0], isLastBTDFlush, m_totalParticles_flushed_already[i_buffer]); for (int isp = 0; isp < m_particles_buffer.at(i_buffer).size(); ++isp) { diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp index a0fad7c7f80..dff03634358 100644 --- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp +++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp @@ -146,6 +146,8 @@ BoundaryScrapingDiagnostics::Flush (int i_buffer) // This is not a backtransform diagnostics bool const isBTD = false; bool const isLastBTD = false; + int const bufferID = 0; + int const numBTDBuffers = 0; // The data being written out is saved in a pinned particle container bool const use_pinned_pc = true; const amrex::Geometry& geom = warpx.Geom(0); // For compatibility with `WriteToFile` ; not used @@ -156,7 +158,8 @@ BoundaryScrapingDiagnostics::Flush (int i_buffer) m_flush_format->WriteToFile( m_varnames, m_mf_output[i_buffer], m_geom_output[i_buffer], warpx.getistep(), warpx.gett_new(0), m_output_species[i_buffer], nlev_output, file_prefix, - m_file_min_digits, false, false, use_pinned_pc, isBTD, warpx.getistep(0), geom, + m_file_min_digits, false, false, use_pinned_pc, isBTD, + warpx.getistep(0), bufferID, numBTDBuffers, geom, isLastBTD, m_totalParticles_flushed_already[i_buffer]); // Now that the data has been written out, clear out the buffer diff --git a/Source/Diagnostics/FlushFormats/FlushFormat.H b/Source/Diagnostics/FlushFormats/FlushFormat.H index 33de6d86a0e..8e0fbb7e693 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormat.H +++ b/Source/Diagnostics/FlushFormats/FlushFormat.H @@ -22,6 +22,7 @@ public: bool plot_raw_fields_guards, const bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, + int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const = 0; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H index e4b47c6b4e1..01b051373c9 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H @@ -39,6 
+39,7 @@ public: bool plot_raw_fields_guards, const bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, + int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const override; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp index 39631afc8e8..8fe25bef32b 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp @@ -16,16 +16,24 @@ FlushFormatAscent::WriteToFile ( amrex::Vector& geom, const amrex::Vector iteration, const double time, const amrex::Vector& particle_diags, int nlev, - const std::string prefix, int /*file_min_digits*/, bool plot_raw_fields, + const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, const bool /*use_pinned_pc*/, - bool /*isBTD*/, int /*snapshotID*/, const amrex::Geometry& /*full_BTD_snapshot*/, + bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, + const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/, const amrex::Vector& /* totalParticlesFlushedAlready*/) const { #ifdef AMREX_USE_ASCENT WARPX_PROFILE("FlushFormatAscent::WriteToFile()"); auto & warpx = WarpX::GetInstance(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !isBTD, + "In-situ visualization is not currently supported for back-transformed diagnostics."); + + const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); + amrex::Print() << Utils::TextMsg::Info("Writing Ascent file " + filename); + // wrap mesh data WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::MultiLevelToBlueprint", prof_ascent_mesh_blueprint); conduit::Node bp_mesh; @@ -59,7 +67,7 @@ FlushFormatAscent::WriteToFile ( #else amrex::ignore_unused(varnames, mf, geom, iteration, time, - particle_diags, nlev); + particle_diags, nlev, file_min_digits, isBTD); #endif // AMREX_USE_ASCENT amrex::ignore_unused(prefix, plot_raw_fields, plot_raw_fields_guards); } diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H index 7b8a4f9d4d0..abc70437575 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H @@ -26,6 +26,7 @@ class FlushFormatCheckpoint final : public FlushFormatPlotfile bool plot_raw_fields_guards, const bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, + int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const override final; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index 2e5d3f7347e..c845a6937ca 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -37,6 +37,7 @@ FlushFormatCheckpoint::WriteToFile ( bool /*plot_raw_fields_guards*/, const bool /*use_pinned_pc*/, bool /*isBTD*/, int /*snapshotID*/, + int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/, const amrex::Vector& /* totalParticlesFlushedAlready*/) const { diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H 
b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H index 208096aeee0..1a0d63ecba2 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H @@ -38,6 +38,7 @@ public: bool plot_raw_fields_guards, const bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, + int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const override; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp index 4c7c9ab8191..b2a772066e5 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp @@ -114,10 +114,24 @@ FlushFormatOpenPMD::WriteToFile ( const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, const bool use_pinned_pc, - bool isBTD, int snapshotID, const amrex::Geometry& full_BTD_snapshot, + bool isBTD, int snapshotID, int bufferID, int numBuffers, + const amrex::Geometry& full_BTD_snapshot, bool isLastBTDFlush, const amrex::Vector& totalParticlesFlushedAlready) const { WARPX_PROFILE("FlushFormatOpenPMD::WriteToFile()"); + const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); + if (!isBTD) + { + amrex::Print() << Utils::TextMsg::Info("Writing openPMD file " + filename); + } else + { + amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) + + " to snapshot " + std::to_string(snapshotID) + " to openPMD BTD " + prefix); + if (isLastBTDFlush) + { + amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in openPMD BTD " + prefix); + } + } WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !plot_raw_fields && !plot_raw_fields_guards, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H index ceaed8f7dda..e773f8e2f75 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H @@ -33,6 +33,7 @@ public: bool plot_raw_fields_guards, const bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, + int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const override; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 03154983632..0272493d06f 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -62,13 +62,25 @@ FlushFormatPlotfile::WriteToFile ( const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, const bool /*use_pinned_pc*/, - bool isBTD, int /*snapshotID*/, const amrex::Geometry& /*full_BTD_snapshot*/, - bool /*isLastBTDFlush*/, const amrex::Vector& /* totalParticlesFlushedAlready*/) const + bool isBTD, int snapshotID, int bufferID, int numBuffers, + const amrex::Geometry& /*full_BTD_snapshot*/, + bool isLastBTDFlush, const amrex::Vector& /* totalParticlesFlushedAlready*/) const { WARPX_PROFILE("FlushFormatPlotfile::WriteToFile()"); auto & warpx = WarpX::GetInstance(); const std::string& 
filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info("Writing plotfile " + filename); + if (!isBTD) + { + amrex::Print() << Utils::TextMsg::Info("Writing plotfile " + filename); + } else + { + amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) + + " to snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + prefix ); + if (isLastBTDFlush) + { + amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + filename); + } + } Vector rfs; VisMF::Header::Version current_version = VisMF::GetHeaderVersion(); diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H index 2998ceba723..f828213d7f2 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H @@ -57,6 +57,7 @@ public: bool plot_raw_fields_guards, const bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, + int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const override; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp index 3d052859919..0993a3eaf2c 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp @@ -56,20 +56,27 @@ FlushFormatSensei::WriteToFile ( int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, const bool use_pinned_pc, - bool isBTD, int snapshotID, - const amrex::Geometry& full_BTD_snapshot, bool isLastBTDFlush, + bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, + const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/, const amrex::Vector& totalParticlesFlushedAlready) const { amrex::ignore_unused( geom, nlev, prefix, file_min_digits, plot_raw_fields, plot_raw_fields_guards, - use_pinned_pc, isBTD, snapshotID, full_BTD_snapshot, - isLastBTDFlush, totalParticlesFlushedAlready); + use_pinned_pc, + totalParticlesFlushedAlready); #ifndef AMREX_USE_SENSEI_INSITU - amrex::ignore_unused(varnames, mf, iteration, time, particle_diags); + amrex::ignore_unused(varnames, mf, iteration, time, particle_diags, + isBTD); #else + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !isBTD, + "In-situ visualization is not currently supported for back-transformed diagnostics."); + WARPX_PROFILE("FlushFormatSensei::WriteToFile()"); + const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); + amrex::Print() << Utils::TextMsg::Info("Writing Sensei file " + filename); amrex::Vector *mf_ptr = const_cast*>(&mf); diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 7300b1e32c0..2c87d1efa2e 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -88,7 +88,7 @@ WarpX::Evolve (int numsteps) // Start loop on time steps if (verbose) { - amrex::Print() << "\nSTEP " << step+1 << " starts ...\n"; + amrex::Print() << "STEP " << step+1 << " starts ...\n"; } ExecutePythonCallback("beforestep"); @@ -360,7 +360,7 @@ WarpX::Evolve (int numsteps) << " DT = " << dt[0] << "\n"; amrex::Print()<< "Evolve time = " << evolve_time << " s; This step = " << 
evolve_time_end_step-evolve_time_beg_step - << " s; Avg. per step = " << evolve_time/(step-step_begin+1) << " s\n"; + << " s; Avg. per step = " << evolve_time/(step-step_begin+1) << " s\n\n"; } if (cur_time >= stop_time - 1.e-3*dt[0] || SignalHandling::TestAndResetActionRequestFlag(SignalHandling::SIGNAL_REQUESTS_BREAK)) { diff --git a/Source/Utils/Parser/IntervalsParser.H b/Source/Utils/Parser/IntervalsParser.H index e9fffce9e62..f590139b30c 100644 --- a/Source/Utils/Parser/IntervalsParser.H +++ b/Source/Utils/Parser/IntervalsParser.H @@ -192,14 +192,20 @@ namespace utils::parser /** * @brief Return the total number of unique labframe snapshots */ - int NumSnapshots (); + int NumSnapshots () const; /** * @brief Return the iteration number stored at index i_buffer * * @param i_buffer buffer or iteration index, between 0 and NumSnapshots */ - int GetBTDIteration(int i_buffer); + int GetBTDIteration(int i_buffer) const; + + /** + * @brief Return the final BTD iteration + * + */ + int GetFinalIteration() const; /** * \brief A method that returns true if any of the slices contained by the IntervalsParser @@ -211,7 +217,6 @@ namespace utils::parser std::vector m_btd_iterations; std::vector m_slices; std::vector m_slice_starting_i_buffer; - int m_n_snapshots; static constexpr char m_separator = ','; bool m_activated = false; }; diff --git a/Source/Utils/Parser/IntervalsParser.cpp b/Source/Utils/Parser/IntervalsParser.cpp index d535edeb8b9..aab2cbe30eb 100644 --- a/Source/Utils/Parser/IntervalsParser.cpp +++ b/Source/Utils/Parser/IntervalsParser.cpp @@ -239,13 +239,22 @@ utils::parser::BTDIntervalsParser::BTDIntervalsParser ( } -int utils::parser::BTDIntervalsParser::NumSnapshots () +int utils::parser::BTDIntervalsParser::NumSnapshots () const { return m_btd_iterations.size(); } -int utils::parser::BTDIntervalsParser::GetBTDIteration (int i_buffer) +int utils::parser::BTDIntervalsParser::GetBTDIteration (int i_buffer) const { return m_btd_iterations[i_buffer]; } + + +int utils::parser::BTDIntervalsParser::GetFinalIteration () const +{ + return m_btd_iterations.back(); +} + + +bool utils::parser::BTDIntervalsParser::isActivated () const {return m_activated;} From 5c84ec02fe1e167a3473f64358298441ac15c934 Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 25 Oct 2022 15:32:26 -0700 Subject: [PATCH 0120/1346] Replace "none" BC with Neumann for electrostatic (#3483) * Replace "none" BC with Neumann for electrostatic * Added checking for Neumann boundaries * Updated documentation regarding Neumann BCs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 4 ++- .../ElectrostaticSphereEB/inputs_3d_mixed_BCs | 4 +-- Source/FieldSolver/ElectrostaticSolver.cpp | 28 +++++++++++-------- Source/Utils/WarpXAlgorithmSelection.H | 3 +- Source/Utils/WarpXAlgorithmSelection.cpp | 1 + 5 files changed, 25 insertions(+), 15 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 354cc55743a..917cd06e5c6 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -316,7 +316,9 @@ Domain Boundary Conditions * ``pec``: This option can be used to set a Perfect Electric Conductor at the simulation boundary. For the electromagnetic solve, at PEC, the tangential electric field and the normal magnetic field are set to 0. 
This boundary can be used to model a dielectric or metallic surface. In the guard-cell region, the tangential electric field is set equal and opposite to the respective field component in the mirror location across the PEC boundary, and the normal electric field is set equal to the field component in the mirror location in the domain across the PEC boundary. Similarly, the tangential (and normal) magnetic field components are set equal (and opposite) to the respective magnetic field components in the mirror locations across the PEC boundary. Note that PEC boundary is invalid at `r=0` for the RZ solver. Please use ``none`` option. This boundary condition does not work with the spectral solver. If an electrostatic field solve is used the boundary potentials can also be set through ``boundary.potential_lo_x/y/z`` and ``boundary.potential_hi_x/y/z`` (default `0`). - * ``none``: No boundary condition is applied to the fields with the electromagnetic solver. This option must be used for the RZ-solver at `r=0`. If the electrostatic solver is used, a Neumann boundary condition (with gradient equal to 0) will be applied on the specified boundary. + * ``none``: No boundary condition is applied to the fields with the electromagnetic solver. This option must be used for the RZ-solver at `r=0`. + + * ``neumann``: For the electrostatic solver, a Neumann boundary condition (with gradient of the potential equal to 0) will be applied on the specified boundary. * ``boundary.particle_lo`` and ``boundary.particle_hi`` (`2 strings` for 2D, `3 strings` for 3D, `absorbing` by default) Options are: diff --git a/Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs b/Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs index 33bc912632a..0c1b9130ded 100644 --- a/Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs +++ b/Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs @@ -4,8 +4,8 @@ amr.max_level = 0 amr.blocking_factor = 8 amr.max_grid_size = 128 geometry.dims = 3 -boundary.field_lo = pec pec none -boundary.field_hi = pec none none +boundary.field_lo = pec pec neumann +boundary.field_hi = pec neumann neumann boundary.potential_lo_x = 0 boundary.potential_hi_x = 0 boundary.potential_lo_y = 0 diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 64158d3a158..103fff13199 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -711,12 +711,12 @@ WarpX::computePhiTriDiagonal (const amrex::Vector FieldBCType_algo_to_int = { {"pmc", FieldBoundaryType::PMC}, {"damped", FieldBoundaryType::Damped}, {"absorbing_silver_mueller", FieldBoundaryType::Absorbing_SilverMueller}, + {"neumann", FieldBoundaryType::Neumann}, {"none", FieldBoundaryType::None}, {"default", FieldBoundaryType::PML} }; From f34f4c219e596eb086dfbb9c6ad8484b958d8b87 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Tue, 25 Oct 2022 16:51:40 -0700 Subject: [PATCH 0121/1346] Add warning to set zmin and zmax for ion-acc example (#3323) * Add warning to set zmin and zmax for ions and also change the ion acceleration example to set these parameters so that users are aware that memory overflow could happen for production-size examples furthermore, correct units in comment after definition of r0 and Lcut Replace zmin and zmax with variables for electrons * Reset checksum for regression test Reset benchmark for LaserIonAcc2d test because the particle initialization in the input changed. 
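The same limits are exposed in the Python (PICMI) interface through the ``lower_bound`` / ``upper_bound`` arguments of the distribution objects, which translate to the ``zmin`` / ``zmax`` style parameters used in the input file below. A minimal sketch, assuming the usual ``from pywarpx import picmi`` setup (the density value is illustrative; the bound reproduces ``r0 + Lcut + eps_z`` from the example input):

.. code-block:: python

    from pywarpx import picmi

    # Restrict particle creation to |z| <= zmax; None leaves a direction
    # unbounded. This mirrors electrons.zmin/zmax in the native input file.
    zmax = 4.55e-6   # r0 + Lcut + eps_z
    plasma_dist = picmi.UniformDistribution(
        density=1.0e27,                      # illustrative density in m^-3
        lower_bound=[None, None, -zmax],
        upper_bound=[None, None, zmax])

    electrons = picmi.Species(
        particle_type='electron', name='electrons',
        initial_distribution=plasma_dist)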
--- Docs/source/usage/examples.rst | 6 +++ .../Physics_applications/laser_ion/inputs | 27 ++++++++----- .../benchmarks_json/LaserIonAcc2d.json | 40 +++++++++---------- 3 files changed, 43 insertions(+), 30 deletions(-) diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index a27b9f3df25..cbaaa6bcb0e 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -60,6 +60,12 @@ Laser-ion acceleration The resolution of this 2D case is extremely low by default. You will need a computing cluster for adequate resolution of the target density, see comments in the input file. +.. warning:: + + It is strongly advised to set the parameters ``.zmin / zmax / xmin / ...`` when working with highly dense targets that are limited in one or multiple dimensions. + The particle creation routine will first create particles everywhere between these limits (`defaulting to box size if unset`), setting particles to invalid only afterwards based on the density profile. + Not setting these parameters can quickly lead to memory overflows. + Uniform plasma -------------- diff --git a/Examples/Physics_applications/laser_ion/inputs b/Examples/Physics_applications/laser_ion/inputs index f3659368c25..91f8f07045a 100644 --- a/Examples/Physics_applications/laser_ion/inputs +++ b/Examples/Physics_applications/laser_ion/inputs @@ -76,6 +76,14 @@ algo.load_balance_costs_update = Heuristic ################################# # Target Profile # + +# definitions for target extent and pre-plasma +my_constants.L = 0.05e-6 # [m] scale length (>0) +my_constants.Lcut = 2.0e-6 # [m] hard cutoff from surface +my_constants.r0 = 2.5e-6 # [m] radius or half-thickness +my_constants.eps_z = 0.05e-6 # [m] small offset in z to make zmin, zmax interval larger than 2*(r0 + Lcut) +my_constants.zmax = r0 + Lcut + eps_z # [m] upper limit in z for particle creation + particles.species_names = electrons hydrogen # particle species @@ -83,8 +91,10 @@ hydrogen.species_type = hydrogen hydrogen.injection_style = NUniformPerCell hydrogen.num_particles_per_cell_each_dim = 2 2 4 hydrogen.momentum_distribution_type = at_rest -#hydrogen.zmin = -10.0e-6 -#hydrogen.zmax = 10.0e-6 +# minimum and maximum z position between which particles are initialized +# --> should be set for dense targets limit memory consumption during initialization +hydrogen.zmin = -zmax +hydrogen.zmax = zmax hydrogen.profile = parse_density_function hydrogen.addRealAttributes = orig_x orig_z hydrogen.attribute.orig_x(x,y,z,ux,uy,uz,t) = "x" @@ -96,8 +106,10 @@ electrons.num_particles_per_cell_each_dim = 2 2 4 electrons.momentum_distribution_type = "gaussian" electrons.ux_th = .01 electrons.uz_th = .01 -#electrons.zmin = -10.0e-6 -#electrons.zmax = 10.0e-6 +# minimum and maximum z position between which particles are initialized +# --> should be set for dense targets limit memory consumption during initialization +electrons.zmin = -zmax +electrons.zmax = zmax # ionization physics (field ionization/ADK) # [i1] none (fully pre-ionized): @@ -130,12 +142,7 @@ my_constants.n0 = 30.0 # [n_c] # [material 4] Copper (ion density: 8.49e28/m^3; times ionization level) #my_constants.n0 = 1400 -# profiles -# pre-plasma -my_constants.L = 0.05e-6 # [1/m] scale length (>0) -my_constants.Lcut = 2.0e-6 # [1/m] hard cutoff from surface -# core: flat foil, cylinder or sphere -my_constants.r0 = 2.5e-6 # [m] radius or half-thickness +# density profiles (target extent, pre-plasma and cutoffs defined above particle species list) # [target 1] flat foil (thickness = 
2*r0) electrons.density_function(x,y,z) = "nc*n0*( diff --git a/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json b/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json index 36dcc4813e5..6b9d29fbbe1 100644 --- a/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json +++ b/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json @@ -1,33 +1,33 @@ { "electrons": { - "particle_momentum_x": 3.819011093562328e-19, + "particle_momentum_x": 3.7558265697785297e-19, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.6442909854275157e-18, - "particle_position_x": 0.008132686101590795, - "particle_position_y": 0.030529760810180325, - "particle_weight": 2.641331189632942e+17 + "particle_momentum_z": 1.6241045337016777e-18, + "particle_position_x": 0.008080139452222582, + "particle_position_y": 0.030470786164249836, + "particle_weight": 2.6527193922723818e+17 }, "hydrogen": { - "particle_momentum_x": 2.2442952799834144e-18, - "particle_momentum_z": 1.0841140295639398e-18, - "particle_orig_x": 0.008258544921875001, - "particle_orig_z": 0.0366896337890625, - "particle_position_x": 0.008258183633694481, - "particle_position_y": 0.036687836783915156, - "particle_weight": 2.701906737218416e+17 + "particle_momentum_x": 2.230242228305449e-18, + "particle_momentum_z": 1.087276856218956e-18, + "particle_orig_x": 0.008248212890625, + "particle_orig_z": 0.0368645947265625, + "particle_position_x": 0.008247833494376897, + "particle_position_y": 0.03686279813152423, + "particle_weight": 2.6934893377423152e+17 }, "lev=0": { "Bx": 0.0, - "By": 11393530.864665572, + "By": 11411806.976599155, "Bz": 0.0, - "Ex": 2033401599040428.8, + "Ex": 2035695789467976.2, "Ey": 0.0, - "Ez": 316047997346965.6, - "jx": 1.634666300264935e+19, + "Ez": 323118235034526.9, + "jx": 1.656704421803856e+19, "jy": 0.0, - "jz": 8.884561198935773e+18, - "rho": 61730016945.00626, - "rho_electrons": 17451988195798.281, - "rho_hydrogen": 17441819816491.93 + "jz": 8.846078579875918e+18, + "rho": 61752907894.83176, + "rho_electrons": 17451375232572.703, + "rho_hydrogen": 17441818436520.373 } } \ No newline at end of file From 8a94eef4d68d1721658dcb9f1d443e6fcfc69dba Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 28 Oct 2022 08:58:34 -0700 Subject: [PATCH 0122/1346] Add neumann BC in WarpX PICMI interface (#3487) --- Python/pywarpx/picmi.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 3005b06bddb..1c6ad317d50 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -22,7 +22,8 @@ # dictionary to map field boundary conditions from picmistandard to WarpX BC_map = { - 'open':'pml', 'dirichlet':'pec', 'periodic':'periodic', 'damped':'damped', 'none':'none', None:'none' + 'open':'pml', 'dirichlet':'pec', 'periodic':'periodic', 'damped':'damped', + 'neumann':'neumann', 'none':'none', None:'none' } class constants: From 337255bc75785e22015f8aac8961689c028a6786 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 28 Oct 2022 10:14:09 -0700 Subject: [PATCH 0123/1346] Docs: LUMI (CSC) (#3488) * Docs: LUMI (CSC) Start documenting LUMI usage instructions for WarpX. 
* Add more system info --- Docs/source/install/hpc.rst | 1 + Docs/source/install/hpc/frontier.rst | 2 +- Docs/source/install/hpc/lumi.rst | 81 +++++++++++++++++++ .../lumi-csc/lumi_warpx.profile.example | 51 ++++++++++++ 4 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 Docs/source/install/hpc/lumi.rst create mode 100644 Tools/machines/lumi-csc/lumi_warpx.profile.example diff --git a/Docs/source/install/hpc.rst b/Docs/source/install/hpc.rst index af32d46edb2..643ab604c86 100644 --- a/Docs/source/install/hpc.rst +++ b/Docs/source/install/hpc.rst @@ -36,6 +36,7 @@ HPC Systems hpc/lawrencium hpc/ookami hpc/lxplus + hpc/lumi .. tip:: diff --git a/Docs/source/install/hpc/frontier.rst b/Docs/source/install/hpc/frontier.rst index 1263d681151..801f1b39bdf 100644 --- a/Docs/source/install/hpc/frontier.rst +++ b/Docs/source/install/hpc/frontier.rst @@ -75,7 +75,7 @@ The general :ref:`cmake compile-time options ` apply as usual. Running ------- -.. _running-cpp-frontier-MI100-GPUs: +.. _running-cpp-frontier-MI250X-GPUs: MI250X GPUs (2x64 GB) ^^^^^^^^^^^^^^^^^^^^^ diff --git a/Docs/source/install/hpc/lumi.rst b/Docs/source/install/hpc/lumi.rst new file mode 100644 index 00000000000..8933ff279d3 --- /dev/null +++ b/Docs/source/install/hpc/lumi.rst @@ -0,0 +1,81 @@ +.. _building-lumi: + +LUMI (CSC) +========== + +The `LUMI cluster `_ is located at CSC. + + +Introduction +------------ + +If you are new to this system, **please see the following resources**: + +* `Lumi user guide `_ +* Batch system: `Slurm `_ +* `Jupyter service ? `__ +* Production directories: + + * `LUMI-P `__: 4 independent [Lustre](https://docs.lumi-supercomputer.eu/hardware/storage/lumip/#lustre) file systems + * `LUMI-F `__: a fast Lustre file system + * `LUMI-O `__: object storage + + +Installation +------------ + +Use the following commands to download the WarpX source code and switch to the correct branch: + +.. code-block:: bash + + git clone https://github.com/ECP-WarpX/WarpX.git $HOME/src/warpx + +We use the following modules and environments on the system (``$HOME/lumi_warpx.profile``). + +.. literalinclude:: ../../../../Tools/machines/lumi-csc/lumi_warpx.profile.example + :language: bash + :caption: You can copy this file from ``Tools/machines/lumi-csc/lumi_warpx.profile.example``. + + +We recommend to store the above lines in a file, such as ``$HOME/lumi_warpx.profile``, and load it into your shell after a login: + +.. code-block:: bash + + source $HOME/lumi_warpx.profile + +Then, ``cd`` into the directory ``$HOME/src/warpx`` and use the following commands to compile: + +.. code-block:: bash + + cd $HOME/src/warpx + rm -rf build + + cmake -S . -B build -DWarpX_DIMS=3 -DWarpX_COMPUTE=HIP + cmake --build build -j 6 + +The general :ref:`cmake compile-time options ` apply as usual. + + +.. _running-cpp-lumi: + +Running +------- + +.. _running-cpp-lumi-MI250X-GPUs: + +MI250X GPUs (2x64 GB) +^^^^^^^^^^^^^^^^^^^^^ + +.. note:: + + TODO: Add batch script template. + + +.. _post-processing-lumi: + +Post-Processing +--------------- + +.. note:: + + TODO: Document any Jupyter or data services. 
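Until dedicated data services are documented, output written by WarpX on LUMI can be inspected with standard Python tools. A minimal post-processing sketch, assuming WarpX was built with openPMD (HDF5) support, that a diagnostic with ``format = openpmd`` wrote to ``diags/diag1/``, and that the ``openpmd-api`` Python package is available in the activated virtual environment:

.. code-block:: python

    import openpmd_api as io

    # Open the file-based series written by the diagnostic
    # (adjust the path and file extension to the actual output).
    series = io.Series("diags/diag1/openpmd_%T.h5", io.Access.read_only)

    for index, it in series.iterations.items():
        Ez = it.meshes["E"]["z"].load_chunk()  # schedule the read of Ez
        series.flush()                         # perform the actual read
        print(f"iteration {index}: max |Ez| = {abs(Ez).max():.3e} V/m")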
diff --git a/Tools/machines/lumi-csc/lumi_warpx.profile.example b/Tools/machines/lumi-csc/lumi_warpx.profile.example new file mode 100644 index 00000000000..c85d50731af --- /dev/null +++ b/Tools/machines/lumi-csc/lumi_warpx.profile.example @@ -0,0 +1,51 @@ +# please set your project account +#export proj= + +# optional: just an additional text editor +module load nano + +# required dependencies +module load LUMI/22.08 partition/G +module load buildtools +module load craype-accel-amd-gfx90a +module load rocm/5.0.2 +module load cray-mpich +module load cce/14.0.2 + +# optional: faster re-builds +#module load ccache + +# optional: for PSATD in RZ geometry support +# TODO: BLAS++, LAPACK++ + +# optional: for QED lookup table generation support +# TODO: BOOST + +# optional: for openPMD support +# TODO: HDF5, ADIOS2 + +# optional: Ascent in situ support +# TODO + +# optional: for Python bindings or libEnsemble +# TODO + +if [ -d "$HOME/sw/venvs/warpx-lumi" ] +then + source $HOME/sw/venvs/warpx-lumi/bin/activate +fi + +# an alias to request an interactive batch node for two hours +# for paralle execution, start on the batch node: jsrun +#alias getNode="..." +# an alias to run a command on a batch node for up to 30min +# usage: nrun +#alias runNode="..." + +# optimize ROCm compilation for MI250X +export AMREX_AMD_ARCH=gfx90a + +# compiler environment hints +export CC=$(which cc) +export CXX=$(which CC) +export FC=$(which ftn) From f6b4b10e028299546f82ab05a3a7a7975beeefca Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 28 Oct 2022 19:17:42 +0200 Subject: [PATCH 0124/1346] add warning if PML and PSATD are used together (#3489) --- Source/Initialization/WarpXInitData.cpp | 16 ++++++++++++++++ Source/WarpX.H | 5 +++++ 2 files changed, 21 insertions(+) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index e25ba8afffe..4865cfa6537 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -431,6 +431,8 @@ WarpX::InitData () } PerformanceHints(); + + CheckKnownIssues(); } void @@ -1262,3 +1264,17 @@ void WarpX::InitializeEBGridData (int lev) amrex::ignore_unused(lev); #endif } + +void WarpX::CheckKnownIssues() +{ + if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD && + (std::any_of(do_pml_Lo[0].begin(),do_pml_Lo[0].end(),[](const auto& ee){return ee;}) || + std::any_of(do_pml_Hi[0].begin(),do_pml_Hi[0].end(),[](const auto& ee){return ee;})) ) + { + ablastr::warn_manager::WMRecordWarning( + "PML", + "Using PSATD together with PML may lead to instabilities if the plasma touches the PML region. " + "It is recommended to leave enough empty space between the plama boundary and the PML region.", + ablastr::warn_manager::WarnPriority::low); + } +} diff --git a/Source/WarpX.H b/Source/WarpX.H index 4874313d538..4431be57e02 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1068,6 +1068,11 @@ private: */ void CheckGuardCells(amrex::MultiFab const& mf); + /** + * \brief Checks for known numerical issues involving different WarpX modules + */ + void CheckKnownIssues(); + /** Check the requested resources and write performance hints */ void PerformanceHints (); From b8962c76d8a646ef06e8d9f54e129aad6f5cd7c7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 28 Oct 2022 10:59:38 -0700 Subject: [PATCH 0125/1346] ABLASTR: Always Sync Option (#3467) * ABLASTR: Always Sync Option Add an option to ABLASTR/WarpX to always sync nodal cells that are owned by multiple blocks. This can be used for debugging. 
* Fix typo Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> * Use New Option in One CI Test * Fix Logic: Force only to be `true`, not `false` Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Edoardo Zoni --- Docs/source/usage/parameters.rst | 64 ++++++++++------- Docs/source/usage/workflows/debugging.rst | 1 + .../pml_psatd_dive_divb_cleaning.json | 12 ++-- Regression/WarpX-tests.ini | 2 +- Source/Parallelization/WarpXComm.cpp | 24 +++---- Source/WarpX.H | 24 +++---- Source/ablastr/utils/Communication.H | 5 +- Source/ablastr/utils/Communication.cpp | 68 ++++++++++--------- 8 files changed, 110 insertions(+), 90 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 917cd06e5c6..b3906ed140d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -57,21 +57,6 @@ Overall simulation parameters printed to standard output. Currently only works if the Lorentz boost and the moving window are along the z direction. -* ``warpx.verbose`` (``0`` or ``1``; default is ``1`` for true) - Controls how much information is printed to the terminal, when running WarpX. - -* ``warpx.always_warn_immediately`` (``0`` or ``1``; default is ``0`` for false) - If set to ``1``, WarpX immediately prints every warning message as soon as - it is generated. It is mainly intended for debug purposes, in case a simulation - crashes before a global warning report can be printed. - -* ``warpx.abort_on_warning_threshold`` (string: ``low``, ``medium`` or ``high``) optional - Optional threshold to abort as soon as a warning is raised. - If the threshold is set, warning messages with priority greater than or - equal to the threshold trigger an immediate abort. - It is mainly intended for debug purposes, and is best used with - ``warpx.always_warn_immediately=1``. - * ``warpx.random_seed`` (`string` or `int` > 0) optional If provided ``warpx.random_seed = random``, the random seed will be determined using `std::random_device` and `std::clock()`, @@ -147,10 +132,6 @@ Overall simulation parameters Note that even with this set to ``1`` WarpX will not catch all out-of-memory events yet when operating close to maximum device memory. `Please also see the documentation in AMReX `_. -* ``amrex.abort_on_unused_inputs`` (``0`` or ``1``; default is ``0`` for false) - When set to ``1``, this option causes simulation to fail *after* its completion if there were unused parameters. - It is mainly intended for continuous integration and automated testing to check that all tests and inputs are adapted to API changes. - Signal Handling ^^^^^^^^^^^^^^^ @@ -497,9 +478,6 @@ Distribution across MPI ranks and parallelization * ``warpx.do_dynamic_scheduling`` (`0` or `1`) optional (default `1`) Whether to activate OpenMP dynamic scheduling. -* ``warpx.safe_guard_cells`` (`0` or `1`) optional (default `0`) - For developers: run in safe mode, exchanging more guard cells, and more often in the PIC loop (for debugging). - .. _running-cpp-parameters-parser: Math parser and user-defined constants @@ -982,10 +960,6 @@ Particle initialization boosted frame, whether or not to plot back-transformed diagnostics for this species. -* ``warpx.serialize_initial_conditions`` (`0` or `1`) optional (default `0`) - Serialize the initial conditions for reproducible testing. - Mainly whether or not to use OpenMP threading for particle initialization. 
- * ``.do_field_ionization`` (`0` or `1`) optional (default `0`) Do field ionization for this species (using the ADK theory). @@ -2850,3 +2824,41 @@ This is essentially the python slicing syntax except that the stop is inclusive Note that if a given period is zero or negative, the corresponding slice is disregarded. For example, ``something_intervals = -1`` deactivates ``something`` and ``something_intervals = ::-1,100:1000:25`` is equivalent to ``something_intervals = 100:1000:25``. + + +.. _running-cpp-parameters-test-debug: + +Testing and Debugging +--------------------- + +When developing, testing and :ref:`debugging WarpX `, the following options can be considered. + +* ``warpx.verbose`` (``0`` or ``1``; default is ``1`` for true) + Controls how much information is printed to the terminal, when running WarpX. + +* ``warpx.always_warn_immediately`` (``0`` or ``1``; default is ``0`` for false) + If set to ``1``, WarpX immediately prints every warning message as soon as + it is generated. It is mainly intended for debug purposes, in case a simulation + crashes before a global warning report can be printed. + +* ``warpx.abort_on_warning_threshold`` (string: ``low``, ``medium`` or ``high``) optional + Optional threshold to abort as soon as a warning is raised. + If the threshold is set, warning messages with priority greater than or + equal to the threshold trigger an immediate abort. + It is mainly intended for debug purposes, and is best used with + ``warpx.always_warn_immediately=1``. + +* ``amrex.abort_on_unused_inputs`` (``0`` or ``1``; default is ``0`` for false) + When set to ``1``, this option causes simulation to fail *after* its completion if there were unused parameters. + It is mainly intended for continuous integration and automated testing to check that all tests and inputs are adapted to API changes. + +* ``warpx.serialize_initial_conditions`` (`0` or `1`) optional (default `0`) + Serialize the initial conditions for reproducible testing, e.g, in our continuous integration tests. + Mainly whether or not to use OpenMP threading for particle initialization. + +* ``warpx.safe_guard_cells`` (`0` or `1`) optional (default `0`) + Run in safe mode, exchanging more guard cells, and more often in the PIC loop (for debugging). + +* ``ablastr.fillboundary_always_sync`` (`0` or `1`) optional (default `0`) + Run all ``FillBoundary`` operations on ``MultiFab`` to force-synchronize shared nodal points. + This slightly increases communication cost and can help to spot missing ``nodal_sync`` flags in these operations. diff --git a/Docs/source/usage/workflows/debugging.rst b/Docs/source/usage/workflows/debugging.rst index 2b85523b852..9a7d7efacd0 100644 --- a/Docs/source/usage/workflows/debugging.rst +++ b/Docs/source/usage/workflows/debugging.rst @@ -26,6 +26,7 @@ Try the following steps to debug a simulation: #. Try to make the reproducible scenario as small as possible by modifying the inputs file. Reduce number of cells, particles and MPI processes to something as small and as quick to execute as possible. The next steps in debugging will increase runtime, so you will benefit from a fast reproducer. +#. Consider adding :ref:`runtime debug options ` that can narrow down typical causes in numerical implementations. #. In case of a crash, Backtraces can be more detailed if you :ref:`re-compile ` with debug flags: for example, try compiling with ``-DCMAKE_BUILD_TYPE=RelWithDebInfo`` (some slowdown) or even ``-DCMAKE_BUILD_TYPE=Debug`` (this will make the simulation way slower) and rerun. #. 
If debug builds are too costly, try instead compiling with ``-DAMReX_ASSERTIONS=ON`` to activate more checks and rerun. #. If the problem looks like a memory violation, this could be from an invalid field or particle index access. diff --git a/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json b/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json index 12f56218081..f0aaaddce49 100644 --- a/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json +++ b/Regression/Checksum/benchmarks_json/pml_psatd_dive_divb_cleaning.json @@ -1,11 +1,11 @@ { "lev=0": { - "Bx": 1.482006352778953e-07, - "By": 1.48205157883426e-07, - "Bz": 1.4954704195856524e-07, - "Ex": 11.789793626679334, - "Ey": 11.78688532983594, - "Ez": 11.770112090435557, + "Bx": 1.4820063339458393e-07, + "By": 1.4820515850615877e-07, + "Bz": 1.495470407235984e-07, + "Ex": 11.789793389856982, + "Ey": 11.786884764477099, + "Ez": 11.770110814468563, "rho": 4.903696256562049e-05 } } \ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 8de0b824582..90f937f016a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -121,7 +121,7 @@ analysisRoutine = Examples/Tests/PML/analysis_pml_psatd.py [pml_psatd_dive_divb_cleaning] buildDir = . inputFile = Examples/Tests/PML/inputs_3d -runtime_params = warpx.do_similar_dm_pml=0 warpx.abort_on_warning_threshold=medium +runtime_params = warpx.do_similar_dm_pml=0 warpx.abort_on_warning_threshold=medium ablastr.fillboundary_always_sync=1 dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index cee53031c07..396b0f24b72 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -473,7 +473,7 @@ void WarpX::UpdateCurrentNodalToStag (amrex::MultiFab& dst, amrex::MultiFab cons } void -WarpX::FillBoundaryB (IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryB (IntVect ng, std::optional nodal_sync) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -482,7 +482,7 @@ WarpX::FillBoundaryB (IntVect ng, const bool nodal_sync) } void -WarpX::FillBoundaryE (IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryE (IntVect ng, std::optional nodal_sync) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -491,7 +491,7 @@ WarpX::FillBoundaryE (IntVect ng, const bool nodal_sync) } void -WarpX::FillBoundaryF (IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryF (IntVect ng, std::optional nodal_sync) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -500,7 +500,7 @@ WarpX::FillBoundaryF (IntVect ng, const bool nodal_sync) } void -WarpX::FillBoundaryG (IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryG (IntVect ng, std::optional nodal_sync) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -528,14 +528,14 @@ WarpX::FillBoundaryE_avg (IntVect ng) void -WarpX::FillBoundaryE (int lev, IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryE (int lev, IntVect ng, std::optional nodal_sync) { FillBoundaryE(lev, PatchType::fine, ng, nodal_sync); if (lev > 0) FillBoundaryE(lev, PatchType::coarse, ng, nodal_sync); } void -WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::IntVect ng, std::optional nodal_sync) { std::array mf; amrex::Periodicity period; @@ -585,14 +585,14 @@ WarpX::FillBoundaryE 
(const int lev, const PatchType patch_type, const amrex::In } void -WarpX::FillBoundaryB (int lev, IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryB (int lev, IntVect ng, std::optional nodal_sync) { FillBoundaryB(lev, PatchType::fine, ng, nodal_sync); if (lev > 0) FillBoundaryB(lev, PatchType::coarse, ng, nodal_sync); } void -WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::IntVect ng, std::optional nodal_sync) { std::array mf; amrex::Periodicity period; @@ -747,14 +747,14 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng) } void -WarpX::FillBoundaryF (int lev, IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryF (int lev, IntVect ng, std::optional nodal_sync) { FillBoundaryF(lev, PatchType::fine, ng, nodal_sync); if (lev > 0) FillBoundaryF(lev, PatchType::coarse, ng, nodal_sync); } void -WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, const bool nodal_sync) +WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, std::optional nodal_sync) { if (patch_type == PatchType::fine) { @@ -788,7 +788,7 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, const bool noda } } -void WarpX::FillBoundaryG (int lev, IntVect ng, const bool nodal_sync) +void WarpX::FillBoundaryG (int lev, IntVect ng, std::optional nodal_sync) { FillBoundaryG(lev, PatchType::fine, ng, nodal_sync); @@ -798,7 +798,7 @@ void WarpX::FillBoundaryG (int lev, IntVect ng, const bool nodal_sync) } } -void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng, const bool nodal_sync) +void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng, std::optional nodal_sync) { if (patch_type == PatchType::fine) { diff --git a/Source/WarpX.H b/Source/WarpX.H index 4431be57e02..0f2a986e7e1 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -636,21 +636,21 @@ public: void UpdateCurrentNodalToStag (amrex::MultiFab& dst, amrex::MultiFab const& src); // Fill boundary cells including coarse/fine boundaries - void FillBoundaryB (amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryE (amrex::IntVect ng, const bool nodal_sync = false); + void FillBoundaryB (amrex::IntVect ng, std::optional nodal_sync = std::nullopt); + void FillBoundaryE (amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryB_avg (amrex::IntVect ng); void FillBoundaryE_avg (amrex::IntVect ng); - void FillBoundaryF (amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryG (amrex::IntVect ng, const bool nodal_sync = false); + void FillBoundaryF (amrex::IntVect ng, std::optional nodal_sync = std::nullopt); + void FillBoundaryG (amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryAux (amrex::IntVect ng); - void FillBoundaryE (int lev, amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryB (int lev, amrex::IntVect ng, const bool nodal_sync = false); + void FillBoundaryE (int lev, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); + void FillBoundaryB (int lev, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryE_avg (int lev, amrex::IntVect ng); void FillBoundaryB_avg (int lev, amrex::IntVect ng); - void FillBoundaryF (int lev, amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryG (int lev, amrex::IntVect ng, const bool nodal_sync = false); + void FillBoundaryF (int lev, amrex::IntVect ng, std::optional 
nodal_sync = std::nullopt); + void FillBoundaryG (int lev, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryAux (int lev, amrex::IntVect ng); /** @@ -957,10 +957,10 @@ private: /// void EvolveEM(int numsteps); - void FillBoundaryB (const int lev, const PatchType patch_type, const amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryE (const int lev, const PatchType patch_type, const amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryF (int lev, PatchType patch_type, amrex::IntVect ng, const bool nodal_sync = false); - void FillBoundaryG (int lev, PatchType patch_type, amrex::IntVect ng, const bool nodal_sync = false); + void FillBoundaryB (const int lev, const PatchType patch_type, const amrex::IntVect ng, std::optional nodal_sync = std::nullopt); + void FillBoundaryE (const int lev, const PatchType patch_type, const amrex::IntVect ng, std::optional nodal_sync = std::nullopt); + void FillBoundaryF (int lev, PatchType patch_type, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); + void FillBoundaryG (int lev, PatchType patch_type, amrex::IntVect ng, std::optional nodal_sync = std::nullopt); void FillBoundaryB_avg (int lev, PatchType patch_type, amrex::IntVect ng); void FillBoundaryE_avg (int lev, PatchType patch_type, amrex::IntVect ng); diff --git a/Source/ablastr/utils/Communication.H b/Source/ablastr/utils/Communication.H index 9f58096b902..2c879460490 100644 --- a/Source/ablastr/utils/Communication.H +++ b/Source/ablastr/utils/Communication.H @@ -18,6 +18,9 @@ #include "WarpX.H" +#include + + namespace ablastr::utils::communication { @@ -66,7 +69,7 @@ void FillBoundary (amrex::MultiFab &mf, amrex::IntVect ng, bool do_single_precision_comms, const amrex::Periodicity &period = amrex::Periodicity::NonPeriodic(), - const bool nodal_sync = false); + std::optional nodal_sync = std::nullopt); void FillBoundary (amrex::iMultiFab &mf, const amrex::Periodicity &period = amrex::Periodicity::NonPeriodic()); diff --git a/Source/ablastr/utils/Communication.cpp b/Source/ablastr/utils/Communication.cpp index c7bf00c8e48..71cd8380226 100644 --- a/Source/ablastr/utils/Communication.cpp +++ b/Source/ablastr/utils/Communication.cpp @@ -13,6 +13,8 @@ #include #include #include +#include + namespace ablastr::utils::communication { @@ -60,27 +62,24 @@ void ParallelAdd(amrex::MultiFab &dst, const amrex::MultiFab &src, int src_comp, do_single_precision_comms, period, amrex::FabArrayBase::ADD); } -void FillBoundary (amrex::MultiFab &mf, bool do_single_precision_comms, const amrex::Periodicity &period) +void FillBoundary (amrex::MultiFab &mf, + amrex::IntVect ng, + bool do_single_precision_comms, + const amrex::Periodicity &period, + std::optional nodal_sync) { BL_PROFILE("ablastr::utils::communication::FillBoundary"); - if (do_single_precision_comms) - { - mf.FillBoundary(period); - } - else - { - mf.FillBoundary(period); - } -} + // allow developers to always enforce nodal sync, independent of the + // nodal_sync argument + bool do_nodal_sync_arg = nodal_sync.value_or(false); -void FillBoundary(amrex::MultiFab &mf, - amrex::IntVect ng, - bool do_single_precision_comms, - const amrex::Periodicity &period, - const bool nodal_sync) -{ - BL_PROFILE("ablastr::utils::communication::FillBoundary"); + amrex::ParmParse pp_ablastr("ablastr"); + bool do_nodal_sync_input = false; + pp_ablastr.query("fillboundary_always_sync", do_nodal_sync_input); + + // logic: inputs overwrite argument unless argument is true + bool const do_nodal_sync = 
do_nodal_sync_arg || do_nodal_sync_input; if (do_single_precision_comms) { @@ -91,7 +90,7 @@ void FillBoundary(amrex::MultiFab &mf, mixedCopy(mf_tmp, mf, 0, 0, mf.nComp(), mf.nGrowVect()); - if (nodal_sync) { + if (do_nodal_sync) { mf_tmp.FillBoundaryAndSync(0, mf.nComp(), ng, period); } else { mf_tmp.FillBoundary(ng, period); @@ -101,8 +100,7 @@ void FillBoundary(amrex::MultiFab &mf, } else { - - if (nodal_sync) { + if (do_nodal_sync) { mf.FillBoundaryAndSync(0, mf.nComp(), ng, period); } else { mf.FillBoundary(ng, period); @@ -110,9 +108,24 @@ void FillBoundary(amrex::MultiFab &mf, } } -void FillBoundary(amrex::iMultiFab &imf, const amrex::Periodicity &period) +void FillBoundary (amrex::MultiFab &mf, bool do_single_precision_comms, const amrex::Periodicity &period) { - BL_PROFILE("ablastr::utils::communication::FillBoundary"); + amrex::IntVect const ng = mf.n_grow; + FillBoundary(mf, ng, do_single_precision_comms, period); +} + +void +FillBoundary (amrex::Vector const &mf, bool do_single_precision_comms, + const amrex::Periodicity &period) +{ + for (auto x : mf) { + ablastr::utils::communication::FillBoundary(*x, do_single_precision_comms, period); + } +} + +void FillBoundary (amrex::iMultiFab &imf, const amrex::Periodicity &period) +{ + BL_PROFILE("ablastr::utils::communication::FillBoundary::iMultiFab"); imf.FillBoundary(period); } @@ -121,20 +134,11 @@ void FillBoundary (amrex::iMultiFab& imf, amrex::IntVect ng, const amrex::Periodicity& period) { - BL_PROFILE("ablastr::utils::communication::FillBoundary"); + BL_PROFILE("ablastr::utils::communication::FillBoundary::iMultiFab"); imf.FillBoundary(ng, period); } -void -FillBoundary(amrex::Vector const &mf, bool do_single_precision_comms, - const amrex::Periodicity &period) -{ - for (auto x : mf) { - ablastr::utils::communication::FillBoundary(*x, do_single_precision_comms, period); - } -} - void SumBoundary (amrex::MultiFab &mf, bool do_single_precision_comms, const amrex::Periodicity &period) { BL_PROFILE("ablastr::utils::communication::SumBoundary"); From affb02a5eca4221b4eb631e81d14afd324b2f447 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 28 Oct 2022 20:03:36 +0200 Subject: [PATCH 0126/1346] Fix Invalid Mem w/ Moving Window, LB & PMLs (#3450) * workaround to fix invalid memory access while using PML + Moving Window + Load Balancing + Timers * fixed bug * add argument to shiftMF to avoid updating cost for PML --- Source/Utils/WarpXMovingWindow.cpp | 64 ++++++++++++++++++------------ Source/WarpX.H | 5 ++- 2 files changed, 41 insertions(+), 28 deletions(-) diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index 30d02211ff8..887d4550470 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -150,6 +150,9 @@ WarpX::MoveWindow (const int step, bool move_j) int num_shift = num_shift_base; int num_shift_crse = num_shift; + constexpr auto do_update_cost = true; + constexpr auto dont_update_cost = false; //We can't update cost for PML + // Shift the mesh fields for (int lev = 0; lev <= finest_level; ++lev) { @@ -177,47 +180,55 @@ WarpX::MoveWindow (const int step, bool move_j) if (dim == 1) Efield_parser = Eyfield_parser->compile<3>(); if (dim == 2) Efield_parser = Ezfield_parser->compile<3>(); } - shiftMF(*Bfield_fp[lev][dim], geom[lev], num_shift, dir, lev, B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_fp[lev][dim], geom[lev], num_shift, dir, lev, E_external_grid[dim], use_Eparser, Efield_parser); + shiftMF(*Bfield_fp[lev][dim], geom[lev], 
num_shift, dir, lev, do_update_cost, + B_external_grid[dim], use_Bparser, Bfield_parser); + shiftMF(*Efield_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + E_external_grid[dim], use_Eparser, Efield_parser); if (fft_do_time_averaging) { - shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, E_external_grid[dim], use_Eparser, Efield_parser); + shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + B_external_grid[dim], use_Bparser, Bfield_parser); + shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*current_fp[lev][dim], geom[lev], num_shift, dir, lev); + shiftMF(*current_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); } if (pml[lev] && pml[lev]->ok()) { const std::array& pml_B = pml[lev]->GetB_fp(); const std::array& pml_E = pml[lev]->GetE_fp(); - shiftMF(*pml_B[dim], geom[lev], num_shift, dir, lev); - shiftMF(*pml_E[dim], geom[lev], num_shift, dir, lev); + shiftMF(*pml_B[dim], geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_E[dim], geom[lev], num_shift, dir, lev, dont_update_cost); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD) if (pml_rz[lev] && dim < 2) { const std::array& pml_rz_B = pml_rz[lev]->GetB_fp(); const std::array& pml_rz_E = pml_rz[lev]->GetE_fp(); - shiftMF(*pml_rz_B[dim], geom[lev], num_shift, dir, lev); - shiftMF(*pml_rz_E[dim], geom[lev], num_shift, dir, lev); + shiftMF(*pml_rz_B[dim], geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_rz_E[dim], geom[lev], num_shift, dir, lev, dont_update_cost); } #endif if (lev > 0) { // coarse grid - shiftMF(*Bfield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, E_external_grid[dim], use_Eparser, Efield_parser); - shiftMF(*Bfield_aux[lev][dim], geom[lev], num_shift, dir, lev); - shiftMF(*Efield_aux[lev][dim], geom[lev], num_shift, dir, lev); + shiftMF(*Bfield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + B_external_grid[dim], use_Bparser, Bfield_parser); + shiftMF(*Efield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + E_external_grid[dim], use_Eparser, Efield_parser); + shiftMF(*Bfield_aux[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*Efield_aux[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); if (fft_do_time_averaging) { - shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, E_external_grid[dim], use_Eparser, Efield_parser); + shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + B_external_grid[dim], use_Bparser, Bfield_parser); + shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*current_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev); + shiftMF(*current_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } if (do_pml && pml[lev]->ok()) { const std::array& pml_B = pml[lev]->GetB_cp(); const std::array& pml_E = pml[lev]->GetE_cp(); - shiftMF(*pml_B[dim], geom[lev-1], 
num_shift_crse, dir, lev); - shiftMF(*pml_E[dim], geom[lev-1], num_shift_crse, dir, lev); + shiftMF(*pml_B[dim], geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + shiftMF(*pml_E[dim], geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } } @@ -225,17 +236,17 @@ WarpX::MoveWindow (const int step, bool move_j) // Shift scalar component F for dive cleaning if (do_dive_cleaning) { // Fine grid - shiftMF(*F_fp[lev], geom[lev], num_shift, dir, lev); + shiftMF(*F_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_F = pml[lev]->GetF_fp(); - shiftMF(*pml_F, geom[lev], num_shift, dir, lev); + shiftMF(*pml_F, geom[lev], num_shift, dir, lev, dont_update_cost); } if (lev > 0) { // Coarse grid - shiftMF(*F_cp[lev], geom[lev-1], num_shift_crse, dir, lev); + shiftMF(*F_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_F = pml[lev]->GetF_cp(); - shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev); + shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } } @@ -244,10 +255,10 @@ WarpX::MoveWindow (const int step, bool move_j) if (move_j) { if (rho_fp[lev]){ // Fine grid - shiftMF(*rho_fp[lev], geom[lev], num_shift, dir, lev); + shiftMF(*rho_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0){ // Coarse grid - shiftMF(*rho_cp[lev], geom[lev-1], num_shift_crse, dir, lev); + shiftMF(*rho_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } } @@ -296,7 +307,7 @@ WarpX::MoveWindow (const int step, bool move_j) void WarpX::shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, - int num_shift, int dir, const int lev, + int num_shift, int dir, const int lev, bool update_cost_flag, amrex::Real external_field, bool useparser, amrex::ParserExecutor<3> const& field_parser) { @@ -428,7 +439,8 @@ WarpX::shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, dstfab(i,j,k,n) = srcfab(i+shift.x,j+shift.y,k+shift.z,n); }) - if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + if (cost && update_cost_flag && + WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) { amrex::Gpu::synchronize(); wt = amrex::second() - wt; diff --git a/Source/WarpX.H b/Source/WarpX.H index 0f2a986e7e1..402b4ee660d 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -100,8 +100,9 @@ public: ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } static void shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, - int num_shift, int dir, const int lev, amrex::Real external_field=0.0, - bool useparser = false, amrex::ParserExecutor<3> const& field_parser={}); + int num_shift, int dir, const int lev, bool update_cost_flag, + amrex::Real external_field=0.0, bool useparser = false, + amrex::ParserExecutor<3> const& field_parser={}); static void GotoNextLine (std::istream& is); From 77581c7baa0e1a0a241414b38e41e4e41ccf1e4e Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 28 Oct 2022 12:28:08 -0700 Subject: [PATCH 0127/1346] For background stopping, added error checks of the user input (#3163) * Added error checks of the user input * Fixed literals and const's --- .../BackgroundStopping/BackgroundStopping.cpp | 63 ++++++++++++------- 1 file changed, 39 insertions(+), 24 deletions(-) diff --git a/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp b/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp index 
2f438c8d650..fa9e7394981 100644 --- a/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp +++ b/Source/Particles/Collision/BackgroundStopping/BackgroundStopping.cpp @@ -19,6 +19,8 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) : CollisionBase(collision_name) { + using namespace amrex::literals; + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(m_species_names.size() == 1, "Background stopping must have exactly one species."); @@ -37,7 +39,7 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) amrex::ParticleReal background_density; std::string background_density_str; if (utils::parser::queryWithParser(pp_collision_name, "background_density", background_density)) { - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(background_density > 0, + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(background_density > 0_prt, "For background stopping, the background density must be greater than 0"); m_background_density_parser = utils::parser::makeParser(std::to_string(background_density), {"x", "y", "z", "t"}); @@ -52,7 +54,7 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) amrex::ParticleReal background_temperature; std::string background_temperature_str; if (utils::parser::queryWithParser(pp_collision_name, "background_temperature", background_temperature)) { - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(background_temperature > 0, + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(background_temperature > 0_prt, "For background stopping, the background temperature must be greater than 0"); m_background_temperature_parser = utils::parser::makeParser(std::to_string(background_temperature), {"x", "y", "z", "t"}); @@ -78,7 +80,7 @@ BackgroundStopping::BackgroundStopping (std::string const collision_name) utils::parser::getWithParser( pp_collision_name, "background_charge_state", m_background_charge_state); } - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(m_background_mass > 0, + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(m_background_mass > 0_prt, "For background stopping, the background mass must be greater than 0"); } @@ -90,8 +92,11 @@ BackgroundStopping::doCollisions (amrex::Real cur_time, amrex::Real dt, MultiPar using namespace amrex::literals; auto& species = mypc->GetParticleContainerFromName(m_species_names[0]); - amrex::ParticleReal species_mass = species.getMass(); - amrex::ParticleReal species_charge = species.getCharge(); + amrex::ParticleReal const species_mass = species.getMass(); + amrex::ParticleReal const species_charge = species.getCharge(); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(species_mass > 0_prt, "Error: With background stopping, the species mass must be > 0"); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(species_charge != 0_prt, "Error: With background stopping, the species charge must be nonzero"); BackgroundStoppingType background_type = m_background_type; @@ -138,14 +143,14 @@ void BackgroundStopping::doBackgroundStoppingOnElectronsWithinTile (WarpXParIter using std::sqrt, std::abs, std::log, std::exp; // get particle count - const long np = pti.numParticles(); + long const np = pti.numParticles(); // get background particle mass - amrex::ParticleReal mass_e = m_background_mass; + amrex::ParticleReal const mass_e = m_background_mass; // setup parsers for the background density and temperature - auto n_e_func = m_background_density_func; - auto T_e_func = m_background_temperature_func; + auto const n_e_func = m_background_density_func; + auto const T_e_func = m_background_temperature_func; // get Struct-Of-Array particle data, also called attribs auto& attribs = pti.GetAttribs(); @@ 
-154,7 +159,7 @@ void BackgroundStopping::doBackgroundStoppingOnElectronsWithinTile (WarpXParIter amrex::ParticleReal* const AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr(); // May be needed to evaluate the density and/or temperature functions - auto GetPosition = GetParticlePosition(pti); + auto const GetPosition = GetParticlePosition(pti); amrex::ParallelFor(np, [=] AMREX_GPU_HOST_DEVICE (long ip) @@ -165,6 +170,9 @@ void BackgroundStopping::doBackgroundStoppingOnElectronsWithinTile (WarpXParIter amrex::ParticleReal const n_e = n_e_func(x, y, z, t); amrex::ParticleReal const T_e = T_e_func(x, y, z, t)*PhysConst::kb; + AMREX_ASSERT(n_e > 0_prt); + AMREX_ASSERT(T_e > 0_prt); + // This implements the equation 14.12 from Introduction to Plasma Physics, // Goldston and Rutherford, the slowing down of beam ions due to collisions with electrons. // The equation is written as dV/dt = -alpha*V, and integrated to @@ -178,17 +186,19 @@ void BackgroundStopping::doBackgroundStoppingOnElectronsWithinTile (WarpXParIter amrex::ParticleReal const Zb = abs(species_charge/q_e); - amrex::ParticleReal const vth = sqrt(3._prt*T_e/mass_e); + amrex::ParticleReal const vth = sqrt(3_prt*T_e/mass_e); amrex::ParticleReal const wp = sqrt(n_e*q_e2/(ep0*mass_e)); amrex::ParticleReal const lambdadb = vth/wp; amrex::ParticleReal const lambdadb3 = lambdadb*lambdadb*lambdadb; - amrex::ParticleReal const loglambda = log((12._prt*pi/Zb)*(n_e*lambdadb3)); + amrex::ParticleReal const loglambda = log((12_prt*pi/Zb)*(n_e*lambdadb3)); + + AMREX_ASSERT(loglambda > 0_prt); amrex::ParticleReal const pi32 = pi*sqrt(pi); amrex::ParticleReal const q2 = species_charge*species_charge; amrex::ParticleReal const T32 = T_e*sqrt(T_e); - amrex::ParticleReal const alpha = sqrt(2._prt)*n_e*q2*q_e2*sqrt(mass_e)*loglambda/(12._prt*pi32*ep02*species_mass*T32); + amrex::ParticleReal const alpha = sqrt(2_prt)*n_e*q2*q_e2*sqrt(mass_e)*loglambda/(12_prt*pi32*ep02*species_mass*T32); ux[ip] *= exp(-alpha*dt); uy[ip] *= exp(-alpha*dt); @@ -207,15 +217,15 @@ void BackgroundStopping::doBackgroundStoppingOnIonsWithinTile (WarpXParIter& pti using std::sqrt, std::abs, std::log, std::exp, std::pow; // get particle count - const long np = pti.numParticles(); + long const np = pti.numParticles(); // get background particle mass - amrex::ParticleReal mass_i = m_background_mass; - amrex::ParticleReal charge_state_i = m_background_charge_state; + amrex::ParticleReal const mass_i = m_background_mass; + amrex::ParticleReal const charge_state_i = m_background_charge_state; // setup parsers for the background density and temperature - auto n_i_func = m_background_density_func; - auto T_i_func = m_background_temperature_func; + auto const n_i_func = m_background_density_func; + auto const T_i_func = m_background_temperature_func; // get Struct-Of-Array particle data, also called attribs auto& attribs = pti.GetAttribs(); @@ -224,7 +234,7 @@ void BackgroundStopping::doBackgroundStoppingOnIonsWithinTile (WarpXParIter& pti amrex::ParticleReal* const AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr(); // May be needed to evaluate the density function - auto GetPosition = GetParticlePosition(pti); + auto const GetPosition = GetParticlePosition(pti); amrex::ParallelFor(np, [=] AMREX_GPU_HOST_DEVICE (long ip) @@ -235,6 +245,9 @@ void BackgroundStopping::doBackgroundStoppingOnIonsWithinTile (WarpXParIter& pti amrex::ParticleReal const n_i = n_i_func(x, y, z, t); amrex::ParticleReal const T_i = T_i_func(x, y, z, t)*PhysConst::kb; + AMREX_ASSERT(n_i > 0_prt); + AMREX_ASSERT(T_i > 
0_prt); + // This implements the equation 14.20 from Introduction to Plasma Physics, // Goldston and Rutherford, the slowing down of beam ions due to collisions with electrons. // The equation is written with energy, W, as dW/dt = -alpha/W**0.5, and integrated to @@ -250,19 +263,21 @@ void BackgroundStopping::doBackgroundStoppingOnIonsWithinTile (WarpXParIter& pti amrex::ParticleReal const qb2 = species_charge*species_charge; amrex::ParticleReal const Zb = abs(species_charge/q_e); - amrex::ParticleReal const vth = sqrt(3._prt*T_i/mass_i); + amrex::ParticleReal const vth = sqrt(3_prt*T_i/mass_i); amrex::ParticleReal const wp = sqrt(n_i*q_e2/(ep0*mass_i)); amrex::ParticleReal const lambdadb = vth/wp; amrex::ParticleReal const lambdadb3 = lambdadb*lambdadb*lambdadb; - amrex::ParticleReal const loglambda = log((12._prt*pi/Zb)*(n_i*lambdadb3)); + amrex::ParticleReal const loglambda = log((12_prt*pi/Zb)*(n_i*lambdadb3)); + + AMREX_ASSERT(loglambda > 0_prt); - amrex::ParticleReal const alpha = sqrt(2._prt)*n_i*qi2*qb2*sqrt(species_mass)*loglambda/(8._prt*pi*ep02*mass_i); + amrex::ParticleReal const alpha = sqrt(2_prt)*n_i*qi2*qb2*sqrt(species_mass)*loglambda/(8_prt*pi*ep02*mass_i); amrex::ParticleReal const W0 = 0.5_prt*species_mass*(ux[ip]*ux[ip] + uy[ip]*uy[ip] + uz[ip]*uz[ip]); amrex::ParticleReal const f1 = pow(W0, 1.5_prt) - 1.5_prt*alpha*dt; // If f1 goes negative, the particle has fully stopped, so set W1 to 0. - amrex::ParticleReal const W1 = pow((f1 > 0._prt ? f1 : 0._prt), 2._prt/3._prt); - amrex::ParticleReal const vscale = (W0 > 0._prt ? std::sqrt(W1/W0) : 0._prt); + amrex::ParticleReal const W1 = pow((f1 > 0_prt ? f1 : 0_prt), 2_prt/3_prt); + amrex::ParticleReal const vscale = (W0 > 0_prt ? std::sqrt(W1/W0) : 0_prt); ux[ip] *= vscale; uy[ip] *= vscale; From 431ef8fc2f652a961adbc9a94545363c191630b7 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 28 Oct 2022 19:18:33 -0700 Subject: [PATCH 0128/1346] Modify naming of species in CI fusion test (#3490) * Generalize species names in fusion tests * Update benchmarks * Correct typo --- .../analysis_deuterium_tritium_fusion.py | 10 +- .../inputs_deuterium_tritium_3d | 124 +++++++++--------- .../inputs_deuterium_tritium_rz | 124 +++++++++--------- .../Deuterium_Tritium_Fusion_3D.json | 16 +-- .../Deuterium_Tritium_Fusion_RZ.json | 16 +-- 5 files changed, 145 insertions(+), 145 deletions(-) diff --git a/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py b/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py index b5f5da683b2..a77613e6374 100755 --- a/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py +++ b/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py @@ -50,12 +50,12 @@ ## Define reactants and products reactant_species = ['deuterium', 'tritium'] -product_species = ['helium', 'neutron'] +product_species = ['helium4', 'neutron'] mass = { 'deuterium': 2.01410177812*scc.m_u, 'tritium': 3.0160492779*scc.m_u, - 'helium': 4.00260325413*scc.m_u, + 'helium4': 4.00260325413*scc.m_u, 'neutron': 1.0013784193052508*scc.m_p } m_reduced = np.product([mass[s] for s in reactant_species])/np.sum([mass[s] for s in reactant_species]) @@ -390,11 +390,11 @@ def main(): data = {} for species_name in reactant_species: - add_species_to_dict(ad_start, data, species_name+str(i), species_name, "start") - add_species_to_dict(ad_end, data, species_name+str(i), species_name, "end") + add_species_to_dict(ad_start, data, species_name+'_'+str(i), species_name, "start") + add_species_to_dict(ad_end, 
data, species_name+'_'+str(i), species_name, "end") for species_name in product_species: - add_species_to_dict(ad_end, data, species_name+str(i), species_name, "end") + add_species_to_dict(ad_end, data, species_name+'_'+str(i), species_name, "end") # General checks that are performed for all tests generic_check(data) diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d b/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d index 575e9c42888..3a8d2002a40 100644 --- a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d +++ b/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d @@ -29,7 +29,7 @@ algo.particle_shape = 1 ################################# ############ PLASMA ############# ################################# -particles.species_names = deuterium1 tritium1 helium1 neutron1 deuterium2 tritium2 helium2 neutron2 +particles.species_names = deuterium_1 tritium_1 helium4_1 neutron_1 deuterium_2 tritium_2 helium4_2 neutron_2 my_constants.m_deuterium = 2.01410177812*m_u my_constants.m_tritium = 3.0160492779*m_u @@ -37,87 +37,87 @@ my_constants.m_reduced = m_deuterium*m_tritium/(m_deuterium+m_tritium) my_constants.keV_to_J = 1.e3*q_e my_constants.Energy_step = 22. * keV_to_J -deuterium1.species_type = deuterium -deuterium1.injection_style = "NRandomPerCell" -deuterium1.num_particles_per_cell = 10000 -deuterium1.profile = constant -deuterium1.density = 1. -deuterium1.momentum_distribution_type = "parse_momentum_function" -deuterium1.momentum_function_ux(x,y,z) = 0. -deuterium1.momentum_function_uy(x,y,z) = 0. +deuterium_1.species_type = deuterium +deuterium_1.injection_style = "NRandomPerCell" +deuterium_1.num_particles_per_cell = 10000 +deuterium_1.profile = constant +deuterium_1.density = 1. +deuterium_1.momentum_distribution_type = "parse_momentum_function" +deuterium_1.momentum_function_ux(x,y,z) = 0. +deuterium_1.momentum_function_uy(x,y,z) = 0. ## Thanks to the floor, all particles in the same cell have the exact same momentum -deuterium1.momentum_function_uz(x,y,z) = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight) -deuterium1.do_not_push = 1 -deuterium1.do_not_deposit = 1 - -tritium1.species_type = tritium -tritium1.injection_style = "NRandomPerCell" -tritium1.num_particles_per_cell = 10000 -tritium1.profile = constant -tritium1.density = 1. -tritium1.momentum_distribution_type = "parse_momentum_function" -tritium1.momentum_function_ux(x,y,z) = 0. -tritium1.momentum_function_uy(x,y,z) = 0. +deuterium_1.momentum_function_uz(x,y,z) = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight) +deuterium_1.do_not_push = 1 +deuterium_1.do_not_deposit = 1 + +tritium_1.species_type = tritium +tritium_1.injection_style = "NRandomPerCell" +tritium_1.num_particles_per_cell = 10000 +tritium_1.profile = constant +tritium_1.density = 1. +tritium_1.momentum_distribution_type = "parse_momentum_function" +tritium_1.momentum_function_ux(x,y,z) = 0. +tritium_1.momentum_function_uy(x,y,z) = 0. 
## Thanks to the floor, all particles in the same cell have the exact same momentum -tritium1.momentum_function_uz(x,y,z) = -sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight) -tritium1.do_not_push = 1 -tritium1.do_not_deposit = 1 +tritium_1.momentum_function_uz(x,y,z) = -sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight) +tritium_1.do_not_push = 1 +tritium_1.do_not_deposit = 1 -helium1.species_type = helium4 -helium1.do_not_push = 1 -helium1.do_not_deposit = 1 +helium4_1.species_type = helium4 +helium4_1.do_not_push = 1 +helium4_1.do_not_deposit = 1 -neutron1.species_type = neutron -neutron1.do_not_push = 1 -neutron1.do_not_deposit = 1 +neutron_1.species_type = neutron +neutron_1.do_not_push = 1 +neutron_1.do_not_deposit = 1 my_constants.background_dens = 1.e26 my_constants.beam_dens = 1.e20 -deuterium2.species_type = deuterium -deuterium2.injection_style = "NRandomPerCell" -deuterium2.num_particles_per_cell = 1000 -deuterium2.profile = "parse_density_function" +deuterium_2.species_type = deuterium +deuterium_2.injection_style = "NRandomPerCell" +deuterium_2.num_particles_per_cell = 1000 +deuterium_2.profile = "parse_density_function" ## A tenth of the macroparticles in each cell is made of immobile high-density background deuteriums. ## The other nine tenths are made of fast low-density beam deuteriums. -deuterium2.density_function(x,y,z) = if(y - floor(y) < 0.1, 10.*background_dens, 10./9.*beam_dens) -deuterium2.momentum_distribution_type = "parse_momentum_function" -deuterium2.momentum_function_ux(x,y,z) = 0. -deuterium2.momentum_function_uy(x,y,z) = 0. -deuterium2.momentum_function_uz(x,y,z) = "if(y - floor(y) < 0.1, +deuterium_2.density_function(x,y,z) = if(y - floor(y) < 0.1, 10.*background_dens, 10./9.*beam_dens) +deuterium_2.momentum_distribution_type = "parse_momentum_function" +deuterium_2.momentum_function_ux(x,y,z) = 0. +deuterium_2.momentum_function_uy(x,y,z) = 0. 
+deuterium_2.momentum_function_uz(x,y,z) = "if(y - floor(y) < 0.1, 0., sqrt(2*m_deuterium*Energy_step*(floor(z)**2))/(m_deuterium*clight))" -deuterium2.do_not_push = 1 -deuterium2.do_not_deposit = 1 - -tritium2.species_type = tritium -tritium2.injection_style = "NRandomPerCell" -tritium2.num_particles_per_cell = 100 -tritium2.profile = constant -tritium2.density = background_dens -tritium2.momentum_distribution_type = "constant" -tritium2.do_not_push = 1 -tritium2.do_not_deposit = 1 - -helium2.species_type = helium4 -helium2.do_not_push = 1 -helium2.do_not_deposit = 1 - -neutron2.species_type = neutron -neutron2.do_not_push = 1 -neutron2.do_not_deposit = 1 +deuterium_2.do_not_push = 1 +deuterium_2.do_not_deposit = 1 + +tritium_2.species_type = tritium +tritium_2.injection_style = "NRandomPerCell" +tritium_2.num_particles_per_cell = 100 +tritium_2.profile = constant +tritium_2.density = background_dens +tritium_2.momentum_distribution_type = "constant" +tritium_2.do_not_push = 1 +tritium_2.do_not_deposit = 1 + +helium4_2.species_type = helium4 +helium4_2.do_not_push = 1 +helium4_2.do_not_deposit = 1 + +neutron_2.species_type = neutron +neutron_2.do_not_push = 1 +neutron_2.do_not_deposit = 1 ################################# ############ COLLISION ########## ################################# collisions.collision_names = DTF1 DTF2 -DTF1.species = deuterium1 tritium1 -DTF1.product_species = helium1 neutron1 +DTF1.species = deuterium_1 tritium_1 +DTF1.product_species = helium4_1 neutron_1 DTF1.type = nuclearfusion DTF1.fusion_multiplier = 1.e50 -DTF2.species = deuterium2 tritium2 -DTF2.product_species = helium2 neutron2 +DTF2.species = deuterium_2 tritium_2 +DTF2.product_species = helium4_2 neutron_2 DTF2.type = nuclearfusion DTF2.fusion_multiplier = 1.e15 DTF2.fusion_probability_target_value = 0.02 diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz b/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz index fb581c82535..67a8aa14840 100644 --- a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz +++ b/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz @@ -29,7 +29,7 @@ algo.particle_shape = 1 ################################# ############ PLASMA ############# ################################# -particles.species_names = deuterium1 tritium1 helium1 neutron1 deuterium2 tritium2 helium2 neutron2 +particles.species_names = deuterium_1 tritium_1 helium4_1 neutron_1 deuterium_2 tritium_2 helium4_2 neutron_2 my_constants.m_deuterium = 2.01410177812*m_u my_constants.m_tritium = 3.0160492779*m_u @@ -37,87 +37,87 @@ my_constants.m_reduced = m_deuterium*m_tritium/(m_deuterium+m_tritium) my_constants.keV_to_J = 1.e3*q_e my_constants.Energy_step = 22. * keV_to_J -deuterium1.species_type = deuterium -deuterium1.injection_style = "NRandomPerCell" -deuterium1.num_particles_per_cell = 80000 -deuterium1.profile = constant -deuterium1.density = 1. -deuterium1.momentum_distribution_type = parse_momentum_function +deuterium_1.species_type = deuterium +deuterium_1.injection_style = "NRandomPerCell" +deuterium_1.num_particles_per_cell = 80000 +deuterium_1.profile = constant +deuterium_1.density = 1. 
+deuterium_1.momentum_distribution_type = parse_momentum_function ## Thanks to the floor, all particles in the same cell have the exact same momentum -deuterium1.momentum_function_ux(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight); if(x*x+y*y>0.0, -u*y/sqrt(x*x+y*y), 0.0)" # azimuthal velocity -deuterium1.momentum_function_uy(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight); if(x*x+y*y>0.0, u*x/sqrt(x*x+y*y), 0.0)" # azimuthal velocity -deuterium1.momentum_function_uz(x,y,z) = "0" -deuterium1.do_not_push = 1 -deuterium1.do_not_deposit = 1 - -tritium1.species_type = tritium -tritium1.injection_style = "NRandomPerCell" -tritium1.num_particles_per_cell = 80000 -tritium1.profile = constant -tritium1.density = 1. -tritium1.momentum_distribution_type = "parse_momentum_function" +deuterium_1.momentum_function_ux(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight); if(x*x+y*y>0.0, -u*y/sqrt(x*x+y*y), 0.0)" # azimuthal velocity +deuterium_1.momentum_function_uy(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_deuterium*clight); if(x*x+y*y>0.0, u*x/sqrt(x*x+y*y), 0.0)" # azimuthal velocity +deuterium_1.momentum_function_uz(x,y,z) = "0" +deuterium_1.do_not_push = 1 +deuterium_1.do_not_deposit = 1 + +tritium_1.species_type = tritium +tritium_1.injection_style = "NRandomPerCell" +tritium_1.num_particles_per_cell = 80000 +tritium_1.profile = constant +tritium_1.density = 1. +tritium_1.momentum_distribution_type = "parse_momentum_function" ## Thanks to the floor, all particles in the same cell have the exact same momentum -tritium1.momentum_function_ux(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight); if(x*x+y*y>0.0, u*y/sqrt(x*x+y*y), 0.0)" # counter-streaming azimuthal velocity -tritium1.momentum_function_uy(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight); if(x*x+y*y>0.0, -u*x/sqrt(x*x+y*y), 0.0)" # counter-streaming azimuthal velocity -tritium1.momentum_function_uz(x,y,z) = 0 -tritium1.do_not_push = 1 -tritium1.do_not_deposit = 1 +tritium_1.momentum_function_ux(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight); if(x*x+y*y>0.0, u*y/sqrt(x*x+y*y), 0.0)" # counter-streaming azimuthal velocity +tritium_1.momentum_function_uy(x,y,z) = "u = sqrt(2*m_reduced*Energy_step*(floor(z)**2))/(m_tritium*clight); if(x*x+y*y>0.0, -u*x/sqrt(x*x+y*y), 0.0)" # counter-streaming azimuthal velocity +tritium_1.momentum_function_uz(x,y,z) = 0 +tritium_1.do_not_push = 1 +tritium_1.do_not_deposit = 1 -helium1.species_type = helium4 -helium1.do_not_push = 1 -helium1.do_not_deposit = 1 +helium4_1.species_type = helium4 +helium4_1.do_not_push = 1 +helium4_1.do_not_deposit = 1 -neutron1.species_type = neutron -neutron1.do_not_push = 1 -neutron1.do_not_deposit = 1 +neutron_1.species_type = neutron +neutron_1.do_not_push = 1 +neutron_1.do_not_deposit = 1 my_constants.background_dens = 1.e26 my_constants.beam_dens = 1.e20 -deuterium2.species_type = deuterium -deuterium2.injection_style = "NRandomPerCell" -deuterium2.num_particles_per_cell = 8000 -deuterium2.profile = "parse_density_function" +deuterium_2.species_type = deuterium +deuterium_2.injection_style = "NRandomPerCell" +deuterium_2.num_particles_per_cell = 8000 +deuterium_2.profile = "parse_density_function" ## A tenth of the macroparticles in each cell is made of immobile high-density background deuteriums. ## The other nine tenths are made of fast low-density beam deuteriums. 
-deuterium2.density_function(x,y,z) = if(y - floor(y) < 0.1, 10.*background_dens, 10./9.*beam_dens) -deuterium2.momentum_distribution_type = "parse_momentum_function" -deuterium2.momentum_function_ux(x,y,z) = 0. -deuterium2.momentum_function_uy(x,y,z) = 0. -deuterium2.momentum_function_uz(x,y,z) = "if(y - floor(y) < 0.1, +deuterium_2.density_function(x,y,z) = if(y - floor(y) < 0.1, 10.*background_dens, 10./9.*beam_dens) +deuterium_2.momentum_distribution_type = "parse_momentum_function" +deuterium_2.momentum_function_ux(x,y,z) = 0. +deuterium_2.momentum_function_uy(x,y,z) = 0. +deuterium_2.momentum_function_uz(x,y,z) = "if(y - floor(y) < 0.1, 0., sqrt(2*m_deuterium*Energy_step*(floor(z)**2))/(m_deuterium*clight))" -deuterium2.do_not_push = 1 -deuterium2.do_not_deposit = 1 - -tritium2.species_type = tritium -tritium2.injection_style = "NRandomPerCell" -tritium2.num_particles_per_cell = 800 -tritium2.profile = constant -tritium2.density = background_dens -tritium2.momentum_distribution_type = "constant" -tritium2.do_not_push = 1 -tritium2.do_not_deposit = 1 - -helium2.species_type = helium4 -helium2.do_not_push = 1 -helium2.do_not_deposit = 1 - -neutron2.species_type = neutron -neutron2.do_not_push = 1 -neutron2.do_not_deposit = 1 +deuterium_2.do_not_push = 1 +deuterium_2.do_not_deposit = 1 + +tritium_2.species_type = tritium +tritium_2.injection_style = "NRandomPerCell" +tritium_2.num_particles_per_cell = 800 +tritium_2.profile = constant +tritium_2.density = background_dens +tritium_2.momentum_distribution_type = "constant" +tritium_2.do_not_push = 1 +tritium_2.do_not_deposit = 1 + +helium4_2.species_type = helium4 +helium4_2.do_not_push = 1 +helium4_2.do_not_deposit = 1 + +neutron_2.species_type = neutron +neutron_2.do_not_push = 1 +neutron_2.do_not_deposit = 1 ################################# ############ COLLISION ########## ################################# collisions.collision_names = DTF1 DTF2 -DTF1.species = deuterium1 tritium1 -DTF1.product_species = helium1 neutron1 +DTF1.species = deuterium_1 tritium_1 +DTF1.product_species = helium4_1 neutron_1 DTF1.type = nuclearfusion DTF1.fusion_multiplier = 1.e50 -DTF2.species = deuterium2 tritium2 -DTF2.product_species = helium2 neutron2 +DTF2.species = deuterium_2 tritium_2 +DTF2.product_species = helium4_2 neutron_2 DTF2.type = nuclearfusion DTF2.fusion_multiplier = 1.e15 DTF2.fusion_probability_target_value = 0.02 diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json index 2c640846857..382c6010c1c 100644 --- a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json +++ b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_3D.json @@ -1,5 +1,5 @@ { - "deuterium1": { + "deuterium_1": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 2.8875978729147693e-13, @@ -8,7 +8,7 @@ "particle_position_z": 81919021.52308556, "particle_weight": 1024.000000000021 }, - "deuterium2": { + "deuterium_2": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 3.356807324363973e-14, @@ -17,7 +17,7 @@ "particle_position_z": 8191737.5566503005, "particle_weight": 1.0227810240779905e+29 }, - "helium1": { + "helium4_1": { "particle_momentum_x": 1.7519716491839538e-15, "particle_momentum_y": 1.7523289312260283e-15, "particle_momentum_z": 1.7480231586369996e-15, @@ -26,7 +26,7 @@ "particle_position_z": 325970.4138010667, "particle_weight": 4.421535775967805e-28 }, - "helium2": { + 
"helium4_2": { "particle_momentum_x": 1.5330942227771018e-15, "particle_momentum_y": 1.5328473121602395e-15, "particle_momentum_z": 1.7635828326228758e-15, @@ -38,7 +38,7 @@ "lev=0": { "rho": 0.0 }, - "neutron1": { + "neutron_1": { "particle_momentum_x": 1.7519716491839538e-15, "particle_momentum_y": 1.7523289312260283e-15, "particle_momentum_z": 1.7480231586369996e-15, @@ -47,7 +47,7 @@ "particle_position_z": 325970.4138010667, "particle_weight": 4.421535775967805e-28 }, - "neutron2": { + "neutron_2": { "particle_momentum_x": 1.5330942227771018e-15, "particle_momentum_y": 1.5328473121602395e-15, "particle_momentum_z": 1.549297051563983e-15, @@ -56,7 +56,7 @@ "particle_position_z": 290143.4673994485, "particle_weight": 5.756530048087129e+18 }, - "tritium1": { + "tritium_1": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 2.8875978729147693e-13, @@ -65,7 +65,7 @@ "particle_position_z": 81920546.19181262, "particle_weight": 1024.000000000021 }, - "tritium2": { + "tritium_2": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json index a5136dda644..3fb0db8d8a5 100644 --- a/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json +++ b/Regression/Checksum/benchmarks_json/Deuterium_Tritium_Fusion_RZ.json @@ -1,5 +1,5 @@ { - "deuterium1": { + "deuterium_1": { "particle_momentum_x": 1.8388106511899905e-13, "particle_momentum_y": 1.837868790009435e-13, "particle_momentum_z": 0.0, @@ -8,7 +8,7 @@ "particle_theta": 32166860.23003994, "particle_weight": 3216.984554806547 }, - "deuterium2": { + "deuterium_2": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 3.336364094249911e-14, @@ -17,7 +17,7 @@ "particle_theta": 3216444.348910214, "particle_weight": 3.1898417901971444e+29 }, - "helium1": { + "helium4_1": { "particle_momentum_x": 1.858124399143442e-15, "particle_momentum_y": 1.876715110797694e-15, "particle_momentum_z": 1.7098432207359157e-15, @@ -26,7 +26,7 @@ "particle_theta": 120064.13771707338, "particle_weight": 1.603083276067953e-27 }, - "helium2": { + "helium4_2": { "particle_momentum_x": 1.5195006688950936e-15, "particle_momentum_y": 1.52430083815551e-15, "particle_momentum_z": 1.7654865863613367e-15, @@ -38,7 +38,7 @@ "lev=0": { "rho": 0.0 }, - "neutron1": { + "neutron_1": { "particle_momentum_x": 1.7160671487712845e-15, "particle_momentum_y": 1.7154753069055672e-15, "particle_momentum_z": 1.7098432207359157e-15, @@ -47,7 +47,7 @@ "particle_theta": 120064.13771707338, "particle_weight": 1.603083276067953e-27 }, - "neutron2": { + "neutron_2": { "particle_momentum_x": 1.5195006688950936e-15, "particle_momentum_y": 1.52430083815551e-15, "particle_momentum_z": 1.5463311225724366e-15, @@ -56,7 +56,7 @@ "particle_theta": 107912.20520382549, "particle_weight": 2.0862696876352987e+19 }, - "tritium1": { + "tritium_1": { "particle_momentum_x": 1.8384658063720362e-13, "particle_momentum_y": 1.8381593257898129e-13, "particle_momentum_z": 0.0, @@ -65,7 +65,7 @@ "particle_theta": 32163925.891884565, "particle_weight": 3217.0912552970394 }, - "tritium2": { + "tritium_2": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, From 840216b5ac71a4ff566abff4bfae10e084f9f56f Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Mon, 31 Oct 2022 10:37:43 -0700 
Subject: [PATCH 0129/1346] BTD fields with RZ + openPMD - single mode only (#3350) * cell center BTD functors for RZ with openpmd * add RZ modes to output varnames too * update varnames once and set map for RZ fields in BTfunctor * fix access to varname field instead of unallocated m_varnames, and update hi ki-index after flush * set back trasnformed fields to false if fields_to_plot is none. Separate PArticle and Field io further. Also, clean up Particle Buffer initialize so its consistent with Field box, boxarray, dmap, and geom * clean commented line * assert for single rz mode * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * remove unused var * Apply suggestions from code review From Axels' review Co-authored-by: Axel Huebl * adding comments, doxygen, and clean-up Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl --- Source/Diagnostics/BTDiagnostics.H | 31 ++ Source/Diagnostics/BTDiagnostics.cpp | 389 ++++++++++++------ .../BackTransformFunctor.cpp | 15 + 3 files changed, 317 insertions(+), 118 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index ed8cd237bc6..c25a031d73e 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -74,6 +74,25 @@ private: * is initialized. */ void InitializeFieldFunctors (int lev) override; + /** Initialize functors that store pointers to the fields in RZ requested by the user. + * Additionally, the cell-center functors that stores pointers to all fields, + * namely, Er, Et, Ez, Br, Bt, Bz, jr, jt, jz, and rho is also initialized. + * \param[in] lev level on which the vector of unique_ptrs to field functors + * is initialized. + */ + void InitializeFieldFunctorsRZopenPMD (int lev) override; + /** Populating m_varnames with real and imaginary parts of each RZ mode. + * Modes are numbered from 0 to (nmodes-1) and mode 0 is purely real. + * Both m_cellcenter_varnames (storing cell-centered data) and + * m_varnames (storing back-transformed field data) are modified to include RZ modes to include + * field_modeid_real/imag. For example, for Er with two modes, the varnames are + * Er_0_real, Er_1_real, Er_1_imag + * \param[in] field field-name + * \param[in] ncomp number of rz components (if 2 modes, the ncomp is 2*nmodes-1) + * \param[in] cellcenter_data if true, m_cellcenter_varnames are updated + if false, m_varnames is updated + */ + void AddRZModesToOutputNames (const std::string& field, const int ncomp, bool cellcenter_data); /** This function allocates and initializes particle buffers for all the snapshots. * This is currently an empty function: * The particle containers required for this must be added to populate this function. @@ -313,9 +332,20 @@ private: * All the fields are stored regardless of the specific fields to plot selected * by the user. 
*/ +#ifdef WARPX_DIM_RZ + amrex::Vector< std::string > m_cellcenter_varnames = {"Er", "Et", "Ez", + "Br", "Bt", "Bz", + "jr", "jt", "jz", "rho"}; + amrex::Vector< std::string > m_cellcenter_varnames_fields = {"Er", "Et", "Ez", + "Br", "Bt", "Bz", + "jr", "jt", "jz", + "rho"}; +#else amrex::Vector< std::string > m_cellcenter_varnames = {"Ex", "Ey", "Ez", "Bx", "By", "Bz", "jx", "jy", "jz", "rho"}; +#endif + /** Merge the lab-frame buffer multifabs so it can be visualized as * a single plotfile @@ -356,6 +386,7 @@ private: void ClearParticleBuffer(int i_buffer); /** Redistributes particles to the buffer box array in the lab-frame */ void RedistributeParticleBuffer (const int i_buffer); + void UpdateVarnamesForRZopenPMD(); }; #endif // WARPX_BTDIAGNOSTICS_H_ diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 0e8a8424488..510c1b94692 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -167,6 +167,9 @@ void BTDiagnostics::DerivedInitData () "BTD", warn_string, ablastr::warn_manager::WarnPriority::low); } +#ifdef WARPX_DIM_RZ + UpdateVarnamesForRZopenPMD(); +#endif } void @@ -195,14 +198,15 @@ BTDiagnostics::ReadParameters () m_crse_ratio == amrex::IntVect(1), "Only support for coarsening ratio of 1 in all directions is included for BTD\n" ); - + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(WarpX::n_rz_azimuthal_modes==1, "Currently only one mode is supported for BTD"); // Read list of back-transform diag parameters requested by the user // amrex::ParmParse pp_diag_name(m_diag_name); m_file_prefix = "diags/" + m_diag_name; pp_diag_name.query("file_prefix", m_file_prefix); pp_diag_name.query("do_back_transformed_fields", m_do_back_transformed_fields); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_do_back_transformed_fields, " fields must be turned on for the new back-transformed diagnostics"); + pp_diag_name.query("do_back_transformed_particles", m_do_back_transformed_particles); + AMREX_ALWAYS_ASSERT(m_do_back_transformed_fields or m_do_back_transformed_particles); if (m_do_back_transformed_fields == false) m_varnames.clear(); @@ -232,10 +236,15 @@ BTDiagnostics::ReadParameters () if (utils::parser::queryWithParser(pp_diag_name, "buffer_size", m_buffer_size)) { if(m_max_box_size < m_buffer_size) m_max_box_size = m_buffer_size; } - +#ifdef WARPX_DIM_RZ + amrex::Vector< std::string > BTD_varnames_supported = {"Er", "Et", "Ez", + "Br", "Bt", "Bz", + "jr", "jt", "jz", "rho"}; +#else amrex::Vector< std::string > BTD_varnames_supported = {"Ex", "Ey", "Ez", "Bx", "By", "Bz", "jx", "jy", "jz", "rho"}; +#endif for (const auto& var : m_varnames) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -243,11 +252,15 @@ BTDiagnostics::ReadParameters () "Input error: field variable " + var + " in " + m_diag_name + ".fields_to_plot is not supported for BackTransformed diagnostics." 
+ " Currently supported field variables for BackTransformed diagnostics " - + "include Ex, Ey, Ez, Bx, By, Bz, jx, jy, jz, and rho"); + + "include Ex, Ey, Ez, Bx, By, Bz, jx, jy, jz, and rho in Cartesian coordinates and " + + "Er, Et, Ez, Br, Bt, Bz, jr, jt, jz, and rho in cylindrical (RZ coordinates)"); } bool particle_fields_to_plot_specified = pp_diag_name.queryarr("particle_fields_to_plot", m_pfield_varnames); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!particle_fields_to_plot_specified, "particle_fields_to_plot is currently not supported for BackTransformed Diagnostics"); + if (m_varnames.size() == 0) { + m_do_back_transformed_fields = false; + } } @@ -441,6 +454,7 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev) dz_lab(warpx.getdt(lev), ref_ratio[m_moving_window_dir]); m_snapshot_domain_lab[i_buffer].setLo(m_moving_window_dir, new_lo); // cell-centered index that corresponds to the hi-end of the lab-frame in the z-direction + // Adding 0.5 dz_lab so that we obtain the cell-centered index consistent to the hi-end int snapshot_kindex_hi = static_cast(floor( ( m_snapshot_domain_lab[i_buffer].hi(m_moving_window_dir) - (m_snapshot_domain_lab[i_buffer].lo(m_moving_window_dir) @@ -450,6 +464,9 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev) m_snapshot_box[i_buffer].setBig( m_moving_window_dir, snapshot_kindex_hi); m_snapshot_box[i_buffer].setSmall( m_moving_window_dir, snapshot_kindex_hi - (num_z_cells_in_snapshot-1) ); + // Setting hi k-index for the first buffer + m_buffer_k_index_hi[i_buffer] = m_snapshot_box[i_buffer].bigEnd(m_moving_window_dir); + } void @@ -475,6 +492,14 @@ BTDiagnostics::InitializeFieldFunctors (int lev) // Initialize fields functors only if do_back_transformed_fields is selected if (m_do_back_transformed_fields == false) return; +#ifdef WARPX_DIM_RZ + // For RZ, initialize field functors RZ for openpmd + // This is a specialized call for intializing cell-center functors + // such that, all modes of a field component are stored contiguously + // For example, Er0, Er1_real, Er1_imag, etc + InitializeFieldFunctorsRZopenPMD(lev); +#else + auto & warpx = WarpX::GetInstance(); // Clear any pre-existing vector to release stored data // This ensures that when domain is load-balanced, the functors point @@ -526,6 +551,148 @@ BTDiagnostics::InitializeFieldFunctors (int lev) } } +#endif +} + + +void +BTDiagnostics::UpdateVarnamesForRZopenPMD () +{ +#ifdef WARPX_DIM_RZ + auto & warpx = WarpX::GetInstance(); + int ncomp_multimodefab = warpx.get_pointer_Efield_aux(0,0)->nComp(); + int ncomp = ncomp_multimodefab; + + + bool update_varnames = true; + if (update_varnames) { + const int n_rz = ncomp * m_varnames_fields.size(); + m_varnames.clear(); + m_varnames.reserve(n_rz); + } + // AddRZ modes to output names for the back-transformed data + if (update_varnames) { + for (int comp=0, n=m_varnames_fields.size(); compnComp(); + int ncomp = ncomp_multimodefab; + // Clear any pre-existing vector to release stored data + // This ensures that when domain is load-balanced, the functors point + // to the correct field-data pointers + m_all_field_functors[lev].clear(); + // For back-transformed data, all the components are cell-centered and stored + // in a single multifab, m_cell_centered_data. 
+ // Therefore, size of functors at all levels is 1 + int num_BT_functors = 1; + m_all_field_functors[lev].resize(num_BT_functors); + for (int i = 0; i < num_BT_functors; ++i) { + int nvars = static_cast(m_varnames.size()); + m_all_field_functors[lev][i] = std::make_unique( + m_cell_centered_data[lev].get(), lev, + nvars, m_num_buffers, m_varnames); + } + + // Reset field functors for cell-center multifab + m_cell_center_functors[lev].clear(); + m_cell_center_functors[lev].resize(m_cellcenter_varnames_fields.size()); + + for (int comp=0, n=m_cell_center_functors.at(lev).size(); comp(warpx.get_pointer_Efield_aux(lev, 0), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "Et" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_Efield_aux(lev, 1), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "Ez" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_Efield_aux(lev, 2), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "Br" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_Bfield_aux(lev, 0), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "Bt" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_Bfield_aux(lev, 1), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "Bz" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_Bfield_aux(lev, 2), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "jr" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_current_fp(lev, 0), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "jt" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_current_fp(lev, 1), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "jz" ){ + m_cell_center_functors[lev][comp] = std::make_unique(warpx.get_pointer_current_fp(lev, 2), lev, m_crse_ratio, false, ncomp); + } else if ( m_cellcenter_varnames_fields[comp] == "rho" ){ + m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, -1, false, ncomp); + } + } + +#endif + amrex::ignore_unused(lev); +} + +void +BTDiagnostics::AddRZModesToOutputNames (const std::string& field, const int ncomp, bool cellcenter_data) +{ +#ifdef WARPX_DIM_RZ + // In cylindrical geometry, real and imag part of each mode are also + // dumped to file separately, so they need to be added to m_varnames + // we number modes from 0 to (nmodes-1); + // mode 0 is purely real, all higher modes are complex + int const nmodes = (ncomp+1)/2; + + if (cellcenter_data) { + m_cellcenter_varnames.push_back( field + "_0_real" ); + for (int ic=1; ic < nmodes; ic++) { + m_cellcenter_varnames.push_back( field + "_" + std::to_string(ic) + "_real" ); + m_cellcenter_varnames.push_back( field + "_" + std::to_string(ic) + "_imag" ); + } + } else { + m_varnames.push_back(field + "_0_real"); + for (int ic=1; ic < nmodes; ic++) { + m_varnames.push_back( field + "_" + std::to_string(ic) + "_real" ); + m_varnames.push_back( field + "_" + std::to_string(ic) + "_imag" ); + } + } +#else + amrex::ignore_unused(field, ncomp, cellcenter_data); +#endif } void @@ -683,63 +850,59 @@ BTDiagnostics::SetSnapshotFullStatus (const int i_buffer) void BTDiagnostics::DefineFieldBufferMultiFab (const int i_buffer, const int lev) { - if ( 
m_do_back_transformed_fields ) { - auto & warpx = WarpX::GetInstance(); - - const int hi_k_lab = m_buffer_k_index_hi[i_buffer]; - m_buffer_box[i_buffer].setSmall( m_moving_window_dir, hi_k_lab - m_buffer_size + 1); - m_buffer_box[i_buffer].setBig( m_moving_window_dir, hi_k_lab ); - // Setting hi k-index for the next buffer - m_buffer_k_index_hi[i_buffer] = m_buffer_box[i_buffer].smallEnd(m_moving_window_dir) - 1; - amrex::BoxArray buffer_ba( m_buffer_box[i_buffer] ); - buffer_ba.maxSize(m_max_box_size); - // Generate a new distribution map for the back-transformed buffer multifab - amrex::DistributionMapping buffer_dmap(buffer_ba); - // Number of guard cells for the output buffer is zero. - // Unlike FullDiagnostics, "m_format == sensei" option is not included here. - int ngrow = 0; - m_mf_output[i_buffer][lev] = amrex::MultiFab( buffer_ba, buffer_dmap, - m_varnames.size(), ngrow ); - m_mf_output[i_buffer][lev].setVal(0.); - - amrex::IntVect ref_ratio = amrex::IntVect(1); - if (lev > 0 ) ref_ratio = WarpX::RefRatio(lev-1); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - amrex::Real cellsize; - if (idim < WARPX_ZINDEX) { - cellsize = warpx.Geom(lev).CellSize(idim); - } else { - cellsize = dz_lab(warpx.getdt(lev), ref_ratio[m_moving_window_dir]); - } - amrex::Real buffer_lo = m_snapshot_domain_lab[i_buffer].lo(idim) - + ( buffer_ba.getCellCenteredBox(0).smallEnd(idim) - - m_snapshot_box[i_buffer].smallEnd(idim) - ) * cellsize; - amrex::Real buffer_hi = m_snapshot_domain_lab[i_buffer].lo(idim) - + ( buffer_ba.getCellCenteredBox( buffer_ba.size()-1 ).bigEnd(idim) - - m_snapshot_box[i_buffer].smallEnd(idim) - + 1 ) * cellsize; - m_buffer_domain_lab[i_buffer].setLo(idim, buffer_lo); - m_buffer_domain_lab[i_buffer].setHi(idim, buffer_hi); - } + auto & warpx = WarpX::GetInstance(); + + const int hi_k_lab = m_buffer_k_index_hi[i_buffer]; + m_buffer_box[i_buffer].setSmall( m_moving_window_dir, hi_k_lab - m_buffer_size + 1); + m_buffer_box[i_buffer].setBig( m_moving_window_dir, hi_k_lab ); + amrex::BoxArray buffer_ba( m_buffer_box[i_buffer] ); + buffer_ba.maxSize(m_max_box_size); + // Generate a new distribution map for the back-transformed buffer multifab + amrex::DistributionMapping buffer_dmap(buffer_ba); + // Number of guard cells for the output buffer is zero. + // Unlike FullDiagnostics, "m_format == sensei" option is not included here. + int ngrow = 0; + m_mf_output[i_buffer][lev] = amrex::MultiFab( buffer_ba, buffer_dmap, + m_varnames.size(), ngrow ); + m_mf_output[i_buffer][lev].setVal(0.); - // Define the geometry object at level, lev, for the ith buffer. - if (lev == 0) { - // The extent of the physical domain covered by the ith buffer mf, m_mf_output - // Default non-periodic geometry for diags - amrex::Vector BTdiag_periodicity(AMREX_SPACEDIM, 0); - // Box covering the extent of the user-defined diag in the back-transformed frame - amrex::Box domain = buffer_ba.minimalBox(); - // define the geometry object for the ith buffer using Physical co-oridnates - // of m_buffer_domain_lab[i_buffer]. 
- m_geom_output[i_buffer][lev].define( domain, &m_buffer_domain_lab[i_buffer], - amrex::CoordSys::cartesian, - BTdiag_periodicity.data() ); - } else if (lev > 0 ) { - // Refine the geometry object defined at the previous level, lev-1 - m_geom_output[i_buffer][lev] = amrex::refine( m_geom_output[i_buffer][lev-1], - warpx.RefRatio(lev-1) ); + amrex::IntVect ref_ratio = amrex::IntVect(1); + if (lev > 0 ) ref_ratio = WarpX::RefRatio(lev-1); + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + amrex::Real cellsize; + if (idim < WARPX_ZINDEX) { + cellsize = warpx.Geom(lev).CellSize(idim); + } else { + cellsize = dz_lab(warpx.getdt(lev), ref_ratio[m_moving_window_dir]); } + amrex::Real buffer_lo = m_snapshot_domain_lab[i_buffer].lo(idim) + + ( buffer_ba.getCellCenteredBox(0).smallEnd(idim) + - m_snapshot_box[i_buffer].smallEnd(idim) + ) * cellsize; + amrex::Real buffer_hi = m_snapshot_domain_lab[i_buffer].lo(idim) + + ( buffer_ba.getCellCenteredBox( buffer_ba.size()-1 ).bigEnd(idim) + - m_snapshot_box[i_buffer].smallEnd(idim) + + 1 ) * cellsize; + m_buffer_domain_lab[i_buffer].setLo(idim, buffer_lo); + m_buffer_domain_lab[i_buffer].setHi(idim, buffer_hi); + } + + // Define the geometry object at level, lev, for the ith buffer. + if (lev == 0) { + // The extent of the physical domain covered by the ith buffer mf, m_mf_output + // Default non-periodic geometry for diags + amrex::Vector BTdiag_periodicity(AMREX_SPACEDIM, 0); + // Box covering the extent of the user-defined diag in the back-transformed frame + amrex::Box domain = buffer_ba.minimalBox(); + // define the geometry object for the ith buffer using Physical co-oridnates + // of m_buffer_domain_lab[i_buffer]. + m_geom_output[i_buffer][lev].define( domain, &m_buffer_domain_lab[i_buffer], + amrex::CoordSys::cartesian, + BTdiag_periodicity.data() ); + } else if (lev > 0 ) { + // Refine the geometry object defined at the previous level, lev-1 + m_geom_output[i_buffer][lev] = amrex::refine( m_geom_output[i_buffer][lev-1], + warpx.RefRatio(lev-1) ); } } @@ -747,27 +910,22 @@ BTDiagnostics::DefineFieldBufferMultiFab (const int i_buffer, const int lev) void BTDiagnostics::DefineSnapshotGeometry (const int i_buffer, const int lev) { - if ( m_do_back_transformed_fields ) { - auto & warpx = WarpX::GetInstance(); - // Setting hi k-index for the first buffer - m_buffer_k_index_hi[i_buffer] = m_snapshot_box[i_buffer].bigEnd(m_moving_window_dir); - - if (lev == 0) { - // Default non-periodic geometry for diags - amrex::Vector BTdiag_periodicity(AMREX_SPACEDIM, 0); - // Define the geometry object for the ith snapshot using Physical co-oridnates - // of m_snapshot_domain_lab[i_buffer], that corresponds to the full snapshot - // in the back-transformed frame - m_geom_snapshot[i_buffer][lev].define( m_snapshot_box[i_buffer], - &m_snapshot_domain_lab[i_buffer], - amrex::CoordSys::cartesian, - BTdiag_periodicity.data() ); - - } else if (lev > 0) { - // Refine the geometry object defined at the previous level, lev-1 - m_geom_snapshot[i_buffer][lev] = amrex::refine( m_geom_snapshot[i_buffer][lev-1], - warpx.RefRatio(lev-1) ); - } + auto & warpx = WarpX::GetInstance(); + + if (lev == 0) { + // Default non-periodic geometry for diags + amrex::Vector BTdiag_periodicity(AMREX_SPACEDIM, 0); + // Define the geometry object for the ith snapshot using Physical co-oridnates + // of m_snapshot_domain_lab[i_buffer], that corresponds to the full snapshot + // in the back-transformed frame + m_geom_snapshot[i_buffer][lev].define( m_snapshot_box[i_buffer], + 
&m_snapshot_domain_lab[i_buffer], + amrex::CoordSys::cartesian, + BTdiag_periodicity.data() ); + } else if (lev > 0) { + // Refine the geometry object defined at the previous level, lev-1 + m_geom_snapshot[i_buffer][lev] = amrex::refine( m_geom_snapshot[i_buffer][lev-1], + warpx.RefRatio(lev-1) ); } } @@ -846,9 +1004,12 @@ BTDiagnostics::Flush (int i_buffer) m_buffer_box[i_buffer].setSmall(m_moving_window_dir, (m_buffer_box[i_buffer].smallEnd(m_moving_window_dir) + 1) ); m_buffer_box[i_buffer].setBig(m_moving_window_dir, (m_buffer_box[i_buffer].bigEnd(m_moving_window_dir) - 1) ); m_particles_buffer[i_buffer][0]->SetParticleBoxArray(0,vba.back()); + for (int isp = 0; isp < m_particles_buffer.at(i_buffer).size(); ++isp) { + // BTD output is single level. Setting particle geometry, dmap, boxarray to level0 + m_particles_buffer[i_buffer][isp]->SetParGDB(vgeom[0], vdmap[0], vba.back()); + } } } - m_flush_format->WriteToFile( m_varnames, m_mf_output[i_buffer], m_geom_output[i_buffer], warpx.getistep(), labtime, m_output_species[i_buffer], nlev_output, file_name, m_file_min_digits, @@ -857,6 +1018,19 @@ BTDiagnostics::Flush (int i_buffer) m_max_buffer_multifabs[i_buffer], m_geom_snapshot[i_buffer][0], isLastBTDFlush, m_totalParticles_flushed_already[i_buffer]); + // Note : test if this is needed before or after WriteToFile. This is because, for plotfiles, when writing particles, amrex checks if the particles are within the bounds defined by the box. However, in BTD, particles can be (at max) 1 cell outside the bounds of the geometry. Hence rescaling the box after WriteToFile + if (m_format == "plotfile") { + if (m_particles_buffer.at(i_buffer).size() > 0 ) { + m_buffer_box[i_buffer].setSmall(m_moving_window_dir, (m_buffer_box[i_buffer].smallEnd(m_moving_window_dir) + 1) ); + m_buffer_box[i_buffer].setBig(m_moving_window_dir, (m_buffer_box[i_buffer].bigEnd(m_moving_window_dir) - 1) ); + m_particles_buffer[i_buffer][0]->SetParticleBoxArray(0,vba.back()); + for (int isp = 0; isp < m_particles_buffer.at(i_buffer).size(); ++isp) { + // BTD output is single level. Setting particle geometry, dmap, boxarray to level0 + m_particles_buffer[i_buffer][isp]->SetParGDB(vgeom[0], vdmap[0], vba.back()); + } + } + } + for (int isp = 0; isp < m_particles_buffer.at(i_buffer).size(); ++isp) { // Buffer particle container reset to include geometry, dmap, Boxarray, and refratio // so that particles from finest level can also be selected and transformed @@ -876,6 +1050,10 @@ BTDiagnostics::Flush (int i_buffer) ResetTotalParticlesInBuffer(i_buffer); ClearParticleBuffer(i_buffer); } + // Setting hi k-index for the next buffer, such that, the index is one less than the lo-index of previous buffer + // For example, for buffer size of 256, if the first buffer extent was [256,511] + // then the next buffer will be from [0,255]. 
That is, the hi-index of the following buffer is 256-1 + m_buffer_k_index_hi[i_buffer] = m_buffer_box[i_buffer].smallEnd(m_moving_window_dir) - 1; } void BTDiagnostics::RedistributeParticleBuffer (const int i_buffer) @@ -1196,7 +1374,6 @@ BTDiagnostics::InitializeParticleBuffer () void BTDiagnostics::PrepareParticleDataForOutput() { - auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev < nlev_output; ++lev) { for (int i = 0; i < m_all_particle_functors.size(); ++i) { @@ -1206,43 +1383,19 @@ BTDiagnostics::PrepareParticleDataForOutput() bool ZSliceInDomain = GetZSliceInDomainFlag (i_buffer, lev); if (ZSliceInDomain) { if ( m_totalParticles_in_buffer[i_buffer][i] == 0) { - if (m_do_back_transformed_fields) { - // use the same Box, BoxArray, and Geometry as fields for particles - amrex::Box particle_buffer_box = m_buffer_box[i_buffer]; - amrex::BoxArray buffer_ba( particle_buffer_box ); - buffer_ba.maxSize(m_max_box_size); - amrex::DistributionMapping buffer_dmap(buffer_ba); - m_particles_buffer[i_buffer][i]->SetParticleBoxArray(lev, buffer_ba); - m_particles_buffer[i_buffer][i]->SetParticleDistributionMap(lev, buffer_dmap); - m_particles_buffer[i_buffer][i]->SetParticleGeometry(lev, m_geom_snapshot[i_buffer][lev]); - } else { - amrex::Box particle_buffer_box = m_buffer_box[i_buffer]; - particle_buffer_box.setSmall(m_moving_window_dir, - m_buffer_box[i_buffer].smallEnd(m_moving_window_dir)-1); - particle_buffer_box.setBig(m_moving_window_dir, - m_buffer_box[i_buffer].bigEnd(m_moving_window_dir)+1); - amrex::BoxArray buffer_ba( particle_buffer_box ); - buffer_ba.maxSize(m_max_box_size); - amrex::DistributionMapping buffer_dmap(buffer_ba); - m_particles_buffer[i_buffer][i]->SetParticleBoxArray(lev, buffer_ba); - m_particles_buffer[i_buffer][i]->SetParticleDistributionMap(lev, buffer_dmap); - amrex::IntVect particle_DomBox_lo = m_snapshot_box[i_buffer].smallEnd(); - amrex::IntVect particle_DomBox_hi = m_snapshot_box[i_buffer].bigEnd(); - int zmin = std::max(0, particle_DomBox_lo[m_moving_window_dir] ); - particle_DomBox_lo[m_moving_window_dir] = zmin; - amrex::Box ParticleBox(particle_DomBox_lo, particle_DomBox_hi); - int num_cells = particle_DomBox_hi[m_moving_window_dir] - zmin + 1; - amrex::IntVect ref_ratio = amrex::IntVect(1); - amrex::Real new_lo = m_snapshot_domain_lab[i_buffer].hi(m_moving_window_dir) - - num_cells * dz_lab(warpx.getdt(lev), ref_ratio[m_moving_window_dir]); - amrex::RealBox ParticleRealBox = m_snapshot_domain_lab[i_buffer]; - ParticleRealBox.setLo(m_moving_window_dir, new_lo); - amrex::Vector BTdiag_periodicity(AMREX_SPACEDIM, 0); - amrex::Geometry geom; - geom.define(ParticleBox, &ParticleRealBox, amrex::CoordSys::cartesian, - BTdiag_periodicity.data() ); - m_particles_buffer[i_buffer][i]->SetParticleGeometry(lev, geom); + if (!m_do_back_transformed_fields || m_varnames_fields.size()==0) { + if ( m_buffer_flush_counter[i_buffer] == 0) { + DefineSnapshotGeometry(i_buffer, lev); + } + DefineFieldBufferMultiFab(i_buffer, lev); } + amrex::Box particle_buffer_box = m_buffer_box[i_buffer]; + amrex::BoxArray buffer_ba( particle_buffer_box ); + buffer_ba.maxSize(m_max_box_size); + amrex::DistributionMapping buffer_dmap(buffer_ba); + m_particles_buffer[i_buffer][i]->SetParticleBoxArray(lev, buffer_ba); + m_particles_buffer[i_buffer][i]->SetParticleDistributionMap(lev, buffer_dmap); + m_particles_buffer[i_buffer][i]->SetParticleGeometry(lev, m_geom_snapshot[i_buffer][lev]); } } m_all_particle_functors[i]->PrepareFunctorData ( diff --git 
a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp
index a334bf53451..10a9ff12a7c 100644
--- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp
+++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp
@@ -155,6 +155,20 @@ BackTransformFunctor::InitData ()
     m_k_index_zlab.resize( m_num_buffers );
     m_map_varnames.resize( m_varnames.size() );

+#ifdef WARPX_DIM_RZ
+    std::map<std::string, int> m_possible_fields_to_dump = {
+        {"Er", 0},
+        {"Et", 1},
+        {"Ez", 2},
+        {"Br", 3},
+        {"Bt", 4},
+        {"Bz", 5},
+        {"jr", 6},
+        {"jt", 7},
+        {"jz", 8},
+        {"rho", 9}
+    };
+#else
     std::map<std::string, int> m_possible_fields_to_dump = {
         {"Ex", 0},
         {"Ey", 1},
@@ -167,6 +181,7 @@ BackTransformFunctor::InitData ()
         {"jz", 8},
         {"rho", 9}
     };
+#endif

     for (int i = 0; i < m_varnames.size(); ++i)
     {

From bd71f5d7c78bb5112b44a76f21ec77094746ab98 Mon Sep 17 00:00:00 2001
From: Ryan Sandberg 
Date: Mon, 31 Oct 2022 10:44:28 -0700
Subject: [PATCH 0130/1346] Add BTD notes to FAQ section (#3473)

* Begin writing BTD FAQ notes

* FAQ notes on BTD

* access figure from github online

* update figure and improve faq answers

---
 Docs/source/usage/faq.rst | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/Docs/source/usage/faq.rst b/Docs/source/usage/faq.rst
index cdeedd5f1bf..59c8c3aee49 100644
--- a/Docs/source/usage/faq.rst
+++ b/Docs/source/usage/faq.rst
@@ -50,6 +50,23 @@ Here are a few practical items to assist in designing boosted frame simulations:
 An in-depth discussion of the boosted frame is provided in the :ref:`moving window and optimal Lorentz boosted frame ` section.

+What about Back-transformed diagnostics (BTD)?
+----------------------------------------------
+
+.. figure:: https://user-images.githubusercontent.com/10621396/198702232-9dd595ad-479e-4170-bd25-51e2b72cd50a.png
+   :alt: [fig:BTD_features] Minkowski diagram indicating several features of the back-transformed diagnostic (BTD). The diagram explains why the first BTD begins to fill at boosted time :math:`t'=0` but this doesn't necessarily correspond to lab time :math:`t=0`, how the BTD grid-spacing is determined by the boosted time step :math:`\Delta t'`, hence why the snapshot length doesn't correspond to the grid spacing and length in the input script, and how the BTD snapshots complete when the effective snapshot length is covered in the boosted frame.
+
+   [fig:BTD_features] Minkowski diagram indicating several features of the back-transformed diagnostic (BTD). The diagram explains why the first BTD begins to fill at boosted time :math:`t'=0` but this doesn't necessarily correspond to lab time :math:`t=0`, how the BTD grid-spacing is determined by the boosted time step :math:`\Delta t'`, hence why the snapshot length doesn't correspond to the grid spacing and length in the input script, and how the BTD snapshots complete when the effective snapshot length is covered in the boosted frame.
+
+
+Several BTD quantities differ slightly from the lab frame domain described in the input deck.
+In the following discussion, we will use a subscript input (e.g. :math:`\Delta z_{\rm input}`) to denote properties of the lab frame domain.
+
+
+- The first back-transformed diagnostic (BTD) snapshot may not occur at :math:`t=0`. Rather, it occurs at :math:`t_0=\frac{z_{max}}{c} \beta(1+\beta)\gamma^2`. This is the first time when the boosted frame can complete the snapshot.
+- The grid spacing of the BTD snapshot is different from the grid spacing indicated in the input script.
It is given by :math:`\Delta z_{\rm grid,snapshot}=\frac{c\Delta t_{\rm boost}}{\gamma\beta}`. For a CFL-limited time step, :math:`\Delta z_{\rm grid,snapshot}\approx \frac{1+\beta}{\beta} \Delta z_{\rm input}\approx 2 \Delta z_{\rm input}`. Hence in many common use cases at large boost, it is expected that the BTD snapshot has a grid spacing twice what is expressed in the input script. +- The effective length of the BTD snapshot may be longer than anticipated from the input script because the grid spacing is different. Additionally, the number of grid points in the BTD snapshot is a multiple of ``.buffer_size`` whereas the number of grid cells specified in the input deck may not be. +- The code may require longer than anticipated to complete a BTD snapshot. The code starts filling the :math:`i^{th}` snapshot around step :math:`j_{\rm BTD start}={\rm ceil}\left( i\gamma(1-\beta)\frac{\Delta t_{\rm snapshot}}{\Delta t_{\rm boost}}\right)`. The code then saves information for one BTD cell every time step in the boosted frame simulation. The :math:`i^{th}` snapshot is completed and saved :math:`n_{z,{\rm snapshot}}=n_{\rm buffers}\cdot ({\rm buffer\ size})` time steps after it begins, which is when the effective snapshot length is covered by the simulation. What kinds of RZ output do you support? --------------------------------------- From 886e495dd471aaf0b9279fb001ff55af713c5685 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 31 Oct 2022 11:18:07 -0700 Subject: [PATCH 0131/1346] AMReX: Weekly Update (#3492) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index dc0a8732843..b3554875332 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
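The BTD relations quoted in the FAQ entry above are easy to evaluate numerically. The short sketch below computes the first-snapshot time, the snapshot grid spacing, and the boosted-frame step at which a given snapshot starts filling; the boost factor, domain extent, time steps, and snapshot index used here are hypothetical placeholders chosen only for illustration, not values taken from any input deck in these patches:

    import numpy as np
    import scipy.constants as scc

    # Hypothetical boosted-frame parameters -- placeholders for illustration only
    gamma_boost = 10.0
    beta_boost = np.sqrt(1.0 - 1.0/gamma_boost**2)
    z_max = 200.e-6          # lab-frame upper end of the simulation box [m]
    dt_boost = 1.e-16        # boosted-frame time step [s]
    dt_snapshot = 1.e-13     # lab-frame time between successive BTD snapshots [s]
    i_snapshot = 3           # index of the snapshot considered

    # Time at which the first BTD snapshot can be completed:
    # t_0 = z_max/c * beta*(1+beta)*gamma^2
    t_0 = z_max/scc.c * beta_boost*(1.0 + beta_boost)*gamma_boost**2

    # Grid spacing of the BTD snapshot: dz_snapshot = c*dt_boost/(gamma*beta)
    dz_snapshot = scc.c*dt_boost/(gamma_boost*beta_boost)

    # Boosted-frame step at which snapshot i starts filling:
    # j_start = ceil(i*gamma*(1-beta)*dt_snapshot/dt_boost)
    j_start = int(np.ceil(i_snapshot*gamma_boost*(1.0 - beta_boost)*dt_snapshot/dt_boost))

    print(f"t_0 = {t_0:.3e} s, dz_snapshot = {dz_snapshot:.3e} m, "
          f"snapshot {i_snapshot} starts at boosted step {j_start}")

Plugging a simulation's actual gamma_boost, domain extent, and time steps into these three lines reproduces the quantities discussed in the FAQ bullets above.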
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 3082028e42870b1ed37f0d26160ef078580511e3 && cd - + cd amrex && git checkout --detach 735c3513153f1d06f783e64f455816be85fb3602 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 568beffaf69..1de1c6b07aa 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 3082028e42870b1ed37f0d26160ef078580511e3 +branch = 735c3513153f1d06f783e64f455816be85fb3602 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 90f937f016a..68bfcc57b6c 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 3082028e42870b1ed37f0d26160ef078580511e3 +branch = 735c3513153f1d06f783e64f455816be85fb3602 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 126e8660bfa..8b794d61460 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "3082028e42870b1ed37f0d26160ef078580511e3" +set(WarpX_amrex_branch "735c3513153f1d06f783e64f455816be85fb3602" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 002d3e88228..2f2ab444877 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 3082028e42870b1ed37f0d26160ef078580511e3 && cd - +cd amrex && git checkout --detach 735c3513153f1d06f783e64f455816be85fb3602 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 5645f4b37a6f6f21705433d5afe28e9167eb2885 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Mon, 31 Oct 2022 15:51:23 -0700 Subject: [PATCH 0132/1346] Implement D+D and D+He fusion (#3257) * Implement D+D -> n+He3 fusion * Fix logic for fusion reaction * Check products in a different place * Correct compilation error * Implement D+D -> T+p cross-section * Update example * Use clearer naming convention for fusion types * Revert changes to example input script * Add analysis script * Progress on tests * Use 2 species in test * Correct momentum of colliding species * Update test * Update test * Generalize species names in fusion tests * Update benchmarks * Correct typo * Updated scripts * Update script so that it works for D+T and D+D * Update CI * Add benchmark file * Correct typo * Fix compilation on GPU * Update RZ CI test * Implement Deuterium-Helium reaction * Apply suggestions from code review Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- ...sion.py => 
analysis_two_product_fusion.py} | 73 ++++++++---- .../inputs_deuterium_deuterium_3d | 83 +++++++++++++ .../Deuterium_Deuterium_Fusion_3D.json | 41 +++++++ Regression/WarpX-tests.ini | 20 +++- .../BinaryCollision/BinaryCollisionUtils.H | 17 ++- .../BinaryCollision/BinaryCollisionUtils.cpp | 73 +++++++++++- .../BoschHaleFusionCrossSection.H | 111 ++++++++++++++++++ .../DeuteriumTritiumFusionCrossSection.H | 64 ---------- .../NuclearFusion/NuclearFusionFunc.H | 27 ----- .../NuclearFusion/SingleNuclearFusionEvent.H | 12 +- .../BinaryCollision/ParticleCreationFunc.H | 16 ++- .../BinaryCollision/ParticleCreationFunc.cpp | 6 +- 12 files changed, 406 insertions(+), 137 deletions(-) rename Examples/Modules/nuclear_fusion/{analysis_deuterium_tritium_fusion.py => analysis_two_product_fusion.py} (93%) create mode 100644 Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d create mode 100644 Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D.json create mode 100644 Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H delete mode 100644 Source/Particles/Collision/BinaryCollision/NuclearFusion/DeuteriumTritiumFusionCrossSection.H diff --git a/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py b/Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py similarity index 93% rename from Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py rename to Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py index a77613e6374..ad8b7f70e10 100755 --- a/Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py +++ b/Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py @@ -48,25 +48,11 @@ default_tol = 1.e-12 # Default relative tolerance -## Define reactants and products -reactant_species = ['deuterium', 'tritium'] -product_species = ['helium4', 'neutron'] - -mass = { - 'deuterium': 2.01410177812*scc.m_u, - 'tritium': 3.0160492779*scc.m_u, - 'helium4': 4.00260325413*scc.m_u, - 'neutron': 1.0013784193052508*scc.m_p -} -m_reduced = np.product([mass[s] for s in reactant_species])/np.sum([mass[s] for s in reactant_species]) - ## Some physical parameters keV_to_Joule = scc.e*1e3 MeV_to_Joule = scc.e*1e6 barn_to_square_meter = 1.e-28 -E_fusion = 17.5893*MeV_to_Joule # Energy released during the fusion reaction - ## Checks whether this is the 2D or the 3D test warpx_used_inputs = open('./warpx_used_inputs', 'r').read() if re.search('geometry.dims = RZ', warpx_used_inputs): @@ -74,6 +60,33 @@ else: is_RZ = False +## Check which kind of test we are doing: D+T or D+D +# Define reactants and products +if re.search('tritium', warpx_used_inputs): + # If tritium appears in the file, than this is the D+T test + reaction_type = 'DT' + reactant_species = ['deuterium', 'tritium'] + product_species = ['helium4', 'neutron'] + ntests = 2 + E_fusion = 17.5893*MeV_to_Joule # Energy released during the fusion reaction +else: + # else, this is the D+D test + reaction_type = 'DD' + reactant_species = ['deuterium', 'hydrogen2'] + product_species = ['helium3', 'neutron'] + ntests = 1 + E_fusion = 3.268911e6*MeV_to_Joule + +mass = { + 'deuterium': 2.01410177812*scc.m_u, + 'hydrogen2': 2.01410177812*scc.m_u, + 'tritium': 3.0160492779*scc.m_u, + 'helium3': 3.016029*scc.m_u, + 'helium4': 4.00260325413*scc.m_u, + 'neutron': 1.0013784193052508*scc.m_p +} +m_reduced = np.product([mass[s] for s in reactant_species])/np.sum([mass[s] for s in reactant_species]) + ## Some numerical parameters for this test size_x = 8 size_y = 8 @@ 
-231,15 +244,28 @@ def cross_section( E_keV ): ## in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611 joule_to_keV = 1.e-3/scc.e B_G = scc.pi * scc.alpha * np.sqrt( 2.*m_reduced * scc.c**2 * joule_to_keV ); - A1 = 6.927e4; - A2 = 7.454e8; - A3 = 2.050e6; - A4 = 5.2002e4; - B1 = 6.38e1; - B2 = -9.95e-1; - B3 = 6.981e-5; - B4 = 1.728e-4; - astrophysical_factor = (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*A4))) / (1 + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))); + if reaction_type == 'DT': + A1 = 6.927e4; + A2 = 7.454e8; + A3 = 2.050e6; + A4 = 5.2002e4; + A5 = 0; + B1 = 6.38e1; + B2 = -9.95e-1; + B3 = 6.981e-5; + B4 = 1.728e-4; + elif reaction_type == 'DD': + A1 = 5.3701e4; + A2 = 3.3027e2; + A3 = -1.2706e-1; + A4 = 2.9327e-5; + A5 = -2.5151e-9; + B1 = 0; + B2 = 0; + B3 = 0; + B4 = 0; + + astrophysical_factor = (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*(A4 + E_keV*A5)))) / (1 + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))); millibarn_to_barn = 1.e-3; return millibarn_to_barn * astrophysical_factor/E_keV * np.exp(-B_G/np.sqrt(E_keV)) @@ -385,7 +411,6 @@ def main(): field_data_start = ds_start.covering_grid(level=0, left_edge=ds_start.domain_left_edge, dims=ds_start.domain_dimensions) - ntests = 2 for i in range(1, ntests+1): data = {} diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d b/Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d new file mode 100644 index 00000000000..8de397b0b10 --- /dev/null +++ b/Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d @@ -0,0 +1,83 @@ +################################# +####### GENERAL PARAMETERS ###### +################################# +## With these parameters, each cell has a size of exactly 1 by 1 by 1 +max_step = 1 +amr.n_cell = 8 8 16 +amr.max_grid_size = 8 +amr.blocking_factor = 8 +amr.max_level = 0 +geometry.dims = 3 +geometry.prob_lo = 0. 0. 0. +geometry.prob_hi = 8. 8. 16. + +################################# +###### Boundary Condition ####### +################################# +boundary.field_lo = periodic periodic periodic +boundary.field_hi = periodic periodic periodic + +################################# +############ NUMERICS ########### +################################# +warpx.verbose = 1 +warpx.cfl = 1.0 + +# Order of particle shape factors +algo.particle_shape = 1 + +################################# +############ PLASMA ############# +################################# +particles.species_names = deuterium_1 hydrogen2_1 helium3_1 neutron_1 +my_constants.m_deuterium = 2.01410177812*m_u +my_constants.keV_to_J = 1.e3*q_e +my_constants.Energy_step = 22. * keV_to_J + +deuterium_1.species_type = deuterium +deuterium_1.injection_style = "NRandomPerCell" +deuterium_1.num_particles_per_cell = 10000 +deuterium_1.profile = constant +deuterium_1.density = 1. +deuterium_1.momentum_distribution_type = "parse_momentum_function" +deuterium_1.momentum_function_ux(x,y,z) = 0. +deuterium_1.momentum_function_uy(x,y,z) = 0. +deuterium_1.momentum_function_uz(x,y,z) = sqrt(m_deuterium*Energy_step*(floor(z)**2))/(m_deuterium*clight) +deuterium_1.do_not_push = 1 +deuterium_1.do_not_deposit = 1 + +hydrogen2_1.species_type = deuterium +hydrogen2_1.injection_style = "NRandomPerCell" +hydrogen2_1.num_particles_per_cell = 10000 +hydrogen2_1.profile = constant +hydrogen2_1.density = 1. +hydrogen2_1.momentum_distribution_type = "parse_momentum_function" +hydrogen2_1.momentum_function_ux(x,y,z) = 0. +hydrogen2_1.momentum_function_uy(x,y,z) = 0. 
+hydrogen2_1.momentum_function_uz(x,y,z) = -sqrt(m_deuterium*Energy_step*(floor(z)**2))/(m_deuterium*clight) +hydrogen2_1.do_not_push = 1 +hydrogen2_1.do_not_deposit = 1 + +helium3_1.species_type = helium3 +helium3_1.do_not_push = 1 +helium3_1.do_not_deposit = 1 + +neutron_1.species_type = neutron +neutron_1.do_not_push = 1 +neutron_1.do_not_deposit = 1 + +################################# +############ COLLISION ########## +################################# +collisions.collision_names = DDNHeF1 + +DDNHeF1.species = deuterium_1 hydrogen2_1 +DDNHeF1.product_species = helium3_1 neutron_1 +DDNHeF1.type = nuclearfusion +DDNHeF1.fusion_multiplier = 1.e50 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 1 +diag1.diag_type = Full +diag1.fields_to_plot = rho diff --git a/Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D.json b/Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D.json new file mode 100644 index 00000000000..74ea1b732ac --- /dev/null +++ b/Regression/Checksum/benchmarks_json/Deuterium_Deuterium_Fusion_3D.json @@ -0,0 +1,41 @@ +{ + "deuterium_1": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 2.63689606713025e-13, + "particle_position_x": 40960140.72983793, + "particle_position_y": 40959772.69310104, + "particle_position_z": 81919021.52308556, + "particle_weight": 1024.000000000021 + }, + "helium3_1": { + "particle_momentum_x": 8.492383942294212e-16, + "particle_momentum_y": 8.507905928154269e-16, + "particle_momentum_z": 8.500197251270117e-16, + "particle_position_x": 151779.74563236872, + "particle_position_y": 152858.5501479658, + "particle_position_z": 322785.7613257161, + "particle_weight": 2.7065032055210726e-28 + }, + "hydrogen2_1": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 2.63689606713025e-13, + "particle_position_x": 40958301.591654316, + "particle_position_y": 40961136.14476712, + "particle_position_z": 81920546.19181262, + "particle_weight": 1024.000000000021 + }, + "lev=0": { + "rho": 0.0 + }, + "neutron_1": { + "particle_momentum_x": 8.492383942294212e-16, + "particle_momentum_y": 8.507905928154269e-16, + "particle_momentum_z": 8.500197251270117e-16, + "particle_position_x": 151779.74563236872, + "particle_position_y": 152858.5501479658, + "particle_position_z": 322785.7613257161, + "particle_weight": 2.7065032055210726e-28 + } +} \ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 68bfcc57b6c..9f70b774285 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -2308,7 +2308,23 @@ useOMP = 1 numthreads = 2 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py +analysisRoutine = Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py + +[Deuterium_Deuterium_Fusion_3D] +buildDir = . +inputFile = Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d +runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 +dim = 3 +addToCompileString = +cmakeSetupOpts = -DWarpX_DIMS=3 +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +analysisRoutine = Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py [Deuterium_Tritium_Fusion_RZ] buildDir = . 
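For a rough sense of the cross sections exercised by the new deuterium-deuterium test above, the sketch below evaluates the Bosch-Hale fit for the D(d,n)He-3 branch at one sample center-of-mass kinetic energy, reusing the A/B coefficients quoted in the updated analysis script; the 100 keV sample point is an arbitrary choice for illustration, not a value used by the test itself:

    import numpy as np
    import scipy.constants as scc

    # Bosch-Hale fit coefficients for D(d,n)He3, as listed in the analysis script above
    A1, A2, A3, A4, A5 = 5.3701e4, 3.3027e2, -1.2706e-1, 2.9327e-5, -2.5151e-9
    B1 = B2 = B3 = B4 = 0.0

    m_D = 2.01410177812*scc.m_u
    m_reduced = m_D/2.0                  # reduced mass of the D-D pair
    joule_to_keV = 1.e-3/scc.e
    # Gamow constant in keV^(1/2); both reactants have Z=1
    B_G = np.pi*scc.alpha*np.sqrt(2.0*m_reduced*scc.c**2*joule_to_keV)

    def dd_cross_section_barn(E_keV):
        """Cross section [barn] of D(d,n)He3 at center-of-mass kinetic energy E_keV."""
        # Astrophysical factor S(E) in keV*millibarn (Pade rational fit)
        S = (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*(A4 + E_keV*A5)))) \
            / (1.0 + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4))))
        return 1.e-3 * S/E_keV * np.exp(-B_G/np.sqrt(E_keV))   # millibarn -> barn

    # Roughly a few tens of millibarn at 100 keV
    print(f"sigma_DD(n+He3) at 100 keV ~ {dd_cross_section_barn(100.0)*1.e3:.1f} mb")

This is the same analytic form that the new BoschHaleFusionCrossSection.H header (introduced later in this patch) evaluates in C++, with the coefficients selected per reaction branch.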
@@ -2324,7 +2340,7 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_deuterium_tritium_fusion.py +analysisRoutine = Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py [Maxwell_Hybrid_QED_solver] buildDir = . diff --git a/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.H b/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.H index 2333e1f4102..09213ba0346 100644 --- a/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.H +++ b/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.H @@ -12,9 +12,20 @@ #include "Particles/MultiParticleContainer.H" -enum struct CollisionType { DeuteriumTritiumFusion, ProtonBoronFusion, Undefined }; - -enum struct NuclearFusionType { DeuteriumTritium, ProtonBoron, Undefined }; +enum struct CollisionType { DeuteriumTritiumToNeutronHeliumFusion, + DeuteriumDeuteriumToProtonTritiumFusion, + DeuteriumDeuteriumToNeutronHeliumFusion, + DeuteriumHeliumToProtonHeliumFusion, + ProtonBoronToAlphasFusion, + Undefined }; + +enum struct NuclearFusionType { + DeuteriumTritiumToNeutronHelium, + DeuteriumDeuteriumToProtonTritium, + DeuteriumDeuteriumToNeutronHelium, + DeuteriumHeliumToProtonHelium, + ProtonBoronToAlphas, + Undefined }; namespace BinaryCollisionUtils{ diff --git a/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.cpp b/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.cpp index 4bbdb4bc4a1..81acecd3e58 100644 --- a/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.cpp +++ b/Source/Particles/Collision/BinaryCollision/BinaryCollisionUtils.cpp @@ -25,20 +25,75 @@ namespace BinaryCollisionUtils{ pp_collision_name.getarr("species", species_names); auto& species1 = mypc->GetParticleContainerFromName(species_names[0]); auto& species2 = mypc->GetParticleContainerFromName(species_names[1]); + amrex::Vector product_species_name; + pp_collision_name.getarr("product_species", product_species_name); if ((species1.AmIA() && species2.AmIA()) || (species1.AmIA() && species2.AmIA()) ) { - return NuclearFusionType::DeuteriumTritium; + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + product_species_name.size() == 2u, + "ERROR: Deuterium-tritium fusion must contain exactly two product species"); + auto& product_species1 = mypc->GetParticleContainerFromName(product_species_name[0]); + auto& product_species2 = mypc->GetParticleContainerFromName(product_species_name[1]); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (product_species1.AmIA() && product_species2.AmIA()) + || + (product_species1.AmIA() && product_species2.AmIA()), + "ERROR: Product species of deuterium-tritium fusion must be of type neutron and helium4"); + return NuclearFusionType::DeuteriumTritiumToNeutronHelium; + } + else if (species1.AmIA() && species2.AmIA()) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + product_species_name.size() == 2u, + "ERROR: Deuterium-deuterium fusion must contain exactly two product species"); + auto& product_species1 = mypc->GetParticleContainerFromName(product_species_name[0]); + auto& product_species2 = mypc->GetParticleContainerFromName(product_species_name[1]); + if ( + (product_species1.AmIA() && product_species2.AmIA()) + ||(product_species1.AmIA() && product_species2.AmIA())){ + return NuclearFusionType::DeuteriumDeuteriumToNeutronHelium; + } else if ( + (product_species1.AmIA() && product_species2.AmIA()) + ||(product_species1.AmIA() && product_species2.AmIA())){ + return NuclearFusionType::DeuteriumDeuteriumToProtonTritium; + } else { + 
amrex::Abort("ERROR: Product species of proton-boron fusion must be of type helium3 and neutron, or tritium and proton"); + } + } + else if ((species1.AmIA() && species2.AmIA()) + || + (species1.AmIA() && species2.AmIA()) + ) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + product_species_name.size() == 2u, + "ERROR: Deuterium-helium fusion must contain exactly two product species"); + auto& product_species1 = mypc->GetParticleContainerFromName(product_species_name[0]); + auto& product_species2 = mypc->GetParticleContainerFromName(product_species_name[1]); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (product_species1.AmIA() && product_species2.AmIA()) + || + (product_species1.AmIA() && product_species2.AmIA()), + "ERROR: Product species of deuterium-helium fusion must be of type proton and helium4"); + return NuclearFusionType::DeuteriumHeliumToProtonHelium; } else if ((species1.AmIA() && species2.AmIA()) || (species1.AmIA() && species2.AmIA()) ) { - return NuclearFusionType::ProtonBoron; + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + product_species_name.size() == 1, + "ERROR: Proton-boron must contain exactly one product species"); + auto& product_species = mypc->GetParticleContainerFromName(product_species_name[0]); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + product_species.AmIA(), + "ERROR: Product species of proton-boron fusion must be of type alpha"); + return NuclearFusionType::ProtonBoronToAlphas; } amrex::Abort("Binary nuclear fusion not implemented between species " + species_names[0] + " of type " + species1.getSpeciesTypeName() + @@ -63,10 +118,16 @@ namespace BinaryCollisionUtils{ CollisionType nuclear_fusion_type_to_collision_type (const NuclearFusionType fusion_type) { - if (fusion_type == NuclearFusionType::DeuteriumTritium) - return CollisionType::DeuteriumTritiumFusion; - if (fusion_type == NuclearFusionType::ProtonBoron) - return CollisionType::ProtonBoronFusion; + if (fusion_type == NuclearFusionType::DeuteriumTritiumToNeutronHelium) + return CollisionType::DeuteriumTritiumToNeutronHeliumFusion; + if (fusion_type == NuclearFusionType::DeuteriumDeuteriumToProtonTritium) + return CollisionType::DeuteriumDeuteriumToProtonTritiumFusion; + if (fusion_type == NuclearFusionType::DeuteriumDeuteriumToNeutronHelium) + return CollisionType::DeuteriumDeuteriumToNeutronHeliumFusion; + if (fusion_type == NuclearFusionType::DeuteriumHeliumToProtonHelium) + return CollisionType::DeuteriumHeliumToProtonHeliumFusion; + if (fusion_type == NuclearFusionType::ProtonBoronToAlphas) + return CollisionType::ProtonBoronToAlphasFusion; amrex::Abort("Invalid nuclear fusion type"); return CollisionType::Undefined; } diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H new file mode 100644 index 00000000000..08af65ca647 --- /dev/null +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H @@ -0,0 +1,111 @@ +/* Copyright 2022 Remi Lehe + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef BOSCH_HALE_FUSION_CROSS_SECTION_H +#define BOSCH_HALE_FUSION_CROSS_SECTION_H + +#include "Particles/Collision/BinaryCollision/BinaryCollisionUtils.H" +#include "Utils/WarpXConst.H" + +#include + +#include + +/** + * \brief Computes the fusion cross section, using the analytical fits given in + * H.-S. Bosch and G.M. Hale 1992 Nucl. 
Fusion 32 611 + * + * @param[in] E_kin_star the kinetic energy of the reactants in their center of mass frame, in SI units. + * @param[in] fusion_type indicates which fusion reaction to calculate the cross-section for + * @return The total cross section in SI units (square meters). + */ +AMREX_GPU_HOST_DEVICE AMREX_INLINE +amrex::ParticleReal BoschHaleFusionCrossSection ( + const amrex::ParticleReal& E_kin_star, + const NuclearFusionType& fusion_type, + const amrex::ParticleReal& m1, + const amrex::ParticleReal& m2 ) +{ + using namespace amrex::literals; + + constexpr amrex::ParticleReal joule_to_keV = 1.e-3_prt/PhysConst::q_e; + const amrex::ParticleReal E_keV = E_kin_star*joule_to_keV; + + // If kinetic energy is 0, return a 0 cross section and avoid later division by 0. + if (E_keV == 0._prt) {return 0._prt;} + + // Compute the Gamow constant B_G (in keV^{1/2}) + // (See Eq. 3 in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611) + const amrex::ParticleReal m_reduced = m1 / (1._prt + m1/m2); + // The formula for `B_G` below assumes that both reactants have Z=1 + // When one of the reactants is helium (Z=2), this formula is corrected further below. + amrex::ParticleReal B_G = MathConst::pi * PhysConst::alpha + * std::sqrt( 2._prt*m_reduced*PhysConst::c*PhysConst::c * joule_to_keV ); + if (fusion_type == NuclearFusionType::DeuteriumHeliumToProtonHelium) { + // Take into account the fact that Z=2 for one of the reactant (helium) in this case + B_G *= 2; + } + + // Compute astrophysical_factor + // (See Eq. 9 and Table IV in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611) + amrex::ParticleReal A1=0_prt, A2=0_prt, A3=0_prt, A4=0_prt, A5=0_prt, B1=0_prt, B2=0_prt, B3=0_prt, B4=0_prt; + if (fusion_type == NuclearFusionType::DeuteriumTritiumToNeutronHelium) { + A1 = 6.927e4_prt; + A2 = 7.454e8_prt; + A3 = 2.050e6_prt; + A4 = 5.2002e4_prt; + A5 = 0_prt; + B1 = 6.38e1_prt; + B2 = -9.95e-1_prt; + B3 = 6.981e-5_prt; + B4 = 1.728e-4_prt; + } + else if (fusion_type == NuclearFusionType::DeuteriumDeuteriumToProtonTritium) { + A1 = 5.5576e4_prt; + A2 = 2.1054e2_prt; + A3 = -3.2638e-2_prt; + A4 = 1.4987e-6_prt; + A5 = 1.8181e-10_prt; + B1 = 0_prt; + B2 = 0_prt; + B3 = 0_prt; + B4 = 0_prt; + } + else if (fusion_type == NuclearFusionType::DeuteriumDeuteriumToNeutronHelium) { + A1 = 5.3701e4_prt; + A2 = 3.3027e2_prt; + A3 = -1.2706e-1_prt; + A4 = 2.9327e-5_prt; + A5 = -2.5151e-9_prt; + B1 = 0_prt; + B2 = 0_prt; + B3 = 0_prt; + B4 = 0_prt; + } + else if (fusion_type == NuclearFusionType::DeuteriumHeliumToProtonHelium) { + A1 = 5.7501e6_prt; + A2 = 2.5226e3_prt; + A3 = 4.5566e1_prt; + A4 = 0_prt; + A5 = 0_prt; + B1 = -3.1995e-3_prt; + B2 = -8.5530e-6_prt; + B3 = 5.9014e-8_prt; + B4 = 0_prt; + } + + amrex::ParticleReal astrophysical_factor = + (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*(A4 + E_keV*A5)))) / + (1_prt + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))); + + // Compute cross-section in SI units + // (See Eq. 8 in H.-S. Bosch and G.M. Hale 1992 Nucl. 
Fusion 32 611) + constexpr amrex::ParticleReal millibarn_to_sqm = 1.e-31_prt; + return millibarn_to_sqm * astrophysical_factor/E_keV * std::exp(-B_G/std::sqrt(E_keV)); +} + +#endif // BOSCH_HALE_FUSION_CROSS_SECTION_H diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/DeuteriumTritiumFusionCrossSection.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/DeuteriumTritiumFusionCrossSection.H deleted file mode 100644 index 58aaa986b98..00000000000 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/DeuteriumTritiumFusionCrossSection.H +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2022 Remi Lehe - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ - -#ifndef DEUTERIUM_TRITIUM_FUSION_CROSS_SECTION_H -#define DEUTERIUM_TRITIUM_FUSION_CROSS_SECTION_H - -#include "Utils/WarpXConst.H" - -#include - -#include - -/** - * \brief Computes the total deuterium-tritium fusion cross section, using - * the analytical fits given in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611 - * - * @param[in] E_kin_star the kinetic energy of the deuterium-tritium pair in its center of mass frame, in SI units. - * @return The total cross section in SI units (square meters). - */ -AMREX_GPU_HOST_DEVICE AMREX_INLINE -amrex::ParticleReal DeuteriumTritiumFusionCrossSection (const amrex::ParticleReal& E_kin_star) -{ - using namespace amrex::literals; - - constexpr amrex::ParticleReal joule_to_keV = 1.e-3_prt/PhysConst::q_e; - const amrex::ParticleReal E_keV = E_kin_star*joule_to_keV; - - // If kinetic energy is 0, return a 0 cross section and avoid later division by 0. - if (E_keV == 0._prt) {return 0._prt;} - - // Compute the Gamow constant B_G (in keV^{1/2}) - // (See Eq. 3 in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611) - constexpr amrex::ParticleReal m_D = 2.01410177812 * PhysConst::m_u; - constexpr amrex::ParticleReal m_T = 3.0160492779 * PhysConst::m_u; - constexpr amrex::ParticleReal m_reduced = m_D / (1._prt + m_D/m_T); - amrex::ParticleReal B_G = MathConst::pi * PhysConst::alpha * 1._prt * 1._prt - * std::sqrt( 2._prt*m_reduced*PhysConst::c*PhysConst::c * joule_to_keV ); - - // Compute astrophysical_factor - // (See Eq. 9 and Table IV in H.-S. Bosch and G.M. Hale 1992 Nucl. Fusion 32 611) - constexpr amrex::ParticleReal A1 = 6.927e4; - constexpr amrex::ParticleReal A2 = 7.454e8; - constexpr amrex::ParticleReal A3 = 2.050e6; - constexpr amrex::ParticleReal A4 = 5.2002e4; - constexpr amrex::ParticleReal B1 = 6.38e1; - constexpr amrex::ParticleReal B2 = -9.95e-1; - constexpr amrex::ParticleReal B3 = 6.981e-5; - constexpr amrex::ParticleReal B4 = 1.728e-4; - - amrex::ParticleReal astrophysical_factor = - (A1 + E_keV*(A2 + E_keV*(A3 + E_keV*A4))) / - (1_prt + E_keV*(B1 + E_keV*(B2 + E_keV*(B3 + E_keV*B4)))); - - // Compute cross-section in SI units - // (See Eq. 8 in H.-S. Bosch and G.M. Hale 1992 Nucl. 
Fusion 32 611) - constexpr amrex::ParticleReal millibarn_to_sqm = 1.e-31_prt; - return millibarn_to_sqm * astrophysical_factor/E_keV * std::exp(-B_G/std::sqrt(E_keV)); -} - -#endif // DEUTERIUM_TRITIUM_TRITIUM_FUSION_CROSS_SECTION_H diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H index 25624469d71..2dcbe22e9f7 100644 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/NuclearFusionFunc.H @@ -65,33 +65,6 @@ public: m_fusion_type = BinaryCollisionUtils::get_nuclear_fusion_type(collision_name, mypc); amrex::ParmParse pp_collision_name(collision_name); - amrex::Vector product_species_name; - pp_collision_name.getarr("product_species", product_species_name); - - if (m_fusion_type == NuclearFusionType::DeuteriumTritium) - { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - product_species_name.size() == 2u, - "ERROR: Deuterium-tritium fusion must contain exactly two product species"); - auto& product_species1 = mypc->GetParticleContainerFromName(product_species_name[0]); - auto& product_species2 = mypc->GetParticleContainerFromName(product_species_name[1]); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - (product_species1.AmIA() && product_species2.AmIA()) - || - (product_species1.AmIA() && product_species2.AmIA()), - "ERROR: Product species of deuterium-tritium fusion must be of type neutron and helium4"); - } - if (m_fusion_type == NuclearFusionType::ProtonBoron) - { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - product_species_name.size() == 1, - "ERROR: Proton-boron must contain exactly one product species"); - auto& product_species = mypc->GetParticleContainerFromName(product_species_name[0]); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - product_species.AmIA(), - "ERROR: Product species of proton-boron fusion must be of type alpha"); - } - // default fusion multiplier m_fusion_multiplier = 1.0_prt; utils::parser::queryWithParser( diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/SingleNuclearFusionEvent.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/SingleNuclearFusionEvent.H index 0bab4bef45e..e4704f74665 100644 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/SingleNuclearFusionEvent.H +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/SingleNuclearFusionEvent.H @@ -8,7 +8,7 @@ #ifndef SINGLE_NUCLEAR_FUSION_EVENT_H_ #define SINGLE_NUCLEAR_FUSION_EVENT_H_ -#include "DeuteriumTritiumFusionCrossSection.H" +#include "BoschHaleFusionCrossSection.H" #include "ProtonBoronFusionCrossSection.H" #include "Particles/Collision/BinaryCollision/BinaryCollisionUtils.H" @@ -112,13 +112,15 @@ void SingleNuclearFusionEvent (const amrex::ParticleReal& u1x, const amrex::Part // Compute fusion cross section as a function of kinetic energy in the center of mass frame auto fusion_cross_section = amrex::ParticleReal(0.); - if (fusion_type == NuclearFusionType::DeuteriumTritium) + if (fusion_type == NuclearFusionType::ProtonBoronToAlphas) { - fusion_cross_section = DeuteriumTritiumFusionCrossSection(E_kin_star); + fusion_cross_section = ProtonBoronFusionCrossSection(E_kin_star); } - else if (fusion_type == NuclearFusionType::ProtonBoron) + else if ((fusion_type == NuclearFusionType::DeuteriumTritiumToNeutronHelium) + || (fusion_type == NuclearFusionType::DeuteriumDeuteriumToProtonTritium) + || (fusion_type == NuclearFusionType::DeuteriumDeuteriumToNeutronHelium)) { - 
fusion_cross_section = ProtonBoronFusionCrossSection(E_kin_star); + fusion_cross_section = BoschHaleFusionCrossSection(E_kin_star, fusion_type, m1, m2); } // Square of the norm of the momentum of one of the particles in the center of mass frame diff --git a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H index be77ae9e075..085adcc8860 100644 --- a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H +++ b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H @@ -216,7 +216,7 @@ public: // Initialize the product particles' momentum, using a function depending on the // specific collision type - if (t_collision_type == CollisionType::ProtonBoronFusion) + if (t_collision_type == CollisionType::ProtonBoronToAlphasFusion) { const index_type product_start_index = products_np_data[0] + 2*p_offsets[i]* p_num_products_device[0]; @@ -224,11 +224,19 @@ public: p_pair_indices_1[i], p_pair_indices_2[i], product_start_index, m1, m2, engine); } - else if (t_collision_type == CollisionType::DeuteriumTritiumFusion) + else if ((t_collision_type == CollisionType::DeuteriumTritiumToNeutronHeliumFusion) + || (t_collision_type == CollisionType::DeuteriumDeuteriumToProtonTritiumFusion) + || (t_collision_type == CollisionType::DeuteriumDeuteriumToNeutronHeliumFusion)) { amrex::ParticleReal fusion_energy = 0.0_prt; - if (t_collision_type == CollisionType::DeuteriumTritiumFusion) { - fusion_energy = 17.5893e6_prt * PhysConst::q_e; // 17.6 MeV + if (t_collision_type == CollisionType::DeuteriumTritiumToNeutronHeliumFusion) { + fusion_energy = 17.5893e6_prt * PhysConst::q_e; + } + else if (t_collision_type == CollisionType::DeuteriumDeuteriumToProtonTritiumFusion) { + fusion_energy = 4.032667e6_prt * PhysConst::q_e; + } + else if (t_collision_type == CollisionType::DeuteriumDeuteriumToNeutronHeliumFusion) { + fusion_energy = 3.268911e6_prt * PhysConst::q_e; } TwoProductFusionInitializeMomentum(soa_1, soa_2, soa_products_data[0], soa_products_data[1], diff --git a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.cpp b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.cpp index bab92e48610..3b195110762 100644 --- a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.cpp +++ b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.cpp @@ -23,7 +23,7 @@ ParticleCreationFunc::ParticleCreationFunc (const std::string collision_name, m_collision_type = BinaryCollisionUtils::get_collision_type(collision_name, mypc); - if (m_collision_type == CollisionType::ProtonBoronFusion) + if (m_collision_type == CollisionType::ProtonBoronToAlphasFusion) { // Proton-Boron fusion only produces alpha particles m_num_product_species = 1; @@ -34,7 +34,9 @@ ParticleCreationFunc::ParticleCreationFunc (const std::string collision_name, m_num_products_device.push_back(3); #endif } - else if (m_collision_type == CollisionType::DeuteriumTritiumFusion) + else if ((m_collision_type == CollisionType::DeuteriumTritiumToNeutronHeliumFusion) + || (m_collision_type == CollisionType::DeuteriumDeuteriumToProtonTritiumFusion) + || (m_collision_type == CollisionType::DeuteriumDeuteriumToNeutronHeliumFusion)) { m_num_product_species = 2; m_num_products_host.push_back(1); From 663e7f6eebea9f2ff43cea0cc82f76e2c2b0f035 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 31 Oct 2022 17:03:56 -0700 Subject: [PATCH 0133/1346] Perlmutter (NERSC): Update Modules & Jobscript (#3493) * Perlmutter (NERSC): Update 
Modules Update after major system upgrade. * Perlmutter (NERSC): Update Jobscript * Perlmutter (NERSC): Remove Early Access Warning * Perlmutter (NERSC): Process Affinity Co-authored-by: Kevin Gott Co-authored-by: Kevin Gott --- Docs/source/install/hpc/perlmutter.rst | 5 ----- .../machines/perlmutter-nersc/perlmutter.sbatch | 16 +++++++++++----- .../perlmutter_warpx.profile.example | 11 +++-------- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index 6b0c67b6a44..9ae09656ff4 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -3,11 +3,6 @@ Perlmutter (NERSC) ================== -.. warning:: - - Perlmutter is still in acceptance testing and environments change often. - Please reach visit this page often for updates and reach out to us if something needs an update. - The `Perlmutter cluster `_ is located at NERSC. diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch index 489ca6ccc81..95ccffd4591 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch @@ -1,6 +1,6 @@ #!/bin/bash -l -# Copyright 2021 Axel Huebl, Kevin Gott +# Copyright 2021-2022 Axel Huebl, Kevin Gott # # This file is part of WarpX. # @@ -13,7 +13,7 @@ #SBATCH -A #SBATCH -q regular #SBATCH -C gpu -#SBATCH -c 32 +#SBATCH --exclusive #SBATCH --ntasks-per-gpu=1 #SBATCH --gpus-per-node=4 #SBATCH -o WarpX.o%j @@ -21,14 +21,20 @@ # GPU-aware MPI export MPICH_GPU_SUPPORT_ENABLED=1 +export MPICH_OFI_NIC_POLICY=GPU -# expose one GPU per MPI rank -export CUDA_VISIBLE_DEVICES=$SLURM_LOCALID +# threads for OpenMP and threaded compressors per MPI rank +export SRUN_CPUS_PER_TASK=32 EXE=./warpx #EXE=../WarpX/build/bin/warpx.3d.MPI.CUDA.DP.OPMD.QED #EXE=./main3d.gnu.TPROF.MPI.CUDA.ex INPUTS=inputs_small -srun ${EXE} ${INPUTS} \ +# CUDA visible devices are ordered inverse to local task IDs +srun /bin/bash -l -c " \ + export CUDA_VISIBLE_DEVICES=$((3-SLURM_LOCALID)); + ${EXE} ${INPUTS} \ + amrex.the_arena_is_managed=0 \ + amrex.use_gpu_aware_mpi=1" \ > output.txt diff --git a/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example index d6a1e15eb75..79d73ca35b6 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example @@ -1,19 +1,14 @@ # please set your project account -#export proj= # LBNL/AMP: m3906_g +#export proj="_g" # change me # required dependencies module load cmake/3.22.0 -module load PrgEnv-gnu -module load cudatoolkit - -# optional: just an additional text editor -# module load nano # TODO: request from support # optional: for QED support with detailed tables module load boost/1.78.0-gnu # optional: for openPMD and PSATD+RZ support -module load cray-hdf5-parallel/1.12.1.1 +module load cray-hdf5-parallel/1.12.1.5 export CMAKE_PREFIX_PATH=$HOME/sw/perlmutter/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/perlmutter/adios2-2.7.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/perlmutter/blaspp-master:$CMAKE_PREFIX_PATH @@ -25,7 +20,7 @@ export LD_LIBRARY_PATH=$HOME/sw/perlmutter/blaspp-master/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$HOME/sw/perlmutter/lapackpp-master/lib64:$LD_LIBRARY_PATH # optional: for Python bindings or libEnsemble -module load cray-python/3.9.7.1 
+module load cray-python/3.9.12.1 if [ -d "$HOME/sw/perlmutter/venvs/warpx" ] then From 58d42d4d29070f160d109c8b116b0f7559417328 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 2 Nov 2022 08:19:29 -0500 Subject: [PATCH 0134/1346] CUDA CI: Ubuntu 20.04+ (#3496) Migrate CUDA CI to use Ubuntu 18.04 -> 20.04 --- .github/workflows/cuda.yml | 12 ++-- .../dependencies/{nvcc11.sh => nvcc11-0.sh} | 8 ++- .github/workflows/dependencies/nvcc11-8.sh | 61 +++++++++++++++++++ 3 files changed, 72 insertions(+), 9 deletions(-) rename .github/workflows/dependencies/{nvcc11.sh => nvcc11-0.sh} (85%) create mode 100755 .github/workflows/dependencies/nvcc11-8.sh diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index b3554875332..e2700cb0f90 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -12,8 +12,8 @@ jobs: # https://github.com/ComputationalRadiationPhysics/picongpu/blob/0.5.0/share/picongpu/dockerfiles/ubuntu-1604/Dockerfile # https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ build_nvcc: - name: NVCC 11.0.2 SP - runs-on: ubuntu-18.04 + name: NVCC 11.0.3 SP + runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false env: CXXFLAGS: "-Werror" @@ -30,7 +30,7 @@ jobs: python-version: '3.x' - name: install dependencies run: | - .github/workflows/dependencies/nvcc11.sh + .github/workflows/dependencies/nvcc11-0.sh - name: CCache Cache uses: actions/cache@v2 # - once stored under a key, they become immutable (even if local cache path content changes) @@ -83,14 +83,14 @@ jobs: # make sure legacy build system continues to build, i.e., that we don't forget # to add new .cpp files build_nvcc_gnumake: - name: NVCC 11.0.2 GNUmake - runs-on: ubuntu-18.04 + name: NVCC 11.8.0 GNUmake + runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v2 - name: install dependencies run: | - .github/workflows/dependencies/nvcc11.sh + .github/workflows/dependencies/nvcc11-8.sh - name: CCache Cache uses: actions/cache@v2 # - once stored under a key, they become immutable (even if local cache path content changes) diff --git a/.github/workflows/dependencies/nvcc11.sh b/.github/workflows/dependencies/nvcc11-0.sh similarity index 85% rename from .github/workflows/dependencies/nvcc11.sh rename to .github/workflows/dependencies/nvcc11-0.sh index 7f52c0ce000..2f840e41664 100755 --- a/.github/workflows/dependencies/nvcc11.sh +++ b/.github/workflows/dependencies/nvcc11-0.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # -# Copyright 2020 The WarpX Community +# Copyright 2020-2022 The WarpX Community # # License: BSD-3-Clause-LBNL # Authors: Axel Huebl @@ -21,8 +21,10 @@ sudo apt-get install -y \ pkg-config \ wget -sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub -echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64 /" \ +wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin +sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 +sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub +echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" \ | sudo tee /etc/apt/sources.list.d/cuda.list sudo apt-get update diff --git a/.github/workflows/dependencies/nvcc11-8.sh b/.github/workflows/dependencies/nvcc11-8.sh new file mode 100755 index 00000000000..fbf1ee6ba62 --- /dev/null 
+++ b/.github/workflows/dependencies/nvcc11-8.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +# +# Copyright 2020-2022 The WarpX Community +# +# License: BSD-3-Clause-LBNL +# Authors: Axel Huebl + +set -eu -o pipefail + +sudo apt-get -qqq update +sudo apt-get install -y \ + build-essential \ + ca-certificates \ + cmake \ + gnupg \ + libhiredis-dev \ + libopenmpi-dev \ + libzstd-dev \ + ninja-build \ + openmpi-bin \ + pkg-config \ + wget + +wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb +sudo dpkg -i cuda-keyring_1.0-1_all.deb + +sudo apt-get update +sudo apt-get install -y \ + cuda-command-line-tools-11-8 \ + cuda-compiler-11-8 \ + cuda-cupti-dev-11-8 \ + cuda-minimal-build-11-8 \ + cuda-nvml-dev-11-8 \ + cuda-nvtx-11-8 \ + libcufft-dev-11-8 \ + libcurand-dev-11-8 +sudo ln -s cuda-11.8 /usr/local/cuda + +# if we run out of temporary storage in CI: +#du -sh /usr/local/cuda-11.8 +#echo "+++ REDUCING CUDA Toolkit install size +++" +#sudo rm -rf /usr/local/cuda-11.8/targets/x86_64-linux/lib/libcu{fft,pti,rand}_static.a +#sudo rm -rf /usr/local/cuda-11.8/targets/x86_64-linux/lib/libnvperf_host_static.a +#du -sh /usr/local/cuda-11.8/ +#df -h + +# cmake-easyinstall +# +sudo curl -L -o /usr/local/bin/cmake-easyinstall https://git.io/JvLxY +sudo chmod a+x /usr/local/bin/cmake-easyinstall +export CEI_SUDO="sudo" +export CEI_TMP="/tmp/cei" + +# ccache 4.2+ +# +CXXFLAGS="" cmake-easyinstall --prefix=/usr/local \ + git+https://github.com/ccache/ccache.git@v4.6 \ + -DCMAKE_BUILD_TYPE=Release \ + -DENABLE_DOCUMENTATION=OFF \ + -DENABLE_TESTING=OFF \ + -DWARNINGS_AS_ERRORS=OFF From 3eb6352c98c651f3330aafe7c6574d8ccb773f5a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 2 Nov 2022 13:37:20 -0500 Subject: [PATCH 0135/1346] Release 22.11 (#3499) * AMReX: 22.11 * PICSAR: 22.11 * WarpX: 22.11 --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- LICENSE.txt | 2 +- Python/setup.py | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/PICSAR.cmake | 2 +- run_test.sh | 2 +- setup.py | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index e2700cb0f90..87b4352cd77 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 735c3513153f1d06f783e64f455816be85fb3602 && cd - + cd amrex && git checkout --detach 22.11 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/CMakeLists.txt b/CMakeLists.txt index 8f3ae506e49..209e4d118b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.20.0) -project(WarpX VERSION 22.10) +project(WarpX VERSION 22.11) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index ac933290595..a26c8f4db46 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -73,9 +73,9 @@ # built documents. # # The short X.Y version. -version = u'22.10' +version = u'22.11' # The full version, including alpha/beta/rc tags. 
-release = u'22.10' +release = u'22.11' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/LICENSE.txt b/LICENSE.txt index 3ccd4f4875a..16647b93187 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -WarpX v22.10 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +WarpX v22.11 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/Python/setup.py b/Python/setup.py index 83abf888e5c..03ddd684459 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -54,7 +54,7 @@ package_data = {} setup(name = 'pywarpx', - version = '22.10', + version = '22.11', packages = ['pywarpx'], package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 1de1c6b07aa..9866cf685c3 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 735c3513153f1d06f783e64f455816be85fb3602 +branch = 22.11 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 9f70b774285..2a39fb2169a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 735c3513153f1d06f783e64f455816be85fb3602 +branch = 22.11 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 8b794d61460..82f251318cd 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -221,7 +221,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 22.10 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) + find_package(AMReX 22.11 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) message(STATUS "AMReX: Found version '${AMReX_VERSION}'") endif() endmacro() @@ -235,7 +235,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "735c3513153f1d06f783e64f455816be85fb3602" +set(WarpX_amrex_branch "22.11" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index 
3ecee014139..600f5c128cb 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -82,7 +82,7 @@ function(find_picsar) #message(STATUS "PICSAR: Using version '${PICSAR_VERSION}'") else() # not supported by PICSAR (yet) - #find_package(PICSAR 22.10 CONFIG REQUIRED QED) + #find_package(PICSAR 22.11 CONFIG REQUIRED QED) #message(STATUS "PICSAR: Found version '${PICSAR_VERSION}'") message(FATAL_ERROR "PICSAR: Cannot be used as externally installed " "library yet. " diff --git a/run_test.sh b/run_test.sh index 2f2ab444877..4bf8b30d714 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 735c3513153f1d06f783e64f455816be85fb3602 && cd - +cd amrex && git checkout --detach 22.11 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git diff --git a/setup.py b/setup.py index 1dec15a38db..54959d9eda5 100644 --- a/setup.py +++ b/setup.py @@ -272,7 +272,7 @@ def build_extension(self, ext): setup( name='pywarpx', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version = '22.10', + version = '22.11', packages = ['pywarpx'], package_dir = {'pywarpx': 'Python/pywarpx'}, author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.', From 9bc04e1a5ff082fb9f604adef5b3c30b4c76ec8d Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 2 Nov 2022 14:15:59 -0700 Subject: [PATCH 0136/1346] Allow arbitrary laser antenna normal in picmi (#3477) * allow arbitrary laser antenna normal - picmi * always check that laser propagation direction is along the normal direction of the antenna if the antenna normal is given --- Python/pywarpx/picmi.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 1c6ad317d50..c112f01576c 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1054,13 +1054,32 @@ def initialize_inputs(self): expression = pywarpx.my_constants.mangle_expression(self.field_expression, self.mangle_dict) self.laser.__setattr__('field_function(X,Y,t)', expression) + class LaserAntenna(picmistandard.PICMI_LaserAntenna): def initialize_inputs(self, laser): laser.laser.position = self.position # This point is on the laser plane - laser.laser.direction = self.normal_vector # The plane normal direction + if ( + self.normal_vector is not None + and not np.allclose(laser.laser.direction, self.normal_vector) + ): + raise AttributeError( + 'The specified laser direction does not match the ' + 'specified antenna normal.' 
+ ) + self.normal_vector = laser.laser.direction # The plane normal direction if isinstance(laser, GaussianLaser): - laser.laser.profile_focal_distance = laser.focal_position[2] - self.position[2] # Focal distance from the antenna (in meters) - laser.laser.profile_t_peak = (self.position[2] - laser.centroid_position[2])/constants.c # The time at which the laser reaches its peak (in seconds) + # Focal distance from the antenna (in meters) + laser.laser.profile_focal_distance = np.sqrt( + (laser.focal_position[0] - self.position[0])**2 + + (laser.focal_position[1] - self.position[1])**2 + + (laser.focal_position[2] - self.position[2])**2 + ) + # The time at which the laser reaches its peak (in seconds) + laser.laser.profile_t_peak = np.sqrt( + (self.position[0] - laser.centroid_position[0])**2 + + (self.position[1] - laser.centroid_position[1])**2 + + (self.position[2] - laser.centroid_position[2])**2 + ) / constants.c class ConstantAppliedField(picmistandard.PICMI_ConstantAppliedField): From 51041aad7f92973ee2ca43f725c092786e9a66c3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 2 Nov 2022 16:50:32 -0500 Subject: [PATCH 0137/1346] Perlmutter (NERSC): Finish Affinity Control (#3495) Slurm at Perlmutter is not yet configured to do proper affinity control itself. Thus, we do it ourselves. Co-authored-by: Kevin Gott --- .../perlmutter-nersc/perlmutter.sbatch | 29 ++++++++++--------- .../perlmutter_warpx.profile.example | 3 -- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch index 95ccffd4591..1495d8b9f74 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch @@ -6,35 +6,36 @@ # # License: BSD-3-Clause-LBNL -#SBATCH -t 01:00:00 -#SBATCH -N 4 +#SBATCH -t 00:10:00 +#SBATCH -N 2 #SBATCH -J WarpX # note: must end on _g #SBATCH -A #SBATCH -q regular #SBATCH -C gpu #SBATCH --exclusive -#SBATCH --ntasks-per-gpu=1 +#SBATCH --gpu-bind=none #SBATCH --gpus-per-node=4 #SBATCH -o WarpX.o%j #SBATCH -e WarpX.e%j -# GPU-aware MPI -export MPICH_GPU_SUPPORT_ENABLED=1 +# executable & inputs file or python interpreter & PICMI script here +EXE=./warpx +INPUTS=inputs_small + +# pin to closest NIC to GPU export MPICH_OFI_NIC_POLICY=GPU # threads for OpenMP and threaded compressors per MPI rank export SRUN_CPUS_PER_TASK=32 -EXE=./warpx -#EXE=../WarpX/build/bin/warpx.3d.MPI.CUDA.DP.OPMD.QED -#EXE=./main3d.gnu.TPROF.MPI.CUDA.ex -INPUTS=inputs_small +# depends on https://github.com/ECP-WarpX/WarpX/issues/2009 +#GPU_AWARE_MPI="amrex.the_arena_is_managed=1 amrex.use_gpu_aware_mpi=1" +GPU_AWARE_MPI="" # CUDA visible devices are ordered inverse to local task IDs -srun /bin/bash -l -c " \ - export CUDA_VISIBLE_DEVICES=$((3-SLURM_LOCALID)); - ${EXE} ${INPUTS} \ - amrex.the_arena_is_managed=0 \ - amrex.use_gpu_aware_mpi=1" \ +# Reference: nvidia-smi topo -m +srun --cpu-bind=cores bash -c " + export CUDA_VISIBLE_DEVICES=\$((3-SLURM_LOCALID)); + ${EXE} ${INPUTS} ${GPU_AWARE_MPI}" \ > output.txt diff --git a/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example index 79d73ca35b6..88d7ccff9bd 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_warpx.profile.example @@ -34,9 +34,6 @@ alias getNode="salloc -N 1 --ntasks-per-node=4 -t 1:00:00 -q interactive -C gpu # usage: runNode alias runNode="srun -N 
1 --ntasks-per-node=4 -t 0:30:00 -q interactive -C gpu --gpu-bind=single:1 -c 32 -G 4 -A $proj" -# GPU-aware MPI -export MPICH_GPU_SUPPORT_ENABLED=1 - # necessary to use CUDA-Aware MPI and run a job export CRAY_ACCEL_TARGET=nvidia80 From 4f780ff49b32ddd063ad6235c6e92fa12459fb00 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 2 Nov 2022 18:49:13 -0500 Subject: [PATCH 0138/1346] Summit (OLCF): Jupyter GPFS HDF5 Issues (#3498) Document how to work-around Jupyter HDF5 issues on OLCF until they fix it. --- Docs/source/install/hpc/summit.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Docs/source/install/hpc/summit.rst b/Docs/source/install/hpc/summit.rst index 2a22ab2e79b..b529111df78 100644 --- a/Docs/source/install/hpc/summit.rst +++ b/Docs/source/install/hpc/summit.rst @@ -258,6 +258,12 @@ Known System Issues import os os.environ['HDF5_USE_FILE_LOCKING'] = "FALSE" +.. warning:: + + Sep 20th, 2022 (OLCFHELP-8992): + The above **HDF5 Jupyter read** work-around for OLCFHELP-3685 does not work anymore, due to the way that GPFS is mounted via NSF on Jupyter nodes. + As a work-around until this is fixed, please copy your HDF5 data to ``/ccs``, ``$HOME`` or use ADIOS2 BP instead of HDF5 files. + .. warning:: Aug 27th, 2021 (OLCFHELP-3442): From 868684bf59a949c0f6795e56d65040c6cd9e7282 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 3 Nov 2022 17:58:45 -0500 Subject: [PATCH 0139/1346] Docs: Dependencies w/ Ascent & SENSEI (#3502) Add Ascent and SENSEI minimal versions to our dependency doc page. --- Docs/source/install/dependencies.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 7a8aa0bb55c..e9a5ff2f090 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -25,6 +25,8 @@ Optional dependencies include: - `openPMD-api 0.14.2+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support - see `optional I/O backends `__ +- `Ascent 0.8.0+ `__: for in situ 3D visualization +- `SENSEI 4.0.0+ `__: for in situ analysis and visualization - `CCache `__: to speed up rebuilds (For CUDA support, needs version 3.7.9+ and 4.2+ is recommended) - `Ninja `__: for faster parallel compiles - `Python 3.7+ `__ From 1baa62e23097434271e13cd82ba56d5b2bd8ea60 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 4 Nov 2022 12:56:06 -0500 Subject: [PATCH 0140/1346] CMake: AMReX_INTALL Update (Shared) (#3503) Reflect new project precedence option `AMReX_BUILD_SHARED_LIBS` in superbuild install logic defaults for AMReX. 
--- cmake/dependencies/AMReX.cmake | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 82f251318cd..dc8033df6a2 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -69,7 +69,11 @@ macro(find_amrex) set(AMReX_SENSEI ON CACHE INTERNAL "") endif() - set(AMReX_INSTALL ${BUILD_SHARED_LIBS} CACHE INTERNAL "") + if(DEFINED AMReX_BUILD_SHARED_LIBS) + set(AMReX_INSTALL ${AMReX_BUILD_SHARED_LIBS} CACHE INTERNAL "") + else() + set(AMReX_INSTALL ${BUILD_SHARED_LIBS} CACHE INTERNAL "") + endif() set(AMReX_AMRLEVEL OFF CACHE INTERNAL "") set(AMReX_ENABLE_TESTS OFF CACHE INTERNAL "") set(AMReX_FORTRAN OFF CACHE INTERNAL "") From 5dbcfa4b0018f14a27b7d5831c8d69ced0040d6d Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 4 Nov 2022 11:30:46 -0700 Subject: [PATCH 0141/1346] Add SymPy Notebook to Derive PSATD Equations (#3456) * Add SymPy Notebook to Derive PSATD Equations * Add Comments * Remove Redundant Simplification, Reduce Cost * Clear Cell Output * Don't Need IPython.display.Math * Remove Unused Function * Improve Inline Comments * Improve Markdown Cells * Add Automated Verification of ODEs Solution * Fix Bug in Automated Verification * Diagonalize only M for W1,W2,W3 --- Tools/Algorithms/psatd.ipynb | 721 +++++++++++++++++++++++++++++++++++ 1 file changed, 721 insertions(+) create mode 100644 Tools/Algorithms/psatd.ipynb diff --git a/Tools/Algorithms/psatd.ipynb b/Tools/Algorithms/psatd.ipynb new file mode 100644 index 00000000000..c2f326e4110 --- /dev/null +++ b/Tools/Algorithms/psatd.ipynb @@ -0,0 +1,721 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "import sympy as sp\n", + "from sympy import *\n", + "\n", + "sp.init_session()\n", + "sp.init_printing()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Selection of algorithm:\n", + "- `divE_cleaning` (bool: `True`, `False`): \"$\\text{div}(\\boldsymbol{E})$ cleaning\" using the additional scalar field $F$;\n", + "\n", + "- `divB_cleaning` (bool: `True`, `False`): \"$\\text{div}(\\boldsymbol{B})$ cleaning\" using the additional scalar field $G$;\n", + "\n", + "- `J_in_time` (str: `'constant'`, `'linear'`, `'quadratic'`): $\\boldsymbol{J}$ constant, linear, or quadratic in time;\n", + "\n", + "- `rho_in_time` (str: `'constant'`, `'linear'`, `'quadratic'`): $\\rho$ constant, linear, or quadratic in time.\n", + "\n", + "In the notebook, the constant, linear, and quadratic coefficients of $\\boldsymbol{J}$ and $\\rho$ are denoted with the suffixes `_c0`, `_c1`, `_c2`, respectively. However, the corresponding coefficients in the displayed equations are denoted with the prefixes $\\gamma$, $\\beta$, and $\\alpha$, respectively. For example, if $\\rho$ is quadratic in time, it will be denoted as `rho = rho_c0 + rho_c1*(t-tn) + rho_c2*(t-tn)**2` in the notebook and $\\rho(t) = \\gamma_{\\rho} + \\beta_{\\rho} (t-t_n) + \\alpha_{\\rho} (t-t_n)^2$ in the displayed equations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "divE_cleaning = True\n", + "divB_cleaning = True\n", + "J_in_time = 'constant'\n", + "rho_in_time = 'constant'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Auxiliary functions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def check_diag(W, D, P, invP):\n", + " \"\"\"\n", + " Check diagonalization of W as P*D*P**(-1).\n", + " \"\"\"\n", + " Wd = P * D * invP\n", + " for i in range(Wd.shape[0]):\n", + " for j in range(Wd.shape[1]):\n", + " Wd[i,j] = Wd[i,j].expand().simplify()\n", + " diff = W[i,j] - Wd[i,j]\n", + " diff = diff.expand().simplify()\n", + " assert (diff == 0), f'Diagonalization failed: W[{i},{j}] - Wd[{i},{j}] = {diff} is not zero'\n", + "\n", + "def simple_mat(W):\n", + " \"\"\"\n", + " Simplify matrix W.\n", + " \"\"\"\n", + " for i in range(W.shape[0]):\n", + " for j in range(W.shape[1]):\n", + " W[i,j] = W[i,j].expand().simplify()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Definition of symbols:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define dimensionality parameter used throughout the notebook\n", + "dim = 6\n", + "if divE_cleaning:\n", + " dim += 1\n", + "if divB_cleaning:\n", + " dim += 1\n", + "\n", + "# Define symbols for physical constants\n", + "c = sp.symbols(r'c', real=True, positive=True)\n", + "mu0 = sp.symbols(r'\\mu_0', real=True, positive=True)\n", + "\n", + "# Define symbols for time variables\n", + "# (s is auxiliary variable used in integral over time)\n", + "s = sp.symbols(r's', real=True, positive=True)\n", + "t = sp.symbols(r't', real=True, positive=True)\n", + "tn = sp.symbols(r't_n', real=True, positive=True)\n", + "dt = sp.symbols(r'\\Delta{t}', real=True, positive=True)\n", + "\n", + "# The assumption that kx, ky and kz are positive is general enough\n", + "# and makes it easier for SymPy to perform some of the calculations\n", + "kx = sp.symbols(r'k_x', real=True, positive=True)\n", + "ky = sp.symbols(r'k_y', real=True, positive=True)\n", + "kz = sp.symbols(r'k_z', real=True, positive=True)\n", + "\n", + "# Define symbols for the Cartesian components of the electric field\n", + "Ex = sp.symbols(r'E^x')\n", + "Ey = sp.symbols(r'E^y')\n", + "Ez = sp.symbols(r'E^z')\n", + "E = Matrix([[Ex], [Ey], [Ez]])\n", + "\n", + "# Define symbols for the Cartesian components of the magnetic field\n", + "Bx = sp.symbols(r'B^x')\n", + "By = sp.symbols(r'B^y')\n", + "Bz = sp.symbols(r'B^z')\n", + "B = Matrix([[Bx], [By], [Bz]])\n", + "\n", + "# Define symbol for the scalar field F used with div(E) cleaning\n", + "if divE_cleaning:\n", + " F = sp.symbols(r'F')\n", + "\n", + "# Define symbol for the scalar field G used with div(B) cleaning\n", + "if divB_cleaning:\n", + " G = sp.symbols(r'G')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### First-order ODEs for $\\boldsymbol{E}$, $\\boldsymbol{B}$, $F$ and $G$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define first-order time derivatives of the electric field\n", + "dEx_dt = I*c**2*(ky*Bz-kz*By) \n", + "dEy_dt = I*c**2*(kz*Bx-kx*Bz)\n", + "dEz_dt = I*c**2*(kx*By-ky*Bx)\n", + "\n", + "# Define first-order time derivatives of the magnetic field\n", + "dBx_dt = -I*(ky*Ez-kz*Ey)\n", + 
"dBy_dt = -I*(kz*Ex-kx*Ez)\n", + "dBz_dt = -I*(kx*Ey-ky*Ex)\n", + "\n", + "# Define first-order time derivative of the scalar field F used with div(E) cleaning,\n", + "# and related additional terms in the first-order time derivative of the electric field\n", + "if divE_cleaning:\n", + " dEx_dt += I*c**2*F*kx \n", + " dEy_dt += I*c**2*F*ky\n", + " dEz_dt += I*c**2*F*kz\n", + " dF_dt = I*(kx*Ex+ky*Ey+kz*Ez)\n", + "\n", + "# Define first-order time derivative of the scalar field G used with div(B) cleaning,\n", + "# and related additional terms in the first-order time derivative of the magnetic field\n", + "if divB_cleaning:\n", + " dBx_dt += I*c**2*G*kx\n", + " dBy_dt += I*c**2*G*ky\n", + " dBz_dt += I*c**2*G*kz\n", + " dG_dt = I*(kx*Bx+ky*By+kz*Bz)\n", + "\n", + "# Define array of first-order time derivatives of the electric and magnetic fields\n", + "dE_dt = Matrix([[dEx_dt], [dEy_dt], [dEz_dt]])\n", + "dB_dt = Matrix([[dBx_dt], [dBy_dt], [dBz_dt]])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Linear system of ODEs for $\\boldsymbol{E}$, $\\boldsymbol{B}$, $F$ and $G$:\n", + "$$\n", + "\\frac{\\partial}{\\partial t}\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E} \\\\\n", + "\\boldsymbol{B} \\\\\n", + "F \\\\\n", + "G\n", + "\\end{bmatrix}\n", + "= M\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E} \\\\\n", + "\\boldsymbol{B} \\\\\n", + "F \\\\\n", + "G\n", + "\\end{bmatrix}\n", + "-\\mu_0 c^2 \n", + "\\begin{bmatrix}\n", + "\\boldsymbol{J} \\\\\n", + "\\boldsymbol{0} \\\\\n", + "\\rho \\\\\n", + " 0\n", + "\\end{bmatrix}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Define array of all fields\n", + "fields_list = [Ex, Ey, Ez, Bx, By, Bz]\n", + "if divE_cleaning:\n", + " fields_list.append(F)\n", + "if divB_cleaning:\n", + " fields_list.append(G)\n", + "EBFG = zeros(dim, 1)\n", + "for i in range(EBFG.shape[0]):\n", + " EBFG[i] = fields_list[i]\n", + "\n", + "# Define array of first-order time derivatives of all fields\n", + "fields_list = [dEx_dt, dEy_dt, dEz_dt, dBx_dt, dBy_dt, dBz_dt]\n", + "if divE_cleaning:\n", + " fields_list.append(dF_dt)\n", + "if divB_cleaning:\n", + " fields_list.append(dG_dt)\n", + "dEBFG_dt = zeros(dim, 1)\n", + "for i in range(dEBFG_dt.shape[0]):\n", + " dEBFG_dt[i] = fields_list[i]\n", + "dEBFG_dt = dEBFG_dt.expand()\n", + "\n", + "# Define matrix M representing the linear system of ODEs\n", + "M = zeros(dim)\n", + "for i in range(M.shape[0]):\n", + " for j in range(M.shape[1]):\n", + " M[i,j] = dEBFG_dt[i].coeff(EBFG[j], 1)\n", + "print(r'M = ')\n", + "display(M)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Solution of linear system of ODEs for $\\boldsymbol{E}$, $\\boldsymbol{B}$, $F$ and $G$:\n", + "\n", + "The solution of the linear system of ODEs above is given by the superposition of the **general solution of the homogeneous system** (denoted with the subscript \"h\"),\n", + "\n", + "$$\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E}_h(t) \\\\\n", + "\\boldsymbol{B}_h(t) \\\\\n", + "F_h(t) \\\\\n", + "G_h(t)\n", + "\\end{bmatrix}\n", + "= e^{M (t-t_n)}\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E}(t_n) \\\\\n", + "\\boldsymbol{B}(t_n) \\\\\n", + "F(t_n) \\\\\n", + "G(t_n)\n", + "\\end{bmatrix} \\,,\n", + "$$\n", + "\n", + "and the **particular solution of the non-homogeneous system** (denoted with the subscript \"nh\"),\n", + "\n", + "$$\n", + "\\begin{bmatrix}\n", + 
"\\boldsymbol{E}_{nh}(t) \\\\\n", + "\\boldsymbol{B}_{nh}(t) \\\\\n", + "F_{nh}(t) \\\\\n", + "G_{nh}(t)\n", + "\\end{bmatrix}\n", + "= -\\mu_0 c^2 e^{M t} \\left(\\int_{t_n}^t e^{-M s}\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{J} \\\\\n", + "\\boldsymbol{0} \\\\\n", + "\\rho \\\\\n", + "0\n", + "\\end{bmatrix}\n", + "ds\\right) \\,.\n", + "$$" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Diagonalization of $M = P D P^{-1}$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "# Compute matrices of eigenvectors and eigenvalues for diagonalization of M\n", + "P, D = M.diagonalize()\n", + "invP = P**(-1)\n", + "expD = exp(D)\n", + "check_diag(M, D, P, invP)\n", + "print('P = ')\n", + "display(P)\n", + "print('D = ')\n", + "display(D)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Diagonalization of $W_1 = M (t-t_n) = P_1 D_1 P_1^{-1}$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "# Compute matrices of eigenvectors and eigenvalues for diagonalization of W1\n", + "P1 = P\n", + "D1 = D * (t-tn)\n", + "invP1 = invP\n", + "expD1 = exp(D1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Diagonalization of $W_2 = M t = P_2 D_2 P_2^{-1}$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "# Compute matrices of eigenvectors and eigenvalues for diagonalization of W2\n", + "P2 = P\n", + "D2 = D * t\n", + "invP2 = invP\n", + "expD2 = exp(D2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Diagonalization of $W_3 = -M s = P_3 D_3 P_3^{-1}$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "# Compute matrices of eigenvectors and eigenvalues for diagonalization of W3\n", + "P3 = P\n", + "D3 = (-1) * D * s\n", + "invP3 = invP\n", + "expD3 = exp(D3)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### General solution (homogeneous system):\n", + "\n", + "$$\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E}_h(t) \\\\\n", + "\\boldsymbol{B}_h(t) \\\\\n", + "F_h(t) \\\\\n", + "G_h(t)\n", + "\\end{bmatrix}\n", + "= e^{M (t-t_n)}\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E}(t_n) \\\\\n", + "\\boldsymbol{B}(t_n) \\\\\n", + "F(t_n) \\\\\n", + "G(t_n)\n", + "\\end{bmatrix}\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "# Compute exp(W1) = exp(M*(t-tn))\n", + "expW1 = P1 * expD1 * invP1\n", + "\n", + "# Compute general solution at time t = tn+dt\n", + "EBFG_h = expW1 * EBFG \n", + "EBFG_h_new = EBFG_h.subs(t, tn+dt)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Definition of $\\boldsymbol{J}$ and $\\rho$:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define J\n", + "Jx_c0 = sp.symbols(r'\\gamma_{J_x}', real=True)\n", + "Jy_c0 = sp.symbols(r'\\gamma_{J_y}', real=True)\n", + "Jz_c0 = sp.symbols(r'\\gamma_{J_z}', real=True)\n", + "Jx = Jx_c0\n", + "Jy = Jy_c0\n", + "Jz = Jz_c0\n", + "if J_in_time == 'linear':\n", + " Jx_c1 = sp.symbols(r'\\beta_{J_x}', real=True)\n", + " Jy_c1 = sp.symbols(r'\\beta_{J_y}', 
real=True)\n", + " Jz_c1 = sp.symbols(r'\\beta_{J_z}', real=True)\n", + " Jx += Jx_c1*(s-tn)\n", + " Jy += Jy_c1*(s-tn)\n", + " Jz += Jz_c1*(s-tn)\n", + "if J_in_time == 'quadratic':\n", + " Jx_c1 = sp.symbols(r'\\beta_{J_x}', real=True)\n", + " Jy_c1 = sp.symbols(r'\\beta_{J_y}', real=True)\n", + " Jz_c1 = sp.symbols(r'\\beta_{J_z}', real=True)\n", + " Jx_c2 = sp.symbols(r'\\alpha_{J_x}', real=True)\n", + " Jy_c2 = sp.symbols(r'\\alpha_{J_y}', real=True)\n", + " Jz_c2 = sp.symbols(r'\\alpha_{J_z}', real=True)\n", + " Jx += Jx_c1*(s-tn) + Jx_c2*(s-tn)**2\n", + " Jy += Jy_c1*(s-tn) + Jy_c2*(s-tn)**2\n", + " Jz += Jz_c1*(s-tn) + Jz_c2*(s-tn)**2\n", + "\n", + "# Define rho\n", + "if divE_cleaning:\n", + " rho_c0 = sp.symbols(r'\\gamma_{\\rho}', real=True)\n", + " rho = rho_c0\n", + " if rho_in_time == 'linear':\n", + " rho_c1 = sp.symbols(r'\\beta_{\\rho}', real=True)\n", + " rho += rho_c1*(s-tn)\n", + " if rho_in_time == 'quadratic':\n", + " rho_c1 = sp.symbols(r'\\beta_{\\rho}', real=True)\n", + " rho_c2 = sp.symbols(r'\\alpha_{\\rho}', real=True)\n", + " rho += rho_c1*(s-tn) + rho_c2*(s-tn)**2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Particular solution (non-homogeneous system):\n", + "\n", + "$$\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{E}_{nh}(t) \\\\\n", + "\\boldsymbol{B}_{nh}(t) \\\\\n", + "F_{nh}(t) \\\\\n", + "G_{nh}(t)\n", + "\\end{bmatrix}\n", + "= -\\mu_0 c^2 e^{M t} \\left(\\int_{t_n}^t e^{-M s}\n", + "\\begin{bmatrix}\n", + "\\boldsymbol{J} \\\\\n", + "\\boldsymbol{0} \\\\\n", + "\\rho \\\\\n", + "0\n", + "\\end{bmatrix}\n", + "ds\\right)\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "%%time \n", + "\n", + "# Define array of source terms\n", + "fields_list = [Jx, Jy, Jz, 0, 0, 0]\n", + "if divE_cleaning:\n", + " fields_list.append(rho)\n", + "if divB_cleaning:\n", + " fields_list.append(0)\n", + "S = zeros(dim, 1)\n", + "for i in range(S.shape[0]):\n", + " S[i] = -mu0*c**2 * fields_list[i]\n", + "\n", + "# Compute integral of exp(W3)*S over s (assuming |k| is not zero)\n", + "integral = zeros(dim, 1)\n", + "tmp = expD3 * invP3 * S\n", + "simple_mat(tmp)\n", + "for i in range(dim):\n", + " r = integrate(tmp[i], (s, tn, t))\n", + " integral[i] = r\n", + " \n", + "# Compute particular solution at time t = tn+dt\n", + "tmp = invP2 * P3\n", + "simple_mat(tmp)\n", + "EBFG_nh = P2 * expD2 * tmp * integral\n", + "EBFG_nh_new = EBFG_nh.subs(t, tn+dt)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Verification of the solution:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "for i in range(EBFG.shape[0]):\n", + " lhs = dEBFG_dt[i] + S[i]\n", + " lhs = lhs.subs(s, tn) # sources were written as functions of s\n", + " rhs = (EBFG_h[i] + EBFG_nh[i]).diff(t)\n", + " rhs = rhs.subs(t, tn) # results were written as functions of t\n", + " rhs = rhs.simplify()\n", + " diff = lhs - rhs\n", + " diff = diff.simplify()\n", + " assert (diff == 0), f'Integration of linear system of ODEs failed'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Coefficients of PSATD equations (homogeneous system):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "L = ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz']\n", + "R = ['Ex', 
'Ey', 'Ez', 'Bx', 'By', 'Bz']\n", + "if divE_cleaning:\n", + " L.append('F')\n", + " R.append('F')\n", + "if divB_cleaning:\n", + " L.append('G')\n", + " R.append('G')\n", + "\n", + "# Compute individual coefficients in the update equations\n", + "coeff_h = dict()\n", + "for i in range(dim):\n", + " for j in range(dim):\n", + " key = (L[i], R[j])\n", + " coeff_h[key] = EBFG_h_new[i].coeff(EBFG[j], 1).expand().simplify().rewrite(cos).trigsimp().simplify()\n", + " print(f'Coefficient of {L[i]} with respect to {R[j]}:')\n", + " display(coeff_h[key])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Coefficients of PSATD equations (non-homogeneous system):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "%%time\n", + "\n", + "L = ['Ex', 'Ey', 'Ez', 'Bx', 'By', 'Bz']\n", + "R = ['Jx_c0', 'Jy_c0', 'Jz_c0']\n", + "if J_in_time == 'linear':\n", + " R.append('Jx_c1')\n", + " R.append('Jy_c1')\n", + " R.append('Jz_c1')\n", + "if J_in_time == 'quadratic':\n", + " R.append('Jx_c1')\n", + " R.append('Jy_c1')\n", + " R.append('Jz_c1')\n", + " R.append('Jx_c2')\n", + " R.append('Jy_c2')\n", + " R.append('Jz_c2')\n", + "if divE_cleaning:\n", + " L.append('F')\n", + " R.append('rho_c0')\n", + " if rho_in_time == 'linear':\n", + " R.append('rho_c1')\n", + " if rho_in_time == 'quadratic':\n", + " R.append('rho_c1')\n", + " R.append('rho_c2')\n", + "if divB_cleaning:\n", + " L.append('G')\n", + "\n", + "cs = [Jx_c0, Jy_c0, Jz_c0]\n", + "if J_in_time == 'linear':\n", + " cs.append(Jx_c1)\n", + " cs.append(Jy_c1)\n", + " cs.append(Jz_c1)\n", + "if J_in_time == 'quadratic':\n", + " cs.append(Jx_c1)\n", + " cs.append(Jy_c1)\n", + " cs.append(Jz_c1)\n", + " cs.append(Jx_c2)\n", + " cs.append(Jy_c2)\n", + " cs.append(Jz_c2)\n", + "if divE_cleaning:\n", + " cs.append(rho_c0)\n", + " if rho_in_time == 'linear':\n", + " cs.append(rho_c1)\n", + " if rho_in_time == 'quadratic':\n", + " cs.append(rho_c1)\n", + " cs.append(rho_c2)\n", + " \n", + "# Compute individual coefficients in the update equation\n", + "coeff_nh = dict()\n", + "for i in range(len(L)):\n", + " for j in range(len(R)):\n", + " key = (L[i], R[j])\n", + " coeff_nh[key] = EBFG_nh_new[i].expand().coeff(cs[j], 1).expand().simplify().rewrite(cos).trigsimp().simplify()\n", + " print(f'Coefficient of {L[i]} with respect to {R[j]}:')\n", + " display(coeff_nh[key])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Coefficients of PSATD equations:\n", + "\n", + "Display the coefficients of the update equations one by one. For example, `coeff_h[('Ex', 'By')]` displays the coefficient of $E^x$ with respect to $B^y$ (resulting from the solution of the homogeneous system, hence the `_h` suffix), while `coeff_nh[('Ex', 'Jx_c0')]` displays the coefficient of $E^x$ with respect to $\\gamma_{J_x}$ (resulting from the solution of the non-homogeneous system, hence the `_nh` suffix). Note that $\\gamma_{J_x}$ is denoted as `Jx_c0` in the notebook, as described in the beginning." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "coeff_h[('Ex', 'By')]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "coeff_nh[('Ex', 'Jx_c0')]" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From f814d563216c2ed1c2c919e7183f8f8fb0d579df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Fri, 4 Nov 2022 20:14:28 +0100 Subject: [PATCH 0142/1346] Clean Species Physical Properties (#3505) --- Docs/source/usage/parameters.rst | 8 ++++---- Source/Particles/SpeciesPhysicalProperties.cpp | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index b3906ed140d..84add7daac1 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -568,11 +568,11 @@ Particle initialization Type of physical species. Currently, the accepted species are ``"electron"``, ``"positron"``, ``"muon"``, ``"antimuon"``, ``"photon"``, ``"neutron"``, ``"proton"`` , ``"alpha"``, - ``"hydrogen1"`` (a.k.a. ``"protium"``), ``"hydrogen2"`` (a.k.a. ``"deuterium"``), ``"hydrogen3"`` (a.k.a.``"tritium"``), + ``"hydrogen1"`` (a.k.a. ``"protium"``), ``"hydrogen2"`` (a.k.a. ``"deuterium"``), ``"hydrogen3"`` (a.k.a. ``"tritium"``), ``"helium"``, ``"helium3"``, ``"helium4"``, - ``"lithium"``, ``"lithium6"``, ``"lithium7"``, ``"beryllium"``, ``"boron"``, ``"boron10"``, ``"boron11"``, - ``"carbon"``, ``"carbon12"``, ``"carbon13"``, ``"nitrogen"``, ``"nitrogen14"``, ``"nitrogen15"``, - ``"oxygen"``, ``"oxygen16"``, ``"oxygen17"``, ``"oxygen18"``, ``"fluorine"``, ``"neon"``, ``"neon20"``, + ``"lithium"``, ``"lithium6"``, ``"lithium7"``, ``"beryllium"``, ``"beryllium9"``, ``"boron"``, ``"boron10"``, ``"boron11"``, + ``"carbon"``, ``"carbon12"``, ``"carbon13"``, ``"carbon14"``, ``"nitrogen"``, ``"nitrogen14"``, ``"nitrogen15"``, + ``"oxygen"``, ``"oxygen16"``, ``"oxygen17"``, ``"oxygen18"``, ``"fluorine"``, ``"fluorine19"``, ``"neon"``, ``"neon20"``, ``"neon21"``, ``"neon22"``, ``"aluminium"``, ``"argon"``, ``"copper"``, ``"xenon"`` and ``"gold"``. The difference between ``"proton"`` and ``"hydrogen1"`` is that the mass of the latter includes also the mass of the bound electron (same for ``"alpha"`` and ``"helium4"``). 
When only the name of an element is specified, the mass diff --git a/Source/Particles/SpeciesPhysicalProperties.cpp b/Source/Particles/SpeciesPhysicalProperties.cpp index 8086b14bd29..118db98f2c2 100644 --- a/Source/Particles/SpeciesPhysicalProperties.cpp +++ b/Source/Particles/SpeciesPhysicalProperties.cpp @@ -40,7 +40,6 @@ namespace { {"tritium" , PhysicalSpecies::hydrogen3}, {"proton" , PhysicalSpecies::proton}, {"helium" , PhysicalSpecies::helium}, - {"alpha" , PhysicalSpecies::alpha}, {"helium3" , PhysicalSpecies::helium3}, {"helium4" , PhysicalSpecies::helium4}, {"alpha" , PhysicalSpecies::alpha}, @@ -255,6 +254,9 @@ namespace { {PhysicalSpecies::neon22, Properties{ amrex::Real(21.991385114) * PhysConst::m_u, amrex::Real(10) * PhysConst::q_e}}, + {PhysicalSpecies::aluminium, Properties{ + amrex::Real(26.98153853) * PhysConst::m_u, + amrex::Real(13) * PhysConst::q_e}}, {PhysicalSpecies::argon, Properties{ amrex::Real(39.948) * PhysConst::m_u, amrex::Real(18) * PhysConst::q_e}}, From 8f9da8cbfcbb304e7ee09531a64b567631a382e2 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 7 Nov 2022 10:17:38 -0800 Subject: [PATCH 0143/1346] AMReX: Weekly Update (#3509) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 87b4352cd77..9366fe8c016 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 22.11 && cd - + cd amrex && git checkout --detach 0d3deeb5c75cade14c381ef620921beaa2604c11 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 9866cf685c3..a4cb58e75e6 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 22.11 +branch = 0d3deeb5c75cade14c381ef620921beaa2604c11 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 2a39fb2169a..94fe785d7c0 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 22.11 +branch = 0d3deeb5c75cade14c381ef620921beaa2604c11 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index dc8033df6a2..b42244c5327 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -239,7 +239,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "22.11" +set(WarpX_amrex_branch "0d3deeb5c75cade14c381ef620921beaa2604c11" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 4bf8b30d714..7f6ce13d207 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install 
--upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 22.11 && cd - +cd amrex && git checkout --detach 0d3deeb5c75cade14c381ef620921beaa2604c11 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From ada6fc49cf16f40d1764d3a95c8338126360f329 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Mon, 7 Nov 2022 13:52:46 -0800 Subject: [PATCH 0144/1346] Load balancing bug fix: remake MultiFabs for Vay deposition, current centering, time averaging (#3508) Additional MultiFabs for Vay deposition, current centering, and time averaging need to be allocated after load-balancing, otherwise the simulation crashes. --- Source/Parallelization/WarpXRegrid.cpp | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 4e0662fbf9a..794a48a5b4f 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -1,6 +1,6 @@ /* Copyright 2019 Andrew Myers, Ann Almgren, Axel Huebl * David Grote, Maxence Thevenet, Michael Rowan - * Remi Lehe, Weiqun Zhang, levinem + * Remi Lehe, Weiqun Zhang, levinem, Revathi Jambunathan * * This file is part of WarpX. * @@ -170,7 +170,16 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(Efield_fp[lev][idim], dm, true); RemakeMultiFab(current_fp[lev][idim], dm, false); RemakeMultiFab(current_store[lev][idim], dm, false); - + if (current_deposition_algo == CurrentDepositionAlgo::Vay) { + RemakeMultiFab(current_fp_vay[lev][idim], dm, false); + } + if (do_current_centering) { + RemakeMultiFab(current_fp_nodal[lev][idim], dm, false); + } + if (fft_do_time_averaging) { + RemakeMultiFab(Efield_avg_fp[lev][idim], dm, true); + RemakeMultiFab(Bfield_avg_fp[lev][idim], dm, true); + } #ifdef AMREX_USE_EB if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::Yee || WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT || @@ -264,6 +273,10 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(Bfield_cp[lev][idim], dm, true); RemakeMultiFab(Efield_cp[lev][idim], dm, true); RemakeMultiFab(current_cp[lev][idim], dm, false); + if (fft_do_time_averaging) { + RemakeMultiFab(Efield_avg_cp[lev][idim], dm, true); + RemakeMultiFab(Bfield_avg_cp[lev][idim], dm, true); + } } RemakeMultiFab(F_cp[lev], dm, true); RemakeMultiFab(rho_cp[lev], dm, false); From bf535730cb6c170394da8375887b3ae57ad6ecf5 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 7 Nov 2022 20:30:31 -0800 Subject: [PATCH 0145/1346] Allow `None` for Maxwell solver (#3504) * Add "None" as an option for the Maxwell solver * fixed some of the reasons for failing CI tests * no longer pass `do_electrostatic` to `GuardCellManager` * renamed `MaxwellSolverAlgo` to `ElectromagneticSolverAlgo` * rename `do_electrostatic` to `electrostatic_solver_id` * rename `maxwell_solver_id` to `electromagnetic_solver_id` * changes requested during PR review * remove `do_no_deposit` from tests without field evolution * Fix doc-string in `GuardCellManager.H` Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Examples/Tests/collision/inputs_2d | 
7 +- Examples/Tests/collision/inputs_3d | 7 +- Examples/Tests/collision/inputs_rz | 6 +- Examples/Tests/scraping/inputs_rz | 4 +- Source/BoundaryConditions/PML.cpp | 16 ++-- .../ComputeDiagFunctors/DivEFunctor.cpp | 2 +- .../ComputeDiagFunctors/RhoFunctor.cpp | 2 +- Source/Diagnostics/Diagnostics.cpp | 2 +- Source/Diagnostics/WarpXOpenPMD.cpp | 8 +- Source/Evolve/WarpXComputeDt.cpp | 29 ++++--- Source/Evolve/WarpXEvolve.cpp | 22 ++--- Source/FieldSolver/ElectrostaticSolver.cpp | 8 +- .../ApplySilverMuellerBoundary.cpp | 2 +- .../FiniteDifferenceSolver/ComputeDivE.cpp | 6 +- .../FiniteDifferenceSolver/EvolveB.cpp | 10 +-- .../FiniteDifferenceSolver/EvolveBPML.cpp | 4 +- .../FiniteDifferenceSolver/EvolveE.cpp | 8 +- .../FiniteDifferenceSolver/EvolveECTRho.cpp | 2 +- .../FiniteDifferenceSolver/EvolveEPML.cpp | 4 +- .../FiniteDifferenceSolver/EvolveF.cpp | 6 +- .../FiniteDifferenceSolver/EvolveFPML.cpp | 4 +- .../FiniteDifferenceSolver/EvolveG.cpp | 4 +- .../FiniteDifferenceSolver.cpp | 8 +- .../MacroscopicEvolveE.cpp | 4 +- Source/FieldSolver/WarpXPushFieldsEM.cpp | 2 +- Source/Initialization/WarpXInitData.cpp | 34 ++++---- Source/Parallelization/GuardCellManager.H | 6 +- Source/Parallelization/GuardCellManager.cpp | 23 ++--- Source/Parallelization/WarpXComm.cpp | 2 +- Source/Parallelization/WarpXRegrid.cpp | 10 +-- Source/Parallelization/WarpXSumGuardCells.H | 4 +- Source/Particles/MultiParticleContainer.cpp | 2 +- .../Particles/PhysicalParticleContainer.cpp | 4 +- Source/Particles/WarpXParticleContainer.cpp | 2 +- Source/Utils/WarpXAlgorithmSelection.H | 11 +-- Source/Utils/WarpXAlgorithmSelection.cpp | 17 ++-- Source/Utils/WarpXUtil.cpp | 12 +-- Source/WarpX.H | 4 +- Source/WarpX.cpp | 86 ++++++++++--------- 39 files changed, 199 insertions(+), 195 deletions(-) diff --git a/Examples/Tests/collision/inputs_2d b/Examples/Tests/collision/inputs_2d index 3bc7a771a9c..5f81205f360 100644 --- a/Examples/Tests/collision/inputs_2d +++ b/Examples/Tests/collision/inputs_2d @@ -21,7 +21,10 @@ boundary.field_hi = periodic periodic ################################# warpx.serialize_initial_conditions = 1 warpx.verbose = 1 -warpx.cfl = 1.0 +warpx.const_dt = 1.224744871e-07 + +# Do not evolve the E and B fields +algo.maxwell_solver = none # Order of particle shape factors algo.particle_shape = 1 @@ -42,7 +45,6 @@ electron.ux_th = 0.044237441120300 electron.uy_th = 0.044237441120300 electron.uz_th = 0.044237441120300 electron.ux_m = 0.044237441120300 -electron.do_not_deposit = 1 ion.charge = q_e ion.mass = 4.554691780000000e-30 @@ -54,7 +56,6 @@ ion.momentum_distribution_type = "gaussian" ion.ux_th = 0.006256118919701 ion.uy_th = 0.006256118919701 ion.uz_th = 0.006256118919701 -ion.do_not_deposit = 1 ################################# ############ COLLISION ########## diff --git a/Examples/Tests/collision/inputs_3d b/Examples/Tests/collision/inputs_3d index 31e34bcb42d..5058dcb8f4a 100644 --- a/Examples/Tests/collision/inputs_3d +++ b/Examples/Tests/collision/inputs_3d @@ -21,7 +21,10 @@ boundary.field_hi = periodic periodic periodic ################################# warpx.serialize_initial_conditions = 1 warpx.verbose = 1 -warpx.cfl = 1.0 +warpx.const_dt = 1.224744871e-07 + +# Do not evolve the E and B fields +algo.maxwell_solver = none # Order of particle shape factors algo.particle_shape = 1 @@ -42,7 +45,6 @@ electron.ux_th = 0.044237441120300 electron.uy_th = 0.044237441120300 electron.uz_th = 0.044237441120300 electron.ux_m = 0.044237441120300 -electron.do_not_deposit = 1 ion.charge = q_e ion.mass = 
4.554691780000000e-30 @@ -54,7 +56,6 @@ ion.momentum_distribution_type = "gaussian" ion.ux_th = 0.006256118919701 ion.uy_th = 0.006256118919701 ion.uz_th = 0.006256118919701 -ion.do_not_deposit = 1 ################################# ############ COLLISION ########## diff --git a/Examples/Tests/collision/inputs_rz b/Examples/Tests/collision/inputs_rz index 16e742854d0..de6c8517c84 100644 --- a/Examples/Tests/collision/inputs_rz +++ b/Examples/Tests/collision/inputs_rz @@ -21,7 +21,10 @@ boundary.field_hi = none periodic ################################# warpx.serialize_initial_conditions = 1 warpx.verbose = 1 -warpx.cfl = 1.0 +warpx.const_dt = 1.224744871e-07 + +# Do not evolve the E and B fields +algo.maxwell_solver = none # Order of particle shape factors algo.particle_shape = 1 @@ -41,7 +44,6 @@ electron.momentum_distribution_type = parse_momentum_function electron.momentum_function_ux(x,y,z) = "if(x*x+y*y>0.0, 1.0*x/sqrt(x*x+y*y), 0.0)" electron.momentum_function_uy(x,y,z) = "if(x*x+y*y>0.0, 1.0*y/sqrt(x*x+y*y), 0.0)" electron.momentum_function_uz(x,y,z) = "0" -electron.do_not_deposit = 1 electron.do_not_push = 1 ################################# diff --git a/Examples/Tests/scraping/inputs_rz b/Examples/Tests/scraping/inputs_rz index e8994bb185f..875480fa7c4 100644 --- a/Examples/Tests/scraping/inputs_rz +++ b/Examples/Tests/scraping/inputs_rz @@ -22,9 +22,10 @@ boundary.potential_lo_z = 0 boundary.potential_hi_z = 0 warpx.const_dt = 1.216119097e-11 -warpx.do_electrostatic = relativistic warpx.eb_implicit_function = "-(x**2-0.1**2)" +# Do not evolve the E and B fields +algo.maxwell_solver = none algo.field_gathering = momentum-conserving algo.particle_shape = 1 @@ -39,7 +40,6 @@ electron.momentum_distribution_type = parse_momentum_function electron.momentum_function_ux(x,y,z) = "if(x*x+y*y>0.0, -1.0*x/sqrt(x*x+y*y), 0.0)" electron.momentum_function_uy(x,y,z) = "if(x*x+y*y>0.0, -1.0*y/sqrt(x*x+y*y), 0.0)" electron.momentum_function_uz(x,y,z) = "0" -electron.do_not_deposit = 1 electron.save_particles_at_eb = 1 diagnostics.diags_names = diag1 diag2 diag3 diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 76dbd3fde55..5ba9ac2cd0b 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -598,7 +598,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri IntVect nge = IntVect(AMREX_D_DECL(2, 2, 2)); IntVect ngb = IntVect(AMREX_D_DECL(2, 2, 2)); int ngf_int = 0; - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::CKC) ngf_int = std::max( ngf_int, 1 ); + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC) ngf_int = std::max( ngf_int, 1 ); IntVect ngf = IntVect(AMREX_D_DECL(ngf_int, ngf_int, ngf_int)); if (do_moving_window) { @@ -610,7 +610,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri ngf[WarpX::moving_window_dir] = std::max(ngf[WarpX::moving_window_dir], rr); } - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Increase the number of guard cells, in order to fit the extent // of the stencil for the spectral solver int ngFFt_x = do_nodal ? 
nox_fft : nox_fft/2; @@ -701,9 +701,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri pml_edge_lengths[2] = std::make_unique(amrex::convert( ba, WarpX::GetInstance().getEfield_fp(0,2).ixType().toIntVect() ), dm, WarpX::ncomps, max_guard_EB ); - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::Yee || - WarpX::maxwell_solver_id == MaxwellSolverAlgo::CKC || - WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || + WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || + WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { auto const eb_fact = fieldEBFactory(); @@ -736,7 +736,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri sigba_fp = std::make_unique(ba, dm, grid_ba_reduced, geom->CellSize(), IntVect(ncell), IntVect(delta), single_domain_box, v_sigma_sb); - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { #ifndef WARPX_USE_PSATD amrex::ignore_unused(lev, dt, J_in_time, rho_in_time); # if(AMREX_SPACEDIM!=3) @@ -765,7 +765,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri if (cgeom) { - if (WarpX::maxwell_solver_id != MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { nge = IntVect(AMREX_D_DECL(1, 1, 1)); ngb = IntVect(AMREX_D_DECL(1, 1, 1)); } @@ -858,7 +858,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri sigba_cp = std::make_unique(cba, cdm, grid_cba_reduced, cgeom->CellSize(), cncells, cdelta, single_domain_box, v_sigma_sb); - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { #ifndef WARPX_USE_PSATD amrex::ignore_unused(dt); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false, diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp index 2ec21308ae2..3859b859ce8 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp @@ -34,7 +34,7 @@ DivEFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp, const int /*i_ amrex::IntVect cell_type = amrex::IntVect::TheNodeVector(); #ifdef WARPX_DIM_RZ // For RZ spectral, all quantities are cell centered. 
- if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) cell_type = amrex::IntVect::TheCellVector(); #endif const amrex::BoxArray& ba = amrex::convert(warpx.boxArray(m_lev), cell_type); diff --git a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp index fab362e03ac..de92577227c 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp @@ -56,7 +56,7 @@ RhoFunctor::operator() ( amrex::MultiFab& mf_dst, const int dcomp, const int /*i #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD) // Apply k-space filtering when using the PSATD solver - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (WarpX::use_kspace_filter) { auto & solver = warpx.get_spectral_solver_fp(m_lev); diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index 236985de484..78d82e4e1b1 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -75,7 +75,7 @@ Diagnostics::BaseReadParameters () // Sanity check if user requests to plot phi if (utils::algorithms::is_in(m_varnames_fields, "phi")){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - warpx.do_electrostatic==ElectrostaticSolverAlgo::LabFrame, + warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrame, "plot phi only works if do_electrostatic = labframe"); } diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 4320bc7ae1f..c5fdcc20522 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -1131,12 +1131,12 @@ WarpXOpenPMDPlot::SetupFields ( openPMD::Container< openPMD::Mesh >& meshes, } meshes.setAttribute("fieldSolver", []() { - switch (WarpX::maxwell_solver_id) { - case MaxwellSolverAlgo::Yee : + switch (WarpX::electromagnetic_solver_id) { + case ElectromagneticSolverAlgo::Yee : return "Yee"; - case MaxwellSolverAlgo::CKC : + case ElectromagneticSolverAlgo::CKC : return "CK"; - case MaxwellSolverAlgo::PSATD : + case ElectromagneticSolverAlgo::PSATD : return "PSATD"; default: return "other"; diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp index 99cc307e66b..b68dfba4e54 100644 --- a/Source/Evolve/WarpXComputeDt.cpp +++ b/Source/Evolve/WarpXComputeDt.cpp @@ -32,11 +32,19 @@ void WarpX::ComputeDt () { - // Determine + // Handle cases where the timestep is not limited by the speed of light + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None) { + for (int lev=0; lev<=max_level; lev++) { + dt[lev] = const_dt; + } + return; + } + + // Determine the appropriate timestep as limited by the speed of light const amrex::Real* dx = geom[max_level].CellSize(); amrex::Real deltat = 0.; - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Computation of dt for spectral algorithm // (determined by the minimum cell size in all directions) #if defined(WARPX_DIM_1D_Z) @@ -50,21 +58,20 @@ WarpX::ComputeDt () // Computation of dt for FDTD algorithm #ifdef WARPX_DIM_RZ // - In RZ geometry - if (maxwell_solver_id == MaxwellSolverAlgo::Yee) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee) { deltat = cfl * CylindricalYeeAlgorithm::ComputeMaxDt(dx, n_rz_azimuthal_modes); #else // - In Cartesian geometry if (do_nodal) { 
deltat = cfl * CartesianNodalAlgorithm::ComputeMaxDt(dx); - } else if (maxwell_solver_id == MaxwellSolverAlgo::Yee - || maxwell_solver_id == MaxwellSolverAlgo::ECT) { + } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee + || electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { deltat = cfl * CartesianYeeAlgorithm::ComputeMaxDt(dx); - } else if (maxwell_solver_id == MaxwellSolverAlgo::CKC) { + } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC) { deltat = cfl * CartesianCKCAlgorithm::ComputeMaxDt(dx); #endif } else { - amrex::Abort(Utils::TextMsg::Err( - "ComputeDt: Unknown algorithm")); + amrex::Abort(Utils::TextMsg::Err("ComputeDt: Unknown algorithm")); } } @@ -76,12 +83,6 @@ WarpX::ComputeDt () dt[lev] = dt[lev+1] * refRatio(lev)[0]; } } - - if (do_electrostatic != ElectrostaticSolverAlgo::None) { - for (int lev=0; lev<=max_level; lev++) { - dt[lev] = const_dt; - } - } } void diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 2c87d1efa2e..6e04adc3be0 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -122,7 +122,7 @@ WarpX::Evolve (int numsteps) // Particles have p^{n} and x^{n}. // is_synchronized is true. if (is_synchronized) { - if (do_electrostatic == ElectrostaticSolverAlgo::None) { + if (electrostatic_solver_id == ElectrostaticSolverAlgo::None) { // Not called at each iteration, so exchange all guard cells FillBoundaryE(guard_cells.ng_alloc_EB); FillBoundaryB(guard_cells.ng_alloc_EB); @@ -138,7 +138,7 @@ WarpX::Evolve (int numsteps) } is_synchronized = false; } else { - if (do_electrostatic == ElectrostaticSolverAlgo::None) { + if (electrostatic_solver_id == ElectrostaticSolverAlgo::None) { // Beyond one step, we have E^{n} and B^{n}. // Particles have p^{n-1/2} and x^{n}. @@ -153,7 +153,7 @@ WarpX::Evolve (int numsteps) FillBoundaryB_avg(guard_cells.ng_FieldGather); } // TODO Remove call to FillBoundaryAux before UpdateAuxilaryData? 
- if (WarpX::maxwell_solver_id != MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) FillBoundaryAux(guard_cells.ng_UpdateAux); UpdateAuxilaryData(); FillBoundaryAux(guard_cells.ng_UpdateAux); @@ -177,7 +177,7 @@ WarpX::Evolve (int numsteps) ExecutePythonCallback("particleinjection"); // Electrostatic case: only gather fields and push particles, // deposition and calculation of fields done further below - if (do_electrostatic != ElectrostaticSolverAlgo::None) + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None) { const bool skip_deposition = true; PushParticlesandDepose(cur_time, skip_deposition); @@ -278,8 +278,8 @@ WarpX::Evolve (int numsteps) m_particle_boundary_buffer->gatherParticles(*mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); - // Electrostatic solver: particles can move by an arbitrary number of cells - if( do_electrostatic != ElectrostaticSolverAlgo::None ) + // Non-Maxwell solver: particles can move by an arbitrary number of cells + if( electromagnetic_solver_id == ElectromagneticSolverAlgo::None ) { mypc->Redistribute(); } else @@ -309,7 +309,7 @@ WarpX::Evolve (int numsteps) mypc->SortParticlesByBin(sort_bin_size); } - if( do_electrostatic != ElectrostaticSolverAlgo::None ) { + if( electrostatic_solver_id != ElectrostaticSolverAlgo::None ) { ExecutePythonCallback("beforeEsolve"); // Electrostatic solver: // For each species: deposit charge and add the associated space-charge @@ -413,7 +413,7 @@ WarpX::OneStep_nosub (Real cur_time) // Push E and B from {n} to {n+1} // (And update guard cells immediately afterwards) - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (use_hybrid_QED) { WarpX::Hybrid_QED_Push(dt); @@ -486,7 +486,7 @@ WarpX::OneStep_nosub (Real cur_time) void WarpX::SyncCurrentAndRho () { - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (fft_periodic_single_box) { @@ -530,7 +530,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) #ifdef WARPX_USE_PSATD WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD, + WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD, "multi-J algorithm not implemented for FDTD" ); @@ -712,7 +712,7 @@ void WarpX::OneStep_sub1 (Real curtime) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - do_electrostatic == ElectrostaticSolverAlgo::None, + electrostatic_solver_id == ElectrostaticSolverAlgo::None, "Electrostatic solver cannot be used with sub-cycling." 
); diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index 103fff13199..c227506f6bd 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -69,7 +69,7 @@ WarpX::ComputeSpaceChargeField (bool const reset_fields) } } - if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) { + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) { AddSpaceChargeFieldLabFrame(); } else { @@ -79,13 +79,13 @@ WarpX::ComputeSpaceChargeField (bool const reset_fields) for (int ispecies=0; ispeciesnSpecies(); ispecies++){ WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); if (species.initialize_self_fields || - (do_electrostatic == ElectrostaticSolverAlgo::Relativistic)) { + (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic)) { AddSpaceChargeField(species); } } // Add the field due to the boundary potentials - if (do_electrostatic == ElectrostaticSolverAlgo::Relativistic){ + if (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic){ AddBoundaryField(); } } @@ -311,7 +311,7 @@ WarpX::computePhi (const amrex::Vector >& rho, #if defined(AMREX_USE_EB) // EB: use AMReX to directly calculate the electric field since with EB's the // simple finite difference scheme in WarpX::computeE sometimes fails - if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) { // TODO: maybe make this a helper function or pass Efield_fp directly amrex::Vector< diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp index 01ae22a09da..d6ac45a9821 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp @@ -44,7 +44,7 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( // Ensure that we are using the Yee solver WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - m_fdtd_algo == MaxwellSolverAlgo::Yee, + m_fdtd_algo == ElectromagneticSolverAlgo::Yee, "The Silver-Mueller boundary conditions can only be used with the Yee solver." 
); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp index f9e43469954..9ac32a19d8c 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp @@ -46,7 +46,7 @@ void FiniteDifferenceSolver::ComputeDivE ( // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ - if (m_fdtd_algo == MaxwellSolverAlgo::Yee){ + if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee){ ComputeDivECylindrical ( Efield, divEfield ); @@ -55,11 +55,11 @@ void FiniteDifferenceSolver::ComputeDivE ( ComputeDivECartesian ( Efield, divEfield ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) { ComputeDivECartesian ( Efield, divEfield ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { ComputeDivECartesian ( Efield, divEfield ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index 02cd6fc4279..0e6515a73ff 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -66,11 +66,11 @@ void FiniteDifferenceSolver::EvolveB ( // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ - if (m_fdtd_algo == MaxwellSolverAlgo::Yee){ + if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee){ ignore_unused(Gfield, face_areas); EvolveBCylindrical ( Bfield, Efield, lev, dt ); #else - if(m_do_nodal or m_fdtd_algo != MaxwellSolverAlgo::ECT){ + if(m_do_nodal or m_fdtd_algo != ElectromagneticSolverAlgo::ECT){ amrex::ignore_unused(face_areas); } @@ -78,15 +78,15 @@ void FiniteDifferenceSolver::EvolveB ( EvolveBCartesian ( Bfield, Efield, Gfield, lev, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) { EvolveBCartesian ( Bfield, Efield, Gfield, lev, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveBCartesian ( Bfield, Efield, Gfield, lev, dt ); #ifdef AMREX_USE_EB - } else if (m_fdtd_algo == MaxwellSolverAlgo::ECT) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { EvolveBCartesianECT(Bfield, face_areas, area_mod, ECTRhofield, Venl, flag_info_cell, borrowing, lev, dt); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp index aad66d3d4da..2907e3fdf6c 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp @@ -57,11 +57,11 @@ void FiniteDifferenceSolver::EvolveBPML ( EvolveBPMLCartesian (Bfield, Efield, dt, dive_cleaning); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee || m_fdtd_algo == MaxwellSolverAlgo::ECT) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee || m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { EvolveBPMLCartesian (Bfield, Efield, dt, dive_cleaning); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveBPMLCartesian (Bfield, Efield, dt, dive_cleaning); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp 
b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index da91eb73dc3..fe6181aca61 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -56,7 +56,7 @@ void FiniteDifferenceSolver::EvolveE ( int lev, amrex::Real const dt ) { #ifdef AMREX_USE_EB - if (m_fdtd_algo != MaxwellSolverAlgo::ECT) { + if (m_fdtd_algo != ElectromagneticSolverAlgo::ECT) { amrex::ignore_unused(face_areas, ECTRhofield); } #else @@ -66,7 +66,7 @@ void FiniteDifferenceSolver::EvolveE ( // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ - if (m_fdtd_algo == MaxwellSolverAlgo::Yee){ + if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee){ ignore_unused(edge_lengths); EvolveECylindrical ( Efield, Bfield, Jfield, Ffield, lev, dt ); #else @@ -74,11 +74,11 @@ void FiniteDifferenceSolver::EvolveE ( EvolveECartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee || m_fdtd_algo == MaxwellSolverAlgo::ECT) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee || m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { EvolveECartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveECartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp index 59a24792185..0ef5571a1fa 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp @@ -54,7 +54,7 @@ void FiniteDifferenceSolver::EvolveECTRho ( const int lev) { #if !defined(WARPX_DIM_RZ) and defined(AMREX_USE_EB) - if (m_fdtd_algo == MaxwellSolverAlgo::ECT) { + if (m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { EvolveRhoCartesianECT(Efield, edge_lengths, face_areas, ECTRhofield, lev); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp index e01d813ff03..8232d2d0d4e 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp @@ -64,12 +64,12 @@ void FiniteDifferenceSolver::EvolveEPML ( EvolveEPMLCartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, sigba, dt, pml_has_particles ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee || m_fdtd_algo == MaxwellSolverAlgo::ECT) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee || m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { EvolveEPMLCartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, sigba, dt, pml_has_particles ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveEPMLCartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, sigba, dt, pml_has_particles ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp index ae5600dae38..c29c0cdb67b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp @@ -53,7 +53,7 @@ void FiniteDifferenceSolver::EvolveF ( // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef 
WARPX_DIM_RZ - if (m_fdtd_algo == MaxwellSolverAlgo::Yee){ + if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee){ EvolveFCylindrical ( Ffield, Efield, rhofield, rhocomp, dt ); @@ -62,11 +62,11 @@ void FiniteDifferenceSolver::EvolveF ( EvolveFCartesian ( Ffield, Efield, rhofield, rhocomp, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) { EvolveFCartesian ( Ffield, Efield, rhofield, rhocomp, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveFCartesian ( Ffield, Efield, rhofield, rhocomp, dt ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp index c1d88282e47..6a94d205a36 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp @@ -55,11 +55,11 @@ void FiniteDifferenceSolver::EvolveFPML ( EvolveFPMLCartesian ( Ffield, Efield, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::Yee) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) { EvolveFPMLCartesian ( Ffield, Efield, dt ); - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveFPMLCartesian ( Ffield, Efield, dt ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp index 3a705698b94..0c971d5776b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp @@ -51,11 +51,11 @@ void FiniteDifferenceSolver::EvolveG ( { EvolveGCartesian(Gfield, Bfield, dt); } - else if (m_fdtd_algo == MaxwellSolverAlgo::Yee) + else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) { EvolveGCartesian(Gfield, Bfield, dt); } - else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) + else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { EvolveGCartesian(Gfield, Bfield, dt); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp index f63028f5079..e8529ef1559 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.cpp @@ -37,7 +37,7 @@ FiniteDifferenceSolver::FiniteDifferenceSolver ( m_do_nodal = do_nodal; // return if not FDTD - if (fdtd_algo == MaxwellSolverAlgo::PSATD) + if (fdtd_algo == ElectromagneticSolverAlgo::None || fdtd_algo == ElectromagneticSolverAlgo::PSATD) return; // Calculate coefficients of finite-difference stencil @@ -45,7 +45,7 @@ FiniteDifferenceSolver::FiniteDifferenceSolver ( m_dr = cell_size[0]; m_nmodes = WarpX::GetInstance().n_rz_azimuthal_modes; m_rmin = WarpX::GetInstance().Geom(0).ProbLo(0); - if (fdtd_algo == MaxwellSolverAlgo::Yee) { + if (fdtd_algo == ElectromagneticSolverAlgo::Yee) { CylindricalYeeAlgorithm::InitializeStencilCoefficients( cell_size, m_h_stencil_coefs_r, m_h_stencil_coefs_z ); m_stencil_coefs_r.resize(m_h_stencil_coefs_r.size()); @@ -67,12 +67,12 @@ FiniteDifferenceSolver::FiniteDifferenceSolver ( CartesianNodalAlgorithm::InitializeStencilCoefficients( cell_size, m_h_stencil_coefs_x, m_h_stencil_coefs_y, m_h_stencil_coefs_z ); - } else if (fdtd_algo == MaxwellSolverAlgo::Yee || fdtd_algo == MaxwellSolverAlgo::ECT) { + } else if (fdtd_algo == ElectromagneticSolverAlgo::Yee || fdtd_algo == 
ElectromagneticSolverAlgo::ECT) { CartesianYeeAlgorithm::InitializeStencilCoefficients( cell_size, m_h_stencil_coefs_x, m_h_stencil_coefs_y, m_h_stencil_coefs_z ); - } else if (fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (fdtd_algo == ElectromagneticSolverAlgo::CKC) { CartesianCKCAlgorithm::InitializeStencilCoefficients( cell_size, m_h_stencil_coefs_x, m_h_stencil_coefs_y, m_h_stencil_coefs_z ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index d6bc9be9e8a..ab2e250142e 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -53,7 +53,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( !m_do_nodal, "macro E-push does not work for nodal"); - if (m_fdtd_algo == MaxwellSolverAlgo::Yee) { + if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee) { if (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::LaxWendroff) { @@ -68,7 +68,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( } - } else if (m_fdtd_algo == MaxwellSolverAlgo::CKC) { + } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { // Note : EvolveE is the same for CKC and Yee. // In the templated Yee and CKC calls, the core operations for EvolveE is the same. diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index e8f20661a2d..a3a8829628f 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -880,7 +880,7 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) // ECTRhofield must be recomputed at the very end of the Efield update to ensure // that ECTRhofield is consistent with Efield #ifdef AMREX_USE_EB - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveECTRho(Efield_fp[lev], m_edge_lengths[lev], m_face_areas[lev], ECTRhofield[lev], lev); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 4865cfa6537..987bab9d447 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -157,15 +157,15 @@ WarpX::PrintMainPICparameters () WarpX::n_rz_azimuthal_modes << "\n"; #endif // WARPX_USE_RZ //Print solver's operation mode (e.g., EM or electrostatic) - if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) { + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) { amrex::Print() << "Operation mode: | Electrostatic" << "\n"; amrex::Print() << " | - laboratory frame" << "\n"; } - else if (do_electrostatic == ElectrostaticSolverAlgo::Relativistic){ + else if (electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic){ amrex::Print() << "Operation mode: | Electrostatic" << "\n"; amrex::Print() << " | - relativistic" << "\n"; } - else{ + else if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { amrex::Print() << "Operation mode: | Electromagnetic" << "\n"; } if (em_solver_medium == MediumForEM::Vacuum ){ @@ -218,18 +218,18 @@ WarpX::PrintMainPICparameters () amrex::Print() << "Particle Shape Factor:| " << WarpX::nox << "\n"; amrex::Print() << "-------------------------------------------------------------------------------\n"; // Print solver's type: Yee, CKC, ECT - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::Yee){ + if 
(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee){ amrex::Print() << "Maxwell Solver: | Yee \n"; - } - else if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::CKC){ + } + else if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC){ amrex::Print() << "Maxwell Solver: | CKC \n"; } - else if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT){ + else if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT){ amrex::Print() << "Maxwell Solver: | ECT \n"; } #ifdef WARPX_USE_PSATD // Print PSATD solver's configuration - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD){ + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD){ amrex::Print() << "Maxwell Solver: | PSATD \n"; } if ((m_v_galilean[0]!=0) or (m_v_galilean[1]!=0) or (m_v_galilean[2]!=0)) { @@ -295,7 +295,7 @@ WarpX::PrintMainPICparameters () amrex::Print() << " | - use_hybrid_QED = true \n"; } - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD){ + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD){ // Print solver's order std::string psatd_nox_fft, psatd_noy_fft, psatd_noz_fft; psatd_nox_fft = (nox_fft == -1) ? "inf" : std::to_string(nox_fft); @@ -307,11 +307,11 @@ WarpX::PrintMainPICparameters () amrex::Print() << " | - psatd.noy = " << psatd_noy_fft << "\n"; amrex::Print() << " | - psatd.noz = " << psatd_noz_fft << "\n"; } - else if (dims=="2" and WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD){ + else if (dims=="2" and WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD){ amrex::Print() << "Spectral order: | - psatd.nox = " << psatd_nox_fft << "\n"; amrex::Print() << " | - psatd.noz = " << psatd_noz_fft << "\n"; } - else if (dims=="1" and WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD){ + else if (dims=="1" and WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD){ amrex::Print() << "Spectral order: | - psatd.noz = " << psatd_noz_fft << "\n"; } } @@ -867,7 +867,7 @@ WarpX::InitLevelData (int lev, Real /*time*/) #ifdef AMREX_USE_EB // We initialize ECTRhofield consistently with the Efield - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { m_fdtd_solver_fp[lev]->EvolveECTRho(Efield_fp[lev], m_edge_lengths[lev], m_face_areas[lev], ECTRhofield[lev], lev); @@ -897,7 +897,7 @@ WarpX::InitLevelData (int lev, Real /*time*/) 'E', lev); #ifdef AMREX_USE_EB - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { // We initialize ECTRhofield consistently with the Efield m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], m_face_areas[lev], ECTRhofield[lev], lev); @@ -1240,9 +1240,7 @@ void WarpX::InitializeEBGridData (int lev) "particles are close to embedded boundaries"); } - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::Yee || - WarpX::maxwell_solver_id == MaxwellSolverAlgo::CKC || - WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) { auto const eb_fact = fieldEBFactory(lev); @@ -1251,7 +1249,7 @@ void WarpX::InitializeEBGridData (int lev) ComputeFaceAreas(m_face_areas[lev], eb_fact); ScaleAreas(m_face_areas[lev], CellSize(lev)); - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { MarkCells(); ComputeFaceExtensions(); } @@ -1267,7 +1265,7 @@ 
void WarpX::InitializeEBGridData (int lev) void WarpX::CheckKnownIssues() { - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD && + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD && (std::any_of(do_pml_Lo[0].begin(),do_pml_Lo[0].end(),[](const auto& ee){return ee;}) || std::any_of(do_pml_Hi[0].begin(),do_pml_Hi[0].end(),[](const auto& ee){return ee;})) ) { diff --git a/Source/Parallelization/GuardCellManager.H b/Source/Parallelization/GuardCellManager.H index c122aaa96c9..248a3dabf3b 100644 --- a/Source/Parallelization/GuardCellManager.H +++ b/Source/Parallelization/GuardCellManager.H @@ -36,12 +36,11 @@ public: * \param noy_fft order of PSATD in y direction * \param noz_fft order of PSATD in z direction * \param nci_corr_stencil stencil of NCI corrector - * \param maxwell_solver_id if of Maxwell solver + * \param electromagnetic_solver_id Integer corresponding to the type of Maxwell solver * \param max_level max level of the simulation * \param v_galilean Velocity used in the Galilean PSATD scheme * \param v_comoving Velocity used in the comoving PSATD scheme * \param safe_guard_cells Run in safe mode, exchanging more guard cells, and more often in the PIC loop (for debugging). - * \param do_electrostatic Whether to run in electrostatic mode i.e. solving the Poisson equation instead of the Maxwell equations. * \param do_multi_J Whether to use the multi-J PSATD scheme * \param fft_do_time_averaging Whether to average the E and B field in time (with PSATD) before interpolating them onto the macro-particles * \param do_pml whether pml is turned on (only used by RZ PSATD) @@ -60,12 +59,11 @@ public: const int nox, const int nox_fft, const int noy_fft, const int noz_fft, const int nci_corr_stencil, - const int maxwell_solver_id, + const int electromagnetic_solver_id, const int max_level, const amrex::Vector v_galilean, const amrex::Vector v_comoving, const bool safe_guard_cells, - const int do_electrostatic, const int do_multi_J, const bool fft_do_time_averaging, const bool do_pml, diff --git a/Source/Parallelization/GuardCellManager.cpp b/Source/Parallelization/GuardCellManager.cpp index d01230e4b5e..aa9d9f44890 100644 --- a/Source/Parallelization/GuardCellManager.cpp +++ b/Source/Parallelization/GuardCellManager.cpp @@ -42,12 +42,11 @@ guardCellManager::Init ( const int nox, const int nox_fft, const int noy_fft, const int noz_fft, const int nci_corr_stencil, - const int maxwell_solver_id, + const int electromagnetic_solver_id, const int max_level, const amrex::Vector v_galilean, const amrex::Vector v_comoving, const bool safe_guard_cells, - const int do_electrostatic, const int do_multi_J, const bool fft_do_time_averaging, const bool do_pml, @@ -135,7 +134,7 @@ guardCellManager::Init ( // Electromagnetic simulations: account for change in particle positions within half a time step // for current deposition and within one time step for charge deposition (since rho is needed // both at the beginning and at the end of the PIC iteration) - if (do_electrostatic == ElectrostaticSolverAlgo::None) + if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { for (int i = 0; i < AMREX_SPACEDIM; i++) { @@ -170,7 +169,7 @@ guardCellManager::Init ( // After pushing particle int ng_alloc_F_int = (do_moving_window) ? 
2 : 0; // CKC solver requires one additional guard cell - if (maxwell_solver_id == MaxwellSolverAlgo::CKC) ng_alloc_F_int = std::max( ng_alloc_F_int, 1 ); + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC) ng_alloc_F_int = std::max( ng_alloc_F_int, 1 ); ng_alloc_F = IntVect(AMREX_D_DECL(ng_alloc_F_int, ng_alloc_F_int, ng_alloc_F_int)); // Used if warpx.do_divb_cleaning = 1 @@ -178,7 +177,7 @@ guardCellManager::Init ( // TODO Does the CKC solver require one additional guard cell (as for F)? ng_alloc_G = IntVect(AMREX_D_DECL(ng_alloc_G_int, ng_alloc_G_int, ng_alloc_G_int)); - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // The number of guard cells should be enough to contain the stencil of the FFT solver. // @@ -245,13 +244,14 @@ guardCellManager::Init ( } // Compute number of cells required for Field Solver - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { ng_FieldSolver = ng_alloc_EB; ng_FieldSolverF = ng_alloc_EB; ng_FieldSolverG = ng_alloc_EB; } #ifdef WARPX_DIM_RZ - else if (maxwell_solver_id == MaxwellSolverAlgo::Yee) { + else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None || + electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee) { ng_FieldSolver = CylindricalYeeAlgorithm::GetMaxGuardCell(); ng_FieldSolverF = CylindricalYeeAlgorithm::GetMaxGuardCell(); ng_FieldSolverG = CylindricalYeeAlgorithm::GetMaxGuardCell(); @@ -262,12 +262,13 @@ guardCellManager::Init ( ng_FieldSolver = CartesianNodalAlgorithm::GetMaxGuardCell(); ng_FieldSolverF = CartesianNodalAlgorithm::GetMaxGuardCell(); ng_FieldSolverG = CartesianNodalAlgorithm::GetMaxGuardCell(); - } else if (maxwell_solver_id == MaxwellSolverAlgo::Yee - || maxwell_solver_id == MaxwellSolverAlgo::ECT) { + } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None || + electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || + electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { ng_FieldSolver = CartesianYeeAlgorithm::GetMaxGuardCell(); ng_FieldSolverF = CartesianYeeAlgorithm::GetMaxGuardCell(); ng_FieldSolverG = CartesianYeeAlgorithm::GetMaxGuardCell(); - } else if (maxwell_solver_id == MaxwellSolverAlgo::CKC) { + } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC) { ng_FieldSolver = CartesianCKCAlgorithm::GetMaxGuardCell(); ng_FieldSolverF = CartesianCKCAlgorithm::GetMaxGuardCell(); ng_FieldSolverG = CartesianCKCAlgorithm::GetMaxGuardCell(); @@ -281,7 +282,7 @@ guardCellManager::Init ( ng_alloc_F.max( ng_FieldSolverF ); ng_alloc_G.max( ng_FieldSolverG ); - if (do_moving_window && maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (do_moving_window && electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { ng_afterPushPSATD = ng_alloc_EB; } diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 396b0f24b72..fed81f2fce6 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -65,7 +65,7 @@ void WarpX::UpdateAuxilaryDataStagToNodal () { #ifndef WARPX_USE_PSATD - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( false, "WarpX::UpdateAuxilaryDataStagToNodal: PSATD solver requires " "WarpX build with spectral solver support."); diff --git a/Source/Parallelization/WarpXRegrid.cpp 
b/Source/Parallelization/WarpXRegrid.cpp index 794a48a5b4f..99885f6193a 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -181,12 +181,10 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(Bfield_avg_fp[lev][idim], dm, true); } #ifdef AMREX_USE_EB - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::Yee || - WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT || - WarpX::maxwell_solver_id == MaxwellSolverAlgo::CKC){ + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { RemakeMultiFab(m_edge_lengths[lev][idim], dm, false); RemakeMultiFab(m_face_areas[lev][idim], dm, false); - if(WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT){ + if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT){ RemakeMultiFab(Venl[lev][idim], dm, false); RemakeMultiFab(m_flag_info_face[lev][idim], dm, false); RemakeMultiFab(m_flag_ext_face[lev][idim], dm, false); @@ -218,7 +216,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi #endif #ifdef WARPX_USE_PSATD - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (spectral_solver_fp[lev] != nullptr) { // Get the cell-centered box BoxArray realspace_ba = ba; // Copy box @@ -282,7 +280,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi RemakeMultiFab(rho_cp[lev], dm, false); #ifdef WARPX_USE_PSATD - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (spectral_solver_cp[lev] != nullptr) { BoxArray cba = ba; cba.coarsen(refRatio(lev-1)); diff --git a/Source/Parallelization/WarpXSumGuardCells.H b/Source/Parallelization/WarpXSumGuardCells.H index 1bfbf51623a..425ce320856 100644 --- a/Source/Parallelization/WarpXSumGuardCells.H +++ b/Source/Parallelization/WarpXSumGuardCells.H @@ -34,7 +34,7 @@ WarpXSumGuardCells(amrex::MultiFab& mf, const amrex::Periodicity& period, amrex::IntVect n_updated_guards; // Update both valid cells and guard cells - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) n_updated_guards = mf.nGrowVect(); else // Update only the valid cells n_updated_guards = amrex::IntVect::TheZeroVector(); @@ -65,7 +65,7 @@ WarpXSumGuardCells(amrex::MultiFab& dst, amrex::MultiFab& src, amrex::IntVect n_updated_guards; // Update both valid cells and guard cells - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) n_updated_guards = dst.nGrowVect(); else // Update only the valid cells n_updated_guards = amrex::IntVect::TheZeroVector(); diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 9c7563a3f16..7200df69a33 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -536,7 +536,7 @@ MultiParticleContainer::GetZeroChargeDensity (const int lev) bool is_PSATD_RZ = false; #ifdef WARPX_DIM_RZ - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) is_PSATD_RZ = true; #endif if( !is_PSATD_RZ ) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 7e04d690260..e79d526f1cc 100644 --- 
a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -2043,13 +2043,13 @@ PhysicalParticleContainer::Evolve (int lev, np_current, np-np_current, thread_num, lev, lev-1, dt, relative_time); } - } // end of "if do_electrostatic == ElectrostaticSolverAlgo::None" + } // end of "if electrostatic_solver_id == ElectrostaticSolverAlgo::None" } // end of "if do_not_push" if (rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge after particle push, in component 1 of MultiFab rho. // (Skipped for electrostatic solver, as this may lead to out-of-bounds) - if (WarpX::do_electrostatic == ElectrostaticSolverAlgo::None) { + if (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::None) { int* AMREX_RESTRICT ion_lev; if (do_field_ionization){ ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 3f0e0bcbae3..34e25d1a205 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -735,7 +735,7 @@ WarpXParticleContainer::GetChargeDensity (int lev, bool local) bool is_PSATD_RZ = false; #ifdef WARPX_DIM_RZ - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) is_PSATD_RZ = true; #endif if( !is_PSATD_RZ ) diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index 9346159abf2..c3160cfad60 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -36,12 +36,13 @@ struct MacroscopicSolverAlgo { }; }; -struct MaxwellSolverAlgo { +struct ElectromagneticSolverAlgo { enum { - Yee = 0, - CKC = 1, - PSATD = 2, - ECT = 3 + None = 0, + Yee = 1, + CKC = 2, + PSATD = 3, + ECT = 4 }; }; diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp index 1272c146a18..b99459b465c 100644 --- a/Source/Utils/WarpXAlgorithmSelection.cpp +++ b/Source/Utils/WarpXAlgorithmSelection.cpp @@ -22,12 +22,13 @@ // Define dictionary with correspondance between user-input strings, // and corresponding integer for use inside the code -const std::map maxwell_solver_algo_to_int = { - {"yee", MaxwellSolverAlgo::Yee }, - {"ckc", MaxwellSolverAlgo::CKC }, - {"psatd", MaxwellSolverAlgo::PSATD }, - {"ect", MaxwellSolverAlgo::ECT }, - {"default", MaxwellSolverAlgo::Yee } +const std::map electromagnetic_solver_algo_to_int = { + {"none", ElectromagneticSolverAlgo::None }, + {"yee", ElectromagneticSolverAlgo::Yee }, + {"ckc", ElectromagneticSolverAlgo::CKC }, + {"psatd", ElectromagneticSolverAlgo::PSATD }, + {"ect", ElectromagneticSolverAlgo::ECT }, + {"default", ElectromagneticSolverAlgo::Yee } }; const std::map electrostatic_solver_algo_to_int = { @@ -131,14 +132,14 @@ GetAlgorithmInteger( amrex::ParmParse& pp, const char* pp_search_key ){ // Pick the right dictionary std::map algo_to_int; if (0 == std::strcmp(pp_search_key, "maxwell_solver")) { - algo_to_int = maxwell_solver_algo_to_int; + algo_to_int = electromagnetic_solver_algo_to_int; } else if (0 == std::strcmp(pp_search_key, "do_electrostatic")) { algo_to_int = electrostatic_solver_algo_to_int; } else if (0 == std::strcmp(pp_search_key, "particle_pusher")) { algo_to_int = particle_pusher_algo_to_int; } else if (0 == std::strcmp(pp_search_key, "current_deposition")) { algo_to_int = current_deposition_algo_to_int; - if (WarpX::maxwell_solver_id == 
MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) algo_to_int["default"] = CurrentDepositionAlgo::Direct; } else if (0 == std::strcmp(pp_search_key, "charge_deposition")) { algo_to_int = charge_deposition_algo_to_int; diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index a36d31e93a1..42b2308b9bc 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -74,8 +74,8 @@ void ParseGeometryInput() #ifdef WARPX_DIM_RZ ParmParse pp_algo("algo"); - int maxwell_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) + int electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(prob_lo[0] == 0., "Lower bound of radial coordinate (prob_lo[0]) with RZ PSATD solver must be zero"); @@ -302,10 +302,10 @@ void CheckGriddingForRZSpectral () CheckDims(); ParmParse pp_algo("algo"); - int maxwell_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); + int electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); // only check for PSATD in RZ - if (maxwell_solver_id != MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) return; int max_level; @@ -393,7 +393,7 @@ void ReadBCParams () ParmParse pp_geometry("geometry"); ParmParse pp_warpx("warpx"); ParmParse pp_algo("algo"); - int maxwell_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); + int electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !pp_geometry.queryarr("is_periodic", geom_periodicity), @@ -448,7 +448,7 @@ void ReadBCParams () } WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - (maxwell_solver_id != MaxwellSolverAlgo::PSATD) || + (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) || ( WarpX::field_boundary_lo[idim] != FieldBoundaryType::PEC && WarpX::field_boundary_hi[idim] != FieldBoundaryType::PEC diff --git a/Source/WarpX.H b/Source/WarpX.H index 402b4ee660d..1cad22d40a4 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -155,7 +155,7 @@ public: //! Integer that corresponds to the particle push algorithm (Boris, Vay, Higuera-Cary) static short particle_pusher_algo; //! Integer that corresponds to the type of Maxwell solver (Yee, CKC, PSATD, ECT) - static short maxwell_solver_id; + static short electromagnetic_solver_id; /** Records a number corresponding to the load balance cost update strategy * being used (0, 1, 2 corresponding to timers, heuristic, or gpuclock). 
*/ @@ -733,7 +733,7 @@ public: static const amrex::iMultiFab* CurrentBufferMasks (int lev); static const amrex::iMultiFab* GatherBufferMasks (int lev); - static int do_electrostatic; + static int electrostatic_solver_id; // Parameters for lab frame electrostatic static amrex::Real self_fields_required_precision; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 7f697f66810..2e0fe7c3e2a 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -120,7 +120,7 @@ short WarpX::current_deposition_algo; short WarpX::charge_deposition_algo; short WarpX::field_gathering_algo; short WarpX::particle_pusher_algo; -short WarpX::maxwell_solver_id; +short WarpX::electromagnetic_solver_id; short WarpX::J_in_time; short WarpX::rho_in_time; short WarpX::load_balance_costs_update_algo; @@ -182,7 +182,7 @@ Real WarpX::particle_slice_width_lab = 0.0_rt; bool WarpX::do_dynamic_scheduling = true; -int WarpX::do_electrostatic; +int WarpX::electrostatic_solver_id; Real WarpX::self_fields_required_precision = 1.e-11_rt; Real WarpX::self_fields_absolute_tolerance = 0.0_rt; int WarpX::self_fields_max_iters = 200; @@ -367,7 +367,7 @@ WarpX::WarpX () && WarpX::load_balance_costs_update_algo==LoadBalanceCostsUpdateAlgo::Heuristic) { #ifdef AMREX_USE_GPU - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { switch (WarpX::nox) { case 1: @@ -408,12 +408,12 @@ WarpX::WarpX () // Allocate field solver objects #ifdef WARPX_USE_PSATD - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { spectral_solver_fp.resize(nlevs_max); spectral_solver_cp.resize(nlevs_max); } #endif - if (WarpX::maxwell_solver_id != MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { m_fdtd_solver_fp.resize(nlevs_max); m_fdtd_solver_cp.resize(nlevs_max); } @@ -426,7 +426,7 @@ WarpX::WarpX () // Sanity checks. 
Must be done after calling the MultiParticleContainer // constructor, as it reads additional parameters // (e.g., use_fdtd_nci_corr) - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { AMREX_ALWAYS_ASSERT(use_fdtd_nci_corr == 0); AMREX_ALWAYS_ASSERT(do_subcycling == 0); } @@ -467,7 +467,7 @@ WarpX::ReadParameters () { ParmParse pp_algo("algo"); - maxwell_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); + electromagnetic_solver_id = GetAlgorithmInteger(pp_algo, "maxwell_solver"); } { @@ -687,14 +687,19 @@ WarpX::ReadParameters () "The boosted frame diagnostic currently only works if the moving window is in the z direction."); } - do_electrostatic = GetAlgorithmInteger(pp_warpx, "do_electrostatic"); + electrostatic_solver_id = GetAlgorithmInteger(pp_warpx, "do_electrostatic"); + // if an electrostatic solver is used, set the Maxwell solver to None + if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { + electromagnetic_solver_id = ElectromagneticSolverAlgo::None; + } #if defined(AMREX_USE_EB) && defined(WARPX_DIM_RZ) - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(do_electrostatic!=ElectrostaticSolverAlgo::None, - "Currently, the embedded boundary in RZ only works for electrostatic solvers."); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + electromagnetic_solver_id==ElectromagneticSolverAlgo::None, + "Currently, the embedded boundary in RZ only works for electrostatic solvers (or no solver)."); #endif - if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) { + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) { // Note that with the relativistic version, these parameters would be // input for each species. utils::parser::queryWithParser( @@ -721,7 +726,7 @@ WarpX::ReadParameters () // Filter currently not working with FDTD solver in RZ geometry: turn OFF by default // (see https://github.com/ECP-WarpX/WarpX/issues/1943) #ifdef WARPX_DIM_RZ - if (WarpX::maxwell_solver_id != MaxwellSolverAlgo::PSATD) WarpX::use_filter = false; + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) WarpX::use_filter = false; #endif // Read filter and fill IntVect filter_npass_each_dir with @@ -742,7 +747,7 @@ WarpX::ReadParameters () // TODO When k-space filtering will be implemented also for Cartesian geometries, // this code block will have to be applied in all cases (remove #ifdef condition) #ifdef WARPX_DIM_RZ - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // With RZ spectral, only use k-space filtering use_kspace_filter = use_filter; use_filter = false; @@ -814,7 +819,7 @@ WarpX::ReadParameters () { // SilverMueller is implemented for Yee WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - maxwell_solver_id == MaxwellSolverAlgo::Yee, + electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee, "The Silver-Mueller boundary condition can only be used with the Yee solver."); } } @@ -835,7 +840,7 @@ WarpX::ReadParameters () // Default values of WarpX::do_pml_dive_cleaning and WarpX::do_pml_divb_cleaning: // false for FDTD solver, true for PSATD solver. 
- if (maxwell_solver_id != MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { do_pml_dive_cleaning = false; do_pml_divb_cleaning = false; @@ -853,14 +858,14 @@ WarpX::ReadParameters () // If WarpX::do_divb_cleaning = true, set also WarpX::do_pml_divb_cleaning = true // (possibly overwritten by users in the input file, see query below) // TODO Implement div(B) cleaning in PML with FDTD and remove second if condition - if (do_divb_cleaning && maxwell_solver_id == MaxwellSolverAlgo::PSATD) do_pml_divb_cleaning = true; + if (do_divb_cleaning && electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) do_pml_divb_cleaning = true; // Query input parameters to use div(E) and div(B) cleaning in PMLs pp_warpx.query("do_pml_dive_cleaning", do_pml_dive_cleaning); pp_warpx.query("do_pml_divb_cleaning", do_pml_divb_cleaning); // TODO Implement div(B) cleaning in PML with FDTD and remove ASSERT - if (maxwell_solver_id != MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( do_pml_divb_cleaning == false, @@ -869,7 +874,7 @@ WarpX::ReadParameters () // Divergence cleaning in PMLs for PSATD solver implemented only // for both div(E) and div(B) cleaning - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( do_pml_dive_cleaning == do_pml_divb_cleaning, @@ -883,7 +888,7 @@ WarpX::ReadParameters () } #ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( isAnyBoundaryPML() == false || maxwell_solver_id == MaxwellSolverAlgo::PSATD, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( isAnyBoundaryPML() == false || electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD, "PML are not implemented in RZ geometry with FDTD; please set a different boundary condition using boundary.field_lo and boundary.field_hi."); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( field_boundary_lo[1] != FieldBoundaryType::PML && field_boundary_hi[1] != FieldBoundaryType::PML, "PML are not implemented in RZ geometry along z; please set a different boundary condition using boundary.field_lo and boundary.field_hi."); @@ -956,11 +961,11 @@ WarpX::ReadParameters () { ParmParse pp_algo("algo"); #ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( maxwell_solver_id != MaxwellSolverAlgo::CKC, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id != ElectromagneticSolverAlgo::CKC, "algo.maxwell_solver = ckc is not (yet) available for RZ geometry"); #endif #ifndef WARPX_USE_PSATD - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( maxwell_solver_id != MaxwellSolverAlgo::PSATD, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD, "algo.maxwell_solver = psatd is not supported because WarpX was built without spectral solvers"); #endif @@ -975,7 +980,7 @@ WarpX::ReadParameters () "Error : Field boundary at r=0 must be ``none``. 
\n"); } - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Force do_nodal=true (that is, not staggered) and // use same shape factors in all directions, for gathering do_nodal = true; @@ -1155,7 +1160,7 @@ WarpX::ReadParameters () } } - if (maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { ParmParse pp_psatd("psatd"); pp_psatd.query("periodic_single_box_fft", fft_periodic_single_box); @@ -1408,7 +1413,7 @@ WarpX::ReadParameters () } } - if (maxwell_solver_id != MaxwellSolverAlgo::PSATD ) { + if (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (WarpX::field_boundary_lo[idim] != FieldBoundaryType::Damped) && @@ -1670,7 +1675,7 @@ WarpX::ClearLevel (int lev) rho_cp[lev].reset(); #ifdef WARPX_USE_PSATD - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { spectral_solver_fp[lev].reset(); spectral_solver_cp[lev].reset(); } @@ -1711,12 +1716,11 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d WarpX::nox, nox_fft, noy_fft, noz_fft, NCIGodfreyFilter::m_stencil_width, - maxwell_solver_id, + electromagnetic_solver_id, maxLevel(), WarpX::m_v_galilean, WarpX::m_v_comoving, safe_guard_cells, - WarpX::do_electrostatic, WarpX::do_multi_J, WarpX::fft_do_time_averaging, WarpX::isAnyBoundaryPML(), @@ -1824,7 +1828,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm G_nodal_flag = amrex::IntVect::TheNodeVector(); } #ifdef WARPX_DIM_RZ - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Force cell-centered IndexType in r and z Ex_nodal_flag = IntVect::TheCellVector(); Ey_nodal_flag = IntVect::TheCellVector(); @@ -1911,9 +1915,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // EB info are needed only at the finest level if (lev == maxLevel()) { - if(WarpX::maxwell_solver_id == MaxwellSolverAlgo::Yee - || WarpX::maxwell_solver_id == MaxwellSolverAlgo::CKC - || WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { m_edge_lengths[lev][0] = std::make_unique(amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[x]")); m_edge_lengths[lev][1] = std::make_unique(amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[y]")); m_edge_lengths[lev][2] = std::make_unique(amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[z]")); @@ -1921,7 +1923,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm m_face_areas[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[y]")); m_face_areas[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[z]")); } - if(WarpX::maxwell_solver_id == MaxwellSolverAlgo::ECT) { + if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { m_edge_lengths[lev][0] = std::make_unique(amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[x]")); m_edge_lengths[lev][1] = 
std::make_unique(amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[y]")); m_edge_lengths[lev][2] = std::make_unique(amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[z]")); @@ -1954,8 +1956,8 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } #endif - bool deposit_charge = do_dive_cleaning || (do_electrostatic == ElectrostaticSolverAlgo::LabFrame); - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) { + bool deposit_charge = do_dive_cleaning || (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame); + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { deposit_charge = do_dive_cleaning || update_with_rho || current_correction; } if (deposit_charge) @@ -1965,7 +1967,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm rho_fp[lev] = std::make_unique(amrex::convert(ba,rho_nodal_flag),dm,rho_ncomps,ngRho,tag("rho_fp")); } - if (do_electrostatic == ElectrostaticSolverAlgo::LabFrame) + if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) { IntVect ngPhi = IntVect( AMREX_D_DECL(1,1,1) ); phi_fp[lev] = std::make_unique(amrex::convert(ba,phi_nodal_flag),dm,ncomps,ngPhi,tag("phi_fp")); @@ -1989,7 +1991,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm G_fp[lev] = std::make_unique(amrex::convert(ba, G_nodal_flag), dm, ncomps, ngG, tag("G_fp")); } - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Allocate and initialize the spectral solver #ifndef WARPX_USE_PSATD @@ -2042,9 +2044,9 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm pml_flag_false); # endif #endif - } // MaxwellSolverAlgo::PSATD + } // ElectromagneticSolverAlgo::PSATD else { - m_fdtd_solver_fp[lev] = std::make_unique(maxwell_solver_id, dx, do_nodal); + m_fdtd_solver_fp[lev] = std::make_unique(electromagnetic_solver_id, dx, do_nodal); } // @@ -2151,7 +2153,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } } - if (WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD) + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Allocate and initialize the spectral solver #ifndef WARPX_USE_PSATD @@ -2186,9 +2188,9 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm pml_flag_false); # endif #endif - } // MaxwellSolverAlgo::PSATD + } // ElectromagneticSolverAlgo::PSATD else { - m_fdtd_solver_cp[lev] = std::make_unique(maxwell_solver_id, cdx, + m_fdtd_solver_cp[lev] = std::make_unique(electromagnetic_solver_id, cdx, do_nodal); } } @@ -2505,7 +2507,7 @@ WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp, void WarpX::ComputeDivE(amrex::MultiFab& divE, const int lev) { - if ( WarpX::maxwell_solver_id == MaxwellSolverAlgo::PSATD ) { + if ( WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD ) { #ifdef WARPX_USE_PSATD spectral_solver_fp[lev]->ComputeSpectralDivE( lev, Efield_aux[lev], divE ); #else From 6beaa9fc0f5a68694d8dfbb70d6eae01446e4490 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 8 Nov 2022 10:59:13 -0800 Subject: [PATCH 0146/1346] Docs: Improve MPI Threading User FAQ (#3501) * Docs: Improve MPI Threading User FAQ Clarify that this FAQ entry explains the initial output of WarpX in stdout. 
* Mention: `amrex.async_out_nfiles` * Link Build Options, too --- Docs/source/usage/faq.rst | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/Docs/source/usage/faq.rst b/Docs/source/usage/faq.rst index 59c8c3aee49..67cea8d6621 100644 --- a/Docs/source/usage/faq.rst +++ b/Docs/source/usage/faq.rst @@ -6,16 +6,27 @@ FAQ This section lists frequently asked usage questions. -What is MPI thread support level? ---------------------------------- +What is "MPI initialized with thread support level ..."? +-------------------------------------------------------- -We report this in output on startup together with other information. +When we start up WarpX, we report a couple of information on used MPI processes across parallel compute processes, CPU threads or GPUs and further capabilities. +For instance, a parallel, multi-process, multi-threaded CPU run could output:: -That is the `MPI support for threaded execution `__, e.g., with OpenMP or system threads. + MPI initialized with 4 MPI processes + MPI initialized with thread support level 3 + OMP initialized with 8 OMP threads + AMReX (22.10-20-g3082028e4287) initialized + ... +The 1st line is the number of parallel MPI processes (also called *MPI ranks*). + +The 2nd line reports on the `support level of MPI functions to be called from threads `__. We currently only use this for optional, :ref:`async IO with AMReX plotfiles `. In the past, requesting MPI threading support had performance penalties, but we have not seen such anymore on recent systems. -Thus, we request it by default but you can overwrite it with a compile time option if it ever becomes needed. +Thus, we request it by default but you can overwrite it with a :ref:`compile time option ` if it ever becomes needed. + +The 3rd line is the number of CPU OpenMP (OMP) threads per MPI process. +After that, information on software versions follow. How do I suppress tiny profiler output if I do not care to see it? 
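For readers unfamiliar with the thread-support levels discussed in the FAQ text above: the number printed at startup is the ``provided`` value returned by ``MPI_Init_thread``. The short standalone program below is a minimal sketch using only the standard MPI C API; it is not taken from WarpX or AMReX source, it only illustrates where that number comes from::

    // Minimal sketch: where "thread support level N" comes from.
    // Standard MPI C API only; not WarpX/AMReX initialization code.
    #include <mpi.h>
    #include <cstdio>

    int main (int argc, char* argv[])
    {
        int provided = MPI_THREAD_SINGLE;
        // Request full multi-threading support; the MPI library may grant less.
        MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0) {
            // The MPI standard only guarantees the ordering
            // MPI_THREAD_SINGLE < FUNNELED < SERIALIZED < MULTIPLE;
            // most implementations encode these as 0..3, hence "level 3".
            std::printf("MPI initialized with thread support level %d\n", provided);
        }

        MPI_Finalize();
        return 0;
    }

As the FAQ notes, only the optional asynchronous plotfile output relies on the higher thread-support levels; everything else works with whatever level the library grants.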
From 3e98c31a491fb438cb98692c7a62dbcdd717c39b Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 9 Nov 2022 11:29:57 -0800 Subject: [PATCH 0147/1346] BTD: remove old/legacy back-transformed diagnostics (#3485) * Start removing old BTD * Remove GetCellCenteredData * Remove do_backtransform_fields and do_backtransform_particles * Remove more functions * Remove more variables * Update documentation * Fix CI test `RigidInjection_BTD` * Remove slicing from `BTD_ReducedSliceDiag` * Rename `BTD_ReducedSliceDiag` as `LaserAcceleration_BTD` * Query deprecated input and abort Co-authored-by: Edoardo Zoni --- Docs/source/usage/parameters.rst | 80 - .../analysis_rigid_injection_BoostedFrame.py | 34 +- .../RigidInjection/inputs_2d_BoostedFrame | 5 - ..._3Dbacktransformed_diag.py => analysis.py} | 24 - .../{inputs_3d_slice => inputs_3d} | 16 - .../plasma_acceleration/inputs_2d_boost | 3 - .../plasma_acceleration/inputs_3d_boost | 3 - Python/pywarpx/picmi.py | 63 +- ...ceDiag.json => LaserAcceleration_BTD.json} | 0 Regression/WarpX-tests.ini | 8 +- .../Diagnostics/BackTransformedDiagnostic.H | 287 --- .../Diagnostics/BackTransformedDiagnostic.cpp | 1662 ----------------- .../BackTransformedDiagnostic_fwd.H | 15 - Source/Diagnostics/CMakeLists.txt | 1 - Source/Diagnostics/Make.package | 1 - Source/Diagnostics/WarpXIO.cpp | 40 - Source/Evolve/WarpXEvolve.cpp | 14 - Source/Initialization/WarpXInitData.cpp | 23 - Source/Particles/LaserParticleContainer.cpp | 1 - Source/Particles/MultiParticleContainer.H | 15 - Source/Particles/MultiParticleContainer.cpp | 76 - Source/Particles/PhotonParticleContainer.cpp | 3 +- Source/Particles/PhysicalParticleContainer.H | 6 - .../Particles/PhysicalParticleContainer.cpp | 196 +- Source/Particles/WarpXParticleContainer.H | 7 - Source/WarpX.H | 28 - Source/WarpX.cpp | 131 +- Tools/LibEnsemble/sim/inputs | 9 - .../automated_test_4_labdiags_2ppc | 5 - 29 files changed, 85 insertions(+), 2671 deletions(-) rename Examples/Modules/boosted_diags/{analysis_3Dbacktransformed_diag.py => analysis.py} (62%) rename Examples/Modules/boosted_diags/{inputs_3d_slice => inputs_3d} (87%) rename Regression/Checksum/benchmarks_json/{BTD_ReducedSliceDiag.json => LaserAcceleration_BTD.json} (100%) delete mode 100644 Source/Diagnostics/BackTransformedDiagnostic.H delete mode 100644 Source/Diagnostics/BackTransformedDiagnostic.cpp delete mode 100644 Source/Diagnostics/BackTransformedDiagnostic_fwd.H diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 84add7daac1..21f7e7db4ba 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -955,11 +955,6 @@ Particle initialization lead to memory issues if not periodically cleared. To clear the buffer call ``warpx_clearParticleBoundaryBuffer()``. -* ``.do_back_transformed_diagnostics`` (`0` or `1` optional, default `1`) - Only used when ``warpx.do_back_transformed_diagnostics=1``. When running in a - boosted frame, whether or not to plot back-transformed diagnostics for - this species. - * ``.do_field_ionization`` (`0` or `1`) optional (default `0`) Do field ionization for this species (using the ADK theory). @@ -2182,81 +2177,6 @@ BackTransformed Diagnostics (with support for Plotfile/openPMD output) value for buffer size and use slices to reduce the memory footprint and maintain optimum I/O performance. 
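The same buffer-size trade-off applies to the new-style back-transformed diagnostics that replace the legacy output removed below; these are configured per diagnostic name rather than through global ``warpx.*`` flags. A minimal sketch, assuming the per-diagnostic parameter names documented in the retained section above and the ``diag2`` blocks used in the test inputs of this patch (``diag_btd`` is an arbitrary name and the numeric values are placeholders, not recommendations)::

    diagnostics.diags_names = diag_btd
    diag_btd.diag_type = BackTransformed
    diag_btd.num_snapshots_lab = 4
    diag_btd.dt_snapshots_lab = 3.34e-12
    diag_btd.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho
    diag_btd.format = openpmd
    diag_btd.openpmd_backend = h5
    diag_btd.buffer_size = 32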
-Back-Transformed Diagnostics (legacy output) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -``BackTransformedDiagnostics`` are used when running a simulation in a boosted frame, to reconstruct output data to the lab frame - -* ``warpx.do_back_transformed_diagnostics`` (`0` or `1`) - Whether to use the **back-transformed diagnostics** (i.e. diagnostics that - perform on-the-fly conversion to the laboratory frame, when running - boosted-frame simulations) - -* ``warpx.lab_data_directory`` (`string`) - The directory in which to save the lab frame data when using the - **back-transformed diagnostics**. If not specified, the default is - is `lab_frame_data`. - -* ``warpx.num_snapshots_lab`` (`integer`) - Only used when ``warpx.do_back_transformed_diagnostics`` is ``1``. - The number of lab-frame snapshots that will be written. - -* ``warpx.dt_snapshots_lab`` (`float`, in seconds) - Only used when ``warpx.do_back_transformed_diagnostics`` is ``1``. - The time interval inbetween the lab-frame snapshots (where this - time interval is expressed in the laboratory frame). - -* ``warpx.dz_snapshots_lab`` (`float`, in meters) - Only used when ``warpx.do_back_transformed_diagnostics`` is ``1``. - Distance between the lab-frame snapshots (expressed in the laboratory - frame). ``dt_snapshots_lab`` is then computed by - ``dt_snapshots_lab = dz_snapshots_lab/c``. Either `dt_snapshots_lab` - or `dz_snapshot_lab` is required. - -* ``warpx.do_back_transformed_fields`` (`0 or 1`) - Whether to use the **back-transformed diagnostics** for the fields. - -* ``warpx.back_transformed_diag_fields`` (space-separated list of `string`) - Which fields to dumped in back-transformed diagnostics. Choices are - 'Ex', 'Ey', Ez', 'Bx', 'By', Bz', 'jx', 'jy', jz' and 'rho'. Example: - ``warpx.back_transformed_diag_fields = Ex Ez By``. By default, all fields - are dumped. - -* ``warpx.buffer_size`` (`integer`) - The default size of the back transformed diagnostic buffers used to generate lab-frame - data is 256. That is, when the multifab with lab-frame data has 256 z-slices, - the data will be flushed out. However, if many lab-frame snapshots are required for - diagnostics and visualization, the GPU may run out of memory with many large boxes with - a size of 256 in the z-direction. This input parameter can then be used to set a - smaller buffer-size, preferably multiples of 8, such that, a large number of - lab-frame snapshot data can be generated without running out of gpu memory. - The downside to using a small buffer size, is that the I/O time may increase due - to frequent flushes of the lab-frame data. The other option is to keep the default - value for buffer size and use slices to reduce the memory footprint and maintain - optimum I/O performance. - -* ``slice.num_slice_snapshots_lab`` (`integer`) - Only used when ``warpx.do_back_transformed_diagnostics`` is ``1``. - The number of back-transformed field and particle data that - will be written for the reduced domain defined by ``slice.dom_lo`` - and ``slice.dom_hi``. Note that the 'slice' is a reduced - diagnostic which could be 1D, 2D, or 3D, aligned with the co-ordinate axes. - These slices can be visualized using read_raw_data.py and the HDF5 format can - be visualized using the h5py library. Please see the documentation on visualization - for further details. - -* ``slice.dt_slice_snapshots_lab`` (`float`, in seconds) - Only used when ``warpx.do_back_transformed_diagnostics`` is ``1``. 
- The time interval between the back-transformed reduced diagnostics (where this - time interval is expressed in the laboratory frame). - -* ``slice.particle_slice_width_lab`` (`float`, in meters) - Only used when ``warpx.do_back_transformed_diagnostics`` is ``1`` and - ``slice.num_slice_snapshots_lab`` is non-zero. Particles are - copied from the full back-transformed diagnostic to the reduced - slice diagnostic if there are within the user-defined width from - the slice region defined by ``slice.dom_lo`` and ``slice.dom_hi``. - Boundary Scraping Diagnostics ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py b/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py index 8881eac7ba9..ccb55183241 100755 --- a/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py +++ b/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py @@ -25,7 +25,6 @@ import numpy as np import openpmd_api as io -import read_raw_data from scipy.constants import m_e import yt @@ -36,18 +35,10 @@ filename = sys.argv[1] -# Tolerances to check consistency between legacy BTD and new BTD +# Tolerances to check consistency between plotfile BTD and openPMD BTD rtol = 1e-16 atol = 1e-16 -# Read data from legacy back-transformed diagnostics -snapshot = './lab_frame_data/snapshots/snapshot00001' -x_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'x') -z_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'z') -ux_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'ux') -uy_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'uy') -uz_legacy = read_raw_data.get_particle_field(snapshot, 'beam', 'uz') - # Read data from new back-transformed diagnostics (plotfile) ds_plotfile = yt.load(filename) x_plotfile = ds_plotfile.all_data()['beam', 'particle_position_x'].v @@ -66,19 +57,12 @@ uz_openpmd = ds_openpmd.particles['beam']['momentum']['z'][:] series.flush() -# Sort and compare arrays to check consistency between legacy BTD and new BTD (plotfile) -assert(np.allclose(np.sort(x_legacy), np.sort(x_plotfile), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(z_legacy), np.sort(z_plotfile), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(ux_legacy*m_e), np.sort(ux_plotfile), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(uy_legacy*m_e), np.sort(uy_plotfile), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(uz_legacy*m_e), np.sort(uz_plotfile), rtol=rtol, atol=atol)) - -# Sort and compare arrays to check consistency between legacy BTD and new BTD (openPMD) -assert(np.allclose(np.sort(x_legacy), np.sort(x_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(z_legacy), np.sort(z_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(ux_legacy*m_e), np.sort(ux_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(uy_legacy*m_e), np.sort(uy_openpmd), rtol=rtol, atol=atol)) -assert(np.allclose(np.sort(uz_legacy*m_e), np.sort(uz_openpmd), rtol=rtol, atol=atol)) +# Sort and compare arrays to check consistency between plotfile BTD and openPMD BTD +assert(np.allclose(np.sort(x_plotfile), np.sort(x_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(z_plotfile), np.sort(z_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(ux_plotfile), np.sort(ux_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(uy_plotfile), np.sort(uy_openpmd), rtol=rtol, atol=atol)) +assert(np.allclose(np.sort(uz_plotfile), np.sort(uz_openpmd), rtol=rtol, atol=atol)) 
# Initial parameters z0 = 20.e-6 @@ -86,8 +70,8 @@ theta0 = np.arcsin(0.1) # Theoretical beam width after propagation with rigid injection -z = np.mean(z_legacy) -x = np.std(x_legacy) +z = np.mean(z_plotfile) +x = np.std(x_plotfile) print(f'Beam position = {z}') print(f'Beam width = {x}') diff --git a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame index d5c4c2b3cec..9bf858b8d7a 100644 --- a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame +++ b/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame @@ -68,8 +68,3 @@ diag2.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho diag2.format = openpmd diag2.openpmd_backend = h5 diag2.buffer_size = 32 - -# old BTD diagnostics -warpx.do_back_transformed_diagnostics = 1 -warpx.num_snapshots_lab = 2 -warpx.dt_snapshots_lab = 1.8679589331096515e-13 diff --git a/Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py b/Examples/Modules/boosted_diags/analysis.py similarity index 62% rename from Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py rename to Examples/Modules/boosted_diags/analysis.py index 6fa4e9c936f..c6c089f9807 100755 --- a/Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py +++ b/Examples/Modules/boosted_diags/analysis.py @@ -21,7 +21,6 @@ import numpy as np import openpmd_api as io -import read_raw_data import yt yt.funcs.mylog.setLevel(0) @@ -35,22 +34,6 @@ rtol = 1e-16 atol = 1e-16 -# Read data from legacy back-transformed diagnostics (entire domain) -snapshot = './lab_frame_data/snapshots/snapshot00003' -header = './lab_frame_data/snapshots/Header' -allrd, info = read_raw_data.read_lab_snapshot(snapshot, header) -Ez_legacy = allrd['Ez'] -print(f'Ez_legacy.shape = {Ez_legacy.shape}') -Ez_legacy_1D = np.squeeze(Ez_legacy[Ez_legacy.shape[0]//2,Ez_legacy.shape[1]//2,:]) - -# Read data from reduced back-transformed diagnostics (slice) -snapshot_slice = './lab_frame_data/slices/slice00003' -header_slice = './lab_frame_data/slices/Header' -allrd, info = read_raw_data.read_lab_snapshot(snapshot_slice, header_slice) -Ez_legacy_slice = allrd['Ez'] -print(f'Ez_legacy_slice.shape = {Ez_legacy_slice.shape}') -Ez_legacy_slice_1D = np.squeeze(Ez_legacy_slice[Ez_legacy_slice.shape[0]//2,1,:]) - # Read data from new back-transformed diagnostics (plotfile) ds_plotfile = yt.load(filename) data = ds_plotfile.covering_grid( @@ -69,12 +52,5 @@ # Compare arrays to check consistency between new BTD formats (plotfile and openPMD) assert(np.allclose(Ez_plotfile, Ez_openpmd, rtol=rtol, atol=atol)) -# Check slicing -err = np.max(np.abs(Ez_legacy_slice_1D-Ez_legacy_1D)) / np.max(np.abs(Ez_legacy_1D)) -tol = 1e-16 -print(f'error = {err}') -print(f'tolerance = {tol}') -assert(err < tol) - test_name = os.path.split(os.getcwd())[1] checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Modules/boosted_diags/inputs_3d_slice b/Examples/Modules/boosted_diags/inputs_3d similarity index 87% rename from Examples/Modules/boosted_diags/inputs_3d_slice rename to Examples/Modules/boosted_diags/inputs_3d index ff68b590993..ba98558be47 100644 --- a/Examples/Modules/boosted_diags/inputs_3d_slice +++ b/Examples/Modules/boosted_diags/inputs_3d @@ -55,7 +55,6 @@ electrons.zmax = .003 electrons.profile = constant electrons.density = 3.5e24 electrons.do_continuous_injection = 1 -electrons.do_back_transformed_diagnostics = 1 ions.charge = q_e ions.mass = m_p @@ -71,7 +70,6 @@ ions.zmax = .003 ions.profile = constant ions.density = 3.5e24 
ions.do_continuous_injection = 1 -ions.do_back_transformed_diagnostics = 1 beam.charge = -q_e beam.mass = m_e @@ -104,14 +102,6 @@ laser1.profile_t_peak = 40.e-15 # The time at which the laser reaches its pea laser1.profile_focal_distance = 0.5e-3 # Focal distance from the antenna (in meters) laser1.wavelength = 0.81e-6 # The wavelength of the laser (in meters) -slice.dom_lo = xmin 0.0 zmin -slice.dom_hi = xmax 0.0 zmax -slice.coarsening_ratio = 1 1 1 -slice.plot_int = -1 -slice.num_slice_snapshots_lab = 4 -slice.dt_slice_snapshots_lab = 3.3356409519815207e-12 -slice.particle_slice_width_lab = 2.e-6 - # Diagnostics diagnostics.diags_names = diag1 diag2 @@ -132,9 +122,3 @@ diag2.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho diag2.format = openpmd diag2.buffer_size = 32 diag2.openpmd_backend = h5 - -# old BTD diagnostics -warpx.do_back_transformed_diagnostics = 1 -warpx.num_snapshots_lab = 4 -warpx.dz_snapshots_lab = 0.001 -warpx.back_transformed_diag_fields= Ex Ey Ez By rho diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_2d_boost b/Examples/Physics_applications/plasma_acceleration/inputs_2d_boost index b2c76e6859a..76dcd3ee286 100644 --- a/Examples/Physics_applications/plasma_acceleration/inputs_2d_boost +++ b/Examples/Physics_applications/plasma_acceleration/inputs_2d_boost @@ -38,9 +38,6 @@ algo.particle_shape = 3 ################################# warpx.gamma_boost = 10.0 warpx.boost_direction = z -warpx.do_back_transformed_diagnostics = 1 -warpx.num_snapshots_lab = 22 -warpx.dt_snapshots_lab = 3.335640951981521e-11 ################################# ############ PLASMA ############# diff --git a/Examples/Physics_applications/plasma_acceleration/inputs_3d_boost b/Examples/Physics_applications/plasma_acceleration/inputs_3d_boost index c1c630dcaf0..2264872ec43 100644 --- a/Examples/Physics_applications/plasma_acceleration/inputs_3d_boost +++ b/Examples/Physics_applications/plasma_acceleration/inputs_3d_boost @@ -38,9 +38,6 @@ algo.particle_shape = 3 ################################# warpx.gamma_boost = 10.0 warpx.boost_direction = z -warpx.do_back_transformed_diagnostics = 1 -warpx.num_snapshots_lab = 22 -warpx.dt_snapshots_lab = 3.335640951981521e-11 ################################# ############ PLASMA ############# diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index c112f01576c..30b9f4c7ca5 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1922,9 +1922,6 @@ class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic, Parameters ---------- - warpx_new_BTD: bool, optional - Use the new BTD diagnostics - warpx_format: string, optional Passed to .format @@ -1947,36 +1944,16 @@ class LabFrameFieldDiagnostic(picmistandard.PICMI_LabFrameFieldDiagnostic, Passed to .upper_bound """ def init(self, kw): - self.use_new_BTD = kw.pop('warpx_new_BTD', False) - if self.use_new_BTD: - # The user is using the new BTD - self.format = kw.pop('warpx_format', None) - self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) - self.file_prefix = kw.pop('warpx_file_prefix', None) - self.file_min_digits = kw.pop('warpx_file_min_digits', None) - self.buffer_size = kw.pop('warpx_buffer_size', None) - self.lower_bound = kw.pop('warpx_lower_bound', None) - self.upper_bound = kw.pop('warpx_upper_bound', None) + # The user is using the new BTD + self.format = kw.pop('warpx_format', None) + self.openpmd_backend = kw.pop('warpx_openpmd_backend', None) + self.file_prefix = kw.pop('warpx_file_prefix', None) + self.file_min_digits = 
kw.pop('warpx_file_min_digits', None) + self.buffer_size = kw.pop('warpx_buffer_size', None) + self.lower_bound = kw.pop('warpx_lower_bound', None) + self.upper_bound = kw.pop('warpx_upper_bound', None) def initialize_inputs(self): - if self.use_new_BTD: - self.initialize_inputs_new() - else: - self.initialize_inputs_old() - - def initialize_inputs_old(self): - - pywarpx.warpx.check_consistency('num_snapshots_lab', self.num_snapshots, 'The number of snapshots must be the same in all lab frame diagnostics') - pywarpx.warpx.check_consistency('dt_snapshots_lab', self.dt_snapshots, 'The time between snapshots must be the same in all lab frame diagnostics') - pywarpx.warpx.check_consistency('lab_data_directory', self.write_dir, 'The write directory must be the same in all lab frame diagnostics') - - pywarpx.warpx.do_back_transformed_diagnostics = 1 - pywarpx.warpx.num_snapshots_lab = self.num_snapshots - pywarpx.warpx.dt_snapshots_lab = self.dt_snapshots - pywarpx.warpx.do_back_transformed_fields = 1 - pywarpx.warpx.lab_data_directory = self.write_dir - - def initialize_inputs_new(self): self.add_diagnostic() @@ -2025,30 +2002,6 @@ def initialize_inputs_new(self): self.set_write_dir() - -class LabFrameParticleDiagnostic(picmistandard.PICMI_LabFrameParticleDiagnostic): - def initialize_inputs(self): - - pywarpx.warpx.check_consistency('num_snapshots_lab', self.num_snapshots, 'The number of snapshots must be the same in all lab frame diagnostics') - pywarpx.warpx.check_consistency('dt_snapshots_lab', self.dt_snapshots, 'The time between snapshots must be the same in all lab frame diagnostics') - pywarpx.warpx.check_consistency('lab_data_directory', self.write_dir, 'The write directory must be the same in all lab frame diagnostics') - - pywarpx.warpx.do_back_transformed_diagnostics = 1 - - if isinstance(self.species, Species): - self.species.do_back_transformed_diagnostics = 1 - else: - try: - for specie in self.species: - specie.do_back_transformed_diagnostics = 1 - except TypeError: - pass - - pywarpx.warpx.num_snapshots_lab = self.num_snapshots - pywarpx.warpx.dt_snapshots_lab = self.dt_snapshots - pywarpx.warpx.lab_data_directory = self.write_dir - - class ReducedDiagnostic(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): """ Sets up a reduced diagnostic in the simulation. diff --git a/Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json b/Regression/Checksum/benchmarks_json/LaserAcceleration_BTD.json similarity index 100% rename from Regression/Checksum/benchmarks_json/BTD_ReducedSliceDiag.json rename to Regression/Checksum/benchmarks_json/LaserAcceleration_BTD.json diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 94fe785d7c0..6f2f038cb41 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -231,12 +231,11 @@ compileTest = 0 doVis = 0 compareParticles = 0 doComparison = 0 -aux1File = Tools/PostProcessing/read_raw_data.py analysisRoutine = Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py -[BTD_ReducedSliceDiag] +[LaserAcceleration_BTD] buildDir = . 
-inputFile = Examples/Modules/boosted_diags/inputs_3d_slice +inputFile = Examples/Modules/boosted_diags/inputs_3d runtime_params = dim = 3 addToCompileString = USE_OPENPMD=TRUE @@ -250,8 +249,7 @@ compileTest = 0 doVis = 0 compareParticles = 0 doComparison = 0 -aux1File = Tools/PostProcessing/read_raw_data.py -analysisRoutine = Examples/Modules/boosted_diags/analysis_3Dbacktransformed_diag.py +analysisRoutine = Examples/Modules/boosted_diags/analysis.py [nci_corrector] buildDir = . diff --git a/Source/Diagnostics/BackTransformedDiagnostic.H b/Source/Diagnostics/BackTransformedDiagnostic.H deleted file mode 100644 index deb32e7a60f..00000000000 --- a/Source/Diagnostics/BackTransformedDiagnostic.H +++ /dev/null @@ -1,287 +0,0 @@ -/* Copyright 2019 Andrew Myers, Axel Huebl, Maxence Thevenet - * Revathi Jambunathan, Weiqun Zhang - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#ifndef WARPX_BackTransformedDiagnostic_H_ -#define WARPX_BackTransformedDiagnostic_H_ - -#include "BackTransformedDiagnostic_fwd.H" - -#include "Particles/MultiParticleContainer.H" -#include "Particles/WarpXParticleContainer.H" - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -/** \brief - * The capability for back-transformed lab-frame data is implemented to generate - * the full diagnostic snapshot for the entire domain and reduced diagnostic - * (1D, 2D or 3D 'slices') for a sub-domain. - * LabFrameDiag class defines the parameters required to backtrasform data from - * boosted frame at (z_boost,t_boost) to lab-frame at (z_lab, t_lab) using Lorentz - * transformation. This Lorentz transformation picks out one slice corresponding - * to both of those times, at position current_z_boost and current_z_lab in the - * boosted and lab frames, respectively. - * Two derived classes, namely, LabFrameSnapShot and LabFrameSlice are defined to - * store the full back-transformed diagnostic snapshot of the entire domain and - * reduced back-transformed diagnostic for a sub-domain, respectively. - * The approach here is to define an array of LabFrameDiag which would include - * both, full domain snapshots and reduced domain 'slices', sorted based on their - * respective t_lab. This is done to re-use the backtransformed data stored in - * the slice multifab at (z_lab,t_lab) - * for the full domain snapshot and sub-domain slices that have the same t_lab, - * instead of re-generating the backtransformed slice data at z_lab for a given - * t_lab for each diagnostic. - */ -class LabFrameDiag { - public: - std::string m_file_name; - amrex::Real m_t_lab; - amrex::RealBox m_prob_domain_lab_; - amrex::IntVect m_prob_ncells_lab_; - amrex::RealBox m_diag_domain_lab_; - amrex::Box m_buff_box_; - - amrex::Real m_current_z_lab; - amrex::Real m_current_z_boost; - amrex::Real m_inv_gamma_boost_; - amrex::Real m_inv_beta_boost_; - amrex::Real m_dz_lab_; - amrex::Real m_particle_slice_dx_lab_; - - int m_ncomp_to_dump_; - std::vector m_mesh_field_names_; - - int m_file_num; - - // For back-transformed diagnostics of grid fields, data_buffer_ - // stores a buffer of the fields in the lab frame (in a MultiFab, i.e. - // with all box data etc.). When the buffer if full, dump to file. - std::unique_ptr m_data_buffer_; - // particles_buffer_ is currently blind to refinement level. 
- // particles_buffer_[j] is a WarpXParticleContainer::DiagnosticParticleData - // where - j is the species number for the current diag - amrex::Vector m_particles_buffer_; - // buff_counter_ is the number of z slices in data_buffer_ - int m_buff_counter_; - int m_num_buffer_ = 256; - int m_max_box_size = 256; - void updateCurrentZPositions(amrex::Real t_boost, amrex::Real inv_gamma, - amrex::Real inv_beta); - - void createLabFrameDirectories(); - - void writeLabFrameHeader(); - - /// Back-transformed lab-frame field data is copied from - /// tmp_slice to data_buffer where it is stored. - /// For the full diagnostic, all the data in the MultiFab is copied. - /// For the reduced diagnostic, the data is selectively copied if the - /// extent of the z_lab multifab intersects with the user-defined sub-domain - /// for the reduced diagnostic (i.e., a 1D, 2D, or 3D region of the domain). - virtual void AddDataToBuffer(amrex::MultiFab& /*tmp_slice_ptr*/, int /*i_lab*/, - amrex::Vector const& /*map_actual_fields_to_dump*/){} - - /// Back-transformed lab-frame particles is copied from - /// tmp_particle_buffer to particles_buffer. - /// For the full diagnostic, all the particles are copied, - /// while for the reduced diagnostic, particles are selectively - /// copied if their position in within the user-defined - /// sub-domain +/- 1 cell size width for the reduced slice diagnostic. - virtual void AddPartDataToParticleBuffer( - amrex::Vector const& /*tmp_particle_buffer*/, - int /*nSpeciesBoostedFrame*/) {} - - // The destructor should also be a virtual function, so that - // a pointer to subclass of `LabFrameDiag` actually calls the subclass's destructor. - virtual ~LabFrameDiag() = default; -}; - -/** \brief - * LabFrameSnapShot stores the back-transformed lab-frame metadata - * corresponding to a single time snapshot of the full domain. - * The snapshot data is written to disk in the directory lab_frame_data/snapshots/. - * zmin_lab, zmax_lab, and t_lab are all constant for a given snapshot. - * current_z_lab and current_z_boost for each snapshot are updated as the - * simulation time in the boosted frame advances. - */ - -class LabFrameSnapShot : public LabFrameDiag { - public: - LabFrameSnapShot(amrex::Real t_lab_in, amrex::Real t_boost, - amrex::Real inv_gamma_boost_in, amrex::Real inv_beta_boost_in, - amrex::Real dz_lab_in, amrex::RealBox prob_domain_lab, - amrex::IntVect prob_ncells_lab, int ncomp_to_dump, - std::vector mesh_field_names, - amrex::RealBox diag_domain_lab, - amrex::Box diag_box, int file_num_in, const int max_box_size, - const int buffer_size); - void AddDataToBuffer( amrex::MultiFab& tmp_slice, int k_lab, - amrex::Vector const& map_actual_fields_to_dump) override; - void AddPartDataToParticleBuffer( - amrex::Vector const& tmp_particle_buffer, - int nSpeciesBoostedFrame) override; -}; - - -/** \brief - * LabFrameSlice stores the back-transformed metadata corresponding to a single time at the - * user-defined slice location. This could be a 1D line, 2D slice, or 3D box - * (a reduced back-transformed diagnostic) within the computational - * domain, as defined in the input file by the user. The slice is written to disk in the - * lab_frame_data/slices. Similar to snapshots, zmin_lab, zmax_lab, and - * t_lab are constant for a given slice. current_z_lab and current_z_boost - * for each snapshot are updated as the sim time in boosted frame advances. 
- */ -class LabFrameSlice : public LabFrameDiag { - public: - LabFrameSlice(amrex::Real t_lab_in, amrex::Real t_boost, - amrex::Real inv_gamma_boost_in, amrex::Real inv_beta_boost_in, - amrex::Real dz_lab_in, amrex::RealBox prob_domain_lab, - amrex::IntVect prob_ncells_lab, int ncomp_to_dump, - std::vector mesh_field_names, - amrex::RealBox diag_domain_lab, - amrex::Box diag_box, int file_num_in, - amrex::Real particle_slice_dx_lab, - const int max_box_size, - const int buffer_size); - void AddDataToBuffer( amrex::MultiFab& tmp_slice_ptr, int i_lab, - amrex::Vector const& map_actual_fields_to_dump) override; - void AddPartDataToParticleBuffer( - amrex::Vector const& tmp_particle_buffer, - int nSpeciesBoostedFrame) override; -}; - -/** \brief - * BackTransformedDiagnostic class handles the back-transformation of data when - * running simulations in a boosted frame of reference to the lab-frame. - * Because of the relativity of simultaneity, events that are synchronized - * in the simulation boosted frame are not - * synchronized in the lab frame. Thus, at a given t_boost, we must write - * slices of back-transformed data to multiple output files, each one - * corresponding to a given time in the lab frame. The member function - * writeLabFrameData() orchestrates the operations required to - * Lorentz-transform data from boosted-frame to lab-frame and store them - * in the LabFrameDiag class, which writes out the field and particle data - * to the output directory. The functions Flush() and writeLabFrameData() - * are called at the end of the simulation and when the - * the buffer for data storage is full, respectively. The particle data - * is collected and written only if particle.do_back_transformed_diagnostics = 1. - */ -class BackTransformedDiagnostic { - -public: - - BackTransformedDiagnostic (amrex::Real zmin_lab, amrex::Real zmax_lab, - amrex::Real v_window_lab, amrex::Real dt_snapshots_lab, - int N_snapshots, amrex::Real dt_slice_snapshots_lab, - int N_slice_snapshots, amrex::Real gamma_boost, - amrex::Real t_boost, amrex::Real dt_boost, - int boost_direction, const amrex::Geometry& geom, - amrex::RealBox& slice_realbox, - amrex::Real particle_slice_width_lab); - - /// Flush() is called at the end of the simulation when the buffers that contain - /// back-transformed lab-frame data even if they are not full. - void Flush (const amrex::Geometry& geom); - - /** \brief - * The order of operations performed in writeLabFrameData is as follows : - * 1. Loops over the sorted back-transformed diags and for each diag - * steps 2-7 are performed - * 2. Based on t_lab and t_boost, obtain z_lab and z_boost. - * 3. Define data_buffer multifab that will store the data in the BT diag. - * 4. Define slice multifab at z_index that corresponds to z_boost and - * getslicedata using cell-centered data at z_index and its distribution map. - * 5. Lorentz transform data stored in slice from z_boost,t_Boost to z_lab,t_lab - * and store in slice multifab. - * 6. Generate a temporary slice multifab with distribution map of lab-frame - * data but at z_boost and - * ParallelCopy data from the slice multifab to the temporary slice. - * 7. Finally, AddDataToBuffer is called where the data from temporary slice - * is simply copied from tmp_slice(i,j,k_boost) to - * LabFrameDiagSnapshot(i,j,k_lab) for full BT lab-frame diagnostic - * OR from tmp_slice(i,j,k_boost) to - * LabFrameDiagSlice(i,j,k_lab) for the reduced slice diagnostic - * 8. 
Similarly, particles that crossed the z_boost plane are selected - * and lorentz-transformed to the lab-frame and copied to the full - * and reduce diagnostic and stored in particle_buffer. - */ - void writeLabFrameData (const amrex::MultiFab* cell_centered_data, - const MultiParticleContainer& mypc, - const amrex::Geometry& geom, - const amrex::Real t_boost, const amrex::Real dt); - /// The metadata containg information on t_boost, num_snapshots, and Lorentz parameters. - void writeMetaData (); - -private: - amrex::Real m_gamma_boost_; - amrex::Real m_inv_gamma_boost_; - amrex::Real m_beta_boost_; - amrex::Real m_inv_beta_boost_; - amrex::Real m_dz_lab_; - amrex::Real m_inv_dz_lab_; - amrex::Real m_dt_snapshots_lab_; - amrex::Real m_dt_boost_; - int m_N_snapshots_; - int m_boost_direction_; - int m_N_slice_snapshots_; - amrex::Real m_dt_slice_snapshots_lab_; - amrex::Real m_particle_slice_width_lab_; - - int m_num_buffer_ = 256; - int m_max_box_size_ = 256; - - std::vector > m_LabFrameDiags_; - - void writeParticleData ( - const WarpXParticleContainer::DiagnosticParticleData& pdata, - const std::string& name, const int i_lab); - -#ifdef WARPX_USE_HDF5 - void writeParticleDataHDF5( - const WarpXParticleContainer::DiagnosticParticleData& pdata, - const std::string& name, const std::string& species_name); -#endif - // Map field names and component number in cell_centered_data - std::map m_possible_fields_to_dump = { - {"Ex" , 0}, - {"Ey" , 1}, - {"Ez" , 2}, - {"Bx" , 3}, - {"By" , 4}, - {"Bz" , 5}, - {"jx" , 6}, - {"jy" , 7}, - {"jz" , 8}, - {"rho", 9} }; - - // maps field index in data_buffer_[i] -> cell_centered_data for - // snapshots i. By default, all fields in cell_centered_data are dumped. - // Needs to be amrex::Vector because used in a ParallelFor kernel. - amrex::Vector map_actual_fields_to_dump; - // Name of fields to dump. By default, all fields in cell_centered_data. - // Needed for file headers only. - std::vector m_mesh_field_names = {"Ex", "Ey", "Ez", - "Bx", "By", "Bz", - "jx", "jy", "jz", "rho"}; - int m_ncomp_to_dump = 10; - - -}; - -#endif diff --git a/Source/Diagnostics/BackTransformedDiagnostic.cpp b/Source/Diagnostics/BackTransformedDiagnostic.cpp deleted file mode 100644 index 7d148abf15e..00000000000 --- a/Source/Diagnostics/BackTransformedDiagnostic.cpp +++ /dev/null @@ -1,1662 +0,0 @@ -/* Copyright 2019 Andrew Myers, Axel Huebl, Maxence Thevenet - * Revathi Jambunathan, Weiqun Zhang - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#include "BackTransformedDiagnostic.H" - -#include "Utils/Parser/ParserUtils.H" -#include "Utils/TextMsg.H" -#include "Utils/WarpXConst.H" -#include "Utils/WarpXProfilerWrapper.H" -#include "Utils/TextMsg.H" -#include "WarpX.H" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef WARPX_USE_HDF5 - #include -#endif - -#include -#include -#include -#include - -using namespace amrex; - -namespace -{ - constexpr int permission_flag_rwxrxrx = 0755; -} - -#ifdef WARPX_USE_HDF5 - -/* - Helper functions for doing the HDF5 IO. - - */ -namespace -{ - - const std::vector particle_field_names = {"w", "x", "y", - "z", "ux", "uy", "uz"}; - - /* - Creates the HDF5 file in truncate mode and closes it. - Should be run only by the root process. 
- */ - void output_create (const std::string& file_path) { - WARPX_PROFILE("output_create"); - hid_t file = H5Fcreate(file_path.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - file >=0, - "Error: could not create file at " + file_path - ) - H5Fclose(file); - } - - /* - Writes a single string attribute to the given group. - Should only be called by the root process. - */ - void write_string_attribute (hid_t& group, const std::string& key, const std::string& val) - { - hid_t str_type = H5Tcopy(H5T_C_S1); - hid_t scalar_space = H5Screate(H5S_SCALAR); - - // Fix the str_type length for the format string. - H5Tset_size(str_type, strlen(val.c_str())); - - hid_t attr = H5Acreate(group, key.c_str(), str_type, scalar_space, H5P_DEFAULT, H5P_DEFAULT); - H5Awrite(attr, str_type, val.c_str()); - - H5Aclose(attr); - H5Sclose(scalar_space); - H5Tclose(str_type); - } - - /* - Writes a single double attribute to the given group. - Should only be called by the root process. - */ - void write_double_attribute (hid_t& group, const std::string& key, const double val) - { - hid_t scalar_space = H5Screate(H5S_SCALAR); - - hid_t attr = H5Acreate(group, key.c_str(), H5T_IEEE_F32LE, scalar_space, - H5P_DEFAULT, H5P_DEFAULT); - H5Awrite(attr, H5T_NATIVE_DOUBLE, &val); - - H5Aclose(attr); - H5Sclose(scalar_space); - } - - /* - Opens the output file and writes all of metadata attributes. - Should be run only by the root process. - */ - void output_write_metadata (const std::string& file_path, - const int istep, const Real time, const Real dt) - { - WARPX_PROFILE("output_write_metadata"); - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); - - write_string_attribute(file, "software", "warpx"); - write_string_attribute(file, "softwareVersion", "0.0.0"); - write_string_attribute(file, "meshesPath", "fields/"); - write_string_attribute(file, "iterationEncoding", "fileBased"); - write_string_attribute(file, "iterationFormat", "data%T.h5"); - write_string_attribute(file, "openPMD", "1.1.0"); - write_string_attribute(file, "basePath", "/data/%T/"); - - hid_t group = H5Gcreate(file, "data", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - group = H5Gcreate(group, std::to_string(istep).c_str(), - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - - write_double_attribute(group, "time", time); - write_double_attribute(group, "timeUnitSI", 1.0); - write_double_attribute(group, "dt", dt); - - // Field groups - group = H5Gcreate(group, "fields", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - - // Close all resources. - H5Gclose(group); - H5Fclose(file); - H5close(); - } - - /* - Creates a dataset with the given cell dimensions, at the path - "/native_fields/(field_name)". - Should be run only by the root rank. - */ - void output_create_field (const std::string& file_path, const std::string& field_path, - const unsigned nx, const unsigned ny, const unsigned nz) - { - WARPX_PROFILE("output_create_field"); - - // Open the output. - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); - // Create a 3D, nx x ny x nz dataspace. -#if defined(WARPX_DIM_3D) - hsize_t dims[3] = {nx, ny, nz}; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - hsize_t dims[3] = {nx, nz}; -#else - hsize_t dims[3] = {nz}; -#endif - hid_t grid_space = H5Screate_simple(AMREX_SPACEDIM, dims, NULL); - - // Create the dataset. 
- hid_t dataset = H5Dcreate(file, field_path.c_str(), H5T_IEEE_F64LE, - grid_space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - dataset >=0, - "Error: could not create dataset. H5 returned " - + std::to_string(dataset)) - ); - - // Close resources. - H5Dclose(dataset); - H5Sclose(grid_space); - H5Fclose(file); - } - - /* - Creates a group associated with a single particle species. - Should be run by all processes collectively. - */ - void output_create_species_group (const std::string& file_path, const std::string& species_name) - { - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - int mpi_rank; - MPI_Comm_rank(comm, &mpi_rank); - - // Create the file access prop list. - hid_t pa_plist = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(pa_plist, comm, info); - - // Open the output. - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, pa_plist); - - hid_t group = H5Gcreate(file, species_name.c_str(), - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - H5Gclose(group); - H5Fclose(file); - - } - - /* - Resize an extendible dataset, suitable for storing particle data. - Should be run only by the root rank. - */ - long output_resize_particle_field (const std::string& file_path, const std::string& field_path, - const long num_to_add) - { - WARPX_PROFILE("output_resize_particle_field"); - - // Open the output. - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); - - int rank; - hsize_t dims[1]; - - hid_t dataset = H5Dopen2 (file, field_path.c_str(), H5P_DEFAULT); - hid_t filespace = H5Dget_space (dataset); - rank = H5Sget_simple_extent_ndims (filespace); - herr_t status = H5Sget_simple_extent_dims (filespace, dims, NULL); - - // set new size - hsize_t new_size[1]; - new_size[0] = dims[0] + num_to_add; - status = H5Dset_extent (dataset, new_size); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - status >= 0, - "Error: set extent filed on dataset " - + std::to_string(dataset) - ); - - // Close resources. - H5Sclose(filespace); - H5Dclose(dataset); - H5Fclose(file); - - return dims[0]; - } - - /* - Writes to a dataset that has been extended to the proper size. Suitable for writing particle data. - Should be run on all ranks collectively. - */ - void output_write_particle_field (const std::string& file_path, const std::string& field_path, - const Real* data_ptr, const long count, const long index) - { - WARPX_PROFILE("output_write_particle_field"); - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - int mpi_rank; - MPI_Comm_rank(comm, &mpi_rank); - - // Create the file access prop list. - hid_t pa_plist = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(pa_plist, comm, info); - - // Open the output. - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, pa_plist); - - int RANK = 1; - hsize_t offset[1]; - hsize_t dims[1]; - herr_t status; - - hid_t dataset = H5Dopen (file, field_path.c_str(), H5P_DEFAULT); - - // Make sure the dataset is there. - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - dataset >= 0, - "Error on rank " + std::to_string(mpi_rank) - +". Count not find dataset " + field_path - ); - - hid_t filespace = H5Dget_space (dataset); - - offset[0] = index; - dims[0] = count; - - // Create collective io prop list. 
- hid_t collective_plist = H5Pcreate(H5P_DATASET_XFER); - H5Pset_dxpl_mpio(collective_plist, H5FD_MPIO_INDEPENDENT); - - if (count > 0) { - - /* Define memory space */ - hid_t memspace = H5Screate_simple (RANK, dims, NULL); - - status = H5Sselect_hyperslab (filespace, H5S_SELECT_SET, offset, NULL, - dims, NULL); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - status >= 0, - "Error on rank " + std::to_string(ParallelDescriptor::MyProc()) - +" could not select hyperslab." - ); - - /* Write the data to the extended portion of dataset */ - status = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, - filespace, collective_plist, data_ptr); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - status >= 0, - "Error on rank " + std::to_string(ParallelDescriptor::MyProc()) - +" could not write hyperslab." - ); - - status = H5Sclose (memspace); - } - - ParallelDescriptor::Barrier(); - - // Close resources. - H5Pclose(collective_plist); - H5Sclose(filespace); - H5Dclose(dataset); - H5Fclose(file); - H5Pclose(pa_plist); - } - - /* - Creates an extendible dataset, suitable for storing particle data. - Should be run on all ranks collectively. - */ - void output_create_particle_field (const std::string& file_path, const std::string& field_path) - { - WARPX_PROFILE("output_create_particle_field"); - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - int mpi_rank; - MPI_Comm_rank(comm, &mpi_rank); - - // Create the file access prop list. - hid_t pa_plist = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(pa_plist, comm, info); - - // Open the output. - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, pa_plist); - - constexpr int RANK = 1; - hsize_t dims[1] = {0}; - hsize_t maxdims[1] = {H5S_UNLIMITED}; - hsize_t chunk_dims[2] = {4}; - - hid_t dataspace = H5Screate_simple (RANK, dims, maxdims); - - // Enable chunking - hid_t prop = H5Pcreate (H5P_DATASET_CREATE); - herr_t status = H5Pset_chunk (prop, RANK, chunk_dims); - - hid_t dataset = H5Dcreate2 (file, field_path.c_str(), H5T_NATIVE_DOUBLE, dataspace, - H5P_DEFAULT, prop, H5P_DEFAULT); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - dataset >= 0, - "Error: could not create dataset. H5 returned " - + std::to_string(dataset) - ); - - // Close resources. - H5Dclose(dataset); - H5Pclose(prop); - H5Sclose(dataspace); - H5Fclose(file); - } - - /* - Write the only component in the multifab to the dataset given by field_name. - Uses hdf5-parallel. - */ - void output_write_field (const std::string& file_path, - const std::string& field_path, - const MultiFab& mf, const int comp, - const int lo_x, const int lo_y, const int lo_z) - { - - WARPX_PROFILE("output_write_field"); - - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Info info = MPI_INFO_NULL; - int mpi_rank; - MPI_Comm_rank(comm, &mpi_rank); - - // Create the file access prop list. - hid_t pa_plist = H5Pcreate(H5P_FILE_ACCESS); - H5Pset_fapl_mpio(pa_plist, comm, info); - - // Open the file, and the group. - hid_t file = H5Fopen(file_path.c_str(), H5F_ACC_RDWR, pa_plist); - // Open the field dataset. - hid_t dataset = H5Dopen(file, field_path.c_str(), H5P_DEFAULT); - - // Make sure the dataset is there. - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - dataset >= 0, - "Error on rank " + std::to_string(mpi_rank) - +". Count not find dataset " + field_path - ); - - // Grab the dataspace of the field dataset from file. - hid_t file_dataspace = H5Dget_space(dataset); - - // Create collective io prop list. 
- hid_t collective_plist = H5Pcreate(H5P_DATASET_XFER); - H5Pset_dxpl_mpio(collective_plist, H5FD_MPIO_INDEPENDENT); - - // Iterate over Fabs, select matching hyperslab and write. - hid_t status; - // slab lo index and shape. -#if defined(WARPX_DIM_3D) - hsize_t slab_offsets[3], slab_dims[3]; - int shift[3]; - shift[0] = lo_x; - shift[1] = lo_y; - shift[2] = lo_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - hsize_t slab_offsets[2], slab_dims[2]; - int shift[2]; - shift[0] = lo_x; - shift[1] = lo_z; -#else - hsize_t slab_offsets[1], slab_dims[1]; - int shift[1]; - shift[0] = lo_z; -#endif - hid_t slab_dataspace; - - int write_count = 0; - - std::vector transposed_data; - - for (MFIter mfi(mf); mfi.isValid(); ++mfi) - { - const Box& box = mfi.validbox(); - const int *lo_vec = box.loVect(); - const int *hi_vec = box.hiVect(); - - transposed_data.resize(box.numPts(), 0.0); - - // Set slab offset and shape. - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) - { - AMREX_ASSERT(lo_vec[idim] >= 0); - AMREX_ASSERT(hi_vec[idim] > lo_vec[idim]); - slab_offsets[idim] = lo_vec[idim] - shift[idim]; - slab_dims[idim] = hi_vec[idim] - lo_vec[idim] + 1; - } - - int cnt = 0; - AMREX_D_TERM( - for (int i = lo_vec[0]; i <= hi_vec[0]; ++i), - for (int j = lo_vec[1]; j <= hi_vec[1]; ++j), - for (int k = lo_vec[2]; k <= hi_vec[2]; ++k)) - transposed_data[cnt++] = mf[mfi](IntVect(AMREX_D_DECL(i, j, k)), comp); - - // Create the slab space. - slab_dataspace = H5Screate_simple(AMREX_SPACEDIM, slab_dims, NULL); - - // Select the hyperslab matching this fab. - status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, - slab_offsets, NULL, slab_dims, NULL); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - status >= 0, - "Error on rank " + std::to_string(mpi_rank) - +" could not select hyperslab.\n" - ); - - // Write this pencil. - status = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, slab_dataspace, - file_dataspace, collective_plist, transposed_data.data()); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - status >= 0, - "Error on rank " + std::to_string(mpi_rank) - +" could not write hyperslab." - ); - - H5Sclose(slab_dataspace); - write_count++; - } - - ParallelDescriptor::Barrier(); - - // Close HDF5 resources. - H5Pclose(collective_plist); - H5Sclose(file_dataspace); - H5Dclose(dataset); - H5Fclose(file); - H5Pclose(pa_plist); - } -} -#endif - -bool compare_tlab_uptr (const std::unique_ptr&a, - const std::unique_ptr&b) -{ - return a->m_t_lab < b->m_t_lab; -} - -namespace -{ -void -LorentzTransformZ (MultiFab& data, Real gamma_boost, Real beta_boost) -{ - // Loop over tiles/boxes and in-place convert each slice from boosted - // frame to back-transformed lab frame. -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - for (MFIter mfi(data, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - const Box& tile_box = mfi.tilebox(); - Array4< Real > arr = data[mfi].array(); - // arr(x,y,z,comp) where 0->9 comps are - // Ex Ey Ez Bx By Bz jx jy jz rho - Real clight = PhysConst::c; - ParallelFor(tile_box, - [=] AMREX_GPU_DEVICE (int i, int j, int k) - { - // Transform the transverse E and B fields. Note that ez and bz are not - // changed by the tranform. 
- Real e_lab = 0.0_rt, b_lab = 0.0_rt, j_lab = 0.0_rt, r_lab = 0.0_rt; - e_lab = gamma_boost * (arr(i, j, k, 0) + - beta_boost*clight*arr(i, j, k, 4)); - b_lab = gamma_boost * (arr(i, j, k, 4) + - beta_boost*arr(i, j, k, 0)/clight); - - arr(i, j, k, 0) = e_lab; - arr(i, j, k, 4) = b_lab; - - e_lab = gamma_boost * (arr(i, j, k, 1) - - beta_boost*clight*arr(i, j, k, 3)); - b_lab = gamma_boost * (arr(i, j, k, 3) - - beta_boost*arr(i, j, k, 1)/clight); - - arr(i, j, k, 1) = e_lab; - arr(i, j, k, 3) = b_lab; - - // Transform the charge and current density. Only the z component of j is affected. - const int j_comp_index = 8; - const int r_comp_index = 9; - - j_lab = gamma_boost*(arr(i, j, k, j_comp_index) + - beta_boost*clight*arr(i, j, k, j_comp_index)); - r_lab = gamma_boost*(arr(i, j, k, r_comp_index) + - beta_boost*arr(i, j, k, r_comp_index)/clight); - - arr(i, j, k, j_comp_index) = j_lab; - arr(i, j, k, r_comp_index) = r_lab; - } - ); - } -} -} - -BackTransformedDiagnostic:: -BackTransformedDiagnostic (Real zmin_lab, Real zmax_lab, Real v_window_lab, - Real dt_snapshots_lab, int N_snapshots, - Real dt_slice_snapshots_lab, int N_slice_snapshots, - Real gamma_boost, Real t_boost, Real dt_boost, - int boost_direction, const Geometry& geom, - amrex::RealBox& slice_realbox, - amrex::Real particle_slice_width_lab) - : m_gamma_boost_(gamma_boost), - m_dt_snapshots_lab_(dt_snapshots_lab), - m_dt_boost_(dt_boost), - m_N_snapshots_(N_snapshots), - m_boost_direction_(boost_direction), - m_N_slice_snapshots_(N_slice_snapshots), - m_dt_slice_snapshots_lab_(dt_slice_snapshots_lab), - m_particle_slice_width_lab_(particle_slice_width_lab) -{ - -#ifdef WARPX_DIM_RZ - amrex::Abort(Utils::TextMsg::Err("BackTransformed diagnostics is currently not supported with RZ")); -#endif - WARPX_PROFILE("BackTransformedDiagnostic::BackTransformedDiagnostic"); - - AMREX_ALWAYS_ASSERT(WarpX::do_back_transformed_fields or - WarpX::do_back_transformed_particles); - - m_inv_gamma_boost_ = 1.0_rt / m_gamma_boost_; - m_beta_boost_ = std::sqrt(1.0_rt - m_inv_gamma_boost_*m_inv_gamma_boost_); - m_inv_beta_boost_ = 1.0_rt / m_beta_boost_; - - m_dz_lab_ = PhysConst::c * m_dt_boost_ * m_inv_beta_boost_ * m_inv_gamma_boost_; - m_inv_dz_lab_ = 1.0_rt / m_dz_lab_; - int Nz_lab = static_cast((zmax_lab - zmin_lab) * m_inv_dz_lab_); -#if (AMREX_SPACEDIM >= 2) - int Nx_lab = geom.Domain().length(0); -#endif -#if defined(WARPX_DIM_3D) - int Ny_lab = geom.Domain().length(1); - IntVect prob_ncells_lab = {Nx_lab, Ny_lab, Nz_lab}; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - // Ny_lab = 1; - IntVect prob_ncells_lab = {Nx_lab, Nz_lab}; -#else - // Nx_lab = 1; - // Ny_lab = 1; - IntVect prob_ncells_lab(Nz_lab); -#endif - writeMetaData(); - - // Query fields to dump - std::vector user_fields_to_dump; - ParmParse pp_warpx("warpx"); - bool do_user_fields = false; - do_user_fields = pp_warpx.queryarr("back_transformed_diag_fields", user_fields_to_dump); - if (utils::parser::queryWithParser(pp_warpx, "buffer_size", m_num_buffer_)) { - if (m_max_box_size_ < m_num_buffer_) m_max_box_size_ = m_num_buffer_; - } - // If user specifies fields to dump, overwrite ncomp_to_dump, - // map_actual_fields_to_dump and mesh_field_names. 
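For reference, the deleted LorentzTransformZ kernel above applies the standard field transformation for a boost along z, taking the primed (boosted-frame) fields back to the lab frame. The longitudinal components E_z and B_z are unchanged, while the transverse pairs mix as

    E_x^{lab} = \gamma ( E'_x + \beta c B'_y )        B_y^{lab} = \gamma ( B'_y + \beta E'_x / c )
    E_y^{lab} = \gamma ( E'_y - \beta c B'_x )        B_x^{lab} = \gamma ( B'_x - \beta E'_y / c )

with \gamma the boost Lorentz factor and \beta = sqrt(1 - 1/\gamma^2). This is the mapping applied in place to components 0/4 (E_x, B_y) and 1/3 (E_y, B_x) of the cell-centered array in the kernel above.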
- for (int i = 0; i < 10; ++i) - map_actual_fields_to_dump.push_back(i); - - if (do_user_fields){ - m_ncomp_to_dump = static_cast(user_fields_to_dump.size()); - map_actual_fields_to_dump.resize(m_ncomp_to_dump); - m_mesh_field_names.resize(m_ncomp_to_dump); - for (int i=0; i(t_lab, t_boost, - m_inv_gamma_boost_, m_inv_beta_boost_, m_dz_lab_, - prob_domain_lab, prob_ncells_lab, - m_ncomp_to_dump, m_mesh_field_names, prob_domain_lab, - diag_box, i, m_max_box_size_, m_num_buffer_); - } - - - for (int i = 0; i < N_slice_snapshots; ++i) { - - - // To construct LabFrameSlice(), the location of lo() and hi() of the - // reduced diag is computed using the user-defined values of the - // reduced diag (1D, 2D, or 3D). For visualization of the diagnostics, - // the number of cells in each dimension is required and - // is computed below for the reduced back-transformed lab-frame diag, - // similar to the full-diag. - const amrex::Real* current_slice_lo = slice_realbox.lo(); - const amrex::Real* current_slice_hi = slice_realbox.hi(); - - const amrex::Real zmin_slice_lab = current_slice_lo[WARPX_ZINDEX] / - ( (1._rt+m_beta_boost_)*m_gamma_boost_); - const amrex::Real zmax_slice_lab = current_slice_hi[WARPX_ZINDEX] / - ( (1._rt+m_beta_boost_)*m_gamma_boost_); - auto Nz_slice_lab = static_cast( - (zmax_slice_lab - zmin_slice_lab) * m_inv_dz_lab_); -#if (AMREX_SPACEDIM >= 2) - auto Nx_slice_lab = static_cast( - (current_slice_hi[0] - current_slice_lo[0] ) / - geom.CellSize(0)); - if (Nx_slice_lab == 0 ) Nx_slice_lab = 1; - // if the x-dimension is reduced, increase total_cells by 1 - // to be consistent with the number of cells created for the output. - if (Nx_lab != Nx_slice_lab) Nx_slice_lab++; -#endif -#if defined(WARPX_DIM_3D) - auto Ny_slice_lab = static_cast( - (current_slice_hi[1] - current_slice_lo[1]) / - geom.CellSize(1)); - if (Ny_slice_lab == 0 ) Ny_slice_lab = 1; - // if the y-dimension is reduced, increase total_cells by 1 - // to be consistent with the number of cells created for the output. 
- if (Ny_lab != Ny_slice_lab) Ny_slice_lab++; - amrex::IntVect slice_ncells_lab = {Nx_slice_lab, Ny_slice_lab, Nz_slice_lab}; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::IntVect slice_ncells_lab = {Nx_slice_lab, Nz_slice_lab}; -#else - amrex::IntVect slice_ncells_lab(Nz_slice_lab); -#endif - - IntVect slice_lo(AMREX_D_DECL(0,0,0)); - IntVect slice_hi(AMREX_D_DECL(1,1,1)); - - for ( int i_dim=0; i_dim( - (slice_realbox.lo(i_dim) - geom.ProbLo(i_dim) - - 0.5*geom.CellSize(i_dim))/geom.CellSize(i_dim)); - slice_hi[i_dim] = static_cast( - (slice_realbox.hi(i_dim) - geom.ProbLo(i_dim) - - 0.5*geom.CellSize(i_dim))/geom.CellSize(i_dim)); - if (slice_lo[i_dim] == slice_hi[i_dim]) - { - slice_hi[i_dim] = slice_lo[i_dim] + 1; - } - } - Box stmp(slice_lo,slice_hi); - Box slicediag_box = stmp; - - // steps + initial box shift - Real const zmax_boost = geom.ProbHi(AMREX_SPACEDIM-1); - Real const t_slice_lab = - i * m_dt_slice_snapshots_lab_ + - m_gamma_boost_ * m_beta_boost_ * zmax_boost/PhysConst::c; - - RealBox prob_domain_lab = geom.ProbDomain(); - // replace z bounds by lab-frame coordinates - prob_domain_lab.setLo(WARPX_ZINDEX, zmin_lab + v_window_lab * t_slice_lab); - prob_domain_lab.setHi(WARPX_ZINDEX, zmax_lab + v_window_lab * t_slice_lab); - RealBox slice_dom_lab = slice_realbox; - // replace z bounds of slice in lab-frame coordinates - // note : x and y bounds are the same for lab and boosted frames - // initial lab slice extent // - slice_dom_lab.setLo(WARPX_ZINDEX, zmin_slice_lab + v_window_lab * t_slice_lab ); - slice_dom_lab.setHi(WARPX_ZINDEX, zmax_slice_lab + - v_window_lab * t_slice_lab ); - - // construct labframeslice - m_LabFrameDiags_[i+N_snapshots] = std::make_unique(t_slice_lab, t_boost, - m_inv_gamma_boost_, m_inv_beta_boost_, m_dz_lab_, - prob_domain_lab, slice_ncells_lab, - m_ncomp_to_dump, m_mesh_field_names, slice_dom_lab, - slicediag_box, i, m_particle_slice_width_lab_, - m_max_box_size_, m_num_buffer_); - } - // sort diags based on their respective t_lab - std::stable_sort(m_LabFrameDiags_.begin(), m_LabFrameDiags_.end(), compare_tlab_uptr); - - AMREX_ALWAYS_ASSERT(m_max_box_size_ >= m_num_buffer_); -} - -void BackTransformedDiagnostic::Flush (const Geometry& /*geom*/) -{ - WARPX_PROFILE("BackTransformedDiagnostic::Flush"); - - VisMF::Header::Version current_version = VisMF::GetHeaderVersion(); - VisMF::SetHeaderVersion(amrex::VisMF::Header::NoFabHeader_v1); - - const auto & mypc = WarpX::GetInstance().GetPartContainer(); - const std::vector species_names = mypc.GetSpeciesNames(); - - // Loop over BFD snapshots - for (auto& lf_diags : m_LabFrameDiags_) { - - Real zmin_lab = lf_diags->m_prob_domain_lab_.lo(WARPX_ZINDEX); - auto i_lab = static_cast( - (lf_diags->m_current_z_lab - zmin_lab) / m_dz_lab_); - - if (lf_diags->m_buff_counter_ != 0) { - if (WarpX::do_back_transformed_fields) { - const BoxArray& ba = lf_diags->m_data_buffer_->boxArray(); - const int hi = ba[0].bigEnd(m_boost_direction_); - const int lo = hi - lf_diags->m_buff_counter_ + 1; - - //Box buff_box = geom.Domain(); - Box buff_box = lf_diags->m_buff_box_; - buff_box.setSmall(m_boost_direction_, lo); - buff_box.setBig(m_boost_direction_, hi); - - BoxArray buff_ba(buff_box); - buff_ba.maxSize(m_max_box_size_); - DistributionMapping buff_dm(buff_ba); - - const int ncomp = lf_diags->m_data_buffer_->nComp(); - - MultiFab tmp(buff_ba, buff_dm, ncomp, 0); - tmp.setVal(0.0); - - ablastr::utils::communication::ParallelCopy(tmp, *lf_diags->m_data_buffer_, 0, 0, ncomp, - IntVect(AMREX_D_DECL(0, 0, 
0)), - IntVect(AMREX_D_DECL(0, 0, 0)), - WarpX::do_single_precision_comms); - -#ifdef WARPX_USE_HDF5 - for (int comp = 0; comp < ncomp; ++comp) { - output_write_field(lf_diags->m_file_name, - m_mesh_field_names[comp], tmp, comp, - lbound(buff_box).x, lbound(buff_box).y, - lbound(buff_box).z); - } -#else - std::stringstream ss; - ss << lf_diags->m_file_name << "/Level_0/" - << Concatenate("buffer", i_lab, 5); - VisMF::Write(tmp, ss.str()); -#endif - } - - if (WarpX::do_back_transformed_particles) { - // Loop over species to be dumped to BFD - for (int j = 0; j < mypc.nSpeciesBackTransformedDiagnostics(); ++j) { - // Get species name - const std::string& species_name = - species_names[mypc.mapSpeciesBackTransformedDiagnostics(j)]; -#ifdef WARPX_USE_HDF5 - // Dump species data - writeParticleDataHDF5(lf_diags->m_particles_buffer_[j], - lf_diags->m_file_name, - species_name); -#else - std::stringstream part_ss; - part_ss << lf_diags->m_file_name + "/" + - species_name + "/"; - // Dump species data - writeParticleData(lf_diags->m_particles_buffer_[j], - part_ss.str(), i_lab); -#endif - } - lf_diags->m_particles_buffer_.clear(); - } - lf_diags->m_buff_counter_ = 0; - } - } - - VisMF::SetHeaderVersion(current_version); -} - - - - - -void -BackTransformedDiagnostic:: -writeLabFrameData (const MultiFab* cell_centered_data, - const MultiParticleContainer& mypc, - const Geometry& geom, const Real t_boost, const Real dt) { - - WARPX_PROFILE("BackTransformedDiagnostic::writeLabFrameData"); - VisMF::Header::Version current_version = VisMF::GetHeaderVersion(); - VisMF::SetHeaderVersion(amrex::VisMF::Header::NoFabHeader_v1); - - const RealBox& domain_z_boost = geom.ProbDomain(); - const Real zlo_boost = domain_z_boost.lo(m_boost_direction_); - const Real zhi_boost = domain_z_boost.hi(m_boost_direction_); - - const std::vector species_names = mypc.GetSpeciesNames(); - Real prev_t_lab = -dt; - std::unique_ptr tmp_slice_ptr; - std::unique_ptr slice; - amrex::Vector tmp_particle_buffer; - - // Loop over snapshots - for (auto& lf_diags : m_LabFrameDiags_) { - // Get updated z position of snapshot - const Real old_z_boost = lf_diags->m_current_z_boost; - lf_diags->updateCurrentZPositions(t_boost, - m_inv_gamma_boost_, - m_inv_beta_boost_); - - Real diag_zmin_lab = lf_diags->m_diag_domain_lab_.lo(WARPX_ZINDEX); - Real diag_zmax_lab = lf_diags->m_diag_domain_lab_.hi(WARPX_ZINDEX); - - if ( ( lf_diags->m_current_z_boost < zlo_boost) or - ( lf_diags->m_current_z_boost > zhi_boost) or - ( lf_diags->m_current_z_lab < diag_zmin_lab) or - ( lf_diags->m_current_z_lab > diag_zmax_lab) ) continue; - - // Get z index of data_buffer_ (i.e. in the lab frame) where - // simulation domain (t', [zmin',zmax']), back-transformed to lab - // frame, intersects with snapshot. - Real dom_zmin_lab = lf_diags->m_prob_domain_lab_.lo(WARPX_ZINDEX); - auto i_lab = static_cast( - ( lf_diags->m_current_z_lab - dom_zmin_lab) / m_dz_lab_); - // If buffer of snapshot i is empty... - if ( lf_diags->m_buff_counter_ == 0) { - // ... reset fields buffer data_buffer_ - if (WarpX::do_back_transformed_fields) { - lf_diags->m_buff_box_.setSmall(m_boost_direction_, - i_lab - m_num_buffer_ + 1); - lf_diags->m_buff_box_.setBig(m_boost_direction_, i_lab); - - BoxArray buff_ba(lf_diags->m_buff_box_); - buff_ba.maxSize(m_max_box_size_); - DistributionMapping buff_dm(buff_ba); - lf_diags->m_data_buffer_ = std::make_unique(buff_ba, - buff_dm, m_ncomp_to_dump, 0); - lf_diags->m_data_buffer_->setVal(0.0); - } - // ... 
reset particle buffer particles_buffer_[i] - if (WarpX::do_back_transformed_particles) - lf_diags->m_particles_buffer_.resize( - mypc.nSpeciesBackTransformedDiagnostics()); - } - - if (WarpX::do_back_transformed_fields) { - const int ncomp = cell_centered_data->nComp(); - const int start_comp = 0; - const bool interpolate = true; - // slice containing back-transformed data is generated only if t_lab != prev_t_lab and is re-used if multiple diags have the same z_lab,t_lab. - if (lf_diags->m_t_lab != prev_t_lab ) { - if (slice) - { - slice = nullptr; - } - slice = amrex::get_slice_data(m_boost_direction_, - lf_diags->m_current_z_boost, - *cell_centered_data, geom, - start_comp, ncomp, - interpolate); - // Back-transform data to the lab-frame - LorentzTransformZ(*slice, m_gamma_boost_, m_beta_boost_); - } - // Create a 2D box for the slice in the boosted frame - Real dx = geom.CellSize(m_boost_direction_); - auto i_boost = static_cast( - ( lf_diags->m_current_z_boost - - geom.ProbLo(m_boost_direction_))/dx); - //Box slice_box = geom.Domain(); - Box slice_box = lf_diags->m_buff_box_; - slice_box.setSmall(m_boost_direction_, i_boost); - slice_box.setBig(m_boost_direction_, i_boost); - - // Make it a BoxArray slice_ba - BoxArray slice_ba(slice_box); - slice_ba.maxSize(m_max_box_size_); - tmp_slice_ptr = std::make_unique(slice_ba, - lf_diags->m_data_buffer_->DistributionMap(), - ncomp, 0); - tmp_slice_ptr->setVal(0.0); - - // slice is re-used if the t_lab of a diag is equal to - // that of the previous diag. - // Back-transformed data is copied from slice - // which has the dmap of the domain to - // tmp_slice_ptr which has the dmap of the - // data_buffer that stores the back-transformed data. - ablastr::utils::communication::ParallelCopy(*tmp_slice_ptr, *slice, 0, 0, ncomp, - IntVect(AMREX_D_DECL(0, 0, 0)), - IntVect(AMREX_D_DECL(0, 0, 0)), - WarpX::do_single_precision_comms); - lf_diags->AddDataToBuffer(*tmp_slice_ptr, i_lab, - map_actual_fields_to_dump); - tmp_slice_ptr = nullptr; - } - - if (WarpX::do_back_transformed_particles) { - - if (lf_diags->m_t_lab != prev_t_lab ) { - if (!tmp_particle_buffer.empty()) - { - tmp_particle_buffer.clear(); - tmp_particle_buffer.shrink_to_fit(); - } - tmp_particle_buffer.resize(mypc.nSpeciesBackTransformedDiagnostics()); - mypc.GetLabFrameData(lf_diags->m_file_name, i_lab, - m_boost_direction_, old_z_boost, - lf_diags->m_current_z_boost, - t_boost, lf_diags->m_t_lab, dt, - tmp_particle_buffer); - } - lf_diags->AddPartDataToParticleBuffer(tmp_particle_buffer, - mypc.nSpeciesBackTransformedDiagnostics()); - } - - ++lf_diags->m_buff_counter_; - prev_t_lab = lf_diags->m_t_lab; - // If buffer full, write to disk. 
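As a short note on the snapshot positions used at the top of writeLabFrameData above: a back-transformed snapshot records the hypersurface of constant lab time t_lab, and the z at which that hypersurface sits when the boosted-frame clock reads t_boost follows directly from the Lorentz transformation between the two frames,

    z_boost = ( t_lab / \gamma - t_boost ) c / \beta
    z_lab   = ( t_lab - t_boost / \gamma ) c / \beta

obtained by solving t_lab = \gamma ( t_boost + \beta z_boost / c ) for z_boost and then mapping that point to the lab frame with z_lab = \gamma ( z_boost + \beta c t_boost ). These are the two quantities refreshed by updateCurrentZPositions and compared against the domain and snapshot bounds in the loop above.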
- if (lf_diags->m_buff_counter_ == m_num_buffer_) { - - if (WarpX::do_back_transformed_fields) { -#ifdef WARPX_USE_HDF5 - - Box buff_box = lf_diags->m_buff_box_; - for (int comp = 0; comp < lf_diags->m_data_buffer_->nComp(); ++comp) - output_write_field(lf_diags->m_file_name, - m_mesh_field_names[comp], - *lf_diags->m_data_buffer_, comp, - lbound(buff_box).x, lbound(buff_box).y, - lbound(buff_box).z); -#else - std::stringstream mesh_ss; - mesh_ss << lf_diags->m_file_name << "/Level_0/" << - Concatenate("buffer", i_lab, 5); - VisMF::Write( (*lf_diags->m_data_buffer_), mesh_ss.str()); -#endif - } - - if (WarpX::do_back_transformed_particles) { - // Loop over species to be dumped to BFD - for (int j = 0; j < mypc.nSpeciesBackTransformedDiagnostics(); ++j) { - // Get species name - const std::string& species_name = species_names[ - mypc.mapSpeciesBackTransformedDiagnostics(j)]; -#ifdef WARPX_USE_HDF5 - // Write data to disk (HDF5) - writeParticleDataHDF5(lf_diags->m_particles_buffer_[j], - lf_diags->m_file_name, - species_name); -#else - std::stringstream part_ss; - - part_ss << lf_diags->m_file_name + "/" + - species_name + "/"; - - // Write data to disk (custom) - writeParticleData(lf_diags->m_particles_buffer_[j], - part_ss.str(), i_lab); -#endif - } - lf_diags->m_particles_buffer_.clear(); - } - lf_diags->m_buff_counter_ = 0; - } - } - - VisMF::SetHeaderVersion(current_version); -} - -#ifdef WARPX_USE_HDF5 -void -BackTransformedDiagnostic:: -writeParticleDataHDF5 (const WarpXParticleContainer::DiagnosticParticleData& pdata, - const std::string& name, const std::string& species_name) -{ - auto np = pdata.GetRealData(DiagIdx::w).size(); - - Vector particle_counts(ParallelDescriptor::NProcs(), 0); - Vector particle_offsets(ParallelDescriptor::NProcs(), 0); - - ParallelAllGather::AllGather(np, particle_counts.data(), - ParallelContext::CommunicatorAll()); - - long total_np = 0; - for (int i = 0; i < ParallelDescriptor::NProcs(); ++i) { - particle_offsets[i] = total_np; - total_np += particle_counts[i]; - } - - if (total_np == 0) return; - - long old_np = 0; - if (ParallelDescriptor::IOProcessor()) - { - for (int k = 0; k < static_cast(particle_field_names.size()); ++k) - { - std::string field_path = species_name + "/" + particle_field_names[k]; - old_np = output_resize_particle_field(name, field_path, total_np); - } - } - - // Note, this has the effect of an MPI Barrier between the above resize operation - // and the below write. 
- ParallelDescriptor::ReduceLongMax(old_np); - - // Write data here - for (int k = 0; k < static_cast(particle_field_names.size()); ++k) - { - std::string field_path = species_name + "/" + particle_field_names[k]; - output_write_particle_field(name, field_path, - pdata.GetRealData(k).data(), - particle_counts[ParallelDescriptor::MyProc()], - particle_offsets[ParallelDescriptor::MyProc()] - + old_np); - } -} -#endif - -void -BackTransformedDiagnostic:: -writeParticleData (const WarpXParticleContainer::DiagnosticParticleData& pdata, - const std::string& name, const int i_lab) -{ - WARPX_PROFILE("BackTransformedDiagnostic::writeParticleData"); - - std::string field_name; - std::ofstream ofs; - - const int MyProc = ParallelDescriptor::MyProc(); - auto np = pdata.GetRealData(DiagIdx::w).size(); - if (np == 0) return; - - field_name = name + Concatenate("w_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::w).data(), np, ofs); - ofs.close(); - - field_name = name + Concatenate("x_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::x).data(), np, ofs); - ofs.close(); - - field_name = name + Concatenate("y_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::y).data(), np, ofs); - ofs.close(); - - field_name = name + Concatenate("z_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::z).data(), np, ofs); - ofs.close(); - - field_name = name + Concatenate("ux_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::ux).data(), np, ofs); - ofs.close(); - - field_name = name + Concatenate("uy_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::uy).data(), np, ofs); - ofs.close(); - - field_name = name + Concatenate("uz_", i_lab, 5) + "_" + std::to_string(MyProc); - ofs.open(field_name.c_str(), std::ios::out|std::ios::binary); - writeData(pdata.GetRealData(DiagIdx::uz).data(), np, ofs); - ofs.close(); -} - -void -BackTransformedDiagnostic:: -writeMetaData () -{ - WARPX_PROFILE("BackTransformedDiagnostic::writeMetaData"); - - if (ParallelDescriptor::IOProcessor()) { - const std::string fullpath = WarpX::lab_data_directory + "/snapshots"; - if (!UtilCreateDirectory(fullpath, permission_flag_rwxrxrx)) - CreateDirectoryFailed(fullpath); - - VisMF::IO_Buffer io_buffer(VisMF::IO_Buffer_Size); - std::ofstream HeaderFile; - HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size()); - std::string HeaderFileName(WarpX::lab_data_directory + "/snapshots/Header"); - HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out | - std::ofstream::trunc | - std::ofstream::binary); - if(!HeaderFile.good()) - FileOpenFailed(HeaderFileName); - - HeaderFile.precision(17); - - HeaderFile << m_N_snapshots_ << "\n"; - HeaderFile << m_dt_snapshots_lab_ << "\n"; - HeaderFile << m_gamma_boost_ << "\n"; - HeaderFile << m_beta_boost_ << "\n"; - - if (m_N_slice_snapshots_ > 0) { - const std::string fullpath_slice = WarpX::lab_data_directory + "/slices"; - if (!UtilCreateDirectory(fullpath_slice, permission_flag_rwxrxrx)) - CreateDirectoryFailed(fullpath_slice); 
- - VisMF::IO_Buffer io_buffer_slice(VisMF::IO_Buffer_Size); - std::ofstream HeaderFile_slice; - HeaderFile_slice.rdbuf()->pubsetbuf(io_buffer_slice.dataPtr(), - io_buffer_slice.size()); - std::string HeaderFileName_slice(WarpX::lab_data_directory+ - "/slices/Header"); - HeaderFile_slice.open(HeaderFileName_slice.c_str(), - std::ofstream::out | - std::ofstream::trunc | - std::ofstream::binary); - - if (!HeaderFile_slice.good()) - FileOpenFailed(HeaderFileName_slice); - - HeaderFile_slice.precision(17); - - HeaderFile_slice << m_N_slice_snapshots_ << "\n"; - HeaderFile_slice << m_dt_slice_snapshots_lab_ << "\n"; - HeaderFile_slice << m_gamma_boost_ << "\n"; - HeaderFile_slice << m_beta_boost_ << "\n"; - - } - - } - - -} - -LabFrameSnapShot:: -LabFrameSnapShot (Real t_lab_in, Real t_boost, Real inv_gamma_boost_in, - Real inv_beta_boost_in, Real dz_lab_in, RealBox prob_domain_lab, - IntVect prob_ncells_lab, int ncomp_to_dump, - std::vector mesh_field_names, - amrex::RealBox diag_domain_lab, Box diag_box, int file_num_in, - const int max_box_size, const int num_buffer) -{ - m_t_lab = t_lab_in; - m_dz_lab_ = dz_lab_in; - m_inv_gamma_boost_ = inv_gamma_boost_in; - m_inv_beta_boost_ = inv_beta_boost_in; - m_prob_domain_lab_ = prob_domain_lab; - m_prob_ncells_lab_ = prob_ncells_lab; - m_diag_domain_lab_ = diag_domain_lab; - m_buff_box_ = diag_box; - m_ncomp_to_dump_ = ncomp_to_dump; - m_mesh_field_names_ = std::move(mesh_field_names); - m_file_num = file_num_in; - m_current_z_lab = 0.0; - m_current_z_boost = 0.0; - updateCurrentZPositions(t_boost, m_inv_gamma_boost_, m_inv_beta_boost_); - m_file_name = Concatenate(WarpX::lab_data_directory + "/snapshots/snapshot", - m_file_num, 5); - createLabFrameDirectories(); - m_buff_counter_ = 0; - m_max_box_size = max_box_size; - m_num_buffer_ = num_buffer; - if (WarpX::do_back_transformed_fields) m_data_buffer_.reset(nullptr); -} - -void -LabFrameDiag:: -updateCurrentZPositions(Real t_boost, Real inv_gamma, Real inv_beta) -{ - m_current_z_boost = (m_t_lab*inv_gamma - t_boost)*PhysConst::c*inv_beta; - m_current_z_lab = (m_t_lab - t_boost*inv_gamma)*PhysConst::c*inv_beta; -} - -void -LabFrameDiag:: -createLabFrameDirectories() { -#ifdef WARPX_USE_HDF5 - if (ParallelDescriptor::IOProcessor()) - { - output_create(m_file_name); - } - - ParallelDescriptor::Barrier(); - - if (ParallelDescriptor::IOProcessor()) - { - if (WarpX::do_back_transformed_fields) - { - const auto lo = lbound(m_buff_box_); - for (int comp = 0; comp < m_ncomp_to_dump_; ++comp) { - output_create_field(m_file_name, m_mesh_field_names_[comp], -#if ( AMREX_SPACEDIM >= 2 ) - m_prob_ncells_lab_[0], -#else - 1, -#endif -#if defined(WARPX_DIM_3D) - m_prob_ncells_lab_[1], -#else - 1, -#endif - m_prob_ncells_lab_[WARPX_ZINDEX]+1); - } - } - } - - ParallelDescriptor::Barrier(); - - if (WarpX::do_back_transformed_particles){ - const auto & mypc = WarpX::GetInstance().GetPartContainer(); - const std::vector species_names = mypc.GetSpeciesNames(); - // Loop over species to be dumped to BFD - for (int j = 0; j < mypc.nSpeciesBackTransformedDiagnostics(); ++j) - { - // Loop over species to be dumped to BFD - std::string species_name = - species_names[mypc.mapSpeciesBackTransformedDiagnostics(j)]; - output_create_species_group(m_file_name, species_name); - for (int k = 0; k < static_cast(particle_field_names.size()); ++k) - { - std::string field_path = species_name + "/" + particle_field_names[k]; - output_create_particle_field(m_file_name, field_path); - } - } - } -#else - if 
(ParallelDescriptor::IOProcessor()) { - - if (!UtilCreateDirectory(m_file_name, permission_flag_rwxrxrx)) - CreateDirectoryFailed(m_file_name); - - const int nlevels = 1; - for(int i = 0; i < nlevels; ++i) { - const std::string &fullpath = LevelFullPath(i, m_file_name); - if (!UtilCreateDirectory(fullpath, permission_flag_rwxrxrx)) - CreateDirectoryFailed(fullpath); - } - - const auto & mypc = WarpX::GetInstance().GetPartContainer(); - const std::vector species_names = mypc.GetSpeciesNames(); - - const std::string particles_prefix = "particle"; - // Loop over species to be dumped to BFD - for(int i = 0; i < mypc.nSpeciesBackTransformedDiagnostics(); ++i) { - // Get species name - const std::string& species_name = - species_names[mypc.mapSpeciesBackTransformedDiagnostics(i)]; - const std::string fullpath = m_file_name + "/" + species_name; - if (!UtilCreateDirectory(fullpath, permission_flag_rwxrxrx)) - CreateDirectoryFailed(fullpath); - } - } -#endif - ParallelDescriptor::Barrier(); - - writeLabFrameHeader(); -} - -void -LabFrameDiag:: -writeLabFrameHeader() { -#ifndef WARPX_USE_HDF5 - if (ParallelDescriptor::IOProcessor()) { - VisMF::IO_Buffer io_buffer(VisMF::IO_Buffer_Size); - std::ofstream HeaderFile; - HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size()); - std::string HeaderFileName(m_file_name + "/Header"); - HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out | - std::ofstream::trunc | - std::ofstream::binary); - if(!HeaderFile.good()) - FileOpenFailed(HeaderFileName); - - HeaderFile.precision(17); - - HeaderFile << m_t_lab << "\n"; - // Write domain number of cells - HeaderFile << m_prob_ncells_lab_[0] << ' ' -#if defined(WARPX_DIM_3D) - << m_prob_ncells_lab_[1] << ' ' -#endif - << m_prob_ncells_lab_[WARPX_ZINDEX] <<'\n'; - // Write domain physical boundaries - // domain lower bound - HeaderFile << m_diag_domain_lab_.lo(0) << ' ' -#if defined(WARPX_DIM_3D) - << m_diag_domain_lab_.lo(1) << ' ' -#endif - << m_diag_domain_lab_.lo(WARPX_ZINDEX) <<'\n'; - // domain higher bound - HeaderFile << m_diag_domain_lab_.hi(0) << ' ' -#if defined(WARPX_DIM_3D) - << m_diag_domain_lab_.hi(1) << ' ' -#endif - << m_diag_domain_lab_.hi(WARPX_ZINDEX) <<'\n'; - // List of fields dumped to file - for (int i=0; i mesh_field_names, - RealBox diag_domain_lab, Box diag_box, int file_num_in, - amrex::Real particle_slice_dx_lab, const int max_box_size, - const int num_buffer) -{ - m_t_lab = t_lab_in; - m_dz_lab_ = dz_lab_in; - m_inv_gamma_boost_ = inv_gamma_boost_in; - m_inv_beta_boost_ = inv_beta_boost_in; - m_prob_domain_lab_ = prob_domain_lab; - m_prob_ncells_lab_ = prob_ncells_lab; - m_diag_domain_lab_ = diag_domain_lab; - m_buff_box_ = diag_box; - m_ncomp_to_dump_ = ncomp_to_dump; - m_mesh_field_names_ = std::move(mesh_field_names); - m_file_num = file_num_in; - m_current_z_lab = 0.0; - m_current_z_boost = 0.0; - updateCurrentZPositions(t_boost, m_inv_gamma_boost_, m_inv_beta_boost_); - m_file_name = Concatenate(WarpX::lab_data_directory+"/slices/slice",m_file_num,5); - createLabFrameDirectories(); - m_buff_counter_ = 0; - m_particle_slice_dx_lab_ = particle_slice_dx_lab; - m_max_box_size = max_box_size; - m_num_buffer_ = num_buffer; - - if (WarpX::do_back_transformed_fields) m_data_buffer_.reset(nullptr); -} - -void -LabFrameSnapShot:: -AddDataToBuffer( MultiFab& tmp, int k_lab, - amrex::Vector const& map_actual_fields_to_dump) -{ - const int ncomp_to_dump = static_cast(map_actual_fields_to_dump.size()); - MultiFab& buf = *m_data_buffer_; -#ifdef AMREX_USE_GPU - Gpu::DeviceVector 
d_map_actual_fields_to_dump(ncomp_to_dump); - Gpu::copyAsync(Gpu::hostToDevice, - map_actual_fields_to_dump.begin(), map_actual_fields_to_dump.end(), - d_map_actual_fields_to_dump.begin()); - Gpu::synchronize(); - int const* field_map_ptr = d_map_actual_fields_to_dump.dataPtr(); -#else - int const* field_map_ptr = map_actual_fields_to_dump.dataPtr(); -#endif - for (MFIter mfi(tmp, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - Array4 tmp_arr = tmp[mfi].array(); - Array4 buf_arr = buf[mfi].array(); - // For 3D runs, tmp is a 2D (x,y) multifab that contains only - // slice to write to file - const Box& bx = mfi.tilebox(); - ParallelFor(bx, ncomp_to_dump, - [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) - { - const int icomp = field_map_ptr[n]; -#if defined(WARPX_DIM_3D) - buf_arr(i,j,k_lab,n) = tmp_arr(i,j,k,icomp); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - buf_arr(i,k_lab,k,n) = tmp_arr(i,j,k,icomp); -#else - buf_arr(k_lab,j,k,n) = tmp_arr(i,j,k,icomp); -#endif - } - ); - } -} - - -void -LabFrameSlice:: -AddDataToBuffer( MultiFab& tmp, int k_lab, - amrex::Vector const& map_actual_fields_to_dump) -{ - const int ncomp_to_dump = static_cast(map_actual_fields_to_dump.size()); - MultiFab& buf = *m_data_buffer_; -#ifdef AMREX_USE_GPU - Gpu::DeviceVector d_map_actual_fields_to_dump(ncomp_to_dump); - Gpu::copyAsync(Gpu::hostToDevice, - map_actual_fields_to_dump.begin(), map_actual_fields_to_dump.end(), - d_map_actual_fields_to_dump.begin()); - Gpu::synchronize(); - int const* field_map_ptr = d_map_actual_fields_to_dump.dataPtr(); -#else - int const* field_map_ptr = map_actual_fields_to_dump.dataPtr(); -#endif - for (MFIter mfi(tmp, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const Box& bx_bf = mfi.tilebox(); - Array4 tmp_arr = tmp[mfi].array(); - Array4 buf_arr = buf[mfi].array(); - ParallelFor(bx_bf, ncomp_to_dump, - [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) - { - const int icomp = field_map_ptr[n]; -#if defined(WARPX_DIM_3D) - buf_arr(i,j,k_lab,n) = tmp_arr(i,j,k,icomp); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - buf_arr(i,k_lab,k,n) = tmp_arr(i,j,k,icomp); -#else - buf_arr(k_lab,j,k,n) = tmp_arr(i,j,k,icomp); -#endif - }); - } - -} - - -void -LabFrameSnapShot:: -AddPartDataToParticleBuffer( - Vector const& tmp_particle_buffer, - int nspeciesBoostedFrame) { - for (int isp = 0; isp < nspeciesBoostedFrame; ++isp) { - auto np = static_cast(tmp_particle_buffer[isp].GetRealData(DiagIdx::w).size()); - if (np == 0) continue; - - // allocate size of particle buffer array to np - // This is a growing array. 
Each time we add np elements - // to the existing array which has size = init_size - const int init_size = static_cast(m_particles_buffer_[isp].GetRealData(DiagIdx::w).size()); - const int total_size = init_size + np; - m_particles_buffer_[isp].resize(total_size); - - // Data pointers to particle attributes // - ParticleReal* const AMREX_RESTRICT wp_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::w).data(); - ParticleReal* const AMREX_RESTRICT x_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::x).data(); - ParticleReal* const AMREX_RESTRICT y_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::y).data(); - ParticleReal* const AMREX_RESTRICT z_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::z).data(); - ParticleReal* const AMREX_RESTRICT ux_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::ux).data(); - ParticleReal* const AMREX_RESTRICT uy_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::uy).data(); - ParticleReal* const AMREX_RESTRICT uz_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::uz).data(); - - ParticleReal const* const AMREX_RESTRICT wp_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::w).data(); - ParticleReal const* const AMREX_RESTRICT x_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::x).data(); - ParticleReal const* const AMREX_RESTRICT y_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::y).data(); - ParticleReal const* const AMREX_RESTRICT z_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::z).data(); - ParticleReal const* const AMREX_RESTRICT ux_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::ux).data(); - ParticleReal const* const AMREX_RESTRICT uy_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::uy).data(); - ParticleReal const* const AMREX_RESTRICT uz_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::uz).data(); - - // copy all the particles from tmp to buffer - amrex::ParallelFor(np, - [=] AMREX_GPU_DEVICE(int i) - { - wp_buff[init_size + i] = wp_temp[i]; - x_buff[init_size + i] = x_temp[i]; - y_buff[init_size + i] = y_temp[i]; - z_buff[init_size + i] = z_temp[i]; - ux_buff[init_size + i] = ux_temp[i]; - uy_buff[init_size + i] = uy_temp[i]; - uz_buff[init_size + i] = uz_temp[i]; - }); - } -} - -void -LabFrameSlice:: -AddPartDataToParticleBuffer( - Vector const& tmp_particle_buffer, - int nSpeciesBackTransformedDiagnostics) { - - - for (int isp = 0; isp < nSpeciesBackTransformedDiagnostics; ++isp) { - auto np = tmp_particle_buffer[isp].GetRealData(DiagIdx::w).size(); - - if (np == 0) continue; - - ParticleReal const* const AMREX_RESTRICT wp_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::w).data(); - ParticleReal const* const AMREX_RESTRICT x_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::x).data(); - ParticleReal const* const AMREX_RESTRICT y_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::y).data(); - ParticleReal const* const AMREX_RESTRICT z_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::z).data(); - ParticleReal const* const AMREX_RESTRICT ux_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::ux).data(); - ParticleReal const* const AMREX_RESTRICT uy_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::uy).data(); - ParticleReal const* const AMREX_RESTRICT uz_temp = - tmp_particle_buffer[isp].GetRealData(DiagIdx::uz).data(); - - // temporary arrays to store copy_flag and copy_index - // for particles that cross the reduced domain for diagnostics. 
- amrex::Gpu::DeviceVector FlagForPartCopy(np); - amrex::Gpu::DeviceVector IndexForPartCopy(np); - - int* const AMREX_RESTRICT Flag = FlagForPartCopy.dataPtr(); - int* const AMREX_RESTRICT IndexLocation = IndexForPartCopy.dataPtr(); - - // Compute extent of the reduced domain +/- user-defined physical width -#if (AMREX_SPACEDIM >= 2) - Real const xmin = m_diag_domain_lab_.lo(0)-m_particle_slice_dx_lab_; - Real const xmax = m_diag_domain_lab_.hi(0)+m_particle_slice_dx_lab_; -#endif -#if defined(WARPX_DIM_3D) - Real const ymin = m_diag_domain_lab_.lo(1)-m_particle_slice_dx_lab_; - Real const ymax = m_diag_domain_lab_.hi(1)+m_particle_slice_dx_lab_; -#endif - - //Flag particles that need to be copied if they are - // within the reduced slice +/- user-defined physical width - amrex::ParallelFor(np, - [=] AMREX_GPU_DEVICE(int i) - { - Flag[i] = 0; -#if (AMREX_SPACEDIM >= 2) - if ( x_temp[i] >= (xmin) && - x_temp[i] <= (xmax) ) -#endif - { -#if defined(WARPX_DIM_3D) - if (y_temp[i] >= (ymin) && - y_temp[i] <= (ymax) ) -#endif - { - Flag[i] = 1; - } - } - }); - - // Call exclusive scan to obtain location indices using - // flag values. These location indices are used to copy data - // from src to dst when the copy-flag is set to 1. - const int copy_size = amrex::Scan::ExclusiveSum(np, Flag, IndexLocation); - const auto init_size = static_cast( - m_particles_buffer_[isp].GetRealData(DiagIdx::w).size()); - const int total_reducedDiag_size = copy_size + init_size; - - // allocate array size for reduced diagnostic buffer array - m_particles_buffer_[isp].resize(total_reducedDiag_size); - - // Data pointers to particle attributes // - ParticleReal* const AMREX_RESTRICT wp_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::w).data(); - ParticleReal* const AMREX_RESTRICT x_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::x).data(); - ParticleReal* const AMREX_RESTRICT y_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::y).data(); - ParticleReal* const AMREX_RESTRICT z_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::z).data(); - ParticleReal* const AMREX_RESTRICT ux_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::ux).data(); - ParticleReal* const AMREX_RESTRICT uy_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::uy).data(); - ParticleReal* const AMREX_RESTRICT uz_buff = - m_particles_buffer_[isp].GetRealData(DiagIdx::uz).data(); - - // Selective copy of particle data from tmp array to reduced buffer - // array on the GPU using the flag value and index location. - amrex::ParallelFor(np, - [=] AMREX_GPU_DEVICE(int i) - { - if (Flag[i] == 1) - { - const int loc = IndexLocation[i] + init_size; - wp_buff[loc] = wp_temp[i]; - x_buff[loc] = x_temp[i]; - y_buff[loc] = y_temp[i]; - z_buff[loc] = z_temp[i]; - ux_buff[loc] = ux_temp[i]; - uy_buff[loc] = uy_temp[i]; - uz_buff[loc] = uz_temp[i]; - } - }); - - } -} diff --git a/Source/Diagnostics/BackTransformedDiagnostic_fwd.H b/Source/Diagnostics/BackTransformedDiagnostic_fwd.H deleted file mode 100644 index 2766d2e271c..00000000000 --- a/Source/Diagnostics/BackTransformedDiagnostic_fwd.H +++ /dev/null @@ -1,15 +0,0 @@ -/* Copyright 2021 Luca Fedeli, Axel Huebl - * - * This file is part of WarpX. 
- * - * License: BSD-3-Clause-LBNL - */ - -#ifndef WARPX_BACK_TRANSFORMED_DIAGNOSTICS_FWD_H -#define WARPX_BACK_TRANSFORMED_DIAGNOSTICS_FWD_H - -class LabFrameDiag; -class LabFrameSnapShot; -class BackTransformedDiagnostic; - -#endif /* WARPX_BACK_TRANSFORMED_DIAGNOSTICS_FWD_H */ diff --git a/Source/Diagnostics/CMakeLists.txt b/Source/Diagnostics/CMakeLists.txt index 4df0f68f85e..53358339472 100644 --- a/Source/Diagnostics/CMakeLists.txt +++ b/Source/Diagnostics/CMakeLists.txt @@ -1,6 +1,5 @@ target_sources(WarpX PRIVATE - BackTransformedDiagnostic.cpp Diagnostics.cpp FieldIO.cpp FullDiagnostics.cpp diff --git a/Source/Diagnostics/Make.package b/Source/Diagnostics/Make.package index f8bf69f989a..a09b1eaed5b 100644 --- a/Source/Diagnostics/Make.package +++ b/Source/Diagnostics/Make.package @@ -2,7 +2,6 @@ CEXE_sources += MultiDiagnostics.cpp CEXE_sources += Diagnostics.cpp CEXE_sources += FullDiagnostics.cpp CEXE_sources += WarpXIO.cpp -CEXE_sources += BackTransformedDiagnostic.cpp CEXE_sources += ParticleIO.cpp CEXE_sources += FieldIO.cpp CEXE_sources += SliceDiagnostic.cpp diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 548c95cd8ec..1785a04c158 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -352,43 +352,3 @@ WarpX::InitFromCheckpoint () mypc->Restart(restart_chkfile); } - - -std::unique_ptr -WarpX::GetCellCenteredData() { - - WARPX_PROFILE("WarpX::GetCellCenteredData()"); - - const amrex::IntVect ng(1); - const int nc = 10; - - Vector > cc(finest_level+1); - - for (int lev = 0; lev <= finest_level; ++lev) - { - cc[lev] = std::make_unique(grids[lev], dmap[lev], nc, ng ); - - int dcomp = 0; - // first the electric field - AverageAndPackVectorField( *cc[lev], Efield_aux[lev], dmap[lev], dcomp, ng ); - dcomp += 3; - // then the magnetic field - AverageAndPackVectorField( *cc[lev], Bfield_aux[lev], dmap[lev], dcomp, ng ); - dcomp += 3; - // then the current density - AverageAndPackVectorField( *cc[lev], current_fp[lev], dmap[lev], dcomp, ng ); - dcomp += 3; - // then the charge density - const std::unique_ptr& charge_density = mypc->GetChargeDensity(lev); - AverageAndPackScalarField( *cc[lev], *charge_density, dmap[lev], dcomp, ng ); - - ablastr::utils::communication::FillBoundary(*cc[lev], WarpX::do_single_precision_comms, geom[lev].periodicity()); - } - - for (int lev = finest_level; lev > 0; --lev) - { - CoarsenIO::Coarsen( *cc[lev-1], *cc[lev], 0, 0, nc, 0, refRatio(lev-1) ); - } - - return std::move(cc[0]); -} diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 6e04adc3be0..42042b7c726 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -11,7 +11,6 @@ #include "WarpX.H" #include "BoundaryConditions/PML.H" -#include "Diagnostics/BackTransformedDiagnostic.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "Evolve/WarpXDtType.H" @@ -247,15 +246,6 @@ WarpX::Evolve (int numsteps) ShiftGalileanBoundary(); - if (do_back_transformed_diagnostics) { - std::unique_ptr cell_centered_data = nullptr; - if (WarpX::do_back_transformed_fields) { - cell_centered_data = GetCellCenteredData(); - } - myBFD->writeLabFrameData(cell_centered_data.get(), *mypc, geom[0], cur_time, dt[0]); - } - - // sync up time for (int i = 0; i <= max_level; ++i) { t_new[i] = cur_time; @@ -370,10 +360,6 @@ WarpX::Evolve (int numsteps) // End loop on time steps } multi_diags->FilterComputePackFlushLastTimestep( istep[0] ); - - if 
(do_back_transformed_diagnostics) { - myBFD->Flush(geom[0]); - } } /* /brief Perform one PIC iteration, without subcycling diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 987bab9d447..3a5e9ed24c6 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -14,7 +14,6 @@ #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_PSATD) # include "BoundaryConditions/PML_RZ.H" #endif -#include "Diagnostics/BackTransformedDiagnostic.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" @@ -438,28 +437,6 @@ WarpX::InitData () void WarpX::InitDiagnostics () { multi_diags->InitData(); - if (do_back_transformed_diagnostics) { - const Real* current_lo = geom[0].ProbLo(); - const Real* current_hi = geom[0].ProbHi(); - Real dt_boost = dt[0]; - Real boosted_moving_window_v = (moving_window_v - beta_boost*PhysConst::c)/(1 - beta_boost*moving_window_v/PhysConst::c); - // Find the positions of the lab-frame box that corresponds to the boosted-frame box at t=0 - Real zmin_lab = static_cast( - (current_lo[moving_window_dir] - boosted_moving_window_v*t_new[0])/( (1.+beta_boost)*gamma_boost )); - Real zmax_lab = static_cast( - (current_hi[moving_window_dir] - boosted_moving_window_v*t_new[0])/( (1.+beta_boost)*gamma_boost )); - myBFD = std::make_unique( - zmin_lab, - zmax_lab, - moving_window_v, dt_snapshots_lab, - num_snapshots_lab, - dt_slice_snapshots_lab, - num_slice_snapshots_lab, - gamma_boost, t_new[0], dt_boost, - moving_window_dir, geom[0], - slice_realbox, - particle_slice_width_lab); - } reduced_diags->InitData(); } diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index b810723fe6f..001fc4631c9 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -85,7 +85,6 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, { charge = 1.0; mass = std::numeric_limits::max(); - do_back_transformed_diagnostics = 0; ParmParse pp_laser_name(m_laser_name); diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index 7f355810e78..59815d150e3 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -236,9 +236,6 @@ public: int nSpecies() const {return species_names.size();} - int nSpeciesBackTransformedDiagnostics() const {return nspecies_back_transformed_diagnostics;} - int mapSpeciesBackTransformedDiagnostics(int i) const {return map_species_back_transformed_diagnostics[i];} - int doBackTransformedDiagnostics() const {return do_back_transformed_diagnostics;} /** Whether back-transformed diagnostics need to be performed for any plasma species. * * \param[in] do_back_transformed_particles The parameter to set if back-transformed particles are set to true/false @@ -263,12 +260,6 @@ public: return std::count( v.begin(), v.end(), fromMainGrid ); } - void GetLabFrameData(const std::string& snapshot_name, - const int i_lab, const int direction, - const amrex::Real z_old, const amrex::Real z_new, - const amrex::Real t_boost, const amrex::Real t_lab, const amrex::Real dt, - amrex::Vector& parts) const; - // Inject particles during the simulation (for particles entering the // simulation domain after some iterations, due to flowing plasma and/or // moving window). 
@@ -494,12 +485,6 @@ private: void mapSpeciesProduct (); - // Number of species dumped in BackTransformedDiagnostics - int nspecies_back_transformed_diagnostics = 0; - // map_species_back_transformed_diagnostics[i] is the species ID in - // MultiParticleContainer for 0 map_species_back_transformed_diagnostics; - int do_back_transformed_diagnostics = 0; bool m_do_back_transformed_particles = false; void MFItInfoCheckTiling(const WarpXParticleContainer& /*pc_src*/) const noexcept diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 7200df69a33..9cbe898cd92 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -119,20 +119,6 @@ MultiParticleContainer::MultiParticleContainer (AmrCore* amr_core) pc_tmp = std::make_unique(amr_core); - // Compute the number of species for which lab-frame data is dumped - // nspecies_lab_frame_diags, and map their ID to MultiParticleContainer - // particle IDs in map_species_lab_diags. - map_species_back_transformed_diagnostics.resize(nspecies); - nspecies_back_transformed_diagnostics = 0; - for (int i=0; ido_back_transformed_diagnostics){ - map_species_back_transformed_diagnostics[nspecies_back_transformed_diagnostics] = i; - do_back_transformed_diagnostics = 1; - nspecies_back_transformed_diagnostics += 1; - } - } - // Setup particle collisions collisionhandler = std::make_unique(this); @@ -726,68 +712,6 @@ MultiParticleContainer::SetParticleDistributionMap (int lev, DistributionMapping } } -void -MultiParticleContainer -::GetLabFrameData (const std::string& /*snapshot_name*/, - const int /*i_lab*/, const int direction, - const Real z_old, const Real z_new, - const Real t_boost, const Real t_lab, const Real dt, - Vector& parts) const -{ - - WARPX_PROFILE("MultiParticleContainer::GetLabFrameData()"); - - // Loop over particle species - for (int i = 0; i < nspecies_back_transformed_diagnostics; ++i){ - int isp = map_species_back_transformed_diagnostics[i]; - WarpXParticleContainer* pc = allcontainers[isp].get(); - WarpXParticleContainer::DiagnosticParticles diagnostic_particles; - pc->GetParticleSlice(direction, z_old, z_new, t_boost, t_lab, dt, diagnostic_particles); - // Here, diagnostic_particles[lev][index] is a WarpXParticleContainer::DiagnosticParticleData - // where "lev" is the AMR level and "index" is a [grid index][tile index] pair. - - // Loop over AMR levels - for (int lev = 0; lev <= pc->finestLevel(); ++lev){ - // Loop over [grid index][tile index] pairs - // and Fills parts[species number i] with particle data from all grids and - // tiles in diagnostic_particles. parts contains particles from all - // AMR levels indistinctly. 
- for (const auto& dp : diagnostic_particles[lev]){ - // it->first is the [grid index][tile index] key - // it->second is the corresponding - // WarpXParticleContainer::DiagnosticParticleData value - parts[i].GetRealData(DiagIdx::w).insert( parts[i].GetRealData(DiagIdx::w ).end(), - dp.second.GetRealData(DiagIdx::w ).begin(), - dp.second.GetRealData(DiagIdx::w ).end()); - - parts[i].GetRealData(DiagIdx::x).insert( parts[i].GetRealData(DiagIdx::x ).end(), - dp.second.GetRealData(DiagIdx::x ).begin(), - dp.second.GetRealData(DiagIdx::x ).end()); - - parts[i].GetRealData(DiagIdx::y).insert( parts[i].GetRealData(DiagIdx::y ).end(), - dp.second.GetRealData(DiagIdx::y ).begin(), - dp.second.GetRealData(DiagIdx::y ).end()); - - parts[i].GetRealData(DiagIdx::z).insert( parts[i].GetRealData(DiagIdx::z ).end(), - dp.second.GetRealData(DiagIdx::z ).begin(), - dp.second.GetRealData(DiagIdx::z ).end()); - - parts[i].GetRealData(DiagIdx::ux).insert( parts[i].GetRealData(DiagIdx::ux).end(), - dp.second.GetRealData(DiagIdx::ux).begin(), - dp.second.GetRealData(DiagIdx::ux).end()); - - parts[i].GetRealData(DiagIdx::uy).insert( parts[i].GetRealData(DiagIdx::uy).end(), - dp.second.GetRealData(DiagIdx::uy).begin(), - dp.second.GetRealData(DiagIdx::uy).end()); - - parts[i].GetRealData(DiagIdx::uz).insert( parts[i].GetRealData(DiagIdx::uz).end(), - dp.second.GetRealData(DiagIdx::uz).begin(), - dp.second.GetRealData(DiagIdx::uz).end()); - } - } - } -} - /* \brief Continuous injection for particles initially outside of the domain. * \param injection_box: Domain where new particles should be injected. * Loop over all WarpXParticleContainer in MultiParticleContainer and diff --git a/Source/Particles/PhotonParticleContainer.cpp b/Source/Particles/PhotonParticleContainer.cpp index 29a835735ec..2de928aed05 100644 --- a/Source/Particles/PhotonParticleContainer.cpp +++ b/Source/Particles/PhotonParticleContainer.cpp @@ -127,8 +127,7 @@ PhotonParticleContainer::PushPX (WarpXParIter& pti, #endif auto copyAttribs = CopyParticleAttribs(pti, tmp_particle_data, offset); - int do_copy = (WarpX::do_back_transformed_diagnostics && - do_back_transformed_diagnostics && a_dt_type!=DtType::SecondHalf); + int do_copy = (m_do_back_transformed_particles && (a_dt_type!=DtType::SecondHalf) ); const auto GetPosition = GetParticlePosition(pti, offset); auto SetPosition = SetParticlePosition(pti, offset); diff --git a/Source/Particles/PhysicalParticleContainer.H b/Source/Particles/PhysicalParticleContainer.H index 16365810274..2f52688fbcb 100644 --- a/Source/Particles/PhysicalParticleContainer.H +++ b/Source/Particles/PhysicalParticleContainer.H @@ -240,12 +240,6 @@ public: const int n_external_attr_int, const amrex::RandomEngine& engine) override final; - virtual void GetParticleSlice ( - const int direction, const amrex::Real z_old, - const amrex::Real z_new, const amrex::Real t_boost, - const amrex::Real t_lab, const amrex::Real dt, - DiagnosticParticles& diagnostic_particles) final; - /** * \brief Apply NCI Godfrey filter to all components of E and B before gather * \param lev MR level diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index e79d526f1cc..b5ea417c091 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -264,9 +264,6 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp utils::parser::queryWithParser( pp_species_name, "self_fields_max_iters", self_fields_max_iters); 
pp_species_name.query("self_fields_verbosity", self_fields_verbosity); - // Whether to plot back-transformed (lab-frame) diagnostics - // for this species. - pp_species_name.query("do_back_transformed_diagnostics", do_back_transformed_diagnostics); pp_species_name.query("do_field_ionization", do_field_ionization); @@ -1862,8 +1859,7 @@ PhysicalParticleContainer::Evolve (int lev, bool has_buffer = cEx || cjx; - if ( (WarpX::do_back_transformed_diagnostics && do_back_transformed_diagnostics) || - (m_do_back_transformed_particles) ) + if (m_do_back_transformed_particles) { for (WarpXParIter pti(*this, lev); pti.isValid(); ++pti) { @@ -2478,191 +2474,6 @@ PhysicalParticleContainer::PushP (int lev, Real dt, } } -void -PhysicalParticleContainer::GetParticleSlice ( - const int direction, const Real z_old, - const Real z_new, const Real t_boost, - const Real t_lab, const Real dt, - DiagnosticParticles& diagnostic_particles) -{ - WARPX_PROFILE("PhysicalParticleContainer::GetParticleSlice()"); - - // Assume that the boost in the positive z direction. -#if defined(WARPX_DIM_1D_Z) - AMREX_ALWAYS_ASSERT(direction == 0); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - AMREX_ALWAYS_ASSERT(direction == 1); -#else - AMREX_ALWAYS_ASSERT(direction == 2); -#endif - - // Note the the slice should always move in the negative boost direction. - AMREX_ALWAYS_ASSERT(z_new < z_old); - - AMREX_ALWAYS_ASSERT(do_back_transformed_diagnostics == 1); - - const int nlevs = std::max(0, finestLevel()+1); - diagnostic_particles.resize(finestLevel()+1); - - for (int lev = 0; lev < nlevs; ++lev) { - // first we touch each map entry in serial - for (WarpXParIter pti(*this, lev); pti.isValid(); ++pti) - { - auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); - diagnostic_particles[lev][index]; - } - -#ifdef AMREX_USE_OMP -#pragma omp parallel -#endif - { - // Temporary arrays to store copy_flag and copy_index - // for particles that cross the z-slice - // These arrays are defined before the WarpXParIter to prevent them - // from going out of scope after each iteration, while the kernels - // may still need access to them. - // Note that the destructor for WarpXParIter is synchronized. 
- amrex::Gpu::DeviceVector FlagForPartCopy; - amrex::Gpu::DeviceVector IndexForPartCopy; - for (WarpXParIter pti(*this, lev); pti.isValid(); ++pti) - { - auto index = std::make_pair(pti.index(), pti.LocalTileIndex()); - - const auto GetPosition = GetParticlePosition(pti); - - auto& attribs = pti.GetAttribs(); - ParticleReal* const AMREX_RESTRICT wpnew = attribs[PIdx::w].dataPtr(); - ParticleReal* const AMREX_RESTRICT uxpnew = attribs[PIdx::ux].dataPtr(); - ParticleReal* const AMREX_RESTRICT uypnew = attribs[PIdx::uy].dataPtr(); - ParticleReal* const AMREX_RESTRICT uzpnew = attribs[PIdx::uz].dataPtr(); - - ParticleReal* const AMREX_RESTRICT - xpold = tmp_particle_data[lev][index][TmpIdx::xold].dataPtr(); - ParticleReal* const AMREX_RESTRICT - ypold = tmp_particle_data[lev][index][TmpIdx::yold].dataPtr(); - ParticleReal* const AMREX_RESTRICT - zpold = tmp_particle_data[lev][index][TmpIdx::zold].dataPtr(); - ParticleReal* const AMREX_RESTRICT - uxpold = tmp_particle_data[lev][index][TmpIdx::uxold].dataPtr(); - ParticleReal* const AMREX_RESTRICT - uypold = tmp_particle_data[lev][index][TmpIdx::uyold].dataPtr(); - ParticleReal* const AMREX_RESTRICT - uzpold = tmp_particle_data[lev][index][TmpIdx::uzold].dataPtr(); - - const long np = pti.numParticles(); - - Real uzfrm = -WarpX::gamma_boost*WarpX::beta_boost*PhysConst::c; - Real inv_c2 = 1.0_rt/PhysConst::c/PhysConst::c; - - FlagForPartCopy.resize(np); - IndexForPartCopy.resize(np); - - int* const AMREX_RESTRICT Flag = FlagForPartCopy.dataPtr(); - int* const AMREX_RESTRICT IndexLocation = IndexForPartCopy.dataPtr(); - - //Flag particles that need to be copied if they cross the z_slice - amrex::ParallelFor(np, - [=] AMREX_GPU_DEVICE(int i) - { - ParticleReal xp, yp, zp; - GetPosition(i, xp, yp, zp); - Flag[i] = 0; - if ( (((zp >= z_new) && (zpold[i] <= z_old)) || - ((zp <= z_new) && (zpold[i] >= z_old))) ) - { - Flag[i] = 1; - } - }); - - // exclusive scan to obtain location indices using flag values - // These location indices are used to copy data from - // src to dst when the copy-flag is set to 1. 
- const int total_partdiag_size = amrex::Scan::ExclusiveSum(np,Flag,IndexLocation); - - // allocate array size for diagnostic particle array - diagnostic_particles[lev][index].resize(total_partdiag_size); - - amrex::Real gammaboost = WarpX::gamma_boost; - amrex::Real betaboost = WarpX::beta_boost; - amrex::Real Phys_c = PhysConst::c; - - ParticleReal* const AMREX_RESTRICT diag_wp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::w).data(); - ParticleReal* const AMREX_RESTRICT diag_xp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::x).data(); - ParticleReal* const AMREX_RESTRICT diag_yp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::y).data(); - ParticleReal* const AMREX_RESTRICT diag_zp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::z).data(); - ParticleReal* const AMREX_RESTRICT diag_uxp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::ux).data(); - ParticleReal* const AMREX_RESTRICT diag_uyp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::uy).data(); - ParticleReal* const AMREX_RESTRICT diag_uzp = - diagnostic_particles[lev][index].GetRealData(DiagIdx::uz).data(); - - // Copy particle data to diagnostic particle array on the GPU - // using flag and index values - amrex::ParallelFor(np, - [=] AMREX_GPU_DEVICE(int i) - { - ParticleReal xp_new, yp_new, zp_new; - GetPosition(i, xp_new, yp_new, zp_new); - if (Flag[i] == 1) - { - // Lorentz Transform particles to lab-frame - const Real gamma_new_p = std::sqrt(1.0_rt + inv_c2* - (uxpnew[i]*uxpnew[i] - + uypnew[i]*uypnew[i] - + uzpnew[i]*uzpnew[i])); - const Real t_new_p = gammaboost*t_boost - uzfrm*zp_new*inv_c2; - const Real z_new_p = gammaboost*(zp_new + betaboost*Phys_c*t_boost); - const Real uz_new_p = gammaboost*uzpnew[i] - gamma_new_p*uzfrm; - - const Real gamma_old_p = std::sqrt(1.0_rt + inv_c2* - (uxpold[i]*uxpold[i] - + uypold[i]*uypold[i] - + uzpold[i]*uzpold[i])); - const Real t_old_p = gammaboost*(t_boost - dt) - - uzfrm*zpold[i]*inv_c2; - const Real z_old_p = gammaboost*(zpold[i] - + betaboost*Phys_c*(t_boost-dt)); - const Real uz_old_p = gammaboost*uzpold[i] - - gamma_old_p*uzfrm; - - // interpolate in time to t_lab - const Real weight_old = (t_new_p - t_lab) - / (t_new_p - t_old_p); - const Real weight_new = (t_lab - t_old_p) - / (t_new_p - t_old_p); - - const Real xp = xpold[i]*weight_old + xp_new*weight_new; - const Real yp = ypold[i]*weight_old + yp_new*weight_new; - const Real zp = z_old_p*weight_old + z_new_p*weight_new; - - const Real uxp = uxpold[i]*weight_old - + uxpnew[i]*weight_new; - const Real uyp = uypold[i]*weight_old - + uypnew[i]*weight_new; - const Real uzp = uz_old_p*weight_old - + uz_new_p *weight_new; - - const int loc = IndexLocation[i]; - diag_wp[loc] = wpnew[i]; - diag_xp[loc] = xp; - diag_yp[loc] = yp; - diag_zp[loc] = zp; - diag_uxp[loc] = uxp; - diag_uyp[loc] = uyp; - diag_uzp[loc] = uzp; - } - }); - Gpu::synchronize(); // because of FlagForPartCopy & IndexForPartCopy - } - } - } -} - /* \brief Inject particles during the simulation * \param injection_box: domain where particles should be injected. 
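Both deleted routines that fill particle buffers above (LabFrameSlice::AddPartDataToParticleBuffer and PhysicalParticleContainer::GetParticleSlice) rely on the same flag/exclusive-scan idiom to compact a filtered subset of particles into a contiguous destination array without atomics. Below is only a minimal CPU-only sketch of that idiom, in plain C++ with a made-up selection predicate and without the AMReX ParallelFor/Scan wrappers used by the deleted code.

    #include <iostream>
    #include <vector>

    int main ()
    {
        // Source particle positions; keep only those inside a slice around z = 0.
        std::vector<double> z = {-3.0, -0.2, 0.1, 2.5, 0.4, -0.5};
        const double half_width = 0.5;

        const int np = static_cast<int>(z.size());
        std::vector<int> flag(np), index(np);

        // 1) Flag the particles that must be copied.
        for (int i = 0; i < np; ++i) {
            flag[i] = (z[i] >= -half_width && z[i] <= half_width) ? 1 : 0;
        }

        // 2) An exclusive scan of the flags gives each flagged particle its
        //    destination index in the compacted array, and the total count.
        int total = 0;
        for (int i = 0; i < np; ++i) {
            index[i] = total;
            total += flag[i];
        }

        // 3) Scatter: every flagged particle writes to its own slot, so this
        //    loop is trivially data-parallel (a GPU ParallelFor in the deleted code).
        std::vector<double> z_out(total);
        for (int i = 0; i < np; ++i) {
            if (flag[i] == 1) { z_out[index[i]] = z[i]; }
        }

        for (double v : z_out) { std::cout << v << "\n"; }
        return 0;
    }

The exclusive scan guarantees a unique destination index per selected particle, which is what allows the scatter step in the deleted routines to run as a GPU kernel without any synchronization between particles.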
*/ @@ -2766,10 +2577,7 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti, ParticleReal* const AMREX_RESTRICT uy = attribs[PIdx::uy].dataPtr() + offset; ParticleReal* const AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr() + offset; - int do_copy = ( (WarpX::do_back_transformed_diagnostics - && do_back_transformed_diagnostics - && a_dt_type!=DtType::SecondHalf) - || (m_do_back_transformed_particles && (a_dt_type!=DtType::SecondHalf)) ); + int do_copy = (m_do_back_transformed_particles && (a_dt_type!=DtType::SecondHalf) ); CopyParticleAttribs copyAttribs; if (do_copy) { copyAttribs = CopyParticleAttribs(pti, tmp_particle_data, offset); diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index 6f0aaf8e827..cb5542025e9 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -137,11 +137,6 @@ public: virtual void PostRestart () = 0; - virtual void GetParticleSlice(const int /*direction*/, const amrex::Real /*z_old*/, - const amrex::Real /*z_new*/, const amrex::Real /*t_boost*/, - const amrex::Real /*t_lab*/, const amrex::Real /*dt*/, - DiagnosticParticles& /*diagnostic_particles*/) {} - void AllocData (); /** @@ -315,7 +310,6 @@ public: // split along diagonals (0) or axes (1) int split_type = 0; - int doBackTransformedDiagnostics () const { return do_back_transformed_diagnostics; } /** Whether back-transformed diagnostics need to be performed for a particular species. * * \param[in] do_back_transformed_particles The parameter to set if back-transformed particles are set to true/false @@ -408,7 +402,6 @@ protected: int do_resampling = 0; - int do_back_transformed_diagnostics = 1; /** Whether back-transformed diagnostics is turned on for the corresponding species.*/ bool m_do_back_transformed_particles = false; diff --git a/Source/WarpX.H b/Source/WarpX.H index 1cad22d40a4..ecedba62b29 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -13,7 +13,6 @@ #define WARPX_H_ #include "BoundaryConditions/PML_fwd.H" -#include "Diagnostics/BackTransformedDiagnostic_fwd.H" #include "Diagnostics/MultiDiagnostics_fwd.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H" #include "EmbeddedBoundary/WarpXFaceInfoBox_fwd.H" @@ -273,22 +272,6 @@ public: //! If true, the initial conditions from random number generators are serialized (useful for reproducible testing with OpenMP) static bool serialize_initial_conditions; - //! If true, then lab-frame data will be computed for boosted frame simulations - //! with customized output format - static bool do_back_transformed_diagnostics; - //! Name of the back-transformed diagnostics output directory - static std::string lab_data_directory; - //! Number of back-tranformed snapshots in the lab-frame - static int num_snapshots_lab; - //! Time interval in lab-frame between the back-transformed snapshots - static amrex::Real dt_snapshots_lab; - //! If true, then lab-frame data will be computed for the fields and flushed out - //! in customized format - static bool do_back_transformed_fields; - //! If true, then lab-frame data will be computed for the particles and flushed out - //! in customized format - static bool do_back_transformed_particles; - //! Lorentz factor of the boosted frame in which a boosted-frame simulation is run static amrex::Real gamma_boost; //! 
Beta value corresponding to the Lorentz factor of the boosted frame of the simulation @@ -758,12 +741,6 @@ public: static amrex::Real moving_window_v; static bool fft_do_time_averaging; - // slice generation // - static int num_slice_snapshots_lab; - static amrex::Real dt_slice_snapshots_lab; - static amrex::Real particle_slice_width_lab; - amrex::RealBox getSliceRealBox() const {return slice_realbox;} - // these should be private, but can't due to Cuda limitations static void ComputeDivB (amrex::MultiFab& divB, int const dcomp, const std::array& B, @@ -1077,8 +1054,6 @@ private: /** Check the requested resources and write performance hints */ void PerformanceHints (); - std::unique_ptr GetCellCenteredData(); - void BuildBufferMasks (); void BuildBufferMasksInBox ( const amrex::Box tbx, amrex::IArrayBox &buffer_mask, const amrex::IArrayBox &guard_mask, const int ng ); @@ -1151,9 +1126,6 @@ private: std::unique_ptr mypc; std::unique_ptr multi_diags; - // Boosted Frame Diagnostics - std::unique_ptr myBFD; - // // Fields: First array for level, second for direction // diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 2e0fe7c3e2a..9ee07811d5a 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -12,7 +12,6 @@ #include "WarpX.H" #include "BoundaryConditions/PML.H" -#include "Diagnostics/BackTransformedDiagnostic.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/WarpXFaceInfoBox.H" @@ -169,17 +168,6 @@ int WarpX::num_mirrors = 0; utils::parser::IntervalsParser WarpX::sort_intervals; amrex::IntVect WarpX::sort_bin_size(AMREX_D_DECL(1,1,1)); -bool WarpX::do_back_transformed_diagnostics = false; -std::string WarpX::lab_data_directory = "lab_frame_data"; -int WarpX::num_snapshots_lab = std::numeric_limits::lowest(); -Real WarpX::dt_snapshots_lab = std::numeric_limits::lowest(); -bool WarpX::do_back_transformed_fields = true; -bool WarpX::do_back_transformed_particles = true; - -int WarpX::num_slice_snapshots_lab = 0; -Real WarpX::dt_slice_snapshots_lab; -Real WarpX::particle_slice_width_lab = 0.0_rt; - bool WarpX::do_dynamic_scheduling = true; int WarpX::electrostatic_solver_id; @@ -270,7 +258,6 @@ WarpX::WarpX () current_injection_position = geom[0].ProbLo(moving_window_dir); } } - do_back_transformed_particles = mypc->doBackTransformedDiagnostics(); // Particle Boundary Buffer (i.e., scraped particles on boundary) m_particle_boundary_buffer = std::make_unique(); @@ -644,49 +631,6 @@ WarpX::ReadParameters () moving_window_v *= PhysConst::c; } - pp_warpx.query("do_back_transformed_diagnostics", do_back_transformed_diagnostics); - if (do_back_transformed_diagnostics) { - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(gamma_boost > 1.0, - "gamma_boost must be > 1 to use the boosted frame diagnostic."); - - pp_warpx.query("lab_data_directory", lab_data_directory); - - std::string s; - pp_warpx.get("boost_direction", s); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (s == "z" || s == "Z"), - "The boosted frame diagnostic currently only works if the boost is in the z direction."); - - utils::parser::queryWithParser( - pp_warpx, "num_snapshots_lab", num_snapshots_lab); - - // Read either dz_snapshots_lab or dt_snapshots_lab - Real dz_snapshots_lab = 0; - const bool dt_snapshots_specified = - utils::parser::queryWithParser(pp_warpx, "dt_snapshots_lab", dt_snapshots_lab); - const bool dz_snapshots_specified = - utils::parser::queryWithParser(pp_warpx, "dz_snapshots_lab", dz_snapshots_lab); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - 
dt_snapshots_specified || dz_snapshots_specified, - "When using back-transformed diagnostics, user should specify either dz_snapshots_lab or dt_snapshots_lab."); - - if (dz_snapshots_specified){ - dt_snapshots_lab = dz_snapshots_lab/PhysConst::c; - } - - utils::parser::getWithParser(pp_warpx, "gamma_boost", gamma_boost); - - pp_warpx.query("do_back_transformed_fields", do_back_transformed_fields); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(do_moving_window, - "The moving window should be on if using the boosted frame diagnostic."); - - pp_warpx.get("moving_window_dir", s); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (s == "z" || s == "Z"), - "The boosted frame diagnostic currently only works if the moving window is in the z direction."); - } - electrostatic_solver_id = GetAlgorithmInteger(pp_warpx, "do_electrostatic"); // if an electrostatic solver is used, set the Maxwell solver to None if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { @@ -1451,19 +1395,6 @@ WarpX::ReadParameters () slice_cr_ratio[idim] = slice_crse_ratio[idim]; } } - - if (do_back_transformed_diagnostics) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(gamma_boost > 1.0, - "gamma_boost must be > 1 to use the boost frame diagnostic"); - utils::parser::queryWithParser( - pp_slice, "num_slice_snapshots_lab", num_slice_snapshots_lab); - if (num_slice_snapshots_lab > 0) { - utils::parser::getWithParser( - pp_slice, "dt_slice_snapshots_lab", dt_slice_snapshots_lab ); - utils::parser::getWithParser( - pp_slice, "particle_slice_width_lab",particle_slice_width_lab); - } - } } } @@ -1571,6 +1502,68 @@ WarpX::BackwardCompatibility () "Please use the renamed option warpx.serialize_initial_conditions instead." ); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("do_back_transformed_diagnostics", backward_int), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("lab_data_directory", backward_str), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("num_snapshots_lab", backward_int), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("dt_snapshots_lab", backward_Real), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("dz_snapshots_lab", backward_Real), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("do_back_transformed_fields", backward_int), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_warpx.query("buffer_size", backward_int), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." 
+ ); + + ParmParse pp_slice("slice"); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_slice.query("num_slice_snapshots_lab", backward_int), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_slice.query("dt_slice_snapshots_lab", backward_Real), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !pp_slice.query("particle_slice_width_lab", backward_Real), + "Legacy back-transformed diagnostics are not supported anymore. " + "Please use the new syntax for back-transformed diagnostics, see documentation." + ); + ParmParse pp_interpolation("interpolation"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !pp_interpolation.query("nox", backward_int) && diff --git a/Tools/LibEnsemble/sim/inputs b/Tools/LibEnsemble/sim/inputs index 11e9bb1e78f..03e9968e5b0 100644 --- a/Tools/LibEnsemble/sim/inputs +++ b/Tools/LibEnsemble/sim/inputs @@ -48,10 +48,6 @@ warpx.moving_window_v = 1.0 # in units of the speed of light ################################# warpx.gamma_boost = 30.0 warpx.boost_direction = z -warpx.do_back_transformed_diagnostics=0 # 1 -warpx.do_back_transformed_fields=0 -warpx.num_snapshots_lab=20 -warpx.dt_snapshots_lab=1.6678204759907604e-10 ################################# ############ PLASMA ############# @@ -77,7 +73,6 @@ electrons.predefined_profile_name = "parabolic_channel" # predefined_profile_params = z_start ramp_up plateau ramp_down rc n0 electrons.predefined_profile_params = 0.0 .02 .297 .003 40.e-6 1.7e23 electrons.do_continuous_injection = 1 -electrons.do_back_transformed_diagnostics=0 ions.charge = q_e ions.mass = m_p @@ -95,7 +90,6 @@ ions.predefined_profile_name = "parabolic_channel" # predefined_profile_params = z_start ramp_up plateau ramp_down rc n0 ions.predefined_profile_params = 0.0 .02 .297 .003 40.e-6 1.7e23 ions.do_continuous_injection = 1 -ions.do_back_transformed_diagnostics=0 electrons2.charge = -q_e electrons2.mass = m_e @@ -113,7 +107,6 @@ electrons2.predefined_profile_name = "parabolic_channel" # predefined_profile_params = z_start ramp_up plateau ramp_down rc n0 electrons2.predefined_profile_params = 0.3485 .02 .297 .003 40.e-6 1.7e23 electrons2.do_continuous_injection = 1 -electrons2.do_back_transformed_diagnostics=0 ions2.charge = q_e ions2.mass = m_p @@ -131,7 +124,6 @@ ions2.predefined_profile_name = "parabolic_channel" # predefined_profile_params = z_start ramp_up plateau ramp_down rc n0 ions2.predefined_profile_params = 0.3485 .02 .297 .003 40.e-6 1.7e23 ions2.do_continuous_injection = 1 -ions2.do_back_transformed_diagnostics=0 beam.charge = -q_e beam.mass = m_e @@ -156,7 +148,6 @@ beam.uy_th = 0. beam.uz_th = 39.138943248532286 beam.zinject_plane = 0.02 beam.rigid_advance = true -beam.do_back_transformed_diagnostics=1 ################################# ######### Lens Mirror ########### diff --git a/Tools/PerformanceTests/automated_test_4_labdiags_2ppc b/Tools/PerformanceTests/automated_test_4_labdiags_2ppc index ecfd42da168..f49d92acf26 100644 --- a/Tools/PerformanceTests/automated_test_4_labdiags_2ppc +++ b/Tools/PerformanceTests/automated_test_4_labdiags_2ppc @@ -34,11 +34,6 @@ warpx.moving_window_v = 1.0 # in units of the speed of light warpx.gamma_boost = 15. 
warpx.boost_direction = z -# Diagnostics -warpx.do_back_transformed_diagnostics = 1 -warpx.num_snapshots_lab = 20 -warpx.dt_snapshots_lab = 7.0e-14 - # Species particles.species_names = electrons ions From c69297d58a75735d7995c27c1ba95a708fe9bc07 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 9 Nov 2022 12:36:41 -0800 Subject: [PATCH 0148/1346] 2D/RZ Embedded Boundaries Bug Fix (#3510) * Run embedded_boundary_cube_2d in debug mode * Fix bug * Use `lbound`/`ubound` instead of `begin`/`end` * Run embedded_boundary_cube_2d in release mode --- Source/Initialization/WarpXInitData.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 3a5e9ed24c6..ab421f10658 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -933,6 +933,7 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); + for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); @@ -951,6 +952,13 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( amrex::Array4 const& Sy = face_areas[1]->array(mfi); amrex::Array4 const& Sz = face_areas[2]->array(mfi); +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + const amrex::Dim3 lx_lo = amrex::lbound(lx); + const amrex::Dim3 lx_hi = amrex::ubound(lx); + const amrex::Dim3 lz_lo = amrex::lbound(lz); + const amrex::Dim3 lz_hi = amrex::ubound(lz); +#endif + #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::ignore_unused(ly, Sx, Sz); #elif defined(WARPX_DIM_1D_Z) @@ -1001,7 +1009,10 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( if((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0)) return; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if((field=='E' and (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j, k)<=0 || lz(i, j-1, k)<=0)) or + if((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 + || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 + || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or (field=='B' and Sy(i,j,k)<=0)) return; #endif #endif From 3dfe843bb74b64a6fb3a3ec893cf7c058f062b65 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 10 Nov 2022 11:00:32 -0800 Subject: [PATCH 0149/1346] Vay Deposition: Filter D, Exchange Guard Cells of J (#3388) * Vay Deposition: Filter D, Exchange Guard Cells of J * Improve Inline Comment --- Source/Evolve/WarpXEvolve.cpp | 7 +++++++ Source/FieldSolver/WarpXPushFieldsEM.cpp | 9 +++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 42042b7c726..5c3883007bd 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -501,6 +501,13 @@ void WarpX::SyncCurrentAndRho () SyncCurrent(current_fp, current_cp); SyncRho(); } + + if (current_deposition_algo == CurrentDepositionAlgo::Vay) + { + // TODO This works only without mesh refinement + const int lev = 0; + if (use_filter) ApplyFilterJ(current_fp_vay, lev); + } } } else 
// FDTD diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index a3a8829628f..3d68e8e52ed 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -724,8 +724,13 @@ WarpX::PushPSATD () PSATDBackwardTransformJ(current_fp, current_cp); PSATDSubtractCurrentPartialSumsAvg(); - // Synchronize J and rho (if used) - SyncCurrent(current_fp, current_cp); + // Synchronize J and rho (if used). + // Here we call SumBoundaryJ instead of SyncCurrent, because + // filtering has been already applied to D in OneStep_nosub, + // by calling SyncCurrentAndRho (see Evolve/WarpXEvolve.cpp). + // TODO This works only without mesh refinement + const int lev = 0; + SumBoundaryJ(current_fp, lev, Geom(lev).periodicity()); SyncRho(); } From d2ea078debfda27483fedcec9454e0b87b24861d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ne=C3=AFl=20Zaim?= <49716072+NeilZaim@users.noreply.github.com> Date: Thu, 10 Nov 2022 20:39:09 +0100 Subject: [PATCH 0150/1346] Use makeParser function for laser field parsing option (#3517) --- Source/Laser/LaserProfiles.H | 5 ---- .../LaserProfileFieldFunction.cpp | 27 +++---------------- .../LaserProfileFromTXYEFile.cpp | 1 - .../LaserProfileGaussian.cpp | 1 - .../LaserProfilesImpl/LaserProfileHarris.cpp | 1 - Source/Particles/LaserParticleContainer.cpp | 2 +- 6 files changed, 4 insertions(+), 33 deletions(-) diff --git a/Source/Laser/LaserProfiles.H b/Source/Laser/LaserProfiles.H index 9677bb9c2aa..8555b518716 100644 --- a/Source/Laser/LaserProfiles.H +++ b/Source/Laser/LaserProfiles.H @@ -64,7 +64,6 @@ public: virtual void init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& ppc, CommonLaserParameters params) = 0; /** Update Laser Profile @@ -109,7 +108,6 @@ public: void init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& ppc, CommonLaserParameters params) override final; //No update needed @@ -152,7 +150,6 @@ public: void init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& ppc, CommonLaserParameters params) override final; //No update needed @@ -187,7 +184,6 @@ public: void init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& ppc, CommonLaserParameters params) override final; //No update needed @@ -223,7 +219,6 @@ public: void init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& ppc, CommonLaserParameters params) override final; /** \brief Reads new field data chunk from file if needed diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp index bc0b4381df8..876a56d537e 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileFieldFunction.cpp @@ -27,33 +27,12 @@ using namespace amrex; void WarpXLaserProfiles::FieldFunctionLaserProfile::init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& ppc, CommonLaserParameters /*params*/) { // Parse the properties of the parse_field_function profile - ppl.get("field_function(X,Y,t)", m_params.field_function); - m_parser.define(m_params.field_function); - m_parser.registerVariables({"X","Y","t"}); - - std::set symbols = m_parser.symbols(); - symbols.erase("X"); - symbols.erase("Y"); - symbols.erase("t"); // after removing variables, we are left with constants - for (auto it = symbols.begin(); it != symbols.end(); ) { - Real v; - if (utils::parser::queryWithParser(ppc, it->c_str(), v)) { - m_parser.setConstant(*it, v); - it = symbols.erase(it); - } else { - ++it; - 
} - } - - std::stringstream ss; - for (auto const& s : symbols) ss << " " << s; - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(symbols.empty(), - "Laser Profile: Unknown symbols " + ss.str()); - + utils::parser::Store_parserString( + ppl, "field_function(X,Y,t)", m_params.field_function); + m_parser = utils::parser::makeParser(m_params.field_function,{"X","Y","t"}); } void diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp index 376ec949504..2ebd7fac8a2 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileFromTXYEFile.cpp @@ -45,7 +45,6 @@ using namespace amrex; void WarpXLaserProfiles::FromTXYEFileLaserProfile::init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& /* ppc */, CommonLaserParameters params) { if (!std::numeric_limits< double >::is_iec559) diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp index c1dca774a11..8b1ab21666e 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp @@ -32,7 +32,6 @@ using namespace amrex; void WarpXLaserProfiles::GaussianLaserProfile::init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& /* ppc */, CommonLaserParameters params) { //Copy common params diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp index 2fe1a2341dc..18bfc123d29 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileHarris.cpp @@ -22,7 +22,6 @@ using namespace amrex; void WarpXLaserProfiles::HarrisLaserProfile::init ( const amrex::ParmParse& ppl, - const amrex::ParmParse& /* ppc */, CommonLaserParameters params) { // Parse the properties of the Harris profile diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index 001fc4631c9..a0cafadcd2c 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -249,7 +249,7 @@ LaserParticleContainer::LaserParticleContainer (AmrCore* amr_core, int ispecies, common_params.e_max = m_e_max; common_params.p_X = m_p_X; common_params.nvec = m_nvec; - m_up_laser_profile->init(pp_laser_name, ParmParse{"my_constants"}, common_params); + m_up_laser_profile->init(pp_laser_name, common_params); } /* \brief Check if laser particles enter the box, and inject if necessary. From 3afb1f6488ded17912fa80f389f0c6f0f8a62b2a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 11 Nov 2022 21:27:59 -0500 Subject: [PATCH 0151/1346] CI: unbreak macOS (2to3) (#3520) brew shipped a breaking update again. 
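The underlying issue is presumably the usual Homebrew/runner clash: the updated
python bottle wants to install symlinks (2to3-3.11, idle3.11, ...) that already
exist in /usr/local/bin on the GitHub macOS image, so brew aborts on the link
conflict. A rough sketch of the workaround pattern (the exact paths are in the
diff below):

    set +e
    rm -rf /usr/local/bin/2to3 /usr/local/bin/2to3-3.11 /usr/local/bin/idle3.11
    brew unlink gcc
    brew update
    brew install ccache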
--- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index b50cfc8647e..a7a12968b49 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -36,7 +36,7 @@ jobs: run: | brew --cache set +e - rm -rf /usr/local/bin/2to3 + rm -rf /usr/local/bin/2to3 /usr/local/bin/2to3-3.11 /usr/local/bin/idle3.11 brew unlink gcc brew update brew install ccache From 5d635b996feff9fac6065930df7b87b83cceb311 Mon Sep 17 00:00:00 2001 From: David Grote Date: Sat, 12 Nov 2022 08:54:50 -0800 Subject: [PATCH 0152/1346] Fix warnings with ceil in BTD code (#3518) * Added static_cast to some real to int conversions * Fixed typo --- Source/Diagnostics/BTDiagnostics.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 510c1b94692..90b00a24a08 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -135,7 +135,7 @@ void BTDiagnostics::DerivedInitData () const amrex::Real dz_snapshot_grid = dz_lab(dt_boosted_frame, ref_ratio); // Need enough buffers so the snapshot length is longer than the lab frame length // num_buffers * m_buffer_size * dz_snapshot_grid >= Lz - const int num_buffers = ceil(Lz_lab / m_buffer_size / dz_snapshot_grid); + const int num_buffers = static_cast(std::ceil(Lz_lab / m_buffer_size / dz_snapshot_grid)); const int final_snapshot_iteration = m_intervals.GetFinalIteration(); // the final snapshot starts filling when the @@ -156,7 +156,7 @@ void BTDiagnostics::DerivedInitData () // if j = final snapshot starting step, then we want to solve // j dt_boosted_frame >= t_intersect_boost = i * dt_snapshot / gamma / (1+beta) // j >= i / gamma / (1+beta) * dt_snapshot / dt_boosted_frame - const int final_snapshot_starting_step = ceil(final_snapshot_iteration / warpx.gamma_boost / (1._rt+warpx.beta_boost) * m_dt_snapshots_lab / dt_boosted_frame); + const int final_snapshot_starting_step = static_cast(std::ceil(final_snapshot_iteration / warpx.gamma_boost / (1._rt+warpx.beta_boost) * m_dt_snapshots_lab / dt_boosted_frame)); const int final_snapshot_fill_iteration = final_snapshot_starting_step + num_buffers * m_buffer_size - 1; if (final_snapshot_fill_iteration > warpx.maxStep()) { std::string warn_string = From c02f6bd3c6e6ef8253c4112358019eae8acb7342 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Mon, 14 Nov 2022 11:43:04 -0800 Subject: [PATCH 0153/1346] Flux injection: move particle only after performing checks (#3519) * Flux injection: move particle only after performing checks * Correct cylindrical to cartesian conversion * Update benchmarks --- .../benchmarks_json/FluxInjection.json | 18 +++---- .../Particles/PhysicalParticleContainer.cpp | 54 +++++++++---------- 2 files changed, 34 insertions(+), 38 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/FluxInjection.json b/Regression/Checksum/benchmarks_json/FluxInjection.json index 5d95fbe2b10..5553753d24d 100644 --- a/Regression/Checksum/benchmarks_json/FluxInjection.json +++ b/Regression/Checksum/benchmarks_json/FluxInjection.json @@ -1,14 +1,14 @@ { "electron": { - "particle_momentum_x": 1.7856085576594427e-42, - "particle_momentum_y": 1.7883824545080506e-42, - "particle_momentum_z": 4.565443974152731e-41, - "particle_position_x": 6992.343661648445, - "particle_position_y": 2049.9517671725316, - "particle_theta": 6536.38847902292, - "particle_weight": 3.240227722540627e-08 + 
"particle_momentum_x": 1.7879471038093652e-42, + "particle_momentum_y": 1.7494821186739744e-42, + "particle_momentum_z": 4.5268277440986243e-41, + "particle_position_x": 6940.335850058893, + "particle_position_y": 2046.2539850460196, + "particle_theta": 6498.1356057858175, + "particle_weight": 3.219739901337792e-08 }, "lev=0": { - "Bz": 2.20886367779576e-47 + "Bz": 2.1952258973082976e-47 } -} +} \ No newline at end of file diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index b5ea417c091..f37ea5ea38d 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1661,9 +1661,6 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) pu.y *= PhysConst::c; pu.z *= PhysConst::c; - const amrex::Real t_fract = amrex::Random(engine)*dt; - UpdatePosition(ppos.x, ppos.y, ppos.z, pu.x, pu.y, pu.z, t_fract); - #if defined(WARPX_DIM_3D) if (!tile_realbox.contains(XDim3{ppos.x,ppos.y,ppos.z})) { p.id() = -1; @@ -1682,13 +1679,17 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) continue; } #endif - - // Save the x and y values to use in the insideBounds checks. - // This is needed with WARPX_DIM_RZ since x and y are modified. - Real xb = ppos.x; - Real yb = ppos.y; + // Lab-frame simulation + // If the particle is not within the species's + // xmin, xmax, ymin, ymax, zmin, zmax, go to + // the next generated particle. + if (!inj_pos->insideBounds(ppos.x, ppos.y, ppos.z)) { + p.id() = -1; + continue; + } #ifdef WARPX_DIM_RZ + // Conversion from cylindrical to Cartesian coordinates // Replace the x and y, setting an angle theta. // These x and y are used to get the momentum and density Real theta; @@ -1702,8 +1703,9 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) Real const cos_theta = std::cos(theta); Real const sin_theta = std::sin(theta); // Rotate the position - ppos.x = xb*cos_theta; - ppos.y = xb*sin_theta; + amrex::Real radial_position = ppos.x; + ppos.x = radial_position*cos_theta; + ppos.y = radial_position*sin_theta; if (loc_flux_normal_axis != 2) { // Rotate the momentum // This because, when the flux direction is e.g. "r" @@ -1716,19 +1718,7 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) pu.y = sin_theta*ur + cos_theta*ut; } #endif - - // Lab-frame simulation - // If the particle is not within the species's - // xmin, xmax, ymin, ymax, zmin, zmax, go to - // the next generated particle. 
- - if (!inj_pos->insideBounds(xb, yb, ppos.z)) { - p.id() = -1; - continue; - } - Real dens = inj_rho->getDensity(ppos.x, ppos.y, ppos.z); - // Remove particle if density below threshold if ( dens < density_min ){ p.id() = -1; @@ -1769,11 +1759,11 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) // the radius ; thus, the calculation is finalized here if (loc_flux_normal_axis != 1) { if (radially_weighted) { - weight *= 2._rt*MathConst::pi*xb; + weight *= 2._rt*MathConst::pi*radial_position; } else { // This is not correct since it might shift the particle // out of the local grid - ppos.x = std::sqrt(xb*rmax); + ppos.x = std::sqrt(radial_position*rmax); weight *= dx[0]; } } @@ -1783,15 +1773,21 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) pa[PIdx::uy][ip] = pu.y; pa[PIdx::uz][ip] = pu.z; + // Update particle position by a random `t_fract` + // so as to produce a continuous-looking flow of particles + const amrex::Real t_fract = amrex::Random(engine)*dt; + UpdatePosition(ppos.x, ppos.y, ppos.z, pu.x, pu.y, pu.z, t_fract); + #if defined(WARPX_DIM_3D) p.pos(0) = ppos.x; p.pos(1) = ppos.y; p.pos(2) = ppos.z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) -#ifdef WARPX_DIM_RZ - pa[PIdx::theta][ip] = theta; -#endif - p.pos(0) = xb; +#elif defined(WARPX_DIM_RZ) + pa[PIdx::theta][ip] = std::atan2(ppos.y, ppos.x); + p.pos(0) = std::sqrt(ppos.x*ppos.x + ppos.y*ppos.y); + p.pos(1) = ppos.z; +#elif defined(WARPX_DIM_XZ) + p.pos(0) = ppos.x; p.pos(1) = ppos.z; #else p.pos(0) = ppos.z; From b43cac5d57d4d1b816577600fbd5d161da41b9f2 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 14 Nov 2022 11:46:13 -0800 Subject: [PATCH 0154/1346] CI: unbreak macOS (#3521) * CI: unbreak macOS (pydoc) * CI: unbreak macOS (python) --- .github/workflows/macos.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index a7a12968b49..22fce7b64d0 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -36,7 +36,7 @@ jobs: run: | brew --cache set +e - rm -rf /usr/local/bin/2to3 /usr/local/bin/2to3-3.11 /usr/local/bin/idle3.11 + rm -rf /usr/local/bin/2to3 /usr/local/bin/2to3-3.11 /usr/local/bin/idle3.11 /usr/local/bin/pydoc3.11 /usr/local/bin/python3.11 /usr/local/bin/python3.11-config brew unlink gcc brew update brew install ccache From 0c30574e08e271533b67dae20015ac15654951c0 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 14 Nov 2022 13:16:03 -0800 Subject: [PATCH 0155/1346] Centralize the multi fab allocation (#3484) * New Function to Allocate MultiFabs w/ Initial Value * Use New Function in PML Allocations * Added AllocInitMultiFab and multifab_map * Make initial_value argument of AllocInitMultiFab optional * More updates Added initialization for iMultiFabs Added imultifab_map Added initial values for several MFs Updated MF init for EB data * Small clean up of ncomps name * Added init value to rho_cp * Use AllocInitMultiFab in PML_RZ * RemakeMultiFab now calls AllocInitMultiFab * Fixed spelling of AddToMultiFabMap * Removed the initialization of charge_buf * Added commentation * Fix tag names for _aux MFs * Cleaned up code using temporaries in PML * Apply suggestions from code review * Added initial value to AliasInitMultiFab Co-authored-by: Edoardo Zoni Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Source/BoundaryConditions/PML.cpp | 128 ++++------ Source/BoundaryConditions/PML_RZ.cpp | 21 +- 
Source/Initialization/WarpXInitData.cpp | 43 ---- Source/Parallelization/WarpXRegrid.cpp | 3 +- Source/WarpX.H | 91 +++++++ Source/WarpX.cpp | 302 ++++++++++++++---------- 6 files changed, 326 insertions(+), 262 deletions(-) diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 5ba9ac2cd0b..c7b773123d2 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -661,45 +661,32 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri const int ncompe = (m_dive_cleaning) ? 3 : 2; const int ncompb = (m_divb_cleaning) ? 3 : 2; - pml_E_fp[0] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getEfield_fp(0,0).ixType().toIntVect() ), dm, ncompe, nge ); - pml_E_fp[1] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getEfield_fp(0,1).ixType().toIntVect() ), dm, ncompe, nge ); - pml_E_fp[2] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getEfield_fp(0,2).ixType().toIntVect() ), dm, ncompe, nge ); - - pml_B_fp[0] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getBfield_fp(0,0).ixType().toIntVect() ), dm, ncompb, ngb ); - pml_B_fp[1] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getBfield_fp(0,1).ixType().toIntVect() ), dm, ncompb, ngb ); - pml_B_fp[2] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getBfield_fp(0,2).ixType().toIntVect() ), dm, ncompb, ngb ); - - pml_E_fp[0]->setVal(0.0); - pml_E_fp[1]->setVal(0.0); - pml_E_fp[2]->setVal(0.0); - pml_B_fp[0]->setVal(0.0); - pml_B_fp[1]->setVal(0.0); - pml_B_fp[2]->setVal(0.0); - - pml_j_fp[0] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getcurrent_fp(0,0).ixType().toIntVect() ), dm, 1, ngb ); - pml_j_fp[1] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getcurrent_fp(0,1).ixType().toIntVect() ), dm, 1, ngb ); - pml_j_fp[2] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getcurrent_fp(0,2).ixType().toIntVect() ), dm, 1, ngb ); - - pml_j_fp[0]->setVal(0.0); - pml_j_fp[1]->setVal(0.0); - pml_j_fp[2]->setVal(0.0); + const amrex::BoxArray ba_Ex = amrex::convert(ba, WarpX::GetInstance().getEfield_fp(0,0).ixType().toIntVect()); + const amrex::BoxArray ba_Ey = amrex::convert(ba, WarpX::GetInstance().getEfield_fp(0,1).ixType().toIntVect()); + const amrex::BoxArray ba_Ez = amrex::convert(ba, WarpX::GetInstance().getEfield_fp(0,2).ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_E_fp[0], ba_Ex, dm, ncompe, nge, "pml_E_fp[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_E_fp[1], ba_Ey, dm, ncompe, nge, "pml_E_fp[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_E_fp[2], ba_Ez, dm, ncompe, nge, "pml_E_fp[z]", 0.0_rt); + + const amrex::BoxArray ba_Bx = amrex::convert(ba, WarpX::GetInstance().getBfield_fp(0,0).ixType().toIntVect()); + const amrex::BoxArray ba_By = amrex::convert(ba, WarpX::GetInstance().getBfield_fp(0,1).ixType().toIntVect()); + const amrex::BoxArray ba_Bz = amrex::convert(ba, WarpX::GetInstance().getBfield_fp(0,2).ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_B_fp[0], ba_Bx, dm, ncompb, ngb, "pml_B_fp[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_B_fp[1], ba_By, dm, ncompb, ngb, "pml_B_fp[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_B_fp[2], ba_Bz, dm, ncompb, ngb, "pml_B_fp[z]", 0.0_rt); + + const amrex::BoxArray ba_jx = amrex::convert(ba, WarpX::GetInstance().getcurrent_fp(0,0).ixType().toIntVect()); + const amrex::BoxArray ba_jy = amrex::convert(ba, WarpX::GetInstance().getcurrent_fp(0,1).ixType().toIntVect()); + const 
amrex::BoxArray ba_jz = amrex::convert(ba, WarpX::GetInstance().getcurrent_fp(0,2).ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_j_fp[0], ba_jx, dm, 1, ngb, "pml_j_fp[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_j_fp[1], ba_jy, dm, 1, ngb, "pml_j_fp[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_j_fp[2], ba_jz, dm, 1, ngb, "pml_j_fp[z]", 0.0_rt); #ifdef AMREX_USE_EB - pml_edge_lengths[0] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getEfield_fp(0,0).ixType().toIntVect() ), dm, WarpX::ncomps, max_guard_EB ); - pml_edge_lengths[1] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getEfield_fp(0,1).ixType().toIntVect() ), dm, WarpX::ncomps, max_guard_EB ); - pml_edge_lengths[2] = std::make_unique(amrex::convert( ba, - WarpX::GetInstance().getEfield_fp(0,2).ixType().toIntVect() ), dm, WarpX::ncomps, max_guard_EB ); + const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB); + WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, "pml_edge_lengths[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, "pml_edge_lengths[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, "pml_edge_lengths[z]", 0.0_rt); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || @@ -717,9 +704,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri if (m_dive_cleaning) { - const amrex::IntVect& F_nodal_flag = amrex::IntVect::TheNodeVector(); - pml_F_fp = std::make_unique(amrex::convert(ba, F_nodal_flag), dm, 3, ngf); - pml_F_fp->setVal(0.0); + const amrex::BoxArray ba_F_nodal = amrex::convert(ba, amrex::IntVect::TheNodeVector()); + WarpX::AllocInitMultiFab(pml_F_fp, ba_F_nodal, dm, 3, ngf, "pml_F_fp", 0.0_rt); } if (m_divb_cleaning) @@ -727,8 +713,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri // TODO Shall we define a separate guard cells parameter ngG? const amrex::IntVect& G_nodal_flag = (do_nodal) ? amrex::IntVect::TheNodeVector() : amrex::IntVect::TheCellVector(); - pml_G_fp = std::make_unique(amrex::convert(ba, G_nodal_flag), dm, 3, ngf); - pml_G_fp->setVal(0.0); + const amrex::BoxArray ba_G_nodal = amrex::convert(ba, G_nodal_flag); + WarpX::AllocInitMultiFab(pml_G_fp, ba_G_nodal, dm, 3, ngf, "pml_G_fp", 0.0_rt); } Box single_domain_box = is_single_box_domain ? 
domain0 : Box(); @@ -806,32 +792,24 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri cdm.define(cba); } - pml_E_cp[0] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getEfield_cp(1,0).ixType().toIntVect() ), cdm, ncompe, nge ); - pml_E_cp[1] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getEfield_cp(1,1).ixType().toIntVect() ), cdm, ncompe, nge ); - pml_E_cp[2] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getEfield_cp(1,2).ixType().toIntVect() ), cdm, ncompe, nge ); - - pml_B_cp[0] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getBfield_cp(1,0).ixType().toIntVect() ), cdm, ncompb, ngb ); - pml_B_cp[1] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getBfield_cp(1,1).ixType().toIntVect() ), cdm, ncompb, ngb ); - pml_B_cp[2] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getBfield_cp(1,2).ixType().toIntVect() ), cdm, ncompb, ngb ); - - pml_E_cp[0]->setVal(0.0); - pml_E_cp[1]->setVal(0.0); - pml_E_cp[2]->setVal(0.0); - pml_B_cp[0]->setVal(0.0); - pml_B_cp[1]->setVal(0.0); - pml_B_cp[2]->setVal(0.0); + const amrex::BoxArray cba_Ex = amrex::convert(cba, WarpX::GetInstance().getEfield_cp(1,0).ixType().toIntVect()); + const amrex::BoxArray cba_Ey = amrex::convert(cba, WarpX::GetInstance().getEfield_cp(1,1).ixType().toIntVect()); + const amrex::BoxArray cba_Ez = amrex::convert(cba, WarpX::GetInstance().getEfield_cp(1,2).ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_E_cp[0], cba_Ex, cdm, ncompe, nge, "pml_E_cp[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_E_cp[1], cba_Ey, cdm, ncompe, nge, "pml_E_cp[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_E_cp[2], cba_Ez, cdm, ncompe, nge, "pml_E_cp[z]", 0.0_rt); + + const amrex::BoxArray cba_Bx = amrex::convert(cba, WarpX::GetInstance().getBfield_cp(1,0).ixType().toIntVect()); + const amrex::BoxArray cba_By = amrex::convert(cba, WarpX::GetInstance().getBfield_cp(1,1).ixType().toIntVect()); + const amrex::BoxArray cba_Bz = amrex::convert(cba, WarpX::GetInstance().getBfield_cp(1,2).ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_B_cp[0], cba_Bx, cdm, ncompb, ngb, "pml_B_cp[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_B_cp[1], cba_By, cdm, ncompb, ngb, "pml_B_cp[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_B_cp[2], cba_Bz, cdm, ncompb, ngb, "pml_B_cp[z]", 0.0_rt); if (m_dive_cleaning) { - const amrex::IntVect& F_nodal_flag = amrex::IntVect::TheNodeVector(); - pml_F_cp = std::make_unique(amrex::convert(cba, F_nodal_flag), cdm, 3, ngf); - pml_F_cp->setVal(0.0); + const amrex::BoxArray cba_F_nodal = amrex::convert(cba, amrex::IntVect::TheNodeVector()); + WarpX::AllocInitMultiFab(pml_F_cp, cba_F_nodal, cdm, 3, ngf, "pml_F_cp", 0.0_rt); } if (m_divb_cleaning) @@ -839,20 +817,16 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri // TODO Shall we define a separate guard cells parameter ngG? const amrex::IntVect& G_nodal_flag = (do_nodal) ? 
amrex::IntVect::TheNodeVector() : amrex::IntVect::TheCellVector(); - pml_G_cp = std::make_unique(amrex::convert(cba, G_nodal_flag), cdm, 3, ngf); - pml_G_cp->setVal(0.0); + const amrex::BoxArray cba_G_nodal = amrex::convert(cba, G_nodal_flag); + WarpX::AllocInitMultiFab( pml_G_cp, cba_G_nodal, cdm, 3, ngf, "pml_G_cp", 0.0_rt); } - pml_j_cp[0] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getcurrent_cp(1,0).ixType().toIntVect() ), cdm, 1, ngb ); - pml_j_cp[1] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getcurrent_cp(1,1).ixType().toIntVect() ), cdm, 1, ngb ); - pml_j_cp[2] = std::make_unique(amrex::convert( cba, - WarpX::GetInstance().getcurrent_cp(1,2).ixType().toIntVect() ), cdm, 1, ngb ); - - pml_j_cp[0]->setVal(0.0); - pml_j_cp[1]->setVal(0.0); - pml_j_cp[2]->setVal(0.0); + const amrex::BoxArray cba_jx = amrex::convert(cba, WarpX::GetInstance().getcurrent_cp(1,0).ixType().toIntVect()); + const amrex::BoxArray cba_jy = amrex::convert(cba, WarpX::GetInstance().getcurrent_cp(1,1).ixType().toIntVect()); + const amrex::BoxArray cba_jz = amrex::convert(cba, WarpX::GetInstance().getcurrent_cp(1,2).ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_j_cp[0], cba_jx, cdm, 1, ngb, "pml_j_cp[x]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_j_cp[1], cba_jy, cdm, 1, ngb, "pml_j_cp[y]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_j_cp[2], cba_jz, cdm, 1, ngb, "pml_j_cp[z]", 0.0_rt); single_domain_box = is_single_box_domain ? cdomain : Box(); sigba_cp = std::make_unique(cba, cdm, grid_cba_reduced, cgeom->CellSize(), diff --git a/Source/BoundaryConditions/PML_RZ.cpp b/Source/BoundaryConditions/PML_RZ.cpp index a6ebc21181d..17e9883c1b9 100644 --- a/Source/BoundaryConditions/PML_RZ.cpp +++ b/Source/BoundaryConditions/PML_RZ.cpp @@ -43,22 +43,17 @@ PML_RZ::PML_RZ (const int lev, const amrex::BoxArray& grid_ba, const amrex::Dist const amrex::MultiFab & Er_fp = WarpX::GetInstance().getEfield_fp(lev,0); const amrex::MultiFab & Et_fp = WarpX::GetInstance().getEfield_fp(lev,1); - pml_E_fp[0] = std::make_unique( amrex::convert( grid_ba, Er_fp.ixType().toIntVect() ), grid_dm, - Er_fp.nComp(), Er_fp.nGrow() ); - pml_E_fp[1] = std::make_unique( amrex::convert( grid_ba, Et_fp.ixType().toIntVect() ), grid_dm, - Et_fp.nComp(), Et_fp.nGrow() ); + const amrex::BoxArray ba_Er = amrex::convert(grid_ba, Er_fp.ixType().toIntVect()); + const amrex::BoxArray ba_Et = amrex::convert(grid_ba, Et_fp.ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_E_fp[0], ba_Er, grid_dm, Er_fp.nComp(), Er_fp.nGrowVect(), "pml_E_fp[0]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_E_fp[1], ba_Et, grid_dm, Et_fp.nComp(), Et_fp.nGrowVect(), "pml_E_fp[1]", 0.0_rt); const amrex::MultiFab & Br_fp = WarpX::GetInstance().getBfield_fp(lev,0); const amrex::MultiFab & Bt_fp = WarpX::GetInstance().getBfield_fp(lev,1); - pml_B_fp[0] = std::make_unique( amrex::convert( grid_ba, Br_fp.ixType().toIntVect() ), grid_dm, - Br_fp.nComp(), Br_fp.nGrow() ); - pml_B_fp[1] = std::make_unique( amrex::convert( grid_ba, Bt_fp.ixType().toIntVect() ), grid_dm, - Bt_fp.nComp(), Bt_fp.nGrow() ); - - pml_E_fp[0]->setVal(0.0); - pml_E_fp[1]->setVal(0.0); - pml_B_fp[0]->setVal(0.0); - pml_B_fp[1]->setVal(0.0); + const amrex::BoxArray ba_Br = amrex::convert(grid_ba, Br_fp.ixType().toIntVect()); + const amrex::BoxArray ba_Bt = amrex::convert(grid_ba, Bt_fp.ixType().toIntVect()); + WarpX::AllocInitMultiFab(pml_B_fp[0], ba_Br, grid_dm, Br_fp.nComp(), Br_fp.nGrowVect(), "pml_B_fp[0]", 0.0_rt); + WarpX::AllocInitMultiFab(pml_B_fp[1], ba_Bt, grid_dm, 
Bt_fp.nComp(), Bt_fp.nGrowVect(), "pml_B_fp[1]", 0.0_rt); } diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index ab421f10658..4f6522d8fba 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -695,25 +695,6 @@ WarpX::InitLevelData (int lev, Real /*time*/) pp_psatd.query("do_time_averaging", fft_do_time_averaging ); for (int i = 0; i < 3; ++i) { - current_fp[lev][i]->setVal(0.0); - if (lev > 0) - current_cp[lev][i]->setVal(0.0); - - // Initialize aux MultiFabs on level 0 - if (lev == 0) { - Bfield_aux[lev][i]->setVal(0.0); - Efield_aux[lev][i]->setVal(0.0); - } - - if (WarpX::do_current_centering) - { - current_fp_nodal[lev][i]->setVal(0.0); - } - - if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) - { - current_fp_vay[lev][i]->setVal(0.0); - } if (B_ext_grid_s == "constant" || B_ext_grid_s == "default") { Bfield_fp[lev][i]->setVal(B_external_grid[i]); @@ -884,30 +865,6 @@ WarpX::InitLevelData (int lev, Real /*time*/) } } - if (F_fp[lev]) { - F_fp[lev]->setVal(0.0); - } - - if (G_fp[lev]) { - G_fp[lev]->setVal(0.0); - } - - if (rho_fp[lev]) { - rho_fp[lev]->setVal(0.0); - } - - if (F_cp[lev]) { - F_cp[lev]->setVal(0.0); - } - - if (G_cp[lev]) { - G_cp[lev]->setVal(0.0); - } - - if (rho_cp[lev]) { - rho_cp[lev]->setVal(0.0); - } - if (costs[lev]) { const auto iarr = costs[lev]->IndexArray(); for (int i : iarr) { diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 99885f6193a..db91f2d7878 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -151,7 +151,8 @@ RemakeMultiFab (std::unique_ptr& mf, const DistributionMapping& dm { if (mf == nullptr) return; const IntVect& ng = mf->nGrowVect(); - auto pmf = std::make_unique(mf->boxArray(), dm, mf->nComp(), ng); + std::unique_ptr pmf; + WarpX::AllocInitMultiFab(pmf, mf->boxArray(), dm, mf->nComp(), ng, mf->tags()[0]); if (redistribute) pmf->Redistribute(*mf, 0, 0, mf->nComp(), ng); mf = std::move(pmf); } diff --git a/Source/WarpX.H b/Source/WarpX.H index ecedba62b29..d33f320f96e 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -64,6 +64,7 @@ #include #include #include +#include enum struct PatchType : int { @@ -315,6 +316,95 @@ public: // Global rho nodal flag to know about rho index type when rho MultiFab is not allocated amrex::IntVect m_rho_nodal_flag; + /** + * \brief + * Allocate and optionally initialize the MultiFab. This also adds the MultiFab + * to the map of MultiFabs (used to ease the access to MultiFabs from the Python + * interface + * + * \param mf[out] The MultiFab unique pointer to be allocated + * \param ba[in] The BoxArray describing the MultiFab + * \param dm[in] The DistributionMapping describing the MultiFab + * \param ncomp[in] The number of components in the MultiFab + * \param ngrow[in] The number of guard cells in the MultiFab + * \param name[in] The name of the MultiFab to use in the map + * \param initial_value[in] The optional initial value + */ + static void AllocInitMultiFab ( + std::unique_ptr& mf, + const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm, + const int ncomp, + const amrex::IntVect& ngrow, + const std::string name, + std::optional initial_value = {}); + + /** + * \brief + * Allocate and optionally initialize the iMultiFab. 
This also adds the iMultiFab + * to the map of MultiFabs (used to ease the access to MultiFabs from the Python + * interface + * + * \param mf[out] The iMultiFab unique pointer to be allocated + * \param ba[in] The BoxArray describing the iMultiFab + * \param dm[in] The DistributionMapping describing the iMultiFab + * \param ncomp[in] The number of components in the iMultiFab + * \param ngrow[in] The number of guard cells in the iMultiFab + * \param name[in] The name of the iMultiFab to use in the map + * \param initial_value[in] The optional initial value + */ + static void AllocInitMultiFab ( + std::unique_ptr& mf, + const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm, + const int ncomp, + const amrex::IntVect& ngrow, + const std::string name, + std::optional initial_value = {}); + + /** + * \brief + * Create an alias of a MultiFab, adding the alias to the MultiFab map + * \param mf[out] The MultiFab to create + * \param mf_to_alias[in] The MultiFab to alias + * \param scomp[in] The starting component to be aliased + * \param ncomp[in] The number of components to alias + * \param name[in] The name of the MultiFab to use in the map + */ + static void AliasInitMultiFab ( + std::unique_ptr& mf, + const amrex::MultiFab& mf_to_alias, + const int scomp, + const int ncomp, + const std::string name, + std::optional initial_value); + + // Maps of all of the MultiFabs and iMultiFabs used (this can include MFs from other classes) + // This is a convenience for the Python interface, allowing all MultiFabs + // to be easily referenced from Python. + static std::map multifab_map; + static std::map imultifab_map; + + /** + * \brief + * Add the MultiFab to the map of MultiFabs + * \param name The name of the MultiFab use to reference the MultiFab + * \parame mf The MultiFab to be added to the map (via a pointer to it) + */ + static void AddToMultiFabMap(const std::string name, const std::unique_ptr& mf) { + multifab_map[name] = mf.get(); + } + + /** + * \brief + * Add the iMultiFab to the map of MultiFabs + * \param name The name of the iMultiFab use to reference the iMultiFab + * \parame mf The iMultiFab to be added to the map (via a pointer to it) + */ + static void AddToMultiFabMap(const std::string name, const std::unique_ptr& mf) { + imultifab_map[name] = mf.get(); + } + std::array get_array_Bfield_aux (const int lev) const { return { @@ -1098,6 +1188,7 @@ private: const amrex::IntVect& ngEB, amrex::IntVect& ngJ, const amrex::IntVect& ngRho, const amrex::IntVect& ngF, const amrex::IntVect& ngG, const bool aux_is_nodal); + #ifdef WARPX_USE_PSATD # ifdef WARPX_DIM_RZ void AllocLevelSpectralSolverRZ (amrex::Vector>& spectral_solver, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 9ee07811d5a..4b5c773cfe8 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -181,6 +181,9 @@ bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; bool WarpX::safe_guard_cells = 0; +std::map WarpX::multifab_map; +std::map WarpX::imultifab_map; + IntVect WarpX::filter_npass_each_dir(1); int WarpX::n_field_gather_buffer = -1; @@ -1851,7 +1854,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // set human-readable tag for each MultiFab auto const tag = [lev]( std::string tagname ) { tagname.append("[l=").append(std::to_string(lev)).append("]"); - return MFInfo().SetTag(std::move(tagname)); + return tagname; }; // @@ -1859,92 +1862,86 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // std::array dx = CellSize(lev); - 
Bfield_fp[lev][0] = std::make_unique(amrex::convert(ba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_fp[x]")); - Bfield_fp[lev][1] = std::make_unique(amrex::convert(ba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_fp[y]")); - Bfield_fp[lev][2] = std::make_unique(amrex::convert(ba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_fp[z]")); + AllocInitMultiFab(Bfield_fp[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, tag("Bfield_fp[x]")); + AllocInitMultiFab(Bfield_fp[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, tag("Bfield_fp[y]")); + AllocInitMultiFab(Bfield_fp[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, tag("Bfield_fp[z]")); - Efield_fp[lev][0] = std::make_unique(amrex::convert(ba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_fp[x]")); - Efield_fp[lev][1] = std::make_unique(amrex::convert(ba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_fp[y]")); - Efield_fp[lev][2] = std::make_unique(amrex::convert(ba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_fp[z]")); + AllocInitMultiFab(Efield_fp[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, tag("Efield_fp[x]")); + AllocInitMultiFab(Efield_fp[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, tag("Efield_fp[y]")); + AllocInitMultiFab(Efield_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, tag("Efield_fp[z]")); - current_fp[lev][0] = std::make_unique(amrex::convert(ba,jx_nodal_flag),dm,ncomps,ngJ,tag("current_fp[x]")); - current_fp[lev][1] = std::make_unique(amrex::convert(ba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_fp[y]")); - current_fp[lev][2] = std::make_unique(amrex::convert(ba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_fp[z]")); + AllocInitMultiFab(current_fp[lev][0], amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, tag("current_fp[x]"), 0.0_rt); + AllocInitMultiFab(current_fp[lev][1], amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, tag("current_fp[y]"), 0.0_rt); + AllocInitMultiFab(current_fp[lev][2], amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, tag("current_fp[z]"), 0.0_rt); if (do_current_centering) { amrex::BoxArray const& nodal_ba = amrex::convert(ba, amrex::IntVect::TheNodeVector()); - current_fp_nodal[lev][0] = std::make_unique(nodal_ba, dm, ncomps, ngJ); - current_fp_nodal[lev][1] = std::make_unique(nodal_ba, dm, ncomps, ngJ); - current_fp_nodal[lev][2] = std::make_unique(nodal_ba, dm, ncomps, ngJ); + AllocInitMultiFab(current_fp_nodal[lev][0], nodal_ba, dm, ncomps, ngJ, tag("current_fp_nodal[x]"), 0.0_rt); + AllocInitMultiFab(current_fp_nodal[lev][1], nodal_ba, dm, ncomps, ngJ, tag("current_fp_nodal[y]"), 0.0_rt); + AllocInitMultiFab(current_fp_nodal[lev][2], nodal_ba, dm, ncomps, ngJ, tag("current_fp_nodal[z]"), 0.0_rt); } if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { - current_fp_vay[lev][0] = std::make_unique(amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngJ, tag("current_fp_vay[x]")); - current_fp_vay[lev][1] = std::make_unique(amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngJ, tag("current_fp_vay[y]")); - current_fp_vay[lev][2] = std::make_unique(amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngJ, tag("current_fp_vay[z]")); + AllocInitMultiFab(current_fp_vay[lev][0], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, tag("current_fp_vay[x]"), 0.0_rt); + AllocInitMultiFab(current_fp_vay[lev][1], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, tag("current_fp_vay[y]"), 0.0_rt); + AllocInitMultiFab(current_fp_vay[lev][2], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, tag("current_fp_vay[z]"), 0.0_rt); } if 
(fft_do_time_averaging) { - Bfield_avg_fp[lev][0] = std::make_unique(amrex::convert(ba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_avg_fp[x]")); - Bfield_avg_fp[lev][1] = std::make_unique(amrex::convert(ba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_avg_fp[y]")); - Bfield_avg_fp[lev][2] = std::make_unique(amrex::convert(ba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_avg_fp[z]")); + AllocInitMultiFab(Bfield_avg_fp[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, tag("Bfield_avg_fp[x]")); + AllocInitMultiFab(Bfield_avg_fp[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, tag("Bfield_avg_fp[y]")); + AllocInitMultiFab(Bfield_avg_fp[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, tag("Bfield_avg_fp[z]")); - Efield_avg_fp[lev][0] = std::make_unique(amrex::convert(ba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_avg_fp[x]")); - Efield_avg_fp[lev][1] = std::make_unique(amrex::convert(ba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_avg_fp[y]")); - Efield_avg_fp[lev][2] = std::make_unique(amrex::convert(ba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_avg_fp[z]")); + AllocInitMultiFab(Efield_avg_fp[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, tag("Efield_avg_fp[x]")); + AllocInitMultiFab(Efield_avg_fp[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, tag("Efield_avg_fp[y]")); + AllocInitMultiFab(Efield_avg_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, tag("Efield_avg_fp[z]")); } #ifdef AMREX_USE_EB constexpr int nc_ls = 1; - constexpr int ng_ls = 2; - m_distance_to_eb[lev] = std::make_unique(amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, tag("m_distance_to_eb")); + amrex::IntVect ng_ls(2); + AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, tag("m_distance_to_eb")); // EB info are needed only at the finest level if (lev == maxLevel()) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - m_edge_lengths[lev][0] = std::make_unique(amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[x]")); - m_edge_lengths[lev][1] = std::make_unique(amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[y]")); - m_edge_lengths[lev][2] = std::make_unique(amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[z]")); - m_face_areas[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[x]")); - m_face_areas[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[y]")); - m_face_areas[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[z]")); + AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[x]")); + AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[y]")); + AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[z]")); + AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[x]")); + AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, 
tag("m_face_areas[y]")); + AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[z]")); } if(WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - m_edge_lengths[lev][0] = std::make_unique(amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[x]")); - m_edge_lengths[lev][1] = std::make_unique(amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[y]")); - m_edge_lengths[lev][2] = std::make_unique(amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[z]")); - m_face_areas[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[x]")); - m_face_areas[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[y]")); - m_face_areas[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[z]")); - m_flag_info_face[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_info_face[x]")); - m_flag_info_face[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_info_face[y]")); - m_flag_info_face[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_info_face[z]")); - m_flag_ext_face[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_ext_face[x]")); - m_flag_ext_face[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_ext_face[y]")); - m_flag_ext_face[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_ext_face[z]")); - m_area_mod[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_area_mod[x]")); - m_area_mod[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_area_mod[y]")); - m_area_mod[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_area_mod[z]")); + AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[x]")); + AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[y]")); + AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_edge_lengths[z]")); + AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[x]")); + AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[y]")); + AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_face_areas[z]")); + AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_info_face[x]")); + AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, 
tag("m_flag_info_face[y]")); + AllocInitMultiFab(m_flag_info_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_info_face[z]")); + AllocInitMultiFab(m_flag_ext_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_ext_face[x]")); + AllocInitMultiFab(m_flag_ext_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_ext_face[y]")); + AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_flag_ext_face[z]")); + AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_area_mod[x]")); + AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_area_mod[y]")); + AllocInitMultiFab(m_area_mod[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("m_area_mod[z]")); m_borrowing[lev][0] = std::make_unique>(amrex::convert(ba, Bx_nodal_flag), dm); m_borrowing[lev][1] = std::make_unique>(amrex::convert(ba, By_nodal_flag), dm); m_borrowing[lev][2] = std::make_unique>(amrex::convert(ba, Bz_nodal_flag), dm); - Venl[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("Venl[x]")); - Venl[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("Venl[y]")); - Venl[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("Venl[z]")); + AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("Venl[x]")); + AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("Venl[y]")); + AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("Venl[z]")); - ECTRhofield[lev][0] = std::make_unique(amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("ECTRhofield[x]")); - ECTRhofield[lev][1] = std::make_unique(amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("ECTRhofield[y]")); - ECTRhofield[lev][2] = std::make_unique(amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("ECTRhofield[z]")); - ECTRhofield[lev][0]->setVal(0.); - ECTRhofield[lev][1]->setVal(0.); - ECTRhofield[lev][2]->setVal(0.); + AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("ECTRhofield[x]"), 0.0_rt); + AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("ECTRhofield[y]"), 0.0_rt); + AllocInitMultiFab(ECTRhofield[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, tag("ECTRhofield[z]"), 0.0_rt); } } #endif @@ -1957,31 +1954,30 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm { // For the multi-J algorithm we can allocate only one rho component (no distinction between old and new) const int rho_ncomps = (WarpX::do_multi_J) ? 
ncomps : 2*ncomps; - rho_fp[lev] = std::make_unique(amrex::convert(ba,rho_nodal_flag),dm,rho_ncomps,ngRho,tag("rho_fp")); + AllocInitMultiFab(rho_fp[lev], amrex::convert(ba, rho_nodal_flag), dm, rho_ncomps, ngRho, tag("rho_fp"), 0.0_rt); } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) { IntVect ngPhi = IntVect( AMREX_D_DECL(1,1,1) ); - phi_fp[lev] = std::make_unique(amrex::convert(ba,phi_nodal_flag),dm,ncomps,ngPhi,tag("phi_fp")); - phi_fp[lev]->setVal(0.); + AllocInitMultiFab(phi_fp[lev], amrex::convert(ba, phi_nodal_flag), dm, ncomps, ngPhi, tag("phi_fp"), 0.0_rt); } if (do_subcycling == 1 && lev == 0) { - current_store[lev][0] = std::make_unique(amrex::convert(ba,jx_nodal_flag),dm,ncomps,ngJ,tag("current_store[x]")); - current_store[lev][1] = std::make_unique(amrex::convert(ba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_store[y]")); - current_store[lev][2] = std::make_unique(amrex::convert(ba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_store[z]")); + AllocInitMultiFab(current_store[lev][0], amrex::convert(ba,jx_nodal_flag),dm,ncomps,ngJ,tag("current_store[x]")); + AllocInitMultiFab(current_store[lev][1], amrex::convert(ba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_store[y]")); + AllocInitMultiFab(current_store[lev][2], amrex::convert(ba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_store[z]")); } if (do_dive_cleaning) { - F_fp[lev] = std::make_unique(amrex::convert(ba, F_nodal_flag), dm, ncomps, ngF, tag("F_fp")); + AllocInitMultiFab(F_fp[lev], amrex::convert(ba, F_nodal_flag), dm, ncomps, ngF, tag("F_fp"), 0.0_rt); } if (do_divb_cleaning) { - G_fp[lev] = std::make_unique(amrex::convert(ba, G_nodal_flag), dm, ncomps, ngG, tag("G_fp")); + AllocInitMultiFab(G_fp[lev], amrex::convert(ba, G_nodal_flag), dm, ncomps, ngG, tag("G_fp"), 0.0_rt); } if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) @@ -2050,40 +2046,40 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // Create aux multifabs on Nodal Box Array BoxArray const nba = amrex::convert(ba,IntVect::TheNodeVector()); - Bfield_aux[lev][0] = std::make_unique(nba,dm,ncomps,ngEB,tag("Bfield_aux[x]")); - Bfield_aux[lev][1] = std::make_unique(nba,dm,ncomps,ngEB,tag("Bfield_aux[y]")); - Bfield_aux[lev][2] = std::make_unique(nba,dm,ncomps,ngEB,tag("Bfield_aux[z]")); + AllocInitMultiFab(Bfield_aux[lev][0], nba, dm, ncomps, ngEB, tag("Bfield_aux[x]"), 0.0_rt); + AllocInitMultiFab(Bfield_aux[lev][1], nba, dm, ncomps, ngEB, tag("Bfield_aux[y]"), 0.0_rt); + AllocInitMultiFab(Bfield_aux[lev][2], nba, dm, ncomps, ngEB, tag("Bfield_aux[z]"), 0.0_rt); - Efield_aux[lev][0] = std::make_unique(nba,dm,ncomps,ngEB,tag("Efield_aux[x]")); - Efield_aux[lev][1] = std::make_unique(nba,dm,ncomps,ngEB,tag("Efield_aux[y]")); - Efield_aux[lev][2] = std::make_unique(nba,dm,ncomps,ngEB,tag("Efield_aux[z]")); + AllocInitMultiFab(Efield_aux[lev][0], nba, dm, ncomps, ngEB, tag("Efield_aux[x]"), 0.0_rt); + AllocInitMultiFab(Efield_aux[lev][1], nba, dm, ncomps, ngEB, tag("Efield_aux[y]"), 0.0_rt); + AllocInitMultiFab(Efield_aux[lev][2], nba, dm, ncomps, ngEB, tag("Efield_aux[z]"), 0.0_rt); } else if (lev == 0) { if (!WarpX::fft_do_time_averaging) { // In this case, the aux grid is simply an alias of the fp grid - Efield_aux[lev][0] = std::make_unique(*Efield_fp[lev][0], amrex::make_alias, 0, ncomps); - Efield_aux[lev][1] = std::make_unique(*Efield_fp[lev][1], amrex::make_alias, 0, ncomps); - Efield_aux[lev][2] = std::make_unique(*Efield_fp[lev][2], amrex::make_alias, 0, ncomps); + 
AliasInitMultiFab(Efield_aux[lev][0], *Efield_fp[lev][0], 0, ncomps, tag("Efield_aux[x]"), 0.0_rt); + AliasInitMultiFab(Efield_aux[lev][1], *Efield_fp[lev][1], 0, ncomps, tag("Efield_aux[y]"), 0.0_rt); + AliasInitMultiFab(Efield_aux[lev][2], *Efield_fp[lev][2], 0, ncomps, tag("Efield_aux[z]"), 0.0_rt); - Bfield_aux[lev][0] = std::make_unique(*Bfield_fp[lev][0], amrex::make_alias, 0, ncomps); - Bfield_aux[lev][1] = std::make_unique(*Bfield_fp[lev][1], amrex::make_alias, 0, ncomps); - Bfield_aux[lev][2] = std::make_unique(*Bfield_fp[lev][2], amrex::make_alias, 0, ncomps); + AliasInitMultiFab(Bfield_aux[lev][0], *Bfield_fp[lev][0], 0, ncomps, tag("Bfield_aux[x]"), 0.0_rt); + AliasInitMultiFab(Bfield_aux[lev][1], *Bfield_fp[lev][1], 0, ncomps, tag("Bfield_aux[y]"), 0.0_rt); + AliasInitMultiFab(Bfield_aux[lev][2], *Bfield_fp[lev][2], 0, ncomps, tag("Bfield_aux[z]"), 0.0_rt); } else { - Efield_aux[lev][0] = std::make_unique(*Efield_avg_fp[lev][0], amrex::make_alias, 0, ncomps); - Efield_aux[lev][1] = std::make_unique(*Efield_avg_fp[lev][1], amrex::make_alias, 0, ncomps); - Efield_aux[lev][2] = std::make_unique(*Efield_avg_fp[lev][2], amrex::make_alias, 0, ncomps); + AliasInitMultiFab(Efield_aux[lev][0], *Efield_avg_fp[lev][0], 0, ncomps, tag("Efield_aux[x]"), 0.0_rt); + AliasInitMultiFab(Efield_aux[lev][1], *Efield_avg_fp[lev][1], 0, ncomps, tag("Efield_aux[y]"), 0.0_rt); + AliasInitMultiFab(Efield_aux[lev][2], *Efield_avg_fp[lev][2], 0, ncomps, tag("Efield_aux[z]"), 0.0_rt); - Bfield_aux[lev][0] = std::make_unique(*Bfield_avg_fp[lev][0], amrex::make_alias, 0, ncomps); - Bfield_aux[lev][1] = std::make_unique(*Bfield_avg_fp[lev][1], amrex::make_alias, 0, ncomps); - Bfield_aux[lev][2] = std::make_unique(*Bfield_avg_fp[lev][2], amrex::make_alias, 0, ncomps); + AliasInitMultiFab(Bfield_aux[lev][0], *Bfield_avg_fp[lev][0], 0, ncomps, tag("Bfield_aux[x]"), 0.0_rt); + AliasInitMultiFab(Bfield_aux[lev][1], *Bfield_avg_fp[lev][1], 0, ncomps, tag("Bfield_aux[y]"), 0.0_rt); + AliasInitMultiFab(Bfield_aux[lev][2], *Bfield_avg_fp[lev][2], 0, ncomps, tag("Bfield_aux[z]"), 0.0_rt); } } else { - Bfield_aux[lev][0] = std::make_unique(amrex::convert(ba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_aux[x]")); - Bfield_aux[lev][1] = std::make_unique(amrex::convert(ba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_aux[y]")); - Bfield_aux[lev][2] = std::make_unique(amrex::convert(ba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_aux[z]")); + AllocInitMultiFab(Bfield_aux[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, tag("Bfield_aux[x]")); + AllocInitMultiFab(Bfield_aux[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, tag("Bfield_aux[y]")); + AllocInitMultiFab(Bfield_aux[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, tag("Bfield_aux[z]")); - Efield_aux[lev][0] = std::make_unique(amrex::convert(ba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_aux[x]")); - Efield_aux[lev][1] = std::make_unique(amrex::convert(ba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_aux[y]")); - Efield_aux[lev][2] = std::make_unique(amrex::convert(ba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_aux[z]")); + AllocInitMultiFab(Efield_aux[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, tag("Efield_aux[x]")); + AllocInitMultiFab(Efield_aux[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, tag("Efield_aux[y]")); + AllocInitMultiFab(Efield_aux[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, tag("Efield_aux[z]")); } // @@ -2096,53 +2092,51 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, 
const DistributionMapping& dm std::array cdx = CellSize(lev-1); // Create the MultiFabs for B - Bfield_cp[lev][0] = std::make_unique(amrex::convert(cba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cp[x]")); - Bfield_cp[lev][1] = std::make_unique(amrex::convert(cba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cp[y]")); - Bfield_cp[lev][2] = std::make_unique(amrex::convert(cba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cp[z]")); + AllocInitMultiFab(Bfield_cp[lev][0], amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, tag("Bfield_cp[x]")); + AllocInitMultiFab(Bfield_cp[lev][1], amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, tag("Bfield_cp[y]")); + AllocInitMultiFab(Bfield_cp[lev][2], amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, tag("Bfield_cp[z]")); // Create the MultiFabs for E - Efield_cp[lev][0] = std::make_unique(amrex::convert(cba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_cp[x]")); - Efield_cp[lev][1] = std::make_unique(amrex::convert(cba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_cp[y]")); - Efield_cp[lev][2] = std::make_unique(amrex::convert(cba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_cp[z]")); + AllocInitMultiFab(Efield_cp[lev][0], amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, tag("Efield_cp[x]")); + AllocInitMultiFab(Efield_cp[lev][1], amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, tag("Efield_cp[y]")); + AllocInitMultiFab(Efield_cp[lev][2], amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, tag("Efield_cp[z]")); if (fft_do_time_averaging) { - Bfield_avg_cp[lev][0] = std::make_unique(amrex::convert(cba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_avg_cp[x]")); - Bfield_avg_cp[lev][1] = std::make_unique(amrex::convert(cba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_avg_cp[y]")); - Bfield_avg_cp[lev][2] = std::make_unique(amrex::convert(cba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_avg_cp[z]")); + AllocInitMultiFab(Bfield_avg_cp[lev][0], amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, tag("Bfield_avg_cp[x]")); + AllocInitMultiFab(Bfield_avg_cp[lev][1], amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, tag("Bfield_avg_cp[y]")); + AllocInitMultiFab(Bfield_avg_cp[lev][2], amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, tag("Bfield_avg_cp[z]")); - Efield_avg_cp[lev][0] = std::make_unique(amrex::convert(cba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_avg_cp[x]")); - Efield_avg_cp[lev][1] = std::make_unique(amrex::convert(cba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_avg_cp[y]")); - Efield_avg_cp[lev][2] = std::make_unique(amrex::convert(cba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_avg_cp[z]")); + AllocInitMultiFab(Efield_avg_cp[lev][0], amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, tag("Efield_avg_cp[x]")); + AllocInitMultiFab(Efield_avg_cp[lev][1], amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, tag("Efield_avg_cp[y]")); + AllocInitMultiFab(Efield_avg_cp[lev][2], amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, tag("Efield_avg_cp[z]")); } // Create the MultiFabs for the current - current_cp[lev][0] = std::make_unique(amrex::convert(cba,jx_nodal_flag),dm,ncomps,ngJ,tag("current_cp[x]")); - current_cp[lev][1] = std::make_unique(amrex::convert(cba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_cp[y]")); - current_cp[lev][2] = std::make_unique(amrex::convert(cba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_cp[z]")); + AllocInitMultiFab(current_cp[lev][0], amrex::convert(cba, jx_nodal_flag), dm, ncomps, ngJ, tag("current_cp[x]"), 0.0_rt); + AllocInitMultiFab(current_cp[lev][1], amrex::convert(cba, jy_nodal_flag), dm, ncomps, ngJ, 
tag("current_cp[y]"), 0.0_rt); + AllocInitMultiFab(current_cp[lev][2], amrex::convert(cba, jz_nodal_flag), dm, ncomps, ngJ, tag("current_cp[z]"), 0.0_rt); if (deposit_charge) { // For the multi-J algorithm we can allocate only one rho component (no distinction between old and new) const int rho_ncomps = (WarpX::do_multi_J) ? ncomps : 2*ncomps; - rho_cp[lev] = std::make_unique(amrex::convert(cba,rho_nodal_flag),dm,rho_ncomps,ngRho,tag("rho_cp")); + AllocInitMultiFab(rho_cp[lev], amrex::convert(cba, rho_nodal_flag), dm, rho_ncomps, ngRho, tag("rho_cp"), 0.0_rt); } if (do_dive_cleaning) { - F_cp[lev] = std::make_unique(amrex::convert(cba,IntVect::TheUnitVector()),dm,ncomps, ngF.max(),tag("F_cp")); + AllocInitMultiFab(F_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngF, tag("F_cp"), 0.0_rt); } if (do_divb_cleaning) { if (do_nodal) { - G_cp[lev] = std::make_unique(amrex::convert(cba, IntVect::TheUnitVector()), - dm, ncomps, ngG.max(), tag("G_cp")); + AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngG, tag("G_cp"), 0.0_rt); } else // do_nodal = 0 { - G_cp[lev] = std::make_unique(amrex::convert(cba, IntVect::TheZeroVector()), - dm, ncomps, ngG.max(), tag("G_cp")); + AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheZeroVector()), dm, ncomps, ngG, tag("G_cp"), 0.0_rt); } } @@ -2200,37 +2194,37 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm if (n_field_gather_buffer > 0 || mypc->nSpeciesGatherFromMainGrid() > 0) { if (aux_is_nodal) { BoxArray const& cnba = amrex::convert(cba,IntVect::TheNodeVector()); - Bfield_cax[lev][0] = std::make_unique(cnba,dm,ncomps,ngEB,tag("Bfield_cax[x]")); - Bfield_cax[lev][1] = std::make_unique(cnba,dm,ncomps,ngEB,tag("Bfield_cax[y]")); - Bfield_cax[lev][2] = std::make_unique(cnba,dm,ncomps,ngEB,tag("Bfield_cax[z]")); - Efield_cax[lev][0] = std::make_unique(cnba,dm,ncomps,ngEB,tag("Efield_cax[x]")); - Efield_cax[lev][1] = std::make_unique(cnba,dm,ncomps,ngEB,tag("Efield_cax[y]")); - Efield_cax[lev][2] = std::make_unique(cnba,dm,ncomps,ngEB,tag("Efield_cax[z]")); + AllocInitMultiFab(Bfield_cax[lev][0], cnba,dm,ncomps,ngEB,tag("Bfield_cax[x]")); + AllocInitMultiFab(Bfield_cax[lev][1], cnba,dm,ncomps,ngEB,tag("Bfield_cax[y]")); + AllocInitMultiFab(Bfield_cax[lev][2], cnba,dm,ncomps,ngEB,tag("Bfield_cax[z]")); + AllocInitMultiFab(Efield_cax[lev][0], cnba,dm,ncomps,ngEB,tag("Efield_cax[x]")); + AllocInitMultiFab(Efield_cax[lev][1], cnba,dm,ncomps,ngEB,tag("Efield_cax[y]")); + AllocInitMultiFab(Efield_cax[lev][2], cnba,dm,ncomps,ngEB,tag("Efield_cax[z]")); } else { // Create the MultiFabs for B - Bfield_cax[lev][0] = std::make_unique(amrex::convert(cba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cax[x]")); - Bfield_cax[lev][1] = std::make_unique(amrex::convert(cba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cax[y]")); - Bfield_cax[lev][2] = std::make_unique(amrex::convert(cba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cax[z]")); + AllocInitMultiFab(Bfield_cax[lev][0], amrex::convert(cba,Bx_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cax[x]")); + AllocInitMultiFab(Bfield_cax[lev][1], amrex::convert(cba,By_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cax[y]")); + AllocInitMultiFab(Bfield_cax[lev][2], amrex::convert(cba,Bz_nodal_flag),dm,ncomps,ngEB,tag("Bfield_cax[z]")); // Create the MultiFabs for E - Efield_cax[lev][0] = std::make_unique(amrex::convert(cba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_cax[x]")); - Efield_cax[lev][1] = 
std::make_unique(amrex::convert(cba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_cax[y]")); - Efield_cax[lev][2] = std::make_unique(amrex::convert(cba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_cax[z]")); + AllocInitMultiFab(Efield_cax[lev][0], amrex::convert(cba,Ex_nodal_flag),dm,ncomps,ngEB,tag("Efield_cax[x]")); + AllocInitMultiFab(Efield_cax[lev][1], amrex::convert(cba,Ey_nodal_flag),dm,ncomps,ngEB,tag("Efield_cax[y]")); + AllocInitMultiFab(Efield_cax[lev][2], amrex::convert(cba,Ez_nodal_flag),dm,ncomps,ngEB,tag("Efield_cax[z]")); } - gather_buffer_masks[lev] = std::make_unique(ba, dm, ncomps, 1 ); + AllocInitMultiFab(gather_buffer_masks[lev], ba, dm, ncomps, amrex::IntVect(1), tag("gather_buffer_masks")); // Gather buffer masks have 1 ghost cell, because of the fact // that particles may move by more than one cell when using subcycling. } if (n_current_deposition_buffer > 0) { - current_buf[lev][0] = std::make_unique(amrex::convert(cba,jx_nodal_flag),dm,ncomps,ngJ,tag("current_buf[x]")); - current_buf[lev][1] = std::make_unique(amrex::convert(cba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_buf[y]")); - current_buf[lev][2] = std::make_unique(amrex::convert(cba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_buf[z]")); + AllocInitMultiFab(current_buf[lev][0], amrex::convert(cba,jx_nodal_flag),dm,ncomps,ngJ,tag("current_buf[x]")); + AllocInitMultiFab(current_buf[lev][1], amrex::convert(cba,jy_nodal_flag),dm,ncomps,ngJ,tag("current_buf[y]")); + AllocInitMultiFab(current_buf[lev][2], amrex::convert(cba,jz_nodal_flag),dm,ncomps,ngJ,tag("current_buf[z]")); if (rho_cp[lev]) { - charge_buf[lev] = std::make_unique(amrex::convert(cba,rho_nodal_flag),dm,2*ncomps,ngRho,tag("charge_buf")); + AllocInitMultiFab(charge_buf[lev], amrex::convert(cba,rho_nodal_flag),dm,2*ncomps,ngRho,tag("charge_buf")); } - current_buffer_masks[lev] = std::make_unique(ba, dm, ncomps, 1); + AllocInitMultiFab(current_buffer_masks[lev], ba, dm, ncomps, amrex::IntVect(1), tag("current_buffer_masks")); // Current buffer masks have 1 ghost cell, because of the fact // that particles may move by more than one cell when using subcycling. 
 }
@@ -2819,3 +2813,55 @@ WarpX::isAnyBoundaryPML()
     }
     return false;
 }
+
+void
+WarpX::AllocInitMultiFab (
+    std::unique_ptr<amrex::MultiFab>& mf,
+    const amrex::BoxArray& ba,
+    const amrex::DistributionMapping& dm,
+    const int ncomp,
+    const amrex::IntVect& ngrow,
+    const std::string name,
+    std::optional<amrex::Real> initial_value)
+{
+    const auto tag = amrex::MFInfo().SetTag(std::move(name));
+    mf = std::make_unique<amrex::MultiFab>(ba, dm, ncomp, ngrow, tag);
+    if (initial_value) {
+        mf->setVal(*initial_value);
+    }
+    WarpX::AddToMultiFabMap(name, mf);
+}
+
+void
+WarpX::AllocInitMultiFab (
+    std::unique_ptr<amrex::iMultiFab>& mf,
+    const amrex::BoxArray& ba,
+    const amrex::DistributionMapping& dm,
+    const int ncomp,
+    const amrex::IntVect& ngrow,
+    const std::string name,
+    std::optional<int> initial_value)
+{
+    const auto tag = amrex::MFInfo().SetTag(std::move(name));
+    mf = std::make_unique<amrex::iMultiFab>(ba, dm, ncomp, ngrow, tag);
+    if (initial_value) {
+        mf->setVal(*initial_value);
+    }
+    WarpX::AddToMultiFabMap(name, mf);
+}
+
+void
+WarpX::AliasInitMultiFab (
+    std::unique_ptr<amrex::MultiFab>& mf,
+    const amrex::MultiFab& mf_to_alias,
+    const int scomp,
+    const int ncomp,
+    const std::string name,
+    std::optional<amrex::Real> initial_value)
+{
+    mf = std::make_unique<amrex::MultiFab>(mf_to_alias, amrex::make_alias, scomp, ncomp);
+    if (initial_value) {
+        mf->setVal(*initial_value);
+    }
+    WarpX::AddToMultiFabMap(name, mf);
+}

From 8ef7da8c2c8bba7cfeab2a92506170bd14033391 Mon Sep 17 00:00:00 2001
From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com>
Date: Mon, 14 Nov 2022 14:41:24 -0800
Subject: [PATCH 0156/1346] Abort when using Vay Deposition with FDTD (#3515)

Vay deposition is currently implemented only for PSATD.
---
 Docs/source/usage/parameters.rst | 1 +
 Source/Evolve/WarpXEvolve.cpp    | 1 +
 Source/WarpX.cpp                 | 6 ++++++
 3 files changed, 8 insertions(+)

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 21f7e7db4ba..620e66d81a1 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -1553,6 +1553,7 @@ Numerics and algorithms
     Available options are: ``direct``, ``esirkepov``, and ``vay``. The default choice
     is ``esirkepov`` for FDTD maxwell solvers and ``direct`` for standard or
     Galilean PSATD solver (that is, with ``algo.maxwell_solver = psatd``).
+    Note that ``vay`` is only available for ``algo.maxwell_solver = psatd``.
 
     1. ``direct``
 
diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp
index 5c3883007bd..16110fd2250 100644
--- a/Source/Evolve/WarpXEvolve.cpp
+++ b/Source/Evolve/WarpXEvolve.cpp
@@ -919,6 +919,7 @@ WarpX::PushParticlesandDepose (int lev, amrex::Real cur_time, DtType a_dt_type,
     }
     else if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay)
     {
+        // Note that Vay deposition is supported only for PSATD and the code currently aborts otherwise
         current_x = current_fp_vay[lev][0].get();
         current_y = current_fp_vay[lev][1].get();
         current_z = current_fp_vay[lev][2].get();
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 4b5c773cfe8..5793874334a 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -957,6 +957,12 @@ WarpX::ReadParameters ()
         maxLevel() <= 0,
         "Vay deposition not implemented with mesh refinement");
 
+    if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) {
+        WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
+            electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD,
+            "Vay deposition is implemented only for PSATD");
+    }
+
     field_gathering_algo = GetAlgorithmInteger(pp_algo, "field_gathering");
     if (field_gathering_algo == GatheringAlgo::MomentumConserving) {
         // Use same shape factors in all directions, for gathering

From 92d30578564ba270ebf28bd2ec045bb330fd0f50 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 14 Nov 2022 23:45:03 -0600
Subject: [PATCH 0157/1346] [pre-commit.ci] pre-commit autoupdate (#3524)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/hadialqattan/pycln: v2.1.1 → v2.1.2](https://github.com/hadialqattan/pycln/compare/v2.1.1...v2.1.2)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 668c0a31b34..6adde4fd9f9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -67,7 +67,7 @@ repos:
 
 # Autoremoves unused Python imports
 - repo: https://github.com/hadialqattan/pycln
-  rev: v2.1.1
+  rev: v2.1.2
   hooks:
   - id: pycln
     name: pycln (python)

From fa9f871ae0e17e7f0d1deeaa1c3117ef7d48c867 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Tue, 15 Nov 2022 09:58:24 -0600
Subject: [PATCH 0158/1346] AMReX/PICSAR: Weekly Update (#3522)

* AMReX: Weekly Update

* CMake: Fix -fPIC (for all shared builds)

  openPMD currently side-injects shared build defaults, thus this surfaced.
---
 .github/workflows/cuda.yml     | 2 +-
 CMakeLists.txt                 | 9 +++++++++
 Regression/WarpX-GPU-tests.ini | 2 +-
 Regression/WarpX-tests.ini     | 2 +-
 cmake/dependencies/AMReX.cmake | 5 +++--
 run_test.sh                    | 2 +-
 6 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml
index 9366fe8c016..990e5e82c09 100644
--- a/.github/workflows/cuda.yml
+++ b/.github/workflows/cuda.yml
@@ -110,7 +110,7 @@ jobs:
           which nvcc || echo "nvcc not in PATH!"
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 0d3deeb5c75cade14c381ef620921beaa2604c11 && cd - + cd amrex && git checkout --detach d8bc97f92a1a568b6e996db3b8d9715fced0464f && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/CMakeLists.txt b/CMakeLists.txt index 209e4d118b5..3633ee43321 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -112,6 +112,15 @@ set_default_build_type("Release") # (also know as "link-time optimization" or "whole program optimization") option(WarpX_IPO "Compile WarpX with interprocedural optimization (will take more time)" OFF) +# default ABLASTR and AMReX library type: static (as in CMake) +# this is temporary and we don't want to set it long term +# mainly to work-around a global setting in openPMD-api <= 0.14.5 that changes the +# default in superbuilds by accident +if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) + option(BUILD_SHARED_LIBS "Build shared libraries (so/dylib/dll)." OFF) + mark_as_advanced(BUILD_SHARED_LIBS) +endif() + # note: we could skip this if we solely build WarpX_APP, but if we build a # shared WarpX library or a third party, like ImpactX, uses ablastr in a # shared library (e.g., for Python bindings), then we need relocatable code. diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index a4cb58e75e6..0d1b86390e6 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 0d3deeb5c75cade14c381ef620921beaa2604c11 +branch = d8bc97f92a1a568b6e996db3b8d9715fced0464f [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 6f2f038cb41..cb4cacf00a0 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 0d3deeb5c75cade14c381ef620921beaa2604c11 +branch = d8bc97f92a1a568b6e996db3b8d9715fced0464f [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index b42244c5327..7d9691c9b6f 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -92,7 +92,8 @@ macro(find_amrex) endif() # shared libs, i.e. 
for Python bindings, need relocatable code - if(WarpX_LIB OR ABLASTR_POSITION_INDEPENDENT_CODE) + # openPMD: currently triggers shared libs (TODO) + if(WarpX_LIB OR ABLASTR_POSITION_INDEPENDENT_CODE OR BUILD_SHARED_LIBS OR WarpX_OPENPMD) set(AMReX_PIC ON CACHE INTERNAL "") endif() @@ -239,7 +240,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "0d3deeb5c75cade14c381ef620921beaa2604c11" +set(WarpX_amrex_branch "d8bc97f92a1a568b6e996db3b8d9715fced0464f" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 7f6ce13d207..760b0128b9e 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 0d3deeb5c75cade14c381ef620921beaa2604c11 && cd - +cd amrex && git checkout --detach d8bc97f92a1a568b6e996db3b8d9715fced0464f && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 436934f18119543604a9c33fcb7eefcc162b4be3 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Tue, 15 Nov 2022 09:57:16 -0800 Subject: [PATCH 0159/1346] Move shared functionality between `picmi.UniformDistribution` and `picmi.AnalyticDistribution` into a parent class (#3476) * allow density function in picmi * created a new parent class to handle common functionality between `UniformDistribution` and `AnalyticDistribution` * fix bug caught by the `Python_Langmuir_rz_multimode` CI test * Place picmi inherited parent classes first to properly render documentation. * fix issue due to secondary parent class `init` not being called --- Python/pywarpx/picmi.py | 106 ++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 63 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 30b9f4c7ca5..a85cc37845b 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -321,9 +321,20 @@ def initialize_inputs(self, species_number, layout, species, density_scale): species.uz = self.centroid_velocity[2]/constants.c -class UniformDistribution(picmistandard.PICMI_UniformDistribution): - def initialize_inputs(self, species_number, layout, species, density_scale): +class DensityDistributionBase(object): + """This is a base class for several predefined density distributions. 
It + captures universal initialization logic.""" + + def set_mangle_dict(self): + if not hasattr(self, 'mangle_dict'): + self.mangle_dict = None + + if hasattr(self, "user_defined_kw") and self.mangle_dict is None: + # Only do this once so that the same variables can be used multiple + # times + self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) + def set_species_attributes(self, species, layout): if isinstance(layout, GriddedLayout): # --- Note that the grid attribute of GriddedLayout is ignored species.injection_style = "nuniformpercell" @@ -342,14 +353,16 @@ def initialize_inputs(self, species_number, layout, species, density_scale): species.zmin = self.lower_bound[2] species.zmax = self.upper_bound[2] - # --- Only constant density is supported at this time - species.profile = "constant" - species.density = self.density - if density_scale is not None: - species.density *= density_scale + if self.fill_in: + species.do_continuous_injection = 1 # --- Note that WarpX takes gamma*beta as input - if np.any(np.not_equal(self.rms_velocity, 0.)): + if (hasattr(self, "momentum_expressions") + and np.any(np.not_equal(self.momentum_expressions, None)) + ): + species.momentum_distribution_type = 'parse_momentum_function' + self.setup_parse_momentum_functions(species) + elif np.any(np.not_equal(self.rms_velocity, 0.)): species.momentum_distribution_type = "gaussian" species.ux_m = self.directed_velocity[0]/constants.c species.uy_m = self.directed_velocity[1]/constants.c @@ -363,74 +376,41 @@ def initialize_inputs(self, species_number, layout, species, density_scale): species.uy = self.directed_velocity[1]/constants.c species.uz = self.directed_velocity[2]/constants.c - if self.fill_in: - species.do_continuous_injection = 1 - + def setup_parse_momentum_functions(self, species): + for sdir, idir in zip(['x', 'y', 'z'], [0, 1, 2]): + if self.momentum_expressions[idir] is not None: + expression = pywarpx.my_constants.mangle_expression(self.momentum_expressions[idir], self.mangle_dict) + else: + expression = f'{self.directed_velocity[idir]}' + species.__setattr__(f'momentum_function_u{sdir}(x,y,z)', f'({expression})/{constants.c}') -class AnalyticDistribution(picmistandard.PICMI_AnalyticDistribution): - def init(self, kw): - self.mangle_dict = None +class UniformDistribution(picmistandard.PICMI_UniformDistribution, DensityDistributionBase): def initialize_inputs(self, species_number, layout, species, density_scale): - if isinstance(layout, GriddedLayout): - # --- Note that the grid attribute of GriddedLayout is ignored - species.injection_style = "nuniformpercell" - species.num_particles_per_cell_each_dim = layout.n_macroparticle_per_cell - elif isinstance(layout, PseudoRandomLayout): - assert (layout.n_macroparticles_per_cell is not None), Exception('WarpX only supports n_macroparticles_per_cell for the PseudoRandomLayout with UniformDistribution') - species.injection_style = "nrandompercell" - species.num_particles_per_cell = layout.n_macroparticles_per_cell - else: - raise Exception('WarpX does not support the specified layout for UniformDistribution') + self.set_mangle_dict() + self.set_species_attributes(species, layout) - species.xmin = self.lower_bound[0] - species.xmax = self.upper_bound[0] - species.ymin = self.lower_bound[1] - species.ymax = self.upper_bound[1] - species.zmin = self.lower_bound[2] - species.zmax = self.upper_bound[2] + # --- Only constant density is supported by this class + species.profile = "constant" + species.density = self.density + if density_scale 
is not None: + species.density *= density_scale - if self.mangle_dict is None: - # Only do this once so that the same variables are used in this distribution - # is used multiple times - self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) - expression = pywarpx.my_constants.mangle_expression(self.density_expression, self.mangle_dict) + +class AnalyticDistribution(picmistandard.PICMI_AnalyticDistribution, DensityDistributionBase): + def initialize_inputs(self, species_number, layout, species, density_scale): + + self.set_mangle_dict() + self.set_species_attributes(species, layout) species.profile = "parse_density_function" + expression = pywarpx.my_constants.mangle_expression(self.density_expression, self.mangle_dict) if density_scale is None: species.__setattr__('density_function(x,y,z)', expression) else: species.__setattr__('density_function(x,y,z)', "{}*({})".format(density_scale, expression)) - # --- Note that WarpX takes gamma*beta as input - if np.any(np.not_equal(self.momentum_expressions, None)): - species.momentum_distribution_type = 'parse_momentum_function' - self.setup_parse_momentum_functions(species) - elif np.any(np.not_equal(self.rms_velocity, 0.)): - species.momentum_distribution_type = "gaussian" - species.ux_m = self.directed_velocity[0]/constants.c - species.uy_m = self.directed_velocity[1]/constants.c - species.uz_m = self.directed_velocity[2]/constants.c - species.ux_th = self.rms_velocity[0]/constants.c - species.uy_th = self.rms_velocity[1]/constants.c - species.uz_th = self.rms_velocity[2]/constants.c - else: - species.momentum_distribution_type = "constant" - species.ux = self.directed_velocity[0]/constants.c - species.uy = self.directed_velocity[1]/constants.c - species.uz = self.directed_velocity[2]/constants.c - - if self.fill_in: - species.do_continuous_injection = 1 - - def setup_parse_momentum_functions(self, species): - for sdir, idir in zip(['x', 'y', 'z'], [0, 1, 2]): - if self.momentum_expressions[idir] is not None: - expression = pywarpx.my_constants.mangle_expression(self.momentum_expressions[idir], self.mangle_dict) - else: - expression = f'{self.directed_velocity[idir]}' - species.__setattr__(f'momentum_function_u{sdir}(x,y,z)', f'({expression})/{constants.c}') class ParticleListDistribution(picmistandard.PICMI_ParticleListDistribution): def init(self, kw): From a0901d94a04f2ddbbf6b4f2772b98d0f0c3fe649 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 17 Nov 2022 09:36:50 -0800 Subject: [PATCH 0160/1346] Doc: remove obsolete section on reading BTD data (#3527) * Doc: remove obsolete section on reading BTD data * Update doc * Remove additional files * Remove unused cells --- .../dataanalysis/backtransformed_diags.rst | 80 ------ Docs/source/usage/parameters.rst | 4 +- Tools/PostProcessing/Visualization.ipynb | 73 +----- Tools/PostProcessing/boosted_frame_hdf5.ipynb | 236 ------------------ Tools/PostProcessing/read_lab_particles.py | 30 --- Tools/PostProcessing/read_raw_data.py | 81 ------ 6 files changed, 4 insertions(+), 500 deletions(-) delete mode 100644 Docs/source/dataanalysis/backtransformed_diags.rst delete mode 100644 Tools/PostProcessing/boosted_frame_hdf5.ipynb delete mode 100644 Tools/PostProcessing/read_lab_particles.py diff --git a/Docs/source/dataanalysis/backtransformed_diags.rst b/Docs/source/dataanalysis/backtransformed_diags.rst deleted file mode 100644 index b9d20034a55..00000000000 --- a/Docs/source/dataanalysis/backtransformed_diags.rst +++ /dev/null @@ -1,80 +0,0 @@ -.. 
_dataanalysis-btd: - -Back-Transformed Diagnostics -============================ - -When running a simulation in a boosted frame, WarpX has the capability to -back-transform the simulation results to the laboratory frame of reference, which -is often useful to study the physics. A set of functions can be found in the -python file :download:`read_raw_data.py<../../../Tools/PostProcessing/read_raw_data.py>`. The main commands can be found in our example jupyter notebook for postprocessing :download:`Visualization.ipynb<../../../Tools/PostProcessing/Visualization.ipynb>`. - -The full back-transformed diagnostics of the entire domain is written in ``lab_frame_data/snapshots/`` and the back-transformed diagnostics of the reduced domain is written to ``lab_frame_data/slices/`` -For instance: To plot the ``Ez`` field along the z-direction at the center of the 3D-domain of the full back-transformed diagnostics for the entire 3D domain: - -.. code-block:: python - - import read_raw_data - import matplotlib.pyplot as plt - - iteration = 0 - field = 'Ez' - snapshot = './lab_frame_data/snapshots/' + 'snapshot' + str(iteration).zfill(5) - header = './lab_frame_data/snapshots/Header' - allrd, info = read_raw_data.read_lab_snapshot(snapshot, header) # Read field data - F = allrd[field] - plt.plot(F[F.shape[0]//2,F.shape[1]//2-1,:]) - -Similarly, the back-transformed diagnostics on a reduced domain (1D line, 2D slice, 3D reduced diagnostic) can also be visualized using read_raw_data.py. For instance -- let us say that the user-input is an "x-z" slice (at the center of the domain in the "y-direction"), then, to plot ``Ez`` on this x-z slice: - -.. code-block:: python - - iteration = 0 - field = 'Ez' - snapshot = './lab_frame_data/slices/' + 'slice' + str(iteration).zfill(5) - header = './lab_frame_data/slices/Header' - allrd, info = read_raw_data.read_lab_snapshot(snapshot, header) # Read field data - F_RD = allrd[field] - plt.plot(F_RD[F_RD.shape[0]//2,0,:]) - - -Note that, in the above snippet, we compare the 0th cell of the reduced diagnostic with ``F.shape[1]//2-1``. For an x-z slice at y=y-mid of the domain, two cells are extracted at the center to ensure that the data format is HDF5 compliant. Let us consider that the domain consists of four cells in the y-dimension: [0,1,2,3], Then the 2D slice would contain the data that corresponds to [1,2]. That is the 0th cell of the reduced diagnostic corresponds to ``ny/2-1``, (where, ny is the number of cells in the y-dimension). - -If the back-transformed diagnostics are written in the HDF5 format (This can be done by compiling WarpX with USE_HDF5=TRUE), then the full domain snapshot and reduced domain diagnostics can be visualized using h5py: - -.. code-block:: python - - import matplotlib.pyplot as plt - import h5py - - f1 = h5py.File('lab_frame_data/snapshots/snapshot00000', 'r') - nx1 = f1['Ez'].shape[0] - ny1 = f1['Ez'].shape[1] - nz1 = f1['Ez'].shape[2] - plt.plot(f1['Ez'][nx1//2,ny1//2-1,:]) - - f2 = h5py.File('lab_frame_data/slices/slice00000', 'r') - nx2 = f2['Ez'].shape[0] - ny2 = f2['Ez'].shape[1] - nz2 = f2['Ez'].shape[2] - plt.figure() - plt.plot(f2['Ez'][nx2//2,0,:]) - -The back-transformed particle data on the full and reduced diagnostic can be visualized as follows - -.. 
code-block:: python - - species='ions' - iteration = 1 - - snapshot = './lab_frame_data/snapshots/' + 'snapshot' + str(iteration).zfill(5) - xbo = get_particle_field(snapshot, species, 'x') # Read particle data - ybo = get_particle_field(snapshot, species, 'y') - zbo = get_particle_field(snapshot, species, 'z') - - snapshot = './lab_frame_data/slices/' + 'slice' + str(iteration).zfill(5) - xbo_slice = get_particle_field(snapshot, species, 'x') # Read particle data - ybo_slice = get_particle_field(snapshot, species, 'y') - zbo_slice = get_particle_field(snapshot, species, 'z') - plt.figure() - plt.plot(xbo, ybo, 'r.', markersize=1.) - plt.plot(xbo_slice, ybo_slice, 'bx', markersize=1.) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 620e66d81a1..691cf81aef7 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2131,8 +2131,8 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a .. _running-cpp-parameters-diagnostics-btd: -BackTransformed Diagnostics (with support for Plotfile/openPMD output) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +BackTransformed Diagnostics +^^^^^^^^^^^^^^^^^^^^^^^^^^^ ``BackTransformed`` diag type are used when running a simulation in a boosted frame, to reconstruct output data to the lab frame. This option can be set using ``.diag_type = BackTransformed``. Note that this diagnostic is not currently supported for RZ. Additional options for this diagnostic include: diff --git a/Tools/PostProcessing/Visualization.ipynb b/Tools/PostProcessing/Visualization.ipynb index dbeb9a734fc..ef05b69c2c0 100644 --- a/Tools/PostProcessing/Visualization.ipynb +++ b/Tools/PostProcessing/Visualization.ipynb @@ -103,81 +103,12 @@ "plt.imshow(Bx[:,Bx.shape[1]//2,:], extent=extent, aspect='auto')\n", "plt.scatter(z,x,s=.1,c='k')" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Read data back-transformed to the lab frame when the simulation runs in the boosted frame (example: 2D run)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# read_raw_data.py is located in warpx/Tools.\n", - "import os, glob\n", - "import read_raw_data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "iteration = 1\n", - "\n", - "snapshot = './lab_frame_data/' + 'snapshot' + str(iteration).zfill(5)\n", - "header = './lab_frame_data/Header'\n", - "allrd, info = read_raw_data.read_lab_snapshot(snapshot, header) # Read field data\n", - "F = allrd[field]\n", - "print( \"Available info: \", list(info.keys()) )\n", - "print(\"Available fields: \", info['field_names'])\n", - "nx = info['nx']\n", - "nz = info['nz']\n", - "x = info['x']\n", - "z = info['z']\n", - "xbo = read_raw_data.get_particle_field(snapshot, species, 'x') # Read particle data\n", - "ybo = read_raw_data.get_particle_field(snapshot, species, 'y')\n", - "zbo = read_raw_data.get_particle_field(snapshot, species, 'z')\n", - "uzbo = read_raw_data.get_particle_field(snapshot, species, 'uz')\n", - "\n", - "plt.figure(figsize=(6, 3))\n", - "extent = np.array([info['zmin'], info['zmax'], info['xmin'], info['xmax']])\n", - "plt.imshow(F[:,F.shape[1]//2,:], aspect='auto', extent=extent, cmap='seismic')\n", - "plt.colorbar()\n", - "plt.plot(zbo, xbo, 'g.', markersize=1.)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Read back-transformed data 
with hdf5 format (example: 3D run)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import h5py\n", - "import matplotlib.pyplot as plt\n", - "f = h5py.File('HDF5_lab_frame_data/snapshot00003', 'r')\n", - "print( list(f.keys()) )\n", - "# plt.figure()\n", - "plt.imshow(f['Ey'][:,,:])" - ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -191,7 +122,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.1" + "version": "3.9.10" }, "widgets": { "state": { diff --git a/Tools/PostProcessing/boosted_frame_hdf5.ipynb b/Tools/PostProcessing/boosted_frame_hdf5.ipynb deleted file mode 100644 index 918a89668ac..00000000000 --- a/Tools/PostProcessing/boosted_frame_hdf5.ipynb +++ /dev/null @@ -1,236 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Populating the interactive namespace from numpy and matplotlib\n" - ] - } - ], - "source": [ - "%pylab inline\n", - "import h5py" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "f = h5py.File(\"/home/atmyers/AMReX-Codes/WarpX/Examples/Physics_applications/plasma_acceleration/lab_frame_data/snapshot00001\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "[u'Bx',\n", - " u'By',\n", - " u'Bz',\n", - " u'Ex',\n", - " u'Ey',\n", - " u'Ez',\n", - " u'beam',\n", - " u'driver',\n", - " u'jx',\n", - " u'jy',\n", - " u'jz',\n", - " u'plasma_e',\n", - " u'plasma_p',\n", - " u'rho']" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "f.keys()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAD8CAYAAABn919SAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnW2sJUd55/9P9Tl37szYxp4AsxaghZUQEYo2gEZoIxDK\nxiELJIqt/YCIlN3ZlVfzZTci2pXCsJFW4pt3P0TZD6tIFpAdKSRZRMLaQtlExiGKIkUQXkxiMKwJ\nsYVZj8fY2J6Xe+853fXsh6rqrq7uPt33nnPPPaf9/0l3uru6urvqvNR0/85TVaKqIIQQsv2Yky4A\nIYSQ1cAGnRBCRgIbdEIIGQls0AkhZCSwQSeEkJHABp0QQkYCG3RCCBkJbNAJIWQksEEnhJCRMFnn\nxbLbz+rktXet85KEELL1zJ764Y9U9XV9+dbaoE9eexfu/sSvrfCMxzBsgaz+lISQBSiq791xjkSy\nxd/tp//1x58eko/KhRBCRgIbdEIIGQls0AkhJ89JDPo6woFm2aATQshIYINOCCEjgQ06IYSMhC1v\n0I8hDmmEXo0QEjHi7/iWN+iEEEICgxp0EblTRD4nIt8RkSdE5GdE5JyIPCIiT/olu4ASQsgJMvQO\n/b8D+FNV/UkAPw3gCQCXATyqqm8F8KjfHgcjfiQj5FVL+r3e4p6jXfQ26CLyGgDvA/ApAFDVmaq+\nBOBeAFd8tisA7juuQhJCCOlnyB36WwA8D+B3ReQbIvJJETkL4LyqPuvzXAVw/rgKSQghpJ8hDfoE\nwLsA/I6qvhPATSR6RVUVHaJCRC6JyFdF5KvF9ZvLlrftCsdwTkLI2uBXeGUMadCfAfCMqn7Zb38O\nroF/TkTuBgC/vNZ2sKo+qKoXVPVCdvvZVZSZEEJIC70NuqpeBfADEXmbT7oHwLcBPAzgok+7COCh\nYykhIYSQQQwdD/3XAHxGRHYAfB/Av4X7z+CzInI/gKcBfPh4ikgIIWQIgxp0VX0MwIWWXfestjiE\nEEKOCnuKEkLISGCDTgghI+EEGvTj6IbJQboIGQUMYVwK3qETQshIYINOCCEjgQ06IYSMhKFx6KvB\nKLIdC7WAqkCtRK6a8owQQpaBd+iEEDIS2KATQshIWKtyyYzizJkD5IWB9cqlKAzUVvpF9agaRrDS\nWEMaIELWi4LfuyXhHTohhIwENuiEEDIS1qpcjChu2z1AYQ1UgUINCivIiwzWiku3AluYeiQMwGgY\nQgjpgXfohBAyEtigE0LISGCDTgghI2GtDl1EMc0KTLMCVp0LVxVYldKrz63z6XlhKqduBWr7vDqH\nRySE9DDyZoJ36IQQMhLYoBNCyEhY7+BcEUYUBlpGIdrMrZzSvKZhCr+MNYwtjNcwoYepJj1Ml4SR\nkYSsF1rTlcA7dEIIGQls0AkhZCSwQSeEkJFwYg49xXiBlhkLwI+6mLl9FpFT9149LwzyIkPhfXrr\nqI3A0bwcR30j5OSgTz8yvEMnhJCRMOgOXUSeAnAdQAEgV9ULInIOwP8C8GYATwH4sKr++HiKSQgh\npI/DKJd/rqo/irYvA3hUVR8Qkct++2OHubiFlKoloCoQUYhU6RkUmQBTU5R5LCoFM7emVcFAxfUu\ntXJ4BbMoH3UMIatHkvUu9bmMjhn5d3cZ5XIvgCt+/QqA+5YvDiGEkKMytEFXAF8Uka+JyCWfdl5V\nn/XrVwGcX3npCCGEDGaocnmvqv5QRF4P4BER+U68U1VVYkcS4f8DuAQAp15/Rzkol/HZbfQMFPRL\n0CMdp0RmLDIAmVggA3Y7FIyqYJ5nvndpmDijbe7SQ9J33KLHxVVfa9sZ+SPwSdP1HVqWUmEeJ12X\n4Gemk0F36Kr6Q7+8BuDzAN4N4DkRuRsA/PJax7EPquoFVb0wec3p1ZSaEEJIg94GXUTOisjtYR3A\nLwB4HMDDAC76bBcBPHRchSSEENLPEOVyHsDnRSTk/31V/VMR+RsAnxWR+wE8DeDDx1dMQgghffQ2\n6Kr6fQA/3ZL+AoB7jnph2+LgLKR06+4i3rcnItkmx4moC23M6k499C7NC4O5dSGN8zyrepYWUoU0\nKrBYzh3CRWqyPFE2RTh2vBjH8RptSpVXyHG58KOyKeUJYc7x8tUMe4oSQshIYINOCCEjYc1ziqKm\nVFq1S4eKAaJjtdnDNH4ELBWM712qWaVg8iJDYQWzIkNeZMgLP2FGYeq9ShsKJl7fhMfNIz5anljR\npXV1OdakcU74Kf5E9MaWmAvxk+SIf9P7XquxqxneoRNCyEhgg04IISOBDTohhIyEtTp0A8Xpybzm\nydvWU79lW4ReOmRAfEzq0cJQAQBwKsth1XnyXN0IjWGogDBMgHaGNQInJxd7rruKUSTXRVqGo7yk\nctQDuwqxmqwANtc/r7hcmxC6qPBOfFHlomKGMsdlH5NP5x06IYSMBDbohBAyEtYctqjY8ZNUdFFT\nMGjqmHSZrvdhRMvwxykK2CwHANej1CsYqy6sMfQsLTrDGhdVtmf/4KfVYY+Svecd8BrpMT9BS1cR\nGgpmQEEOO5plI+9hH7OXVDRHeKo/ktJY13WWuN4qKRVKx/ujKr0Rx2PSL7xDJ4SQkcAGnRBCRsJa\nlUsgKI90We7veHwKCqZLv1g/KFecVh6bbKfXnhiLUwAw9efxA3vNbYZZnmFuM8xz17O0yE1zsgyg\nepwb8tQW8ix82m05UZq/sV0/plOjLLrucTx2ii5WOrXH4vr1e1XNqqMtWq+35MBth/ls4BAaZMD5\nVnmu6pzD8zYPXvL9avt8SvtnPVUxjWiY5JjwWm2reuEdOiGEjAQ26IQQMhLYoBNCyEg4EYduVRre\nHKjceeW3bW07zVeer+HWTX3bj7S4KPSxRkjO/LE7gtxPQH1QTJAXBrMiw2w+cRNT5wZQqYc1DmGo\nP4/zLXDmOjAf0KExh6jNvrp1+tH+0Ra7Tq1t5+xw7gt9e5+77ftdo7PqhxiJsyfcstd3H/XYhcct\nvqTL1P/hOGr4Y9/1VV2e8vPdch03imLYiL4TtUwtjrzjPd9Wl847dEIIGQls0AkhZCSsVbmkIUNd\n6gVo6pYuHZMl+9sISqZQgVXTomJMf8hj5pZndQYLp2AOiglm+cT1KrXiwxozFLkLaWyENdZfjA56\nVEubYqnlc/ul7djk+N4QyIXl7EIWbnamJx366hVId3bombbQyBY10/2I31HZRSqmswdqz7nS5D6t\ndOS87eldCmWRNulTI4OUy1F6sqaHtlwmvO8aqbVSw0R6RRXd6uXkxxpbGt6hE0LISGCDTgghI4EN\nOiGEjIQTCVsMxP48deTxeryvzZ2nvj1DfA7bOF99pEaDInbpLb49du3heJc2cyMzWjdC40E+wUEx\nwUE+wTx3QwZ0DhPQSovfTtx5w5urVDo09eytjr2ep9O1HzdxVRs+PYrti/fFkY8Dj2k7d6dn73Ts\nC5z1UYZ7CJupc+7b35on3d88pu086XGd/vswHr87+7Br
hhIwENuiEEDIS2KATQshIWO8EFyqwc4PVy7IR9+UlhJCB8A6dEEJG\nAht0QggZCSfQoG9JbNKWFJOQUUF7uhS8QyeEkJHABp0QQkbCCBp0PqMRQggwigadEEIIwAadEEJG\nAxt0QggZCWzQ22DIIiHr46R+Bhvhz29s0AkhZCSwQSeEkJGw5Q36MTwzUbcQQraULW/QCSGEBNig\nE0LISGCDTgghI4ENOiHk1cGr4PcxNuiEEDISlmrQReQDIvJdEfmeiFxeVaEIIYQcniM36CKSAfgf\nAD4I4O0AfkVE3r6qgvXDkEVCyBGQZDkilrlDfzeA76nq91V1BuAPAdy7mmIRQgg5LMs06G8A8INo\n+xmfRggh5ASYHPcFROQSgEt+8+Dpi5cfP+5rrpHXAvjRSRdihbA+m82Y6jOmugDHX59/PCTTMg36\nDwG8Kdp+o0+roaoPAngQAETkq6p6YYlrbhSsz2bD+mwuY6oLsDn1WUa5/A2At4rIW0RkB8BHADy8\nmmIRQgg5LEe+Q1fVXET+A4A/A5AB+LSqfmtlJSOEEHIolnLoqvonAP7kEIc8uMz1NhDWZ7NhfTaX\nMdUF2JD6iOoIp+0ghJBXIez6TwghI2EtDfo2DhEgIp8WkWsi8niUdk5EHhGRJ/3yrmjfx339visi\n/+JkSt2NiLxJRL4kIt8WkW+JyEd9+lbWSUR2ReQrIvJNX59P+PStrA/gel+LyDdE5At+e2vrAgAi\n8pSI/J2IPCYiX/VpW1snEblTRD4nIt8RkSdE5Gc2rj6qeqx/cD+Y/j2AfwJgB8A3Abz9uK+7gnK/\nD8C7ADwepf03AJf9+mUA/9Wvv93X6xSAt/j6Ziddh6Q+dwN4l1+/HcD/9eXeyjrBddy+za9PAXwZ\nwD/b1vr4Mv5HAL8P4Avb/nnz5XwKwGuTtK2tE4ArAP6dX98BcOem1Wcdd+hbOUSAqv4lgBeT5Hvh\n3lT45X1R+h+q6oGq/gOA78HVe2NQ1WdV9et+/TqAJ+B69m5lndRxw29O/Z9iS+sjIm8E8IsAPhkl\nb2VdetjKOonIa+Bu8j4FAKo6U9WXsGH1WUeDPqYhAs6r6rN+/SqA8359q+ooIm8G8E64u9qtrZNX\nFI8BuAbgEVXd5vr8NoDfAGCjtG2tS0ABfFFEvuZ7jAPbW6e3AHgewO96LfZJETmLDasPfxQ9Iuqe\nq7YuREhEbgPwRwB+XVVfifdtW51UtVDVd8D1Un63iPxUsn8r6iMivwTgmqp+rSvPttQl4b3+/fkg\ngH8vIu+Ld25ZnSZwCvZ3VPWdAG7CKZaSTajPOhr0QUMEbAnPicjdAOCX13z6VtRRRKZwjflnVPWP\nffJW1wkA/KPvlwB8ANtZn/cA+GUReQpOSf6ciPwetrMuJar6Q7+8BuDzcMphW+v0DIBn/FMgAHwO\nroHfqPqso0Ef0xABDwO46NcvAngoSv+IiJwSkbcAeCuAr5xA+ToREYHzf0+o6m9Fu7ayTiLyOhG5\n06+fBvB+AN/BFtZHVT+uqm9U1TfDfT/+XFV/FVtYl4CInBWR28M6gF8A8Di2tE6qehXAD0TkbT7p\nHgDfxqbVZ02/Dn8ILqri7wH85jquuYIy/wGAZwHM4f53vh/ATwB4FMCTAL4I4FyU/zd9/b4L4IMn\nXf6W+rwX7nHwbwE85v8+tK11AvBPAXzD1+dxAP/Fp29lfaIy/iyqKJetrQtcVNs3/d+3wvd+y+v0\nDgBf9Z+5/w3grk2rD3uKEkLISOCPooQQMhLYoBNCyEhgg04IISOBDTohhIwENuiEEDIS2KATQshI\nYINOCCEjgQ06IYSMhP8PDPnsHuL8AxkAAAAASUVORK5CYII=\n", - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plt.pcolormesh(f['Ez'])" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "array([ 6.83593750e-05, 6.44531250e-05, 6.05468750e-05, 5.66406250e-05,\n", - " 5.27343750e-05, 4.88281250e-05, 4.49218750e-05, 4.10156250e-05,\n", - " 3.71093750e-05, 3.32031250e-05, 2.92968750e-05, 2.53906250e-05,\n", - " 2.14843750e-05, 1.75781250e-05, 1.36718750e-05, 9.76562500e-06,\n", - " 5.85937500e-06, 1.95312500e-06, -1.95312500e-06, -5.85937500e-06,\n", - " -9.76562500e-06, -1.36718750e-05, -1.75781250e-05, -2.14843750e-05,\n", - " -2.53906250e-05, -2.92968750e-05, -3.32031250e-05, -3.71093750e-05,\n", - " -4.10156250e-05, -4.49218750e-05, -4.88281250e-05, -5.27343750e-05,\n", - " -5.66406250e-05, -6.05468750e-05, -6.44531250e-05, -6.83593750e-05,\n", - " 6.83876027e-05, 6.44813139e-05, 6.05765108e-05, 5.66733414e-05,\n", - " 5.27719786e-05, 4.88726974e-05, 4.49758894e-05, 4.10820839e-05,\n", - " 3.71919770e-05, 3.33064690e-05, 2.94267122e-05, 2.55541686e-05,\n", - " 2.16906798e-05, 1.78385065e-05, 1.39994445e-05, 1.01629180e-05,\n", - " 6.25389642e-06, 2.13426051e-06, -2.13537487e-06, -6.25672996e-06,\n", - " -1.01642969e-05, -1.39987241e-05, -1.78374765e-05, -2.16898278e-05,\n", - " -2.55534886e-05, -2.94261697e-05, -3.33060363e-05, -3.71916320e-05,\n", - " -4.10818090e-05, -4.49756705e-05, -4.88725230e-05, -5.27718395e-05,\n", - " -5.66732296e-05, -6.05764196e-05, -6.44812373e-05, -6.83875349e-05,\n", - " -6.94328986e-05, 
6.94370565e-05, -6.54804990e-05, 6.54846416e-05,\n", - " -6.16436297e-05, -5.78848856e-05, 5.78901869e-05, 6.16481913e-05,\n", - " 5.42016308e-05, -5.41953082e-05, 5.05925132e-05, -5.05848475e-05,\n", - " 4.70774640e-05, -4.70680808e-05, 4.36746441e-05, -4.36631128e-05,\n", - " -4.03914652e-05, 4.04056198e-05, -3.72771388e-05, -2.27206907e-05,\n", - " 2.27565833e-05, 3.72944264e-05, -3.43446168e-05, -1.96046475e-05,\n", - " 3.43655171e-05, 2.47940760e-05, 1.95969324e-05, -2.47585931e-05,\n", - " 3.16399368e-05, -3.16150492e-05, 2.91329762e-05, 2.68551077e-05,\n", - " -2.68217237e-05, -2.91038009e-05, -1.07843879e-05, 1.05308179e-05,\n", - " 2.96653201e-05, 3.12839786e-05, -2.95750830e-05, -3.12447920e-05,\n", - " 2.98133659e-05, 2.93830002e-05, -2.93115653e-05, -2.97589493e-05,\n", - " -2.74057545e-05, 2.73784529e-05, -1.52491705e-05, -7.01325890e-05,\n", - " 1.48208154e-05, 7.01417548e-05, 6.46954805e-05, -6.46934720e-05,\n", - " -6.08183925e-05, 6.08202138e-05, 5.71486907e-05, -5.71455313e-05,\n", - " 5.33805358e-05, -5.33765815e-05, 4.96068508e-05, -4.96021507e-05,\n", - " 4.58433705e-05, -4.58378061e-05, -4.20780772e-05, 4.20846972e-05,\n", - " -3.83192376e-05, 3.83267274e-05, -3.45189971e-05, 3.45246001e-05,\n", - " 3.02811971e-05, -3.02860657e-05, -2.55059266e-05, 2.54917241e-05,\n", - " 2.08842578e-05, -2.08741456e-05, -1.82611992e-05, 1.83114039e-05,\n", - " -1.86607858e-05, -2.15563476e-05, 2.17295129e-05, 1.87602997e-05,\n", - " 1.81046006e-05, -1.82336607e-05, -2.63248837e-06, 2.13295028e-06,\n", - " 6.90404154e-05, -6.90337876e-05, -5.33277092e-06, 5.37905295e-06,\n", - " -6.62852213e-06, 6.75322114e-06, -6.29603687e-05, -9.14861847e-06,\n", - " 6.29561096e-05, 9.10958529e-06, 5.94327700e-05, -5.94359575e-05,\n", - " 5.41091993e-06, -5.86246514e-06, 8.01605544e-06, -8.25184434e-06,\n", - " 1.20612782e-06, -5.56780989e-05, 5.56764499e-05, -1.13647407e-06,\n", - " -5.15539960e-05, 5.15522972e-05, 1.36286373e-05, -1.33311883e-05,\n", - " -1.07985780e-06, 1.28446733e-06, 4.73653447e-05, -4.73671295e-05,\n", - " 1.31694055e-05, -1.38439857e-05, 1.79913802e-05, -1.80938646e-05,\n", - " 7.67216018e-06, -7.66591238e-06, 4.31119712e-05, -4.31142018e-05,\n", - " 5.19116877e-06, -5.26783859e-06, 3.87692231e-05, -3.87716695e-05,\n", - " -3.43907719e-05, -2.50114317e-05, 2.49860945e-05, 3.43895562e-05,\n", - " -3.00286808e-05, 3.00260697e-05, 5.65256480e-06, 2.39189621e-05,\n", - " -7.01103598e-05, 7.01176702e-05, -6.56694175e-05, 6.56745667e-05,\n", - " -6.18081621e-05, 6.18134766e-05, 5.81516122e-05, -5.81451298e-05,\n", - " 5.45628593e-05, -5.45549278e-05, -5.10607201e-05, 5.10704921e-05,\n", - " 4.76995594e-05, -4.76874269e-05, -4.44607445e-05, 4.44757956e-05,\n", - " 4.14264304e-05, -4.14079029e-05, -3.85517138e-05, 3.85739892e-05,\n", - " -3.58777162e-05, 3.59033324e-05, 3.34342649e-05, -3.34045786e-05,\n", - " -6.26378993e-06, -2.48158423e-05, 5.80313093e-06, -1.87047769e-05,\n", - " 1.86318709e-05, -5.73577625e-06, 3.67553286e-05, -3.77985357e-05,\n", - " 3.41772109e-05, 5.08392922e-05, -3.43192673e-05, -5.19919206e-05,\n", - " -1.87295260e-05, 1.91867538e-05, -6.76168870e-05, 6.76075499e-05,\n", - " -6.22893808e-05, 6.22728331e-05, 6.57150301e-05, 5.94638207e-05,\n", - " -5.94759189e-05, 2.34749156e-05, -2.36513726e-05, -6.69613180e-05,\n", - " 5.55959128e-05, -5.56051385e-05, 5.13624294e-05, -5.13702079e-05,\n", - " -4.73392969e-05, 4.73342692e-05, -4.33540639e-05, 4.33511061e-05,\n", - " 6.11405021e-06, -6.23564966e-06, -3.93497071e-05, 3.93483355e-05,\n", - " -3.54041574e-05, 
3.54086610e-05, -2.84968045e-05, 2.85234903e-05,\n", - " 3.18223636e-05, -3.18050094e-05, 8.12908796e-05, 5.21495080e-05,\n", - " -5.23177145e-05, -8.26201799e-05, 8.36702777e-06, 1.82127884e-05,\n", - " -9.71071152e-06, -1.77502807e-05])" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "f['plasma_e/x'][:]" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "278" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "f['plasma_e/x'].size" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.15" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/Tools/PostProcessing/read_lab_particles.py b/Tools/PostProcessing/read_lab_particles.py deleted file mode 100644 index 5216c94efe8..00000000000 --- a/Tools/PostProcessing/read_lab_particles.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2018-2019 Andrew Myers, Maxence Thevenet -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -from glob import glob -import os - -import numpy as np - -it = 1 -fn = "./lab_frame_data/" + 'snapshot' + str(it).zfill(5) + "/particle1/" - -print(fn) - -def get_particle_field(field): - files = glob(os.path.join(fn, field + '_*')) - all_data = np.array([]) - files.sort() - for f in files: - data = np.fromfile(f) - all_data = np.concatenate((all_data, data)) - return all_data - -x = get_particle_field('x') -z = get_particle_field('z') - -ux = get_particle_field('ux') -uz = get_particle_field('uz') diff --git a/Tools/PostProcessing/read_raw_data.py b/Tools/PostProcessing/read_raw_data.py index f8c7e336029..c34ea11d301 100644 --- a/Tools/PostProcessing/read_raw_data.py +++ b/Tools/PostProcessing/read_raw_data.py @@ -7,7 +7,6 @@ from collections import namedtuple from glob import glob -import os import numpy as np @@ -51,81 +50,6 @@ def read_data(plt_file): return all_data -def read_lab_snapshot(snapshot, global_header): - ''' - - This reads the data from one of the lab frame snapshots generated when - WarpX is run with boosted frame diagnostics turned on. It returns a - dictionary of numpy arrays, where each key corresponds to one of the - data fields ("Ex", "By,", etc... ). These values are cell-centered. 
- - ''' - global_info = _read_global_Header(global_header) - - hdrs = glob(snapshot + "/Level_0/buffer*_H") - hdrs.sort() - - boxes, file_names, offsets, header = _read_header(hdrs[0]) - dom_lo, dom_hi = _combine_boxes(boxes) - domain_size = dom_hi - dom_lo + 1 - space_dim = len(dom_lo) - - local_info = _read_local_Header(snapshot + "/Header", space_dim) - ncellz_snapshots = local_info['nz'] - dzcell_snapshots = (local_info['zmax']-local_info['zmin'])/local_info['nz'] - _component_names = local_info['field_names'] - field1 = _component_names[0] - - if space_dim == 2: - direction = 1 - else: - direction = 2 - - buffer_fullsize = 0 - buffer_allsizes = [0] - for i, hdr in enumerate(hdrs): - buffer_data = _read_buffer(snapshot, hdr, _component_names) - buffer_fullsize += buffer_data[field1].shape[direction] - buffer_allsizes.append(buffer_data[field1].shape[direction]) - buffer_allstarts = np.cumsum(buffer_allsizes) - - data = {} - for i in range(header.ncomp): - if space_dim == 3: - data[_component_names[i]] = np.zeros((domain_size[0], domain_size[1], buffer_fullsize)) - elif space_dim == 2: - data[_component_names[i]] = np.zeros((domain_size[0], buffer_fullsize)) - - for i, hdr in enumerate(hdrs): - buffer_data = _read_buffer(snapshot, hdr, _component_names) - if data is None: - data = buffer_data - else: - for k,v in buffer_data.items(): - data[k][..., buffer_allstarts[i]:buffer_allstarts[i+1]] = v[...] - - - info = local_info - # Add some handy info - x = np.linspace(local_info['xmin'], local_info['xmax'], local_info['nx']) - y = np.linspace(local_info['ymin'], local_info['ymax'], local_info['ny']) - z = np.linspace(local_info['zmin'], local_info['zmax'], local_info['nz']) - info.update({ 'x' : x, 'y' : y, 'z' : z }) - return data, info - -# For the moment, the back-transformed diagnostics must be read with -# custom functions like this one. -# It should be OpenPMD-compliant hdf5 files soon, making this part outdated. 
-def get_particle_field(snapshot, species, field): - fn = snapshot + '/' + species - files = glob(os.path.join(fn, field + '_*')) - files.sort() - all_data = np.array([]) - for f in files: - data = np.fromfile(f) - all_data = np.concatenate((all_data, data)) - return all_data - def _get_field_names(raw_file): header_files = glob(raw_file + "*_H") return [hf.split("/")[-1][:-2] for hf in header_files] @@ -262,7 +186,6 @@ def _read_field(raw_file, field_name): header_file = raw_file + field_name + "_H" boxes, file_names, offsets, header = _read_header(header_file) - ng = header.nghost dom_lo, dom_hi = _combine_boxes(boxes) data_shape = dom_hi - dom_lo + 1 if header.ncomp > 1: @@ -294,7 +217,6 @@ def _read_buffer(snapshot, header_fn, _component_names): boxes, file_names, offsets, header = _read_header(header_fn) - ng = header.nghost dom_lo, dom_hi = _combine_boxes(boxes) all_data = {} @@ -378,6 +300,3 @@ def read_reduced_diags_histogram(filename, delimiter=' '): else: bin_data = data[:,2:] return metadata_dict, data_dict, bin_value, bin_data - -if __name__ == "__main__": - data = read_lab_snapshot("lab_frame_data/snapshot00012", "lab_frame_data/Header"); From f9ff146c68136a287d6b14d725d7f7d4e367efb3 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Thu, 17 Nov 2022 10:11:22 -0800 Subject: [PATCH 0161/1346] BTD-RZ Add multiple modes (#3482) * cell center BTD functors for RZ with openpmd * add RZ modes to output varnames too * update varnames once and set map for RZ fields in BTfunctor * clean commented line * Apply suggestions from code review From Axels' review Co-authored-by: Axel Huebl * adding comments, doxygen, and clean-up * adding mulitple modes to RZ BTD * fix comment * fix bug using 2*nrz-1 , instead of nrz * add comments and clean up * fix typo * 1D * WARPX_DIM_XZ instead of 2D * Apply suggestions from code review suggestion from Edoardo Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> * remove BTD RZ field warning that does not apply anymore * BTD rz test for field using laser antenna * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * file starts with analysis * change analysis * fix typo * fix rz input so all snapshots are filled * remove plt from analysis script * initialize cell-centered data to 0 so that guard cells are initialized * Remi's suggestions Co-authored-by: Remi Lehe * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * AllocInitMultifab to add mf to maps of mfs * fix path to analysis script * analysis script executable * a better and succint for loop * unused var * Update Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp Co-authored-by: Remi Lehe * fix unused var * add python path Co-authored-by: Axel Huebl Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Remi Lehe --- .../BTD_rz/analysis_BTD_laser_antenna.py | 53 ++++++++++++++ Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD | 64 +++++++++++++++++ Regression/WarpX-tests.ini | 16 +++++ Source/Diagnostics/BTDiagnostics.cpp | 12 ++-- .../BackTransformFunctor.H | 3 + .../BackTransformFunctor.cpp | 72 ++++++++++++++++++- Source/Diagnostics/MultiDiagnostics.cpp | 3 - 7 files changed, 213 insertions(+), 10 deletions(-) create mode 100755 Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py create mode 
100644 Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD diff --git a/Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py b/Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py new file mode 100755 index 00000000000..5b5f8c94bf3 --- /dev/null +++ b/Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 + +# Copyright 2022 +# Authors: Revathi Jambunathan, Remi Lehe +# +# This tests checks the backtransformed diagnostics by emitting a laser +# (with the antenna) in the boosted-frame and then checking that the +# fields recorded by the backtransformed diagnostics have the right amplitude, +# wavelength, and envelope (i.e. gaussian envelope with the right duration. + +import numpy as np +from openpmd_viewer import OpenPMDTimeSeries +from scipy.constants import c, e, m_e +from scipy.optimize import curve_fit + + +def gaussian_laser( z, a0, z0_phase, z0_prop, ctau, lambda0 ): + """ + Returns a Gaussian laser profile + """ + k0 = 2*np.pi/lambda0 + E0 = a0*m_e*c**2*k0/e + return( E0*np.exp( - (z-z0_prop)**2/ctau**2 ) \ + *np.cos( k0*(z-z0_phase) ) ) + +# Fit the on-axis profile to extract the phase (a.k.a. CEP) +def fit_function(z, z0_phase): + return( gaussian_laser( z, a0, z0_phase, + z0_b+Lprop_b, ctau0, lambda0 ) ) + +# The values must be consistent with the values provided in the simulation input +t_current = 80e-15 # Time of the snapshot1 +c = 299792458; +z0_antenna = -1.e-6 # position of laser +lambda0 = 0.8e-6 # wavelength of the signal +tau0 = 10e-15 # duration of the signal +ctau0 = tau0 * c +a0 = 15 # amplitude +t_peak = 20e-15 # Time at which laser reaches its peak +Lprop_b = c*t_current +z0_b = z0_antenna - c * t_peak + +ts = OpenPMDTimeSeries('./diags/back_rz') +Ex, info = ts.get_field('E', 'x', iteration=1, slice_across='r') + +fit_result = curve_fit( fit_function, info.z, Ex, + p0=np.array([z0_b+Lprop_b]) ) +z0_fit = fit_result[0] + +Ex_fit = gaussian_laser( info.z, a0, z0_fit, z0_b+Lprop_b, ctau0, lambda0) + +## Check that the a0 agrees within 5% of the predicted value +assert np.allclose( Ex, Ex_fit, atol=0.18*Ex.max() ) diff --git a/Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD b/Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD new file mode 100644 index 00000000000..a2bdc8089c1 --- /dev/null +++ b/Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD @@ -0,0 +1,64 @@ +# Maximum number of time steps +warpx.zmax_plasma_to_compute_max_step = 500e-6 +# number of grid points +amr.n_cell = 32 256 + +# Maximum allowable size of each subdomain in the problem domain; +# this is used to decompose the domain for parallel calculations. +amr.max_grid_size = 128 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Geometry +geometry.dims = RZ +geometry.prob_lo = 0.e-6 -20.e-6 +geometry.prob_hi = 40.e-6 0.e-6 + +boundary.field_lo = none absorbing_silver_mueller +boundary.field_hi = absorbing_silver_mueller absorbing_silver_mueller + +# Boosted frame and moving window +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = 1.0 # in units of the speed of light +warpx.gamma_boost = 10. +warpx.boost_direction = z + + +# Verbosity +warpx.verbose = 1 +warpx.n_rz_azimuthal_modes = 2 + +# Algorithms +warpx.cfl = 1.0 +warpx.use_filter = 0 + + +# Order of particle shape factors +algo.particle_shape = 1 + +# Laser +lasers.names = laser1 +laser1.profile = Gaussian +laser1.position = 0. 0. -1.e-6 # This point is on the laser plane +laser1.direction = 0. 0. 1. 
# The plane normal direction +laser1.polarization = 1. 0. 0. # The main polarization vector +laser1.a0 = 1.5e1 # Maximum amplitude of the laser field +laser1.profile_waist = 10.e-6 # The waist of the laser (in meters) +laser1.profile_duration = 10.e-15 # The duration of the laser (in seconds) +laser1.profile_t_peak = 20.e-15 # The time at which the laser reaches its peak (in seconds) +laser1.profile_focal_distance = 1.e-6 # Focal distance from the antenna (in meters) +laser1.wavelength = 0.8e-6 # The wavelength of the laser (in meters) + +# Diagnostics +diagnostics.diags_names = diag1 back_rz +diag1.intervals = 50 +diag1.diag_type = Full + +back_rz.diag_type = BackTransformed +back_rz.dt_snapshots_lab = 80.e-15 +back_rz.fields_to_plot = Er Et Ez Br Bt Bz jr jt jz rho +back_rz.format = openpmd +back_rz.buffer_size = 32 +back_rz.num_snapshots_lab = 2 diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index cb4cacf00a0..9934fb64c4a 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -3715,3 +3715,19 @@ doVis = 0 compareParticles = 1 particleTypes = electron ion analysisRoutine = Examples/Tests/VayDeposition/analysis.py + +[BTD_rz] +buildDir = . +inputFile = Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD +runtime_params = +dim = 2 +addToCompileString = USE_RZ=TRUE +cmakeSetupOpts = -DWarpX_DIMS=RZ +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +analysisRoutine = Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 90b00a24a08..2b4efcb5ed8 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -198,7 +198,6 @@ BTDiagnostics::ReadParameters () m_crse_ratio == amrex::IntVect(1), "Only support for coarsening ratio of 1 in all directions is included for BTD\n" ); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(WarpX::n_rz_azimuthal_modes==1, "Currently only one mode is supported for BTD"); // Read list of back-transform diag parameters requested by the user // amrex::ParmParse pp_diag_name(m_diag_name); @@ -481,8 +480,12 @@ BTDiagnostics::DefineCellCenteredMultiFab(int lev) ba.coarsen(m_crse_ratio); amrex::DistributionMapping dmap = warpx.DistributionMap(lev); int ngrow = 1; +#ifdef WARPX_DIM_RZ + int ncomps = WarpX::ncomps * static_cast(m_cellcenter_varnames.size()); +#else int ncomps = static_cast(m_cellcenter_varnames.size()); - m_cell_centered_data[lev] = std::make_unique(ba, dmap, ncomps, ngrow); +#endif + WarpX::AllocInitMultiFab(m_cell_centered_data[lev], ba, dmap, ncomps, amrex::IntVect(ngrow), "cellcentered_BTD",0._rt); } @@ -520,7 +523,7 @@ BTDiagnostics::InitializeFieldFunctors (int lev) int nvars = static_cast(m_varnames.size()); m_all_field_functors[lev][i] = std::make_unique( m_cell_centered_data[lev].get(), lev, - nvars, m_num_buffers, m_varnames); + nvars, m_num_buffers, m_varnames, m_varnames_fields); } // Define all cell-centered functors required to compute cell-centere data @@ -632,7 +635,8 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) int nvars = static_cast(m_varnames.size()); m_all_field_functors[lev][i] = std::make_unique( m_cell_centered_data[lev].get(), lev, - nvars, m_num_buffers, m_varnames); + nvars, m_num_buffers, m_varnames, + m_varnames_fields); } // Reset field functors for cell-center multifab diff --git a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H index 
35c22908cb3..142bb098da8 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H @@ -47,6 +47,7 @@ public: BackTransformFunctor ( const amrex::MultiFab * const mf_src, const int lev, const int ncomp, const int num_buffers, amrex::Vector< std::string > varnames, + amrex::Vector< std::string > varnames_fields, const amrex::IntVect crse_ratio= amrex::IntVect(1)); /** \brief Lorentz-transform mf_src for the ith buffer and write the result in mf_dst. @@ -118,6 +119,8 @@ private: amrex::Vector m_k_index_zlab; /** Vector of user-defined field names to be stored in the output multifab */ amrex::Vector< std::string > m_varnames; + /** Vector of user-defined field names without modifications for rz modes */ + amrex::Vector< std::string > m_varnames_fields; /** max grid size used to generate BoxArray to define output MultiFabs */ int m_max_box_size = 256; diff --git a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp index 10a9ff12a7c..3678e03f0c2 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.cpp @@ -34,8 +34,10 @@ using namespace amrex; BackTransformFunctor::BackTransformFunctor (amrex::MultiFab const * mf_src, int lev, const int ncomp, const int num_buffers, amrex::Vector< std::string > varnames, - const amrex::IntVect crse_ratio) - : ComputeDiagFunctor(ncomp, crse_ratio), m_mf_src(mf_src), m_lev(lev), m_num_buffers(num_buffers), m_varnames(varnames) + amrex::Vector< std::string > varnames_fields, + const amrex::IntVect crse_ratio + ) + : ComputeDiagFunctor(ncomp, crse_ratio), m_mf_src(mf_src), m_lev(lev), m_num_buffers(num_buffers), m_varnames(varnames), m_varnames_fields(varnames_fields) { InitData(); } @@ -112,14 +114,32 @@ BackTransformFunctor::operator ()(amrex::MultiFab& mf_dst, int /*dcomp*/, const const Box& tbx = mfi.tilebox(); amrex::Array4 src_arr = tmp[mfi].array(); amrex::Array4 dst_arr = mf_dst[mfi].array(); +#ifdef WARPX_DIM_RZ + const int n_rz_comp = WarpX::ncomps; +#endif amrex::ParallelFor( tbx, ncomp_dst, [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) { + // Field id that corresponds to the nth user-requested component const int icomp = field_map_ptr[n]; #if defined(WARPX_DIM_3D) dst_arr(i, j, k_lab, n) = src_arr(i, j, k, icomp); -#else +#elif defined(WARPX_DIM_XZ) dst_arr(i, k_lab, k, n) = src_arr(i, j, k, icomp); +#elif defined(WARPX_DIM_RZ) + // rzcomp below gives the component id, 0 to (n_rz_comp-1) for a given field + const int rzcomp = n % n_rz_comp; + // Accessing the correct rz component from the cell-centered multifab + // that has back-transformed fields and storing it for the appropriate user-requested field, icomp + // For example, for 2 rz modes, we have three components (n_rz_comp=3) for each field + // If n = 4 gives icomp = 1 (for Et) obtained from field_map_ptr, + // rzcomp = 4 - int(floor(4/3))*3 = 4 - 3 = 1 + // Thus we are accessing real component of mode 1 of Et (note that modes go from 0 to 1) + // Since the fields are stored contiguously in src_arr, icomp*n_rz_comp + rz_comp accesses + // real part of mode 1 for Et (1*3+1) = 4 + dst_arr(i, k_lab, k, n) = src_arr(i, j, k, icomp*n_rz_comp+rzcomp); +#else + dst_arr(k_lab, j, k, n) = src_arr(i, j, k, icomp); #endif } ); } @@ -185,7 +205,12 @@ BackTransformFunctor::InitData () for (int i = 0; i < m_varnames.size(); ++i) { +#ifdef WARPX_DIM_RZ + const 
int field_id = i / WarpX::ncomps; + m_map_varnames[i] = m_possible_fields_to_dump[ m_varnames_fields[field_id] ]; +#else m_map_varnames[i] = m_possible_fields_to_dump[ m_varnames[i] ] ; +#endif } } @@ -202,6 +227,46 @@ BackTransformFunctor::LorentzTransformZ (amrex::MultiFab& data, amrex::Real gamm amrex::Array4< amrex::Real > arr = data[mfi].array(); amrex::Real clight = PhysConst::c; amrex::Real inv_clight = 1.0_rt/clight; +#ifdef WARPX_DIM_RZ + const int n_rcomps = WarpX::ncomps; + amrex::ParallelFor( tbx, + [=] AMREX_GPU_DEVICE (int i, int j, int k) + { + for (int mode_comp = 0; mode_comp < n_rcomps; ++mode_comp) { + // Back-transform the transverse electric and magnetic fields. + // Note that the z-components, Ez, Bz, are not changed by the transform. + amrex::Real e_lab, b_lab, j_lab, rho_lab; + + // Transform Er_boost & Bt_boost to lab-frame for corresponding mode (mode_comp) + e_lab = gamma_boost * ( arr(i, j, k, n_rcomps*0 + mode_comp) + + beta_boost * clight * arr(i, j, k, n_rcomps*4+ mode_comp) ); + b_lab = gamma_boost * ( arr(i, j, k, n_rcomps*4 + mode_comp) + + beta_boost * inv_clight * arr(i, j, k, n_rcomps*0 + mode_comp) ); + // Store lab-frame data in-place + arr(i, j, k, n_rcomps*0 + mode_comp) = e_lab; + arr(i, j, k, n_rcomps*4 + mode_comp) = b_lab; + + // Transform Et_boost & Br_boost to lab-frame for corresponding mode (mode_comp) + e_lab = gamma_boost * ( arr(i, j, k, n_rcomps*1 + mode_comp) + - beta_boost * clight * arr(i, j, k, n_rcomps*3 + mode_comp) ); + b_lab = gamma_boost * ( arr(i, j, k, n_rcomps*3 + mode_comp) + - beta_boost * inv_clight * arr(i, j, k, n_rcomps*1 + mode_comp) ); + // Store lab-frame data in-place + arr(i, j, k, n_rcomps*1 + mode_comp) = e_lab; + arr(i, j, k, n_rcomps*3 + mode_comp) = b_lab; + + // Transform charge density z-component of current density + j_lab = gamma_boost * ( arr(i, j, k, n_rcomps*8 + mode_comp) + + beta_boost * clight * arr(i, j, k, n_rcomps*9 + mode_comp) ); + rho_lab = gamma_boost * ( arr(i, j, k, n_rcomps*9 + mode_comp) + + beta_boost * inv_clight * arr(i, j, k, n_rcomps*8 + mode_comp) ); + // Store lab-frame jz and rho in-place + arr(i, j, k, n_rcomps*8 + mode_comp) = j_lab; + arr(i, j, k, n_rcomps*9 + mode_comp) = rho_lab; + } + } + ); +#else // arr(x,y,z,comp) has ten-components namely, // Ex Ey Ez Bx By Bz jx jy jz rho in that order. amrex::ParallelFor( tbx, @@ -239,6 +304,7 @@ BackTransformFunctor::LorentzTransformZ (amrex::MultiFab& data, amrex::Real gamm arr(i, j, k, 9) = rho_lab; } ); +#endif } } diff --git a/Source/Diagnostics/MultiDiagnostics.cpp b/Source/Diagnostics/MultiDiagnostics.cpp index 62218a17568..1b9f5615ae7 100644 --- a/Source/Diagnostics/MultiDiagnostics.cpp +++ b/Source/Diagnostics/MultiDiagnostics.cpp @@ -25,9 +25,6 @@ MultiDiagnostics::MultiDiagnostics () alldiags[i] = std::make_unique(i, diags_names[i]); } else if ( diags_types[i] == DiagTypes::BackTransformed ){ alldiags[i] = std::make_unique(i, diags_names[i]); -#ifdef WARPX_DIM_RZ - ablastr::warn_manager::WMRecordWarning("MultiDiagnostics", "BackTransformed diagnostics for fields is not yet fully implemented in RZ. 
Field output might be incorrect."); -#endif } else if ( diags_types[i] == DiagTypes::BoundaryScraping ){ alldiags[i] = std::make_unique(i, diags_names[i]); } else { From 4e9245e0e95fa268a4aa14ad9e2bbf2c59c24643 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 17 Nov 2022 10:12:03 -0800 Subject: [PATCH 0162/1346] Add Python interface for flux injection (#3486) * Update PICMI code * Upgrade picmistandard version * Update PICMI version and assert --- Docs/requirements.txt | 2 +- Python/pywarpx/picmi.py | 26 ++++++++++++++++++++++++-- Python/setup.py | 2 +- requirements.txt | 2 +- 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index ffd547be4e2..0ce759b3171 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -13,7 +13,7 @@ docutils<=0.16 # PICMI API docs # note: keep in sync with version in ../requirements.txt -picmistandard==0.0.20 +picmistandard==0.0.22 # for development against an unreleased PICMI version, use: # picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index a85cc37845b..0d6eebf6327 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -340,11 +340,11 @@ def set_species_attributes(self, species, layout): species.injection_style = "nuniformpercell" species.num_particles_per_cell_each_dim = layout.n_macroparticle_per_cell elif isinstance(layout, PseudoRandomLayout): - assert (layout.n_macroparticles_per_cell is not None), Exception('WarpX only supports n_macroparticles_per_cell for the PseudoRandomLayout with UniformDistribution') + assert (layout.n_macroparticles_per_cell is not None), Exception('WarpX only supports n_macroparticles_per_cell for the PseudoRandomLayout with this distribution') species.injection_style = "nrandompercell" species.num_particles_per_cell = layout.n_macroparticles_per_cell else: - raise Exception('WarpX does not support the specified layout for UniformDistribution') + raise Exception('WarpX does not support the specified layout for this distribution') species.xmin = self.lower_bound[0] species.xmax = self.upper_bound[0] @@ -385,6 +385,28 @@ def setup_parse_momentum_functions(self, species): species.__setattr__(f'momentum_function_u{sdir}(x,y,z)', f'({expression})/{constants.c}') +class UniformFluxDistribution(picmistandard.PICMI_UniformFluxDistribution, DensityDistributionBase): + def initialize_inputs(self, species_number, layout, species, density_scale): + + self.fill_in = False + self.set_mangle_dict() + self.set_species_attributes(species, layout) + + species.profile = "constant" + species.density = self.flux + if density_scale is not None: + species.density *= density_scale + species.flux_normal_axis = self.flux_normal_axis + species.surface_flux_pos = self.surface_flux_position + species.flux_direction = self.flux_direction + + # --- Use specific attributes for flux injection + species.injection_style = "nfluxpercell" + assert (isinstance(layout, PseudoRandomLayout)), Exception('UniformFluxDistribution only supports the PseudoRandomLayout in WarpX') + if species.momentum_distribution_type == "gaussian": + species.momentum_distribution_type = "gaussianflux" + + class UniformDistribution(picmistandard.PICMI_UniformDistribution, DensityDistributionBase): def initialize_inputs(self, species_number, layout, species, density_scale): diff --git a/Python/setup.py b/Python/setup.py index 03ddd684459..57fca38ad72 100644 --- a/Python/setup.py +++ b/Python/setup.py 
@@ -59,7 +59,7 @@ package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", package_data = package_data, - install_requires = ['numpy', 'picmistandard==0.0.20', 'periodictable'], + install_requires = ['numpy', 'picmistandard==0.0.22', 'periodictable'], python_requires = '>=3.7', zip_safe=False ) diff --git a/requirements.txt b/requirements.txt index 9d6f5bc8840..18c1f751b99 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ periodictable~=1.5 # PICMI # note: don't forget to update the version in Docs/requirements.txt, too -picmistandard==0.0.20 +picmistandard==0.0.22 # for development against an unreleased PICMI version, use: #picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python From 2c00044641882f35c70528b913a8d9efbb0a5336 Mon Sep 17 00:00:00 2001 From: Yinjian Zhao Date: Fri, 18 Nov 2022 10:54:54 -0700 Subject: [PATCH 0163/1346] Adding an if statement for the last step diagnostic (#3436) * modified * Include max_step and stop_time as input parameters * Add comments * update * update * Fix CI test: apply suggestion from @ax3l * Do not force consistency between `max_step` and `stop_time` Co-authored-by: Dave Grote Co-authored-by: Edoardo Zoni --- Python/pywarpx/WarpX.py | 16 +++++++++------- Python/pywarpx/picmi.py | 9 ++------- Source/Evolve/WarpXEvolve.cpp | 9 +++++++-- Source/Initialization/WarpXInitData.cpp | 12 ------------ Source/WarpX.H | 2 +- 5 files changed, 19 insertions(+), 29 deletions(-) diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 517c74d421d..76f66c6d270 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -30,8 +30,13 @@ class WarpX(Bucket): A Python wrapper for the WarpX C++ class """ - def create_argv_list(self): + def create_argv_list(self, **kw): argv = [] + + for k, v in kw.items(): + if v is not None: + argv.append(f'{k} = {v}') + argv += warpx.attrlist() argv += my_constants.attrlist() argv += amr.attrlist() @@ -86,8 +91,8 @@ def create_argv_list(self): return argv - def init(self, mpi_comm=None): - argv = ['warpx'] + self.create_argv_list() + def init(self, mpi_comm=None, **kw): + argv = ['warpx'] + self.create_argv_list(**kw) libwarpx.initialize(argv, mpi_comm=mpi_comm) def evolve(self, nsteps=-1): @@ -103,10 +108,7 @@ def getProbHi(self, direction): return libwarpx.libwarpx_so.warpx_getProbHi(direction) def write_inputs(self, filename='inputs', **kw): - argv = self.create_argv_list() - - for k, v in kw.items(): - argv.append(f'{k} = {v}') + argv = self.create_argv_list(**kw) # Sort the argv list to make it more human readable argv.sort() diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 0d6eebf6327..9399fd92606 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1586,16 +1586,11 @@ def initialize_warpx(self, mpi_comm=None): return self.warpx_initialized = True - pywarpx.warpx.init(mpi_comm) + pywarpx.warpx.init(mpi_comm, max_step=self.max_steps, stop_time=self.max_time) def write_input_file(self, file_name='inputs'): self.initialize_inputs() - kw = {} - if self.max_steps is not None: - kw['max_step'] = self.max_steps - if self.max_time is not None: - kw['stop_time'] = self.max_time - pywarpx.warpx.write_inputs(file_name, **kw) + pywarpx.warpx.write_inputs(file_name, max_step=self.max_steps, stop_time=self.max_time) def step(self, nsteps=None, mpi_comm=None): self.initialize_inputs() diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 16110fd2250..c5832275796 100644 --- 
a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -68,7 +68,7 @@ WarpX::Evolve (int numsteps) if (numsteps < 0) { // Note that the default argument is numsteps = -1 numsteps_max = max_step; } else { - numsteps_max = std::min(istep[0]+numsteps, max_step); + numsteps_max = istep[0] + numsteps; } bool early_params_checked = false; // check typos in inputs after step 1 @@ -359,7 +359,12 @@ WarpX::Evolve (int numsteps) // End loop on time steps } - multi_diags->FilterComputePackFlushLastTimestep( istep[0] ); + // This if statement is needed for PICMI, which allows the Evolve routine to be + // called multiple times, otherwise diagnostics will be done at every call, + // regardless of the diagnostic period parameter provided in the inputs. + if (istep[0] == max_step || (stop_time - 1.e-3*dt[0] <= cur_time && cur_time < stop_time + dt[0])) { + multi_diags->FilterComputePackFlushLastTimestep( istep[0] ); + } } /* /brief Perform one PIC iteration, without subcycling diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 4f6522d8fba..0d3e919a040 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -545,18 +545,6 @@ WarpX::ComputeMaxStep () if (do_compute_max_step_from_zmax) { computeMaxStepBoostAccelerator(geom[0]); } - - // Make max_step and stop_time self-consistent, assuming constant dt. - - // If max_step is the limiting condition, decrease stop_time consistently - if (stop_time > t_new[0] + dt[0]*(max_step - istep[0]) ) { - stop_time = t_new[0] + dt[0]*(max_step - istep[0]); - } - // If stop_time is the limiting condition instead, decrease max_step consistently - else { - // The static_cast should not overflow since stop_time is the limiting condition here - max_step = static_cast(istep[0] + std::ceil( (stop_time-t_new[0])/dt[0] )); - } } /* \brief computes max_step for wakefield simulation in boosted frame. diff --git a/Source/WarpX.H b/Source/WarpX.H index d33f320f96e..20d4e4210d6 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -534,7 +534,7 @@ public: /** * \brief - * Compute the last timestep of the simulation and make max_step and stop_time self-consistent. + * Compute the last time step of the simulation * Calls computeMaxStepBoostAccelerator() if required. */ void ComputeMaxStep (); From 2775ac17fc78b3433d313da46a1c81f932e3912e Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Fri, 18 Nov 2022 10:00:12 -0800 Subject: [PATCH 0164/1346] PushPX: GPU kernel optimization (#3402) * PushPX: GPU kernel optimization The GatherAndPush kernel in the PushPX function has a very low occupancy due to register pressure. There are a number of reasons. By default, we compile with QED module on, even if we do not use it at run time. Another culprit is the GetExternalEB functor that contains 7 Parsers. Again, we have to pay a high runtime cost, even if we do not use it. In this PR, we move some runtime logic out of the GPU kernel to eleminate the unnecessary cost if QED and GetExternalEB are not used at run time. Here are some performance results before this PR. | QED | GetExternalEB | Time | |-----+---------------+------| | On | On | 2.17 | | Off | On | 1.79 | | Off | Commented out | 1.34 | Note that in the tests neither QED nor GetExternalEB is actually used at run time. But the extra cost is very high. With this PR, the kernel time is the same as that when both QED and GetExternalEB are disabled at compile time, even though both options are disabled at run time. 
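As an illustration, the dispatch idea can be sketched in plain standard C++ (a minimal sketch only; push_loop, push_dispatch and the flag names are invented for this example and are not the AMReX or WarpX API): the runtime flags are translated into template arguments once, outside the particle loop, so that if constexpr removes the disabled branches from the compiled kernel body.

// Minimal stand-alone sketch of compile-time dispatch (illustrative names only).
#include <cstdio>
#include <type_traits>
#include <vector>

// The loop body receives the options as compile-time constants, so disabled
// branches are removed by "if constexpr" instead of being tested per particle.
template <int exteb_opt, int qed_opt, typename F>
void push_loop (long n, F const& body)
{
    for (long i = 0; i < n; ++i) {
        body(i, std::integral_constant<int, exteb_opt>{},
                std::integral_constant<int, qed_opt>{});
    }
}

// Runtime flags are turned into template arguments once, outside the loop.
template <typename F>
void push_dispatch (int exteb_flag, int qed_flag, long n, F const& body)
{
    if (exteb_flag == 1 && qed_flag == 1) { push_loop<1,1>(n, body); }
    else if (exteb_flag == 1)             { push_loop<1,0>(n, body); }
    else if (qed_flag == 1)               { push_loop<0,1>(n, body); }
    else                                  { push_loop<0,0>(n, body); }
}

int main ()
{
    long const np = 8;
    std::vector<double> uz(np, 0.0);
    double* const p_uz = uz.data();

    int const exteb_flag = 0;  // external E/B not used at run time
    int const qed_flag   = 0;  // QED not used at run time

    push_dispatch(exteb_flag, qed_flag, np,
        [=] (long ip, auto exteb_control, auto qed_control)
        {
            p_uz[ip] += 1.0;                     // always-on work
            if constexpr (exteb_control == 1) {  // compiled out when the flag is 0
                p_uz[ip] += 0.5;
            }
            if constexpr (qed_control == 1) {    // compiled out when the flag is 0
                p_uz[ip] += 0.25;
            }
        });

    std::printf("uz[0] = %g\n", uz[0]);
    return 0;
}

In the actual change below, this dispatch is performed by the amrex::ParallelFor overload that takes a TypeList of CompileTimeOptions and passes the selected options to the lambda as compile-time constants.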
More information on the kernels compiled for MI250X. The most expensive variant with both QED and GetExternalEB on has NumSgprs: 108 NumVgprs: 256 NumAgprs: 40 TotalNumVgprs: 296 ScratchSize: 264 Occupancy: 1 The cheapest variant with both QED and GetExternalEB disabled has NumSgprs: 104 NumVgprs: 249 NumAgprs: 0 TotalNumVgprs: 249 ScratchSize: 144 Occupancy: 2 * Fix Comments Co-authored-by: Axel Huebl --- Source/Particles/Gather/GetExternalFields.H | 6 +- .../Particles/PhysicalParticleContainer.cpp | 73 +++++++++++++++---- Source/Particles/Pusher/PushSelector.H | 47 +++++------- 3 files changed, 80 insertions(+), 46 deletions(-) diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index 92f1a21bcc0..ff107dce521 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -13,13 +13,12 @@ #include #include -enum ExternalFieldInitType { None, Constant, Parser, RepeatedPlasmaLens, Unknown }; - /** \brief Functor class that assigns external * field values (E and B) to particles. */ struct GetExternalEBField { + enum ExternalFieldInitType { None, Constant, Parser, RepeatedPlasmaLens, Unknown }; GetExternalEBField () = default; @@ -55,6 +54,9 @@ struct GetExternalEBField const amrex::ParticleReal* AMREX_RESTRICT m_uy = nullptr; const amrex::ParticleReal* AMREX_RESTRICT m_uz = nullptr; + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + bool isNoOp () const { return (m_Etype == None && m_Btype == None); } + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void operator () (long i, amrex::ParticleReal& field_Ex, diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index f37ea5ea38d..62d4df594e9 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -2624,7 +2624,24 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti, const auto t_do_not_gather = do_not_gather; - amrex::ParallelFor( np_to_push, [=] AMREX_GPU_DEVICE (long ip) + enum exteb_flags : int { no_exteb, has_exteb }; + enum qed_flags : int { no_qed, has_qed }; + + int exteb_runtime_flag = getExternalEB.isNoOp() ? no_exteb : has_exteb; +#ifdef WARPX_QED + int qed_runtime_flag = (local_has_quantum_sync || do_sync) ? has_qed : no_qed; +#else + int qed_runtime_flag = no_qed; +#endif + + // Using this version of ParallelFor with compile time options + // improves performance when qed or external EB are not used by reducing + // register pressure. + amrex::ParallelFor(TypeList, + CompileTimeOptions>{}, + {exteb_runtime_flag, qed_runtime_flag}, + np_to_push, [=] AMREX_GPU_DEVICE (long ip, auto exteb_control, + [[maybe_unused]] auto qed_control) { amrex::ParticleReal xp, yp, zp; getPosition(ip, xp, yp, zp); @@ -2650,30 +2667,54 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti, dx_arr, xyzmin_arr, lo, n_rz_azimuthal_modes, nox, galerkin_interpolation); } - // Externally applied E and B-field in Cartesian co-ordinates - getExternalEB(ip, Exp, Eyp, Ezp, Bxp, Byp, Bzp); + + auto const& externeb_fn = getExternalEB; // Have to do this for nvcc + if constexpr (exteb_control == has_exteb) { + externeb_fn(ip, Exp, Eyp, Ezp, Bxp, Byp, Bzp); + } scaleFields(xp, yp, zp, Exp, Eyp, Ezp, Bxp, Byp, Bzp); - doParticlePush(getPosition, setPosition, copyAttribs, ip, - ux[ip], uy[ip], uz[ip], - Exp, Eyp, Ezp, Bxp, Byp, Bzp, - ion_lev ? 
ion_lev[ip] : 0, - m, q, pusher_algo, do_crr, do_copy, #ifdef WARPX_QED - do_sync, - t_chi_max, + if (!do_sync) #endif - dt); - + { + doParticlePush<0>(getPosition, setPosition, copyAttribs, ip, + ux[ip], uy[ip], uz[ip], + Exp, Eyp, Ezp, Bxp, Byp, Bzp, + ion_lev ? ion_lev[ip] : 0, + m, q, pusher_algo, do_crr, do_copy, #ifdef WARPX_QED - if (local_has_quantum_sync) { - evolve_opt(ux[ip], uy[ip], uz[ip], - Exp, Eyp, Ezp,Bxp, Byp, Bzp, - dt, p_optical_depth_QSR[ip]); + t_chi_max, +#endif + dt); + } +#ifdef WARPX_QED + else { + if constexpr (qed_control == has_qed) { + doParticlePush<1>(getPosition, setPosition, copyAttribs, ip, + ux[ip], uy[ip], uz[ip], + Exp, Eyp, Ezp, Bxp, Byp, Bzp, + ion_lev ? ion_lev[ip] : 0, + m, q, pusher_algo, do_crr, do_copy, + t_chi_max, + dt); + } } #endif +#ifdef WARPX_QED + auto foo_local_has_quantum_sync = local_has_quantum_sync; + auto foo_podq = p_optical_depth_QSR; + auto& evolve_opt_fn = evolve_opt; // have to do all these for nvcc + if constexpr (qed_control == has_qed) { + if (foo_local_has_quantum_sync) { + evolve_opt_fn(ux[ip], uy[ip], uz[ip], + Exp, Eyp, Ezp,Bxp, Byp, Bzp, + dt, foo_podq[ip]); + } + } +#endif }); } diff --git a/Source/Particles/Pusher/PushSelector.H b/Source/Particles/Pusher/PushSelector.H index ed439b4b31c..a56dda2b925 100644 --- a/Source/Particles/Pusher/PushSelector.H +++ b/Source/Particles/Pusher/PushSelector.H @@ -23,6 +23,7 @@ /** * \brief Push position and momentum for a single particle * + * \tparam do_sync Whether to include quantum synchrotron radiation (QSR) * \param GetPosition A functor for returning the particle position. * \param SetPosition A functor for setting the particle position. * \param copyAttribs A functor for storing the old u and x @@ -36,10 +37,11 @@ * \param pusher_algo 0: Boris, 1: Vay, 2: HigueraCary * \param do_crr Whether to do the classical radiation reaction * \param do_copy Whether to copy the old x and u for the BTD - * \param do_sync Whether to include quantum synchrotron radiation (QSR) * \param t_chi_max Cutoff chi for QSR * \param dt Time step size */ + +template AMREX_GPU_DEVICE AMREX_FORCE_INLINE void doParticlePush(const GetParticlePosition& GetPosition, const SetParticlePosition& SetPosition, @@ -56,60 +58,53 @@ void doParticlePush(const GetParticlePosition& GetPosition, const amrex::ParticleReal Bz, const int ion_lev, const amrex::ParticleReal m, - const amrex::ParticleReal q, + const amrex::ParticleReal a_q, const int pusher_algo, const int do_crr, const int do_copy, #ifdef WARPX_QED - const int do_sync, const amrex::Real t_chi_max, #endif const amrex::Real dt) { + amrex::ParticleReal qp = a_q; + if (ion_lev) { qp *= ion_lev; } + if (do_copy) copyAttribs(i); if (do_crr) { #ifdef WARPX_QED - if (do_sync) { + amrex::ignore_unused(t_chi_max); + if constexpr (do_sync) { auto chi = QedUtils::chi_ele_pos(m*ux, m*uy, m*uz, Ex, Ey, Ez, Bx, By, Bz); if (chi < t_chi_max) { UpdateMomentumBorisWithRadiationReaction(ux, uy, uz, Ex, Ey, Ez, Bx, - By, Bz, q, m, dt); + By, Bz, qp, m, dt); } else { UpdateMomentumBoris( ux, uy, uz, Ex, Ey, Ez, Bx, - By, Bz, q, m, dt); + By, Bz, qp, m, dt); } amrex::ParticleReal x, y, z; GetPosition(i, x, y, z); UpdatePosition(x, y, z, ux, uy, uz, dt ); SetPosition(i, x, y, z); - } else { + } else +#endif + { + UpdateMomentumBorisWithRadiationReaction(ux, uy, uz, Ex, Ey, Ez, Bx, - By, Bz, q, m, dt); + By, Bz, qp, m, dt); amrex::ParticleReal x, y, z; GetPosition(i, x, y, z); UpdatePosition(x, y, z, ux, uy, uz, dt ); SetPosition(i, x, y, z); } -#else - amrex::ParticleReal qp = 
q; - if (ion_lev) { qp *= ion_lev; } - UpdateMomentumBorisWithRadiationReaction(ux, uy, uz, - Ex, Ey, Ez, Bx, - By, Bz, qp, m, dt); - amrex::ParticleReal x, y, z; - GetPosition(i, x, y, z); - UpdatePosition(x, y, z, ux, uy, uz, dt ); - SetPosition(i, x, y, z); -#endif } else if (pusher_algo == ParticlePusherAlgo::Boris) { - amrex::ParticleReal qp = q; - if (ion_lev) { qp *= ion_lev; } UpdateMomentumBoris( ux, uy, uz, Ex, Ey, Ez, Bx, By, Bz, qp, m, dt); @@ -118,8 +113,6 @@ void doParticlePush(const GetParticlePosition& GetPosition, UpdatePosition(x, y, z, ux, uy, uz, dt ); SetPosition(i, x, y, z); } else if (pusher_algo == ParticlePusherAlgo::Vay) { - amrex::ParticleReal qp = q; - if (ion_lev){ qp *= ion_lev; } UpdateMomentumVay( ux, uy, uz, Ex, Ey, Ez, Bx, By, Bz, qp, m, dt); @@ -128,8 +121,6 @@ void doParticlePush(const GetParticlePosition& GetPosition, UpdatePosition(x, y, z, ux, uy, uz, dt ); SetPosition(i, x, y, z); } else if (pusher_algo == ParticlePusherAlgo::HigueraCary) { - amrex::ParticleReal qp = q; - if (ion_lev){ qp *= ion_lev; } UpdateMomentumHigueraCary( ux, uy, uz, Ex, Ey, Ez, Bx, By, Bz, qp, m, dt); @@ -137,9 +128,9 @@ void doParticlePush(const GetParticlePosition& GetPosition, GetPosition(i, x, y, z); UpdatePosition(x, y, z, ux, uy, uz, dt ); SetPosition(i, x, y, z); - } else { - amrex::Abort("Unknown particle pusher"); - } + } //else { +// amrex::Abort("Unknown particle pusher"); +// } } #endif // WARPX_PARTICLES_PUSHER_SELECTOR_H_ From c54b43fb936caf3ceaf833ee697dfc3cdc27db27 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Sat, 19 Nov 2022 19:47:04 -0800 Subject: [PATCH 0165/1346] Bugfix for "particle coordinates" with plane field probe reduced diagnostic (#3497) * changed the method by which the grid coordinates are generated for "plane" field probe * use amrex::literals to avoid implicit cast * Restrict function arguments * make failing compilers happy Co-authored-by: Axel Huebl --- Source/Diagnostics/ReducedDiags/FieldProbe.H | 11 +++ .../Diagnostics/ReducedDiags/FieldProbe.cpp | 87 +++++++++---------- 2 files changed, 53 insertions(+), 45 deletions(-) diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.H b/Source/Diagnostics/ReducedDiags/FieldProbe.H index 655ebc2a068..4f72fc72068 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.H +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.H @@ -114,6 +114,17 @@ private: /** Check if the probe is in the simulation domain boundary */ bool ProbeInDomain () const; + + /** + * Simple utility function to normalize the components of a "vector" + */ + void normalize(amrex::Real &AMREX_RESTRICT x, amrex::Real &AMREX_RESTRICT y, + amrex::Real &AMREX_RESTRICT z){ + amrex::Real mag = std::sqrt(x*x + y*y + z*z); + x /= mag; + y /= mag; + z /= mag; + } }; #endif // WARPX_DIAGNOSTICS_REDUCEDDIAGS_FIELDPROBE_H_ diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index 17afcf62af2..19643829e17 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -240,31 +240,23 @@ FieldProbe::FieldProbe (std::string rd_name) void FieldProbe::InitData () { - if (m_probe_geometry == DetectorGeometry::Point) - { + using namespace amrex::literals; - // create 1D vector for X, Y, and Z of particles - amrex::Vector xpos; - amrex::Vector ypos; - amrex::Vector zpos; + // create 1D vector for X, Y, and Z coordinates of "particles" + amrex::Vector xpos; + amrex::Vector ypos; + 
amrex::Vector zpos; - // for now, only one MPI rank adds a probe particle - if (ParallelDescriptor::IOProcessor()) + // for now, only one MPI rank adds probe "particles" + if (ParallelDescriptor::IOProcessor()) + { + if (m_probe_geometry == DetectorGeometry::Point) { xpos.push_back(x_probe); ypos.push_back(y_probe); zpos.push_back(z_probe); } - - // add particles on lev 0 to m_probe - m_probe.AddNParticles(0, xpos, ypos, zpos); - } - else if (m_probe_geometry == DetectorGeometry::Line) - { - amrex::Vector xpos; - amrex::Vector ypos; - amrex::Vector zpos; - if (ParallelDescriptor::IOProcessor()) + else if (m_probe_geometry == DetectorGeometry::Line) { xpos.reserve(m_resolution); ypos.reserve(m_resolution); @@ -282,66 +274,71 @@ void FieldProbe::InitData () zpos.push_back(z_probe + (DetLineStepSize[2] * step)); } } - m_probe.AddNParticles(0, xpos, ypos, zpos); - } - else if (m_probe_geometry == DetectorGeometry::Plane) - { - amrex::Vector xpos; - amrex::Vector ypos; - amrex::Vector zpos; - if (ParallelDescriptor::IOProcessor()) + else if (m_probe_geometry == DetectorGeometry::Plane) { std::size_t const res2 = std::size_t(m_resolution) * std::size_t(m_resolution); xpos.reserve(res2); ypos.reserve(res2); zpos.reserve(res2); + // ensure that input vectors are normalized + normalize(target_normal_x, target_normal_y, target_normal_z); + normalize(target_up_x, target_up_y, target_up_z); + // create vector orthonormal to input vectors amrex::Real orthotarget[3]{ target_normal_y * target_up_z - target_normal_z * target_up_y, target_normal_z * target_up_x - target_normal_x * target_up_z, target_normal_x * target_up_y - target_normal_y * target_up_x}; + // find upper left and lower right bounds of detector amrex::Real direction[3]{ orthotarget[0] - target_up_x, orthotarget[1] - target_up_y, orthotarget[2] - target_up_z}; - amrex::Real upperleft[3]{ + normalize(direction[0], direction[1], direction[2]); + amrex::Real uppercorner[3]{ x_probe - (direction[0] * detector_radius), y_probe - (direction[1] * detector_radius), z_probe - (direction[2] * detector_radius)}; - amrex::Real lowerright[3]{ + amrex::Real lowercorner[3]{ + uppercorner[0] - (target_up_x * std::sqrt(2_rt) * detector_radius), + uppercorner[1] - (target_up_y * std::sqrt(2_rt) * detector_radius), + uppercorner[2] - (target_up_z * std::sqrt(2_rt) * detector_radius)}; + amrex::Real loweropposite[3]{ x_probe + (direction[0] * detector_radius), y_probe + (direction[1] * detector_radius), z_probe + (direction[2] * detector_radius)}; + // create array containing point-to-point step size - amrex::Real DetPlaneStepSize[3]{ - (lowerright[0] - upperleft[0]) / (m_resolution - 1), - (lowerright[1] - upperleft[1]) / (m_resolution - 1), - (lowerright[2] - upperleft[2]) / (m_resolution - 1)}; + amrex::Real SideStepSize[3]{ + (loweropposite[0] - lowercorner[0]) / (m_resolution - 1), + (loweropposite[1] - lowercorner[1]) / (m_resolution - 1), + (loweropposite[2] - lowercorner[2]) / (m_resolution - 1)}; + amrex::Real UpStepSize[3]{ + (uppercorner[0] - lowercorner[0]) / (m_resolution - 1), + (uppercorner[1] - lowercorner[1]) / (m_resolution - 1), + (uppercorner[2] - lowercorner[2]) / (m_resolution - 1)}; + amrex::Real temp_pos[3]{}; - // Target point on top of plane (arbitrarily top of plane perpendicular to yz) - // For each point along top of plane, fill in YZ's beneath, then push back - for ( int step = 0; step < m_resolution; step++) + // Starting at the lowercorner point, step sideways and up to form + // a grid of equally spaced coordinate points + for 
( int sidestep = 0; sidestep < m_resolution; sidestep++) { - temp_pos[0] = upperleft[0] + (DetPlaneStepSize[0] * step); - for ( int yzstep = 0; yzstep < m_resolution; yzstep++) + for ( int upstep = 0; upstep < m_resolution; upstep++) { - temp_pos[1] = upperleft[1] + (DetPlaneStepSize[1] * yzstep); - temp_pos[2] = upperleft[2] + (DetPlaneStepSize[2] * yzstep); + temp_pos[0] = lowercorner[0] + SideStepSize[0] * sidestep + UpStepSize[0] * upstep; + temp_pos[1] = lowercorner[1] + SideStepSize[1] * sidestep + UpStepSize[1] * upstep; + temp_pos[2] = lowercorner[2] + SideStepSize[2] * sidestep + UpStepSize[2] * upstep; xpos.push_back(temp_pos[0]); ypos.push_back(temp_pos[1]); zpos.push_back(temp_pos[2]); } } } - m_probe.AddNParticles(0, xpos, ypos, zpos); - } - else - { - amrex::Abort(Utils::TextMsg::Err( - "Invalid probe geometry. Valid geometries are Point, Line, and Plane.")); } + // add particles on lev 0 to m_probe + m_probe.AddNParticles(0, xpos, ypos, zpos); } void FieldProbe::LoadBalance () From f1afd8b4cf043133ab112b47259e2f9337fc8fe5 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 21 Nov 2022 15:18:38 -0800 Subject: [PATCH 0166/1346] AMReX/PICSAR: Weekly Update (#3530) * AMReX/PICSAR: Weekly Update * Fix bug --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- Source/BoundaryConditions/PML.H | 2 ++ cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 6 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 990e5e82c09..620a94a7463 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach d8bc97f92a1a568b6e996db3b8d9715fced0464f && cd - + cd amrex && git checkout --detach cf0afb0c152e2c942073731da7a1e0007886eed4 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 0d1b86390e6..105652bab13 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = d8bc97f92a1a568b6e996db3b8d9715fced0464f +branch = cf0afb0c152e2c942073731da7a1e0007886eed4 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 9934fb64c4a..6a946de9ff0 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = d8bc97f92a1a568b6e996db3b8d9715fced0464f +branch = cf0afb0c152e2c942073731da7a1e0007886eed4 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 4688c19e508..a1ce21a9140 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -58,6 +58,8 @@ struct SigmaBox using SigmaVect = std::array; + using value_type = void; // needed by amrex::FabArray + SigmaVect sigma; SigmaVect sigma_cumsum; SigmaVect sigma_star; diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 
7d9691c9b6f..ab0a904f2e4 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -240,7 +240,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "d8bc97f92a1a568b6e996db3b8d9715fced0464f" +set(WarpX_amrex_branch "cf0afb0c152e2c942073731da7a1e0007886eed4" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 760b0128b9e..6fab881bafe 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach d8bc97f92a1a568b6e996db3b8d9715fced0464f && cd - +cd amrex && git checkout --detach cf0afb0c152e2c942073731da7a1e0007886eed4 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 59348279b8d103e26c40fac6748368824ecd0da9 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Mon, 21 Nov 2022 16:57:00 -0800 Subject: [PATCH 0167/1346] Assert for `diag_type` (prevent input typos) (#3532) --- Source/Diagnostics/MultiDiagnostics.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Source/Diagnostics/MultiDiagnostics.cpp b/Source/Diagnostics/MultiDiagnostics.cpp index 1b9f5615ae7..82ff784ab60 100644 --- a/Source/Diagnostics/MultiDiagnostics.cpp +++ b/Source/Diagnostics/MultiDiagnostics.cpp @@ -67,6 +67,9 @@ MultiDiagnostics::ReadParameters () ParmParse pp_diag_name(diags_names[i]); std::string diag_type_str; pp_diag_name.get("diag_type", diag_type_str); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + diag_type_str == "Full" || diag_type_str == "BackTransformed" || diag_type_str == "BoundaryScraping", + ".diag_type must be Full or BackTransformed or BoundaryScraping"); if (diag_type_str == "Full") diags_types[i] = DiagTypes::Full; if (diag_type_str == "BackTransformed") diags_types[i] = DiagTypes::BackTransformed; if (diag_type_str == "BoundaryScraping") diags_types[i] = DiagTypes::BoundaryScraping; From 9ba3e9222c3e6c0f1e9fefdd527fe4e3dca2a212 Mon Sep 17 00:00:00 2001 From: AlexanderSinn <64009254+AlexanderSinn@users.noreply.github.com> Date: Thu, 24 Nov 2022 02:26:00 +0100 Subject: [PATCH 0168/1346] Speed up Vay current deposition (#3529) * Speed up Vay current deposition by about 3 times * add endif * test 2D * fix ( * cleaning + check if i_new == i_old --- .../Particles/Deposition/CurrentDeposition.H | 186 +++++++++--------- 1 file changed, 91 insertions(+), 95 deletions(-) diff --git a/Source/Particles/Deposition/CurrentDeposition.H b/Source/Particles/Deposition/CurrentDeposition.H index edeaba7cd63..685907d0aa9 100644 --- a/Source/Particles/Deposition/CurrentDeposition.H +++ b/Source/Particles/Deposition/CurrentDeposition.H @@ -809,11 +809,16 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, const amrex::Real ymin = xyzmin[1]; #endif - // Auxiliary constants + // Allocate temporary arrays #if defined(WARPX_DIM_3D) - const amrex::Real onethird = 1._rt / 3._rt; - const amrex::Real onesixth = 1._rt / 6._rt; + AMREX_ALWAYS_ASSERT(jx_fab.box() == jy_fab.box() && jx_fab.box() == jz_fab.box()); + amrex::FArrayBox temp_fab{jx_fab.box(), 4}; +#elif defined(WARPX_DIM_XZ) + AMREX_ALWAYS_ASSERT(jx_fab.box() == jz_fab.box()); + amrex::FArrayBox 
temp_fab{jx_fab.box(), 2}; #endif + temp_fab.setVal(0._rt); + amrex::Array4 const& temp_arr = temp_fab.array(); // Inverse of light speed squared const amrex::Real invcsq = 1._rt / (PhysConst::c * PhysConst::c); @@ -933,44 +938,45 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, auto const sxn_szo = static_cast(sx_new[i] * sz_old[k]); auto const sxo_szo = static_cast(sx_old[i] * sz_old[k]); - // Jx - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), - wq * invvol * invdt * 0.5_rt * sxn_szn); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 0), - - wq * invvol * invdt * 0.5_rt * sxo_szn); + if (i_new == i_old && k_new == k_old) { + // temp arrays for Jx and Jz + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), + wq * invvol * invdt * (sxn_szn - sxo_szo)); - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_new + i, lo.y + k_old + k, 0, 0), - wq * invvol * invdt * 0.5_rt * sxn_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 1), + wq * invvol * invdt * (sxn_szo - sxo_szn)); - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), - - wq * invvol * invdt * 0.5_rt * sxo_szo); + // Jy + amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), + wqy * 0.25_rt * (sxn_szn + sxn_szo + sxo_szn + sxo_szo)); + } else { + // temp arrays for Jx and Jz + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), + wq * invvol * invdt * sxn_szn); - // Jy - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), - wqy * 0.25_rt * sxn_szn); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), + - wq * invvol * invdt * sxo_szo); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_old + k, 0, 0), - wqy * 0.25_rt * sxn_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_old + k, 0, 1), + wq * invvol * invdt * sxn_szo); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 0), - wqy * 0.25_rt * sxo_szn); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 1), + - wq * invvol * invdt * sxo_szn); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), - wqy * 0.25_rt * sxo_szo); + // Jy + amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), + wqy * 0.25_rt * sxn_szn); - // Jz - amrex::Gpu::Atomic::AddNoRet(&jz_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), - wq * invvol * invdt * 0.5_rt * sxn_szn); + amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_old + k, 0, 0), + wqy * 0.25_rt * sxn_szo); - amrex::Gpu::Atomic::AddNoRet(&jz_arr(lo.x+i_new+i,lo.y+k_old+k,0,0), - - wq * invvol * invdt * 0.5_rt * sxn_szo); + amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 0), + wqy * 0.25_rt * sxo_szn); - amrex::Gpu::Atomic::AddNoRet(&jz_arr(lo.x+i_old+i,lo.y+k_new+k,0,0), - wq * invvol * invdt * 0.5_rt * sxo_szn); + amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), + wqy * 0.25_rt * sxo_szo); + } - amrex::Gpu::Atomic::AddNoRet(&jz_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), - - wq * invvol * invdt * 0.5_rt * sxo_szo); } } @@ -995,85 +1001,75 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, auto const sxn_syo_szo = static_cast(sx_new[i]) * syo_szo; auto const sxo_syo_szo = static_cast(sx_old[i]) * syo_szo; - // Jx - 
amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k), - wq * invvol * invdt * onethird * sxn_syn_szn); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_old + i, lo.y + j_new + j, lo.z + k_new + k), - - wq * invvol * invdt * onethird * sxo_syn_szn); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_new + i, lo.y + j_old + j, lo.z + k_new + k), - wq * invvol * invdt * onesixth * sxn_syo_szn); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_old + i, lo.y + j_old + j,lo.z + k_new + k), - - wq * invvol * invdt * onesixth * sxo_syo_szn); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_old + k), - wq * invvol * invdt * onesixth * sxn_syn_szo); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_old + i, lo.y + j_new + j, lo.z + k_old + k), - - wq * invvol * invdt * onesixth * sxo_syn_szo); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_new + i, lo.y + j_old + j, lo.z + k_old + k), - wq * invvol * invdt * onethird * sxn_syo_szo); - - amrex::Gpu::Atomic::AddNoRet(&jx_arr(lo.x + i_old + i, lo.y + j_old + j, lo.z + k_old + k), - - wq * invvol * invdt * onethird * sxo_syo_szo); - - // Jy - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k), - wq * invvol * invdt * onethird * sxn_syn_szn); - - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + j_old + j, lo.z + k_new + k), - - wq * invvol * invdt * onethird * sxn_syo_szn); - - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + j_new + j, lo.z + k_new + k), - wq * invvol * invdt * onesixth * sxo_syn_szn); - - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + j_old + j, lo.z + k_new + k), - - wq * invvol * invdt * onesixth * sxo_syo_szn); - - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_old + k), - wq * invvol * invdt * onesixth * sxn_syn_szo); + if (i_new == i_old && j_new == j_old && k_new == k_old) { + // temp arrays for Jx, Jy and Jz + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 0), + wq * invvol * invdt * (sxn_syn_szn - sxo_syo_szo)); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + j_old + j, lo.z + k_old + k), - - wq * invvol * invdt * onesixth * sxn_syo_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 1), + wq * invvol * invdt * (sxn_syn_szo - sxo_syo_szn)); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + j_new + j, lo.z + k_old + k), - wq * invvol * invdt * onethird * sxo_syn_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 2), + wq * invvol * invdt * (sxn_syo_szn - sxo_syn_szo)); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + j_old + j, lo.z + k_old + k), - - wq * invvol * invdt * onethird * sxo_syo_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 3), + wq * invvol * invdt * (sxo_syn_szn - sxn_syo_szo)); + } else { + // temp arrays for Jx, Jy and Jz + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 0), + wq * invvol * invdt * sxn_syn_szn); - // Jz - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k), - wq * invvol * invdt * onethird * sxn_syn_szn); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + j_old + j, lo.z + k_old + k, 0), + - wq * invvol * invdt * sxo_syo_szo); - amrex::Gpu::Atomic::AddNoRet(&jz_arr( 
lo.x + i_new + i, lo.y + j_new + j, lo.z + k_old + k), - - wq * invvol * invdt * onethird * sxn_syn_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_old + k, 1), + wq * invvol * invdt * sxn_syn_szo); - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_old + i, lo.y + j_new + j, lo.z + k_new + k), - wq * invvol * invdt * onesixth * sxo_syn_szn); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + j_old + j, lo.z + k_new + k, 1), + - wq * invvol * invdt * sxo_syo_szn); - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_old + i, lo.y + j_new + j, lo.z + k_old + k), - - wq * invvol * invdt * onesixth * sxo_syn_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_old + j, lo.z + k_new + k, 2), + wq * invvol * invdt * sxn_syo_szn); - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_new + i, lo.y + j_old + j, lo.z + k_new + k), - wq * invvol * invdt * onesixth * sxn_syo_szn); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + j_new + j, lo.z + k_old + k, 2), + - wq * invvol * invdt * sxo_syn_szo); - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_new + i, lo.y + j_old + j, lo.z + k_old + k), - - wq * invvol * invdt * onesixth * sxn_syo_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + j_new + j, lo.z + k_new + k, 3), + wq * invvol * invdt * sxo_syn_szn); - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_old + i, lo.y + j_old + j, lo.z + k_new + k), - wq * invvol * invdt * onethird * sxo_syo_szn); - - amrex::Gpu::Atomic::AddNoRet(&jz_arr( lo.x + i_old + i, lo.y + j_old + j, lo.z + k_old + k), - - wq * invvol * invdt * onethird * sxo_syo_szo); + amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_old + j, lo.z + k_old + k, 3), + - wq * invvol * invdt * sxn_syo_szo); + } } } } #endif } ); + +#if defined(WARPX_DIM_3D) + amrex::ParallelFor(jx_fab.box(), [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + { + const amrex::Real t_a = temp_arr(i,j,k,0); + const amrex::Real t_b = temp_arr(i,j,k,1); + const amrex::Real t_c = temp_arr(i,j,k,2); + const amrex::Real t_d = temp_arr(i,j,k,3); + jx_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a + t_b + t_c - 2._rt*t_d); + jy_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a + t_b - 2._rt*t_c + t_d); + jz_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a - 2._rt*t_b + t_c + t_d); + }); +#elif defined(WARPX_DIM_XZ) + amrex::ParallelFor(jx_fab.box(), [=] AMREX_GPU_DEVICE (int i, int j, int) noexcept + { + const amrex::Real t_a = temp_arr(i,j,0,0); + const amrex::Real t_b = temp_arr(i,j,0,1); + jx_arr(i,j,0) += (0.5_rt)*(t_a + t_b); + jz_arr(i,j,0) += (0.5_rt)*(t_a - t_b); + }); +#endif + // Synchronize so that temp_fab can be safely deallocated in its destructor + amrex::Gpu::streamSynchronize(); + + # if defined(WARPX_USE_GPUCLOCK) if( load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::GpuClock) { amrex::Gpu::streamSynchronize(); From 8978f6711d63b29709cd119afc3eadea7256bef2 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 23 Nov 2022 17:45:04 -0800 Subject: [PATCH 0169/1346] allow initial fields to be set through the picmi interface (#3536) --- Python/pywarpx/picmi.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 9399fd92606..5a970ffdf89 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1084,6 +1084,35 @@ def initialize_inputs(self, laser): ) / constants.c +class 
AnalyticInitialField(picmistandard.PICMI_AnalyticAppliedField): + def init(self, kw): + self.mangle_dict = None + + def initialize_inputs(self): + # Note that lower and upper_bound are not used by WarpX + + if self.mangle_dict is None: + # Only do this once so that the same variables are used in this distribution + # is used multiple times + self.mangle_dict = pywarpx.my_constants.add_keywords(self.user_defined_kw) + + if (self.Ex_expression is not None or + self.Ey_expression is not None or + self.Ez_expression is not None): + pywarpx.warpx.E_ext_grid_init_style = 'parse_e_ext_grid_function' + for sdir, expression in zip(['x', 'y', 'z'], [self.Ex_expression, self.Ey_expression, self.Ez_expression]): + expression = pywarpx.my_constants.mangle_expression(expression, self.mangle_dict) + pywarpx.warpx.__setattr__(f'E{sdir}_external_grid_function(x,y,z)', expression) + + if (self.Bx_expression is not None or + self.By_expression is not None or + self.Bz_expression is not None): + pywarpx.warpx.B_ext_grid_init_style = 'parse_b_ext_grid_function' + for sdir, expression in zip(['x', 'y', 'z'], [self.Bx_expression, self.By_expression, self.Bz_expression]): + expression = pywarpx.my_constants.mangle_expression(expression, self.mangle_dict) + pywarpx.warpx.__setattr__(f'B{sdir}_external_grid_function(x,y,z)', expression) + + class ConstantAppliedField(picmistandard.PICMI_ConstantAppliedField): def initialize_inputs(self): # Note that lower and upper_bound are not used by WarpX From 19bd6f4eeaa46aa9e80e2ca03cfbb7a4a76465f2 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 23 Nov 2022 18:52:16 -0800 Subject: [PATCH 0170/1346] Take into account mean velocity in gaussian flux injection (#3514) * Take into account normal mean velocity in gaussian flux injection * Avoid negative u * Adjust threshold to switch from one method to the other * Fix compilation error * Fix compilation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Raise error if momentum is positive * Fix compilation * Add correct import statement * Fix compilation * Fix automated test * Update checksum * Relax tolerance * Add analysis script * Add CI test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add checksum file * Make the code clearer Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 2 +- .../analysis_flux_injection_3d.py | 118 ++++++++++++++++++ .../analysis_flux_injection_rz.py | 2 +- Examples/Tests/FluxInjection/inputs_3d | 68 ++++++++++ .../benchmarks_json/FluxInjection.json | 16 +-- .../benchmarks_json/FluxInjection3D.json | 21 ++++ Regression/WarpX-tests.ini | 17 +++ Source/Initialization/InjectorMomentum.H | 99 +++++++++++++-- 8 files changed, 325 insertions(+), 18 deletions(-) create mode 100755 Examples/Tests/FluxInjection/analysis_flux_injection_3d.py create mode 100644 Examples/Tests/FluxInjection/inputs_3d create mode 100644 Regression/Checksum/benchmarks_json/FluxInjection3D.json diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 691cf81aef7..309d3d1a30d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -757,7 +757,7 @@ Particle initialization ``.flux_direction``, for the average momenta along each direction ``.ux_m``, ``.uy_m`` and ``.uz_m``, as well as standard deviations along each direction ``.ux_th``, - ``.uy_th`` and ``.uz_th``. 
Note that the average momenta normal to the plane is not used. + ``.uy_th`` and ``.uz_th``. ``ux_m``, ``uy_m``, ``uz_m``, ``ux_th``, ``uy_th`` and ``uz_th`` are all ``0.`` by default. * ``maxwell_boltzmann``: Maxwell-Boltzmann distribution that takes a dimensionless diff --git a/Examples/Tests/FluxInjection/analysis_flux_injection_3d.py b/Examples/Tests/FluxInjection/analysis_flux_injection_3d.py new file mode 100755 index 00000000000..804cf95eb48 --- /dev/null +++ b/Examples/Tests/FluxInjection/analysis_flux_injection_3d.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Remi Lehe +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + +""" +This script tests the Gaussian-flux injection (and in particular +the rejection method in WarpX that we use to generate the right +velocity distribution). + +Two population of particles are injected, with a slightly different +ratio of u_m/u_th. (This is in order to test the two different +rejection methods implemented in WarpX, which depend on the u_m/u_th ratio.) + +After the particles are emitted with flux injection, this script produces +histograms of the velocity distribution and compares it with the expected +velocity distibution (Gaussian or Gaussian-flux depending on the direction +of space) +""" +import os +import re +import sys + +import matplotlib.pyplot as plt +import numpy as np +from scipy.constants import c, m_e, m_p +from scipy.special import erf +import yt + +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +yt.funcs.mylog.setLevel(0) + +# Open plotfile specified in command line +fn = sys.argv[1] +ds = yt.load( fn ) +ad = ds.all_data() +t_max = ds.current_time.item() # time of simulation + +# Total number of electrons expected: +# Simulation parameters determine the total number of particles emitted (Ntot) +flux = 1. 
# in m^-2.s^-1, from the input script +emission_surface = 8*8 # in m^2 +Ntot = flux * emission_surface * t_max + +# Parameters of the histogram +hist_bins = 50 +hist_range = [-0.5, 0.5] + +# Define function that histogram and check the data + +def gaussian_dist(u, u_th): + return 1./((2*np.pi)**.5*u_th) * np.exp(-u**2/(2*u_th**2) ) + +def gaussian_flux_dist(u, u_th, u_m): + normalization_factor = u_th**2 * np.exp(-u_m**2/(2*u_th**2)) + (np.pi/2)**.5*u_m*u_th * (1 + erf(u_m/(2**.5*u_th))) + return 1./normalization_factor * np.where( u>0, u * np.exp(-(u-u_m)**2/(2*u_th**2)), 0 ) + +def compare_gaussian(u, w, u_th, label=''): + du = (hist_range[1]-hist_range[0])/hist_bins + w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w/du, range=hist_range) + u_hist = 0.5*(u_hist[1:]+u_hist[:-1]) + w_th = Ntot*gaussian_dist(u_hist, u_th) + plt.plot( u_hist, w_hist, label=label+': simulation' ) + plt.plot( u_hist, w_th, '--', label=label+': theory' ) + assert np.allclose( w_hist, w_th, atol=0.07*w_th.max() ) + +def compare_gaussian_flux(u, w, u_th, u_m, label=''): + du = (hist_range[1]-hist_range[0])/hist_bins + w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w/du, range=hist_range) + u_hist = 0.5*(u_hist[1:]+u_hist[:-1]) + w_th = Ntot*gaussian_flux_dist(u_hist, u_th, u_m) + plt.plot( u_hist, w_hist, label=label+': simulation' ) + plt.plot( u_hist, w_th, '--', label=label+': theory' ) + assert np.allclose( w_hist, w_th, atol=0.05*w_th.max() ) + +# Load data and perform check + +plt.figure(figsize=(5,7)) + +plt.subplot(211) +plt.title('Electrons') + +ux = ad['electron','particle_momentum_x'].to_ndarray()/(m_e*c) +uy = ad['electron','particle_momentum_y'].to_ndarray()/(m_e*c) +uz = ad['electron','particle_momentum_z'].to_ndarray()/(m_e*c) +w = ad['electron', 'particle_weight'].to_ndarray() + +compare_gaussian(ux, w, u_th=0.1, label='u_x') +compare_gaussian_flux(uy, w, u_th=0.1, u_m=0.07, label='u_y') +compare_gaussian(uz, w, u_th=0.1, label='u_z') +plt.legend(loc=0) + +plt.subplot(212) +plt.title('Protons') + +ux = ad['proton','particle_momentum_x'].to_ndarray()/(m_p*c) +uy = ad['proton','particle_momentum_y'].to_ndarray()/(m_p*c) +uz = ad['proton','particle_momentum_z'].to_ndarray()/(m_p*c) +w = ad['proton', 'particle_weight'].to_ndarray() + +compare_gaussian_flux(ux, w, u_th=0.1, u_m=0.05, label='u_x') +compare_gaussian(uy, w, u_th=0.1, label='u_y') +compare_gaussian(uz, w, u_th=0.1, label='u_z') +plt.legend(loc=0) + +plt.savefig('Distribution.png') + +# Verify checksum +test_name = os.path.split(os.getcwd())[1] +if re.search( 'single_precision', fn ): + checksumAPI.evaluate_checksum(test_name, fn, rtol=1.e-3) +else: + checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/FluxInjection/analysis_flux_injection_rz.py b/Examples/Tests/FluxInjection/analysis_flux_injection_rz.py index ed9716cdb0d..8ec944c715d 100755 --- a/Examples/Tests/FluxInjection/analysis_flux_injection_rz.py +++ b/Examples/Tests/FluxInjection/analysis_flux_injection_rz.py @@ -55,7 +55,7 @@ # Check that the number of particles matches the expected one assert np.allclose( w.sum(), n_tot, rtol=0.05 ) # Check that the particles are at the right radius -assert np.all( (r >= 1.5) & (r <=1.9) ) +assert np.all( (r >= 1.48) & (r <=1.92) ) test_name = os.path.split(os.getcwd())[1] diff --git a/Examples/Tests/FluxInjection/inputs_3d b/Examples/Tests/FluxInjection/inputs_3d new file mode 100644 index 00000000000..f7cb807160c --- /dev/null +++ b/Examples/Tests/FluxInjection/inputs_3d @@ -0,0 +1,68 @@ +# Maximum 
number of time steps +max_step = 2 + +# number of grid points +amr.n_cell = 16 16 16 + +# The lo and hi ends of grids are multipliers of blocking factor +amr.blocking_factor = 8 + +# Maximum allowable size of each subdomain in the problem domain; +# this is used to decompose the domain for parallel calculations. +amr.max_grid_size = 16 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Geometry +geometry.dims = 3 +geometry.prob_lo = -4 -4 -4 +geometry.prob_hi = 4 4 4 + +# Deactivate Maxwell solver +algo.maxwell_solver = none +warpx.const_dt = 7e-9 + +# Boundary condition +boundary.field_lo = periodic periodic periodic +boundary.field_hi = periodic periodic periodic + +# particles +particles.species_names = electron proton +algo.particle_shape = 3 + +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = NFluxPerCell +electron.num_particles_per_cell = 100 +electron.surface_flux_pos = -1. +electron.flux_normal_axis = y +electron.flux_direction = +1 +electron.profile = constant +electron.density = 1. +electron.momentum_distribution_type = gaussianflux +electron.ux_th = 0.1 +electron.uy_th = 0.1 +electron.uy_m = 0.07 +electron.uz_th = 0.1 + +proton.charge = +q_e +proton.mass = m_p +proton.injection_style = NFluxPerCell +proton.num_particles_per_cell = 100 +proton.surface_flux_pos = 1. +proton.flux_normal_axis = x +proton.flux_direction = +1 +proton.profile = constant +proton.density = 1. +proton.momentum_distribution_type = gaussianflux +proton.ux_th = 0.1 +proton.ux_m = 0.05 +proton.uy_th = 0.1 +proton.uz_th = 0.1 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 1000 +diag1.diag_type = Full +diag1.fields_to_plot = none diff --git a/Regression/Checksum/benchmarks_json/FluxInjection.json b/Regression/Checksum/benchmarks_json/FluxInjection.json index 5553753d24d..5a80590891c 100644 --- a/Regression/Checksum/benchmarks_json/FluxInjection.json +++ b/Regression/Checksum/benchmarks_json/FluxInjection.json @@ -1,14 +1,14 @@ { "electron": { - "particle_momentum_x": 1.7879471038093652e-42, - "particle_momentum_y": 1.7494821186739744e-42, - "particle_momentum_z": 4.5268277440986243e-41, - "particle_position_x": 6940.335850058893, - "particle_position_y": 2046.2539850460196, - "particle_theta": 6498.1356057858175, - "particle_weight": 3.219739901337792e-08 + "particle_momentum_x": 7.168456345337534e-18, + "particle_momentum_y": 7.02290351254873e-18, + "particle_momentum_z": 9.565641373942318e-42, + "particle_position_x": 6962.988311042427, + "particle_position_y": 2034.5301680154264, + "particle_theta": 6397.068924320389, + "particle_weight": 3.215011942598676e-08 }, "lev=0": { - "Bz": 2.1952258973082976e-47 + "Bz": 9.526664429810971e-24 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/FluxInjection3D.json b/Regression/Checksum/benchmarks_json/FluxInjection3D.json new file mode 100644 index 00000000000..953fdb64405 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/FluxInjection3D.json @@ -0,0 +1,21 @@ +{ + "electron": { + "particle_momentum_x": 1.1192116199394354e-18, + "particle_momentum_y": 2.238114590066897e-18, + "particle_momentum_z": 1.1156457989239728e-18, + "particle_position_x": 102495.14197173176, + "particle_position_y": 34752.73800291744, + "particle_position_z": 102423.13701045913, + "particle_weight": 8.959999999999998e-07 + }, + "lev=0": {}, + "proton": { + "particle_momentum_x": 3.835423016604918e-15, + "particle_momentum_y": 2.0468371931479925e-15, + 
"particle_momentum_z": 2.055186547721331e-15, + "particle_position_x": 66743.84539580689, + "particle_position_y": 102293.00576740496, + "particle_position_z": 102314.93877691089, + "particle_weight": 8.959999999999998e-07 + } +} \ No newline at end of file diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 6a946de9ff0..619ff337243 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -903,6 +903,23 @@ compareParticles = 1 particleTypes = electron analysisRoutine = Examples/Tests/FluxInjection/analysis_flux_injection_rz.py +[FluxInjection3D] +buildDir = . +inputFile = Examples/Tests/FluxInjection/inputs_3d +runtime_params = +dim = 3 +addToCompileString = +cmakeSetupOpts = +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +analysisRoutine = Examples/Tests/FluxInjection/analysis_flux_injection_3d.py + [Langmuir_multi_rz_psatd] buildDir = . inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rz_rt diff --git a/Source/Initialization/InjectorMomentum.H b/Source/Initialization/InjectorMomentum.H index eeaec670d27..c2172f4afd9 100644 --- a/Source/Initialization/InjectorMomentum.H +++ b/Source/Initialization/InjectorMomentum.H @@ -14,6 +14,7 @@ #include "TemperatureProperties.H" #include "VelocityProperties.H" #include "Utils/WarpXConst.H" +#include "Utils/TextMsg.H" #include #include @@ -86,6 +87,69 @@ private: amrex::Real m_ux_th, m_uy_th, m_uz_th; }; +namespace { + /** Return u sampled according to the probability distribution: + * p(u) \propto u \exp(-(u-u_m)^2/2u_th^2) + * + * @param u_m Central momentum + * @param u_th Momentum spread + * @param engine Object used to generate random numbers + */ + AMREX_GPU_HOST_DEVICE + amrex::Real + generateGaussianFluxDist( amrex::Real u_m, amrex::Real u_th, amrex::RandomEngine const& engine ) { + + using namespace amrex::literals; + + // Momentum to be returned at the end of this function + amrex::Real u = 0._rt; + + if (u_th == 0._rt) { + u = u_m; // Trivial case ; avoids division by 0 in the rest of the code below + } else if (u_m < 0.6*u_th) { + // Mean velocity is lower than thermal velocity + // Use the distribution u*exp(-u**2*(1-u_m/u_th)/(2*u_th**2)) as an approximation + // and then use the rejection method to correct it + // ( stop rejecting with probability exp(-u_m/(2*u_th**3)*(u-u_th)**2) ) + // Note that this is the method that is used in the common case u_m=0 + amrex::Real approx_u_th = u_th/std::sqrt( 1._rt - u_m/u_th ); + amrex::Real reject_prefactor = (u_m/u_th)/(2._rt*u_th*u_th); // To save computation + bool reject = true; + while (reject) { + // Generates u according to u*exp(-u**2/(2*approx_u_th**2), + // using the method of the inverse cumulative function + amrex::Real xrand = 1._rt - amrex::Random(engine); // ensures urand > 0 + u = approx_u_th * std::sqrt(2._rt*std::log(1._rt/xrand)); + // Rejection method + xrand = amrex::Random(engine); + if (xrand < std::exp(-reject_prefactor*(u-u_th)*(u-u_th))) reject = false; + } + } else { + // Mean velocity is greater than thermal velocity + // Use the distribution exp(-(u-u_m-u_th**2/u_m)**2/(2*u_th**2)) as an approximation + // and then use the rejection method to correct it + // ( stop rejecting with probability (u/u_m)*exp(1-(u/u_m)) ; note + // that this number is always between 0 and 1 ) + // Note that in the common case `u_m = 0`, this rejection method + // is not used, and the above rejection method is used instead. 
+ bool reject = true; + amrex::Real approx_u_m = u_m + u_th*u_th/u_m; + amrex::Real inv_um = 1._rt/u_m; // To save computation + while (reject) { + // Approximate distribution: normal distribution, where we only retain positive u + u = -1._rt; + while (u < 0) { + u = amrex::RandomNormal(approx_u_m, u_th, engine); + } + // Rejection method + amrex::Real xrand = amrex::Random(engine); + if (xrand < u*inv_um* std::exp(1._rt - u*inv_um)) reject = false; + } + } + + return u; + } +} // struct whose getMomentum returns momentum for 1 particle, from random // gaussian flux distribution in the specified direction. @@ -101,7 +165,15 @@ struct InjectorMomentumGaussianFlux m_ux_th(a_ux_th), m_uy_th(a_uy_th), m_uz_th(a_uz_th), m_flux_normal_axis(a_flux_normal_axis), m_flux_direction(a_flux_direction) - {} + { + // For now, do not allow negative `u_m` along the flux axis + bool raise_error = false; + if ((m_flux_normal_axis == 0) && (m_ux_m < 0)) raise_error = true; + if ((m_flux_normal_axis == 1) && (m_uy_m < 0)) raise_error = true; + if ((m_flux_normal_axis == 2) && (m_uz_m < 0)) raise_error = true; + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( raise_error==false, + "When using the `gaussianflux` distribution, the central momentum along the flux axis must be positive or zero." ); + } AMREX_GPU_HOST_DEVICE amrex::XDim3 @@ -109,17 +181,28 @@ struct InjectorMomentumGaussianFlux amrex::RandomEngine const& engine) const noexcept { using namespace amrex::literals; - // Generate the v*Gaussian distribution. - amrex::Real const urand = 1._rt - amrex::Random(engine); - amrex::Real ur = std::sqrt(2._rt*std::log(1._rt/urand)); - if (m_flux_direction < 0) ur = -ur; + + // Generate the distribution in the direction of the flux + amrex::Real u_m = 0, u_th = 0; + if (m_flux_normal_axis == 0) { + u_m = m_ux_m; + u_th = m_ux_th; + } else if (m_flux_normal_axis == 1) { + u_m = m_uy_m; + u_th = m_uy_th; + } else if (m_flux_normal_axis == 2) { + u_m = m_uz_m; + u_th = m_uz_th; + } + amrex::Real u = generateGaussianFluxDist(u_m, u_th, engine); + if (m_flux_direction < 0) u = -u; // Note: Here, in RZ geometry, the variables `ux` and `uy` actually // correspond to the radial and azimuthal component of the momentum // (and e.g.`m_flux_normal_axis==1` corresponds to v*Gaussian along theta) - amrex::Real const ux = (m_flux_normal_axis == 0 ? m_ux_th*ur : amrex::RandomNormal(m_ux_m, m_ux_th, engine)); - amrex::Real const uy = (m_flux_normal_axis == 1 ? m_uy_th*ur : amrex::RandomNormal(m_uy_m, m_uy_th, engine)); - amrex::Real const uz = (m_flux_normal_axis == 2 ? m_uz_th*ur : amrex::RandomNormal(m_uz_m, m_uz_th, engine)); + amrex::Real const ux = (m_flux_normal_axis == 0 ? u : amrex::RandomNormal(m_ux_m, m_ux_th, engine)); + amrex::Real const uy = (m_flux_normal_axis == 1 ? u : amrex::RandomNormal(m_uy_m, m_uy_th, engine)); + amrex::Real const uz = (m_flux_normal_axis == 2 ? 
u : amrex::RandomNormal(m_uz_m, m_uz_th, engine)); return amrex::XDim3{ux, uy, uz}; } From 39ff1baea39e8bb0e8205004970f72f53bf148ea Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 28 Nov 2022 11:06:59 -0800 Subject: [PATCH 0171/1346] AMReX/PICSAR: Weekly Update (#3539) --- .github/workflows/cuda.yml | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 2 +- run_test.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 620a94a7463..5a06bf79395 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach cf0afb0c152e2c942073731da7a1e0007886eed4 && cd - + cd amrex && git checkout --detach 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 105652bab13..6e378a81689 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = cf0afb0c152e2c942073731da7a1e0007886eed4 +branch = 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 619ff337243..31838336670 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = cf0afb0c152e2c942073731da7a1e0007886eed4 +branch = 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index ab0a904f2e4..dfe5b27d175 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -240,7 +240,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "cf0afb0c152e2c942073731da7a1e0007886eed4" +set(WarpX_amrex_branch "4d6413c45fa0e1aa6f366a02d75a9e2382c73850" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/run_test.sh b/run_test.sh index 6fab881bafe..ff31025e6ae 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach cf0afb0c152e2c942073731da7a1e0007886eed4 && cd - +cd amrex && git checkout --detach 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git From 778c6183a5d5f845f95d054b06163c48b1438852 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 28 Nov 2022 14:48:36 -0800 Subject: [PATCH 0172/1346] enable the use of `FieldProbe` default parameter values (#3538) --- Python/pywarpx/picmi.py | 20 
+++++----- Source/Diagnostics/ReducedDiags/FieldProbe.H | 17 +++++++-- .../Diagnostics/ReducedDiags/FieldProbe.cpp | 37 +++++++------------ 3 files changed, 36 insertions(+), 38 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 5a970ffdf89..aac083af076 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -2110,8 +2110,8 @@ def __init__(self, diag_type, name=None, period=1, path=None, def _handle_field_probe(self, **kw): """Utility function to grab required inputs for a field probe from kw""" self.probe_geometry = kw.pop("probe_geometry") - self.x_probe = kw.pop("x_probe") - self.y_probe = kw.pop("y_probe") + self.x_probe = kw.pop("x_probe", None) + self.y_probe = kw.pop("y_probe", None) self.z_probe = kw.pop("z_probe") self.interp_order = kw.pop("interp_order", None) @@ -2122,20 +2122,20 @@ def _handle_field_probe(self, **kw): self.resolution = kw.pop("resolution") if self.probe_geometry.lower() == 'line': - self.x1_probe = kw.pop("x1_probe") - self.y1_probe = kw.pop("y1_probe") + self.x1_probe = kw.pop("x1_probe", None) + self.y1_probe = kw.pop("y1_probe", None) self.z1_probe = kw.pop("z1_probe") if self.probe_geometry.lower() == 'plane': self.detector_radius = kw.pop("detector_radius") - self.target_normal_x = kw.pop("target_normal_x") - self.target_normal_y = kw.pop("target_normal_y") - self.target_normal_z = kw.pop("target_normal_z") + self.target_normal_x = kw.pop("target_normal_x", None) + self.target_normal_y = kw.pop("target_normal_y", None) + self.target_normal_z = kw.pop("target_normal_z", None) - self.target_up_x = kw.pop("target_up_x") - self.target_up_y = kw.pop("target_up_y") - self.target_up_z = kw.pop("target_up_z") + self.target_up_x = kw.pop("target_up_x", None) + self.target_up_y = kw.pop("target_up_y", None) + self.target_up_z = kw.pop("target_up_z", None) return kw diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.H b/Source/Diagnostics/ReducedDiags/FieldProbe.H index 4f72fc72068..e8dffd158b5 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.H +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.H @@ -19,6 +19,8 @@ #include #include +using namespace amrex::literals; + /** * This enumeration is used for assigning structural geometry levels (point vs line vs plane) */ @@ -67,10 +69,17 @@ public: static constexpr int noutputs = FieldProbePIdx::nattribs + 3; private: - amrex::Real x_probe, y_probe, z_probe; - amrex::Real x1_probe, y1_probe, z1_probe; - amrex::Real target_normal_x, target_normal_y, target_normal_z; - amrex::Real target_up_x, target_up_y, target_up_z; + amrex::Real x_probe = 0._rt; + amrex::Real y_probe = 0._rt; + amrex::Real x1_probe = 0._rt; + amrex::Real y1_probe = 0._rt; + amrex::Real target_normal_x = 0._rt; + amrex::Real target_normal_y = 1._rt; + amrex::Real target_normal_z = 0._rt; + amrex::Real target_up_x = 0._rt; + amrex::Real target_up_y = 0._rt; + amrex::Real target_up_z = 1._rt; + amrex::Real z_probe, z1_probe; amrex::Real detector_radius; //! 
counts number of particles for all MPI ranks diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index 19643829e17..cc1231d0ac7 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -86,8 +86,6 @@ FieldProbe::FieldProbe (std::string rd_name) if (m_probe_geometry_str == "Point") { m_probe_geometry = DetectorGeometry::Point; - x_probe = 0._rt; - y_probe = 0._rt; #if !defined(WARPX_DIM_1D_Z) utils::parser::getWithParser( pp_rd_name, "x_probe", x_probe); @@ -102,17 +100,13 @@ FieldProbe::FieldProbe (std::string rd_name) else if (m_probe_geometry_str == "Line") { m_probe_geometry = DetectorGeometry::Line; - x_probe = 0._rt; - x1_probe = 0._rt; - y_probe = 0._rt; - y1_probe = 0._rt; #if !defined(WARPX_DIM_1D_Z) - utils::parser::getWithParser(pp_rd_name, "x_probe", x_probe); - utils::parser::getWithParser(pp_rd_name, "x1_probe", x1_probe); + utils::parser::queryWithParser(pp_rd_name, "x_probe", x_probe); + utils::parser::queryWithParser(pp_rd_name, "x1_probe", x1_probe); #endif #if defined(WARPX_DIM_3D) - utils::parser::getWithParser(pp_rd_name, "y_probe", y_probe); - utils::parser::getWithParser(pp_rd_name, "y1_probe", y1_probe); + utils::parser::queryWithParser(pp_rd_name, "y_probe", y_probe); + utils::parser::queryWithParser(pp_rd_name, "y1_probe", y1_probe); #endif utils::parser::getWithParser(pp_rd_name, "z_probe", z_probe); utils::parser::getWithParser(pp_rd_name, "z1_probe", z1_probe); @@ -125,23 +119,18 @@ FieldProbe::FieldProbe (std::string rd_name) "ERROR: Plane probe should be used in a 2D or 3D simulation only")); #endif m_probe_geometry = DetectorGeometry::Plane; - y_probe = 0._rt; - target_normal_x = 0._rt; - target_normal_y = 1._rt; - target_normal_z = 0._rt; - target_up_y = 0._rt; #if defined(WARPX_DIM_3D) - utils::parser::getWithParser(pp_rd_name, "y_probe", y_probe); - utils::parser::getWithParser(pp_rd_name, "target_normal_x", target_normal_x); - utils::parser::getWithParser(pp_rd_name, "target_normal_y", target_normal_y); - utils::parser::getWithParser(pp_rd_name, "target_normal_z", target_normal_z); - utils::parser::getWithParser(pp_rd_name, "target_up_y", target_up_y); + utils::parser::queryWithParser(pp_rd_name, "y_probe", y_probe); + utils::parser::queryWithParser(pp_rd_name, "target_normal_x", target_normal_x); + utils::parser::queryWithParser(pp_rd_name, "target_normal_y", target_normal_y); + utils::parser::queryWithParser(pp_rd_name, "target_normal_z", target_normal_z); + utils::parser::queryWithParser(pp_rd_name, "target_up_y", target_up_y); #endif - utils::parser::getWithParser(pp_rd_name, "x_probe", x_probe); + utils::parser::queryWithParser(pp_rd_name, "x_probe", x_probe); utils::parser::getWithParser(pp_rd_name, "z_probe", z_probe); - utils::parser::getWithParser(pp_rd_name, "target_up_x", target_up_x); - utils::parser::getWithParser(pp_rd_name, "target_up_z", target_up_z); - utils::parser::getWithParser(pp_rd_name, "detector_radius", detector_radius); + utils::parser::queryWithParser(pp_rd_name, "target_up_x", target_up_x); + utils::parser::queryWithParser(pp_rd_name, "target_up_z", target_up_z); + utils::parser::queryWithParser(pp_rd_name, "detector_radius", detector_radius); utils::parser::getWithParser(pp_rd_name, "resolution", m_resolution); } else From 6fd619e4831e9abecbbe8fa570319293b546916a Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 28 Nov 2022 16:07:23 -0800 Subject: 
[PATCH 0173/1346] fix bug with reduced diagnostic FieldProbe in 1d (#3535) --- Source/Diagnostics/ReducedDiags/FieldProbe.cpp | 2 +- .../Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index cc1231d0ac7..d6621f78983 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -351,7 +351,7 @@ bool FieldProbe::ProbeInDomain () const * and prob_hi[1] refer to z. This is a result of warpx.Geom(lev). */ #if defined(WARPX_DIM_1D_Z) - return z_probe >= prob_lo[1] && z_probe < prob_hi[1]; + return z_probe >= prob_lo[0] && z_probe < prob_hi[0]; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) return x_probe >= prob_lo[0] && x_probe < prob_hi[0] && z_probe >= prob_lo[1] && z_probe < prob_hi[1]; diff --git a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp index a49e1e08eaa..35e25fb4b2f 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp @@ -107,9 +107,12 @@ FieldProbeParticleContainer::AddNParticles (int lev, p.pos(1) = y[i]; p.pos(2) = z[i]; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(y) ; + amrex::ignore_unused(y); p.pos(0) = x[i]; p.pos(1) = z[i]; +#elif defined(WARPX_DIM_1D_Z) + amrex::ignore_unused(x, y); + p.pos(0) = z[i]; #endif // write position, cpu id, and particle id to particle pinned_tile.push_back(p); From d3b31329b8d58d71d6d12eade5de40bea4b41855 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Nov 2022 08:34:30 -0800 Subject: [PATCH 0174/1346] [pre-commit.ci] pre-commit autoupdate (#3540) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6adde4fd9f9..b97538cae67 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ exclude: '^share/openPMD/thirdParty' # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] From 5063ee8f75583e7ac1e4de5326674f74b99a1527 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 29 Nov 2022 12:25:40 -0800 Subject: [PATCH 0175/1346] Fix Esirkepov deposition in 1D (#3537) * Fix Esirkepov deposition in 1D * Clean up code * Update benchmarks * Update benchmark --- .../benchmarks_json/LaserAcceleration_1d.json | 18 +++++++++--------- .../Python_LaserAcceleration_1d.json | 16 ++++++++-------- .../Particles/Deposition/CurrentDeposition.H | 8 +++----- 3 files changed, 20 insertions(+), 22 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json b/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json index 83c305438fb..6c4787e1936 100644 --- a/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json +++ 
b/Regression/Checksum/benchmarks_json/LaserAcceleration_1d.json @@ -1,23 +1,23 @@ { "electrons": { "particle_momentum_x": 0.0, - "particle_momentum_y": 1.2426858089556802e-20, - "particle_momentum_z": 1.4187765007430268e-21, + "particle_momentum_y": 1.2426490415609688e-20, + "particle_momentum_z": 1.4127884427815013e-21, "particle_orig_z": 0.022432812500000038, - "particle_position_x": 0.02243266637270741, + "particle_position_x": 0.022432900595257786, "particle_regionofinterest": 40.0, "particle_weight": 5.20625e+18 }, "lev=0": { - "Bx": 178016.7504669478, + "Bx": 178016.75728377263, "By": 0.0, "Bz": 0.0, "Ex": 0.0, - "Ey": 40878227583310.83, - "Ez": 568254685.6950157, + "Ey": 40878228443447.7, + "Ez": 3426547096.153969, "jx": 0.0, - "jy": 30442928969125.46, - "jz": 1108530282155.6707, + "jy": 30439563140043.055, + "jz": 3381784198940.749, "rho": 3127749.1976868743 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json b/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json index 56eb715590d..2bd5c63f9e6 100644 --- a/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json +++ b/Regression/Checksum/benchmarks_json/Python_LaserAcceleration_1d.json @@ -1,21 +1,21 @@ { "electrons": { "particle_momentum_x": 0.0, - "particle_momentum_y": 1.2426858089556802e-20, - "particle_momentum_z": 1.4187765007430268e-21, - "particle_position_x": 0.02243266637270741, + "particle_momentum_y": 1.242649041560969e-20, + "particle_momentum_z": 1.412788442781501e-21, + "particle_position_x": 2.243290059525779e-02, "particle_weight": 5.20625e+18 }, "lev=0": { - "Bx": 178016.7504669478, + "Bx": 1.780167572837726e+05, "By": 0.0, "Bz": 0.0, "Ex": 0.0, - "Ey": 40878227583310.83, - "Ez": 568254685.6950157, + "Ey": 4.087822844344770e+13, + "Ez": 3.426547096153969e+09, "jx": 0.0, - "jy": 30442928969125.46, - "jz": 1108530282155.6707, + "jy": 3.043956314004305e+13, + "jz": 3.381784198940749e+12, "rho": 3127749.1976868743 } } diff --git a/Source/Particles/Deposition/CurrentDeposition.H b/Source/Particles/Deposition/CurrentDeposition.H index 685907d0aa9..d394345dcb8 100644 --- a/Source/Particles/Deposition/CurrentDeposition.H +++ b/Source/Particles/Deposition/CurrentDeposition.H @@ -685,17 +685,15 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, #elif defined(WARPX_DIM_1D_Z) for (int k=dkl; k<=depos_order+2-dku; k++) { - amrex::Real sdxi = 0._rt; - sdxi += wq*vx*invvol*0.5_rt*(sz_old[k] + sz_new[k]); + amrex::Real const sdxi = wq*vx*invvol*0.5_rt*(sz_old[k] + sz_new[k]); amrex::Gpu::Atomic::AddNoRet( &Jx_arr(lo.x+k_new-1+k, 0, 0, 0), sdxi); } for (int k=dkl; k<=depos_order+2-dku; k++) { - amrex::Real sdyj = 0._rt; - sdyj += wq*vy*invvol*0.5_rt*(sz_old[k] + sz_new[k]); + amrex::Real const sdyj = wq*vy*invvol*0.5_rt*(sz_old[k] + sz_new[k]); amrex::Gpu::Atomic::AddNoRet( &Jy_arr(lo.x+k_new-1+k, 0, 0, 0), sdyj); } + amrex::Real sdzk = 0._rt; for (int k=dkl; k<=depos_order+1-dku; k++) { - amrex::Real sdzk = 0._rt; sdzk += wqz*(sz_old[k] - sz_new[k]); amrex::Gpu::Atomic::AddNoRet( &Jz_arr(lo.x+k_new-1+k, 0, 0, 0), sdzk); } From 3b6a467d1b7dd79ce90b02048dd1c6a0db7b138d Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 30 Nov 2022 14:17:38 -0800 Subject: [PATCH 0176/1346] PSATD: Rewrite Equations with/without Rho (#3343) --- .../PsatdAlgorithmJConstantInTime.cpp | 147 ++++++++---------- 1 file changed, 64 insertions(+), 83 deletions(-) diff --git 
a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp index 8971061f6ce..9d1fbf3e158 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp @@ -165,10 +165,18 @@ PsatdAlgorithmJConstantInTime::pushSpectralFields (SpectralFieldData& f) const // Extract pointers for the k vectors const amrex::Real* modified_kx_arr = modified_kx_vec[mfi].dataPtr(); + const amrex::Real* modified_kx_arr_c = modified_kx_vec_centered[mfi].dataPtr(); #if defined(WARPX_DIM_3D) const amrex::Real* modified_ky_arr = modified_ky_vec[mfi].dataPtr(); + const amrex::Real* modified_ky_arr_c = modified_ky_vec_centered[mfi].dataPtr(); #endif const amrex::Real* modified_kz_arr = modified_kz_vec[mfi].dataPtr(); + const amrex::Real* modified_kz_arr_c = modified_kz_vec_centered[mfi].dataPtr(); + + // Galilean velocity + const amrex::Real vgx = m_v_galilean[0]; + const amrex::Real vgy = m_v_galilean[1]; + const amrex::Real vgz = m_v_galilean[2]; // Loop over indices within one box ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept @@ -181,12 +189,10 @@ PsatdAlgorithmJConstantInTime::pushSpectralFields (SpectralFieldData& f) const const Complex By_old = fields(i,j,k,Idx.By); const Complex Bz_old = fields(i,j,k,Idx.Bz); - // Shortcuts for the values of J and rho + // Shortcuts for the values of J const Complex Jx = fields(i,j,k,Idx.Jx); const Complex Jy = fields(i,j,k,Idx.Jy); const Complex Jz = fields(i,j,k,Idx.Jz); - const Complex rho_old = fields(i,j,k,Idx.rho_old); - const Complex rho_new = fields(i,j,k,Idx.rho_new); Complex F_old; if (dive_cleaning) @@ -202,15 +208,21 @@ PsatdAlgorithmJConstantInTime::pushSpectralFields (SpectralFieldData& f) const // k vector values const amrex::Real kx = modified_kx_arr[i]; + const amrex::Real kx_c = modified_kx_arr_c[i]; #if defined(WARPX_DIM_3D) const amrex::Real ky = modified_ky_arr[j]; + const amrex::Real ky_c = modified_ky_arr_c[j]; const amrex::Real kz = modified_kz_arr[k]; + const amrex::Real kz_c = modified_kz_arr_c[k]; #else constexpr amrex::Real ky = 0._rt; + constexpr amrex::Real ky_c = 0._rt; const amrex::Real kz = modified_kz_arr[j]; + const amrex::Real kz_c = modified_kz_arr_c[j]; #endif // Physical constants and imaginary unit constexpr Real c2 = PhysConst::c * PhysConst::c; + constexpr Real ep0 = PhysConst::ep0; constexpr Real inv_ep0 = 1._rt / PhysConst::ep0; constexpr Complex I = Complex{0._rt, 1._rt}; @@ -223,44 +235,45 @@ PsatdAlgorithmJConstantInTime::pushSpectralFields (SpectralFieldData& f) const const Complex X4 = (is_galilean) ? X4_arr(i,j,k) : - S_ck / PhysConst::ep0; const Complex T2 = (is_galilean) ? 
T2_arr(i,j,k) : 1.0_rt; - // Update equations for E in the formulation with rho - // T2 = 1 always with standard PSATD (zero Galilean velocity) - + // Shortcuts for the values of rho + Complex rho_old, rho_new; if (update_with_rho) { - fields(i,j,k,Idx.Ex) = T2 * C * Ex_old - + I * c2 * T2 * S_ck * (ky * Bz_old - kz * By_old) - + X4 * Jx - I * (X2 * rho_new - T2 * X3 * rho_old) * kx; + rho_old = fields(i,j,k,Idx.rho_old); + rho_new = fields(i,j,k,Idx.rho_new); + } + else // update_with_rho = 0 + { + const amrex::Real kc_dot_vg = kx_c*vgx + ky_c*vgy + kz_c*vgz; + const Complex k_dot_E = kx*Ex_old + ky*Ey_old + kz*Ez_old; + const Complex k_dot_J = kx*Jx + ky*Jy + kz*Jz; - fields(i,j,k,Idx.Ey) = T2 * C * Ey_old - + I * c2 * T2 * S_ck * (kz * Bx_old - kx * Bz_old) - + X4 * Jy - I * (X2 * rho_new - T2 * X3 * rho_old) * ky; + rho_old = I*ep0*k_dot_E; - fields(i,j,k,Idx.Ez) = T2 * C * Ez_old - + I * c2 * T2 * S_ck * (kx * By_old - ky * Bx_old) - + X4 * Jz - I * (X2 * rho_new - T2 * X3 * rho_old) * kz; + if (kc_dot_vg == 0._rt) + { + rho_new = rho_old - I*k_dot_J*dt; + } + else // Galilean PSATD + { + rho_new = T2*rho_old + (1._rt-T2)*k_dot_J/kc_dot_vg; + } } - // Update equations for E in the formulation without rho + // Update equations for E // T2 = 1 always with standard PSATD (zero Galilean velocity) - else { - - Complex k_dot_J = kx * Jx + ky * Jy + kz * Jz; - Complex k_dot_E = kx * Ex_old + ky * Ey_old + kz * Ez_old; - - fields(i,j,k,Idx.Ex) = T2 * C * Ex_old - + I * c2 * T2 * S_ck * (ky * Bz_old - kz * By_old) - + X4 * Jx + X2 * k_dot_E * kx + X3 * k_dot_J * kx; + fields(i,j,k,Idx.Ex) = T2 * C * Ex_old + + I * c2 * T2 * S_ck * (ky * Bz_old - kz * By_old) + + X4 * Jx - I * (X2 * rho_new - T2 * X3 * rho_old) * kx; - fields(i,j,k,Idx.Ey) = T2 * C * Ey_old - + I * c2 * T2 * S_ck * (kz * Bx_old - kx * Bz_old) - + X4 * Jy + X2 * k_dot_E * ky + X3 * k_dot_J * ky; + fields(i,j,k,Idx.Ey) = T2 * C * Ey_old + + I * c2 * T2 * S_ck * (kz * Bx_old - kx * Bz_old) + + X4 * Jy - I * (X2 * rho_new - T2 * X3 * rho_old) * ky; - fields(i,j,k,Idx.Ez) = T2 * C * Ez_old - + I * c2 * T2 * S_ck * (kx * By_old - ky * Bx_old) - + X4 * Jz + X2 * k_dot_E * kz + X3 * k_dot_J * kz; - } + fields(i,j,k,Idx.Ez) = T2 * C * Ez_old + + I * c2 * T2 * S_ck * (kx * By_old - ky * Bx_old) + + X4 * Jz - I * (X2 * rho_new - T2 * X3 * rho_old) * kz; // Update equations for B // T2 = 1 always with standard PSATD (zero Galilean velocity) @@ -345,7 +358,6 @@ void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficients ( const amrex::DistributionMapping& dm, const amrex::Real dt) { - const bool update_with_rho = m_update_with_rho; const bool is_galilean = m_is_galilean; const amrex::BoxArray& ba = spectral_kspace.spectralspace_ba; @@ -405,7 +417,6 @@ void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficients ( const amrex::Real c2 = std::pow(c, 2); const amrex::Real dt2 = std::pow(dt, 2); - const amrex::Real dt3 = std::pow(dt, 3); // Calculate the dot product of the k vector with the Galilean velocity. // This has to be computed always with the centered (that is, nodal) finite-order @@ -467,69 +478,39 @@ void PsatdAlgorithmJConstantInTime::InitializeSpectralCoefficients ( X1(i,j,k) = 0.5_rt * dt2 / ep0; } - // X2 (multiplies rho_new if update_with_rho = 1 in the update equation for E) - // X2 (multiplies ([k] \dot E) if update_with_rho = 0 in the update equation for E) - if (update_with_rho) + // X2 (multiplies rho_new in the update equation for E) + if (w_c != 0.) { - if (w_c != 0.) 
+ X2(i,j,k) = c2 * (theta_c_star * X1(i,j,k) - theta_c * tmp) + / (theta_c_star - theta_c); + } + else // w_c = 0 + { + if (om_s != 0.) { - X2(i,j,k) = c2 * (theta_c_star * X1(i,j,k) - theta_c * tmp) - / (theta_c_star - theta_c); + X2(i,j,k) = c2 * (dt - S_ck(i,j,k)) / (ep0 * dt * om2_s); } - else // w_c = 0 + else // om_s = 0 and w_c = 0 { - if (om_s != 0.) - { - X2(i,j,k) = c2 * (dt - S_ck(i,j,k)) / (ep0 * dt * om2_s); - } - else // om_s = 0 and w_c = 0 - { - X2(i,j,k) = c2 * dt2 / (6._rt * ep0); - } + X2(i,j,k) = c2 * dt2 / (6._rt * ep0); } } - else // update_with_rho = 0 - { - X2(i,j,k) = c2 * ep0 * theta2_c * tmp; - } - // X3 (multiplies rho_old if update_with_rho = 1 in the update equation for E) - // X3 (multiplies ([k] \dot J) if update_with_rho = 0 in the update equation for E) - if (update_with_rho) + // X3 (multiplies rho_old in the update equation for E) + if (w_c != 0.) { - if (w_c != 0.) - { - X3(i,j,k) = c2 * (theta_c_star * X1(i,j,k) - theta_c_star * tmp) - / (theta_c_star - theta_c); - } - else // w_c = 0 - { - if (om_s != 0.) - { - X3(i,j,k) = c2 * (dt * C(i,j,k) - S_ck(i,j,k)) / (ep0 * dt * om2_s); - } - else // om_s = 0 and w_c = 0 - { - X3(i,j,k) = - c2 * dt2 / (3._rt * ep0); - } - } + X3(i,j,k) = c2 * (theta_c_star * X1(i,j,k) - theta_c_star * tmp) + / (theta_c_star - theta_c); } - else // update_with_rho = 0 + else // w_c = 0 { - if (w_c != 0.) + if (om_s != 0.) { - X3(i,j,k) = I * c2 * (theta2_c * tmp - X1(i,j,k)) / w_c; + X3(i,j,k) = c2 * (dt * C(i,j,k) - S_ck(i,j,k)) / (ep0 * dt * om2_s); } - else // w_c = 0 + else // om_s = 0 and w_c = 0 { - if (om_s != 0.) - { - X3(i,j,k) = c2 * (S_ck(i,j,k) - dt) / (ep0 * om2_s); - } - else // om_s = 0 and w_c = 0 - { - X3(i,j,k) = - c2 * dt3 / (6._rt * ep0); - } + X3(i,j,k) = - c2 * dt2 / (3._rt * ep0); } } From 2857ca08a97b3a8f82d902480816acac0b9614d6 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 2 Dec 2022 20:20:17 -0800 Subject: [PATCH 0177/1346] Clean up examples folders (#3545) * Clean up examples folders * Use `snake_case` names * Rename `nci_corrector` as `nci_fdtd_stability` --- .github/workflows/windows.yml | 4 +- Docs/source/developers/particles.rst | 2 +- Docs/source/developers/testing.rst | 8 +- Docs/source/usage/examples.rst | 10 +- Docs/source/usage/parameters.rst | 2 +- Docs/source/usage/python.rst | 2 +- .../boosted_diags/analysis.py | 0 .../boosted_diags/inputs_3d | 0 .../analysis_BTD_laser_antenna.py | 0 .../inputs_rz_z_boosted_BTD | 0 .../dive_cleaning/analysis.py | 0 .../dive_cleaning/inputs_3d | 0 .../PICMI_inputs_2d.py | 0 .../analysis.py | 0 .../inputs_2d | 0 .../analysis_electrostatic_sphere.py | 0 .../inputs_3d | 0 .../inputs_rz | 0 .../PICMI_inputs_3d.py | 0 .../analysis.py | 0 .../analysis_rz.py | 0 .../inputs_3d | 0 .../inputs_3d_mixed_BCs | 0 .../inputs_rz | 0 .../inputs_rz_mr | 0 .../embedded_boundary_cube/analysis_fields.py | 0 .../analysis_fields_2d.py | 0 .../embedded_boundary_cube/inputs_2d | 0 .../embedded_boundary_cube/inputs_3d | 0 .../PICMI_inputs_EB_API.py | 0 .../embedded_boundary_python_api}/analysis.py | 0 .../analysis_fields.py | 0 .../analysis_fields_2d.py | 0 .../embedded_boundary_rotated_cube/inputs_2d | 0 .../embedded_boundary_rotated_cube/inputs_3d | 0 .../analysis_field_probe.py | 0 .../{FieldProbe => field_probe}/inputs_2d | 0 .../analysis_flux_injection_3d.py | 0 .../analysis_flux_injection_rz.py | 0 .../inputs_3d | 0 .../inputs_rz | 0 .../PICMI_inputs_gaussian_beam.py | 0 .../ionization/analysis_ionization.py | 0 
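Aside on PATCH 0176 above (a short worked sketch of the reconstruction it introduces, with symbols mirroring ep0, k_dot_E, k_dot_J, T2 and kc_dot_vg in the code). When update_with_rho = 0, the rewrite no longer duplicates the E-field update equations; instead it reconstructs rho in Fourier space from Gauss's law and then advances it with the continuity equation, assuming J constant over the step:

\[
\hat\rho_{\mathrm{old}} = i \,\varepsilon_0 \,\mathbf{k}\cdot\hat{\mathbf{E}}_{\mathrm{old}},
\qquad
\partial_t \hat\rho = -\,i\,\mathbf{k}\cdot\hat{\mathbf{J}},
\]
\[
\hat\rho_{\mathrm{new}} =
\begin{cases}
\hat\rho_{\mathrm{old}} - i\,(\mathbf{k}\cdot\hat{\mathbf{J}})\,\Delta t,
& \mathbf{k}_c\cdot\mathbf{v}_{\mathrm{gal}} = 0,\\[4pt]
T_2\,\hat\rho_{\mathrm{old}} + (1 - T_2)\,\dfrac{\mathbf{k}\cdot\hat{\mathbf{J}}}{\mathbf{k}_c\cdot\mathbf{v}_{\mathrm{gal}}},
& \text{Galilean PSATD},
\end{cases}
\]

after which the single rho-based update for E applies in both cases. That is why the separate "without rho" E-field equations and the update_with_rho branches of the X2/X3 coefficients could be removed in this commit.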
.../ionization/inputs_2d_bf_rt | 0 .../ionization/inputs_2d_rt | 0 .../PICMI_inputs_langmuir2d.py | 0 .../PICMI_inputs_langmuir_rt.py | 0 ...MI_inputs_langmuir_rz_multimode_analyze.py | 0 .../Tests/{Langmuir => langmuir}/README.md | 0 .../analysis_langmuir_multi.py | 0 .../analysis_langmuir_multi_1d.py | 0 .../analysis_langmuir_multi_2d.py | 0 .../analysis_langmuir_multi_rz.py | 0 .../{Langmuir => langmuir}/inputs_1d_multi_rt | 0 .../{Langmuir => langmuir}/inputs_2d_multi_rt | 0 .../inputs_2d_multi_rz_rt | 0 .../{Langmuir => langmuir}/inputs_3d_multi_rt | 0 .../Tests/{Larmor => larmor}/inputs_2d_mr | 0 .../laser_injection/analysis_1d.py | 0 .../laser_injection/analysis_2d.py | 0 .../laser_injection/analysis_laser.py | 0 .../laser_injection/inputs_1d_rt | 0 .../laser_injection/inputs_2d_rt | 0 .../laser_injection/inputs_3d_rt | 0 .../laser_injection_from_file/analysis.py | 0 .../inputs.2d_test_txye | 0 .../analysis_Maxwell_QED_Hybrid.py | 0 .../inputs_2d | 0 Examples/Tests/{multi_J => multi_j}/inputs_rz | 0 .../nci_fdtd_stability}/analysis_ncicorr.py | 0 .../nci_fdtd_stability}/inputs_2d | 0 .../analysis_proton_boron_fusion.py | 0 .../analysis_two_product_fusion.py | 0 .../inputs_deuterium_deuterium_3d | 0 .../inputs_deuterium_tritium_3d | 0 .../inputs_deuterium_tritium_rz | 0 .../nuclear_fusion/inputs_proton_boron_2d | 0 .../nuclear_fusion/inputs_proton_boron_3d | 0 .../PICMI_inputs_reflection.py | 0 .../analysis_absorption.py | 0 .../analysis_reflection.py | 0 .../inputs_absorption | 0 .../PICMI_inputs_scrape.py | 0 .../analysis_scrape.py | 0 .../particle_boundary_scrape}/inputs_scrape | 0 .../PICMI_inputs_2d.py | 0 .../PICMI_inputs_prev_pos_2d.py | 0 .../analysis.py | 0 .../analysis_particles_in_pml.py | 0 .../inputs_2d | 0 .../inputs_3d | 0 .../inputs_mr_2d | 0 .../inputs_mr_3d | 0 Examples/Tests/{PEC => pec}/analysis_pec.py | 0 .../Tests/{PEC => pec}/analysis_pec_mr.py | 0 .../Tests/{PEC => pec}/inputs_field_PEC_3d | 0 .../Tests/{PEC => pec}/inputs_field_PEC_mr_3d | 0 .../Tests/{PEC => pec}/inputs_particle_PEC_3d | 0 .../automated_test_1_uniform_rest_32ppc | 0 .../automated_test_2_uniform_rest_1ppc | 0 .../automated_test_3_uniform_drift_4ppc | 0 .../automated_test_4_labdiags_2ppc | 0 .../automated_test_5_loadimbalance | 0 .../automated_test_6_output_2ppc | 0 .../Tests/{PML => pml}/analysis_pml_ckc.py | 0 .../Tests/{PML => pml}/analysis_pml_psatd.py | 0 .../{PML => pml}/analysis_pml_psatd_rz.py | 0 .../Tests/{PML => pml}/analysis_pml_yee.py | 0 Examples/Tests/{PML => pml}/inputs_2d | 0 Examples/Tests/{PML => pml}/inputs_3d | 0 Examples/Tests/{PML => pml}/inputs_rz | 0 .../PICMI_inputs_2d.py | 0 .../qed/breit_wheeler/analysis_core.py | 0 .../qed/breit_wheeler/analysis_opmd.py | 0 .../qed/breit_wheeler/analysis_yt.py | 0 .../qed/breit_wheeler/inputs_2d | 0 .../qed/breit_wheeler/inputs_3d | 0 .../qed/quantum_synchrotron/analysis.py | 0 .../qed/quantum_synchrotron/inputs_2d | 0 .../qed/quantum_synchrotron/inputs_3d | 0 .../qed/schwinger/analysis_schwinger.py | 0 .../qed/schwinger/inputs_3d_schwinger | 0 .../analysis.py | 0 .../inputs_3d | 0 .../analysis_repelling.py | 0 .../inputs_2d | 0 .../resampling/analysis_leveling_thinning.py | 0 .../resampling/inputs_leveling_thinning | 0 .../restart_eb/PICMI_inputs_restart_eb.py | 2 +- .../analysis_rigid_injection_BoostedFrame.py | 0 .../analysis_rigid_injection_LabFrame.py | 0 .../rigid_injection}/inputs_2d_BoostedFrame | 0 .../rigid_injection}/inputs_2d_LabFrame | 0 .../analysis_silver_mueller.py | 0 .../inputs_2d_x | 0 .../inputs_2d_z | 0 
.../inputs_rz_z | 0 .../analysis_bilinear_filter.py | 0 .../inputs_2d | 0 .../space_charge_initialization/analysis.py | 0 .../space_charge_initialization/inputs_3d | 0 .../analysis.py | 0 .../inputs_2d | 0 .../inputs_3d | 0 Regression/WarpX-GPU-tests.ini | 114 ++--- Regression/WarpX-tests.ini | 470 +++++++++--------- 146 files changed, 307 insertions(+), 307 deletions(-) rename Examples/{Modules => Tests}/boosted_diags/analysis.py (100%) rename Examples/{Modules => Tests}/boosted_diags/inputs_3d (100%) rename Examples/Tests/{BTD_rz => btd_rz}/analysis_BTD_laser_antenna.py (100%) rename Examples/Tests/{BTD_rz => btd_rz}/inputs_rz_z_boosted_BTD (100%) rename Examples/{Modules => Tests}/dive_cleaning/analysis.py (100%) rename Examples/{Modules => Tests}/dive_cleaning/inputs_3d (100%) rename Examples/Tests/{ElectrostaticDirichletBC => electrostatic_dirichlet_bc}/PICMI_inputs_2d.py (100%) rename Examples/Tests/{ElectrostaticDirichletBC => electrostatic_dirichlet_bc}/analysis.py (100%) rename Examples/Tests/{ElectrostaticDirichletBC => electrostatic_dirichlet_bc}/inputs_2d (100%) rename Examples/Tests/{ElectrostaticSphere => electrostatic_sphere}/analysis_electrostatic_sphere.py (100%) rename Examples/Tests/{ElectrostaticSphere => electrostatic_sphere}/inputs_3d (100%) rename Examples/Tests/{ElectrostaticSphere => electrostatic_sphere}/inputs_rz (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/PICMI_inputs_3d.py (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/analysis.py (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/analysis_rz.py (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/inputs_3d (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/inputs_3d_mixed_BCs (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/inputs_rz (100%) rename Examples/Tests/{ElectrostaticSphereEB => electrostatic_sphere_eb}/inputs_rz_mr (100%) rename Examples/{Modules => Tests}/embedded_boundary_cube/analysis_fields.py (100%) rename Examples/{Modules => Tests}/embedded_boundary_cube/analysis_fields_2d.py (100%) rename Examples/{Modules => Tests}/embedded_boundary_cube/inputs_2d (100%) rename Examples/{Modules => Tests}/embedded_boundary_cube/inputs_3d (100%) rename Examples/{Modules/embedded_boundary_python_API => Tests/embedded_boundary_python_api}/PICMI_inputs_EB_API.py (100%) rename Examples/{Modules/embedded_boundary_python_API => Tests/embedded_boundary_python_api}/analysis.py (100%) rename Examples/{Modules => Tests}/embedded_boundary_rotated_cube/analysis_fields.py (100%) rename Examples/{Modules => Tests}/embedded_boundary_rotated_cube/analysis_fields_2d.py (100%) rename Examples/{Modules => Tests}/embedded_boundary_rotated_cube/inputs_2d (100%) rename Examples/{Modules => Tests}/embedded_boundary_rotated_cube/inputs_3d (100%) rename Examples/Tests/{FieldProbe => field_probe}/analysis_field_probe.py (100%) rename Examples/Tests/{FieldProbe => field_probe}/inputs_2d (100%) rename Examples/Tests/{FluxInjection => flux_injection}/analysis_flux_injection_3d.py (100%) rename Examples/Tests/{FluxInjection => flux_injection}/analysis_flux_injection_rz.py (100%) rename Examples/Tests/{FluxInjection => flux_injection}/inputs_3d (100%) rename Examples/Tests/{FluxInjection => flux_injection}/inputs_rz (100%) rename Examples/{Modules => Tests}/gaussian_beam/PICMI_inputs_gaussian_beam.py (100%) rename Examples/{Modules => 
Tests}/ionization/analysis_ionization.py (100%) rename Examples/{Modules => Tests}/ionization/inputs_2d_bf_rt (100%) rename Examples/{Modules => Tests}/ionization/inputs_2d_rt (100%) rename Examples/Tests/{Langmuir => langmuir}/PICMI_inputs_langmuir2d.py (100%) rename Examples/Tests/{Langmuir => langmuir}/PICMI_inputs_langmuir_rt.py (100%) rename Examples/Tests/{Langmuir => langmuir}/PICMI_inputs_langmuir_rz_multimode_analyze.py (100%) rename Examples/Tests/{Langmuir => langmuir}/README.md (100%) rename Examples/Tests/{Langmuir => langmuir}/analysis_langmuir_multi.py (100%) rename Examples/Tests/{Langmuir => langmuir}/analysis_langmuir_multi_1d.py (100%) rename Examples/Tests/{Langmuir => langmuir}/analysis_langmuir_multi_2d.py (100%) rename Examples/Tests/{Langmuir => langmuir}/analysis_langmuir_multi_rz.py (100%) rename Examples/Tests/{Langmuir => langmuir}/inputs_1d_multi_rt (100%) rename Examples/Tests/{Langmuir => langmuir}/inputs_2d_multi_rt (100%) rename Examples/Tests/{Langmuir => langmuir}/inputs_2d_multi_rz_rt (100%) rename Examples/Tests/{Langmuir => langmuir}/inputs_3d_multi_rt (100%) rename Examples/Tests/{Larmor => larmor}/inputs_2d_mr (100%) rename Examples/{Modules => Tests}/laser_injection/analysis_1d.py (100%) rename Examples/{Modules => Tests}/laser_injection/analysis_2d.py (100%) rename Examples/{Modules => Tests}/laser_injection/analysis_laser.py (100%) rename Examples/{Modules => Tests}/laser_injection/inputs_1d_rt (100%) rename Examples/{Modules => Tests}/laser_injection/inputs_2d_rt (100%) rename Examples/{Modules => Tests}/laser_injection/inputs_3d_rt (100%) rename Examples/{Modules => Tests}/laser_injection_from_file/analysis.py (100%) rename Examples/{Modules => Tests}/laser_injection_from_file/inputs.2d_test_txye (100%) rename Examples/Tests/{Maxwell_Hybrid_QED => maxwell_hybrid_qed}/analysis_Maxwell_QED_Hybrid.py (100%) rename Examples/Tests/{Maxwell_Hybrid_QED => maxwell_hybrid_qed}/inputs_2d (100%) rename Examples/Tests/{multi_J => multi_j}/inputs_rz (100%) rename Examples/{Modules/nci_corrector => Tests/nci_fdtd_stability}/analysis_ncicorr.py (100%) rename Examples/{Modules/nci_corrector => Tests/nci_fdtd_stability}/inputs_2d (100%) rename Examples/{Modules => Tests}/nuclear_fusion/analysis_proton_boron_fusion.py (100%) rename Examples/{Modules => Tests}/nuclear_fusion/analysis_two_product_fusion.py (100%) rename Examples/{Modules => Tests}/nuclear_fusion/inputs_deuterium_deuterium_3d (100%) rename Examples/{Modules => Tests}/nuclear_fusion/inputs_deuterium_tritium_3d (100%) rename Examples/{Modules => Tests}/nuclear_fusion/inputs_deuterium_tritium_rz (100%) rename Examples/{Modules => Tests}/nuclear_fusion/inputs_proton_boron_2d (100%) rename Examples/{Modules => Tests}/nuclear_fusion/inputs_proton_boron_3d (100%) rename Examples/{Modules/ParticleBoundaryProcess => Tests/particle_boundary_process}/PICMI_inputs_reflection.py (100%) rename Examples/{Modules/ParticleBoundaryProcess => Tests/particle_boundary_process}/analysis_absorption.py (100%) rename Examples/{Modules/ParticleBoundaryProcess => Tests/particle_boundary_process}/analysis_reflection.py (100%) rename Examples/{Modules/ParticleBoundaryProcess => Tests/particle_boundary_process}/inputs_absorption (100%) rename Examples/{Modules/ParticleBoundaryScrape => Tests/particle_boundary_scrape}/PICMI_inputs_scrape.py (100%) rename Examples/{Modules/ParticleBoundaryScrape => Tests/particle_boundary_scrape}/analysis_scrape.py (100%) rename Examples/{Modules/ParticleBoundaryScrape => 
Tests/particle_boundary_scrape}/inputs_scrape (100%) rename Examples/Tests/{ParticleDataPython => particle_data_python}/PICMI_inputs_2d.py (100%) rename Examples/Tests/{ParticleDataPython => particle_data_python}/PICMI_inputs_prev_pos_2d.py (100%) rename Examples/Tests/{ParticleDataPython => particle_data_python}/analysis.py (100%) rename Examples/Tests/{particles_in_PML => particles_in_pml}/analysis_particles_in_pml.py (100%) rename Examples/Tests/{particles_in_PML => particles_in_pml}/inputs_2d (100%) rename Examples/Tests/{particles_in_PML => particles_in_pml}/inputs_3d (100%) rename Examples/Tests/{particles_in_PML => particles_in_pml}/inputs_mr_2d (100%) rename Examples/Tests/{particles_in_PML => particles_in_pml}/inputs_mr_3d (100%) rename Examples/Tests/{PEC => pec}/analysis_pec.py (100%) rename Examples/Tests/{PEC => pec}/analysis_pec_mr.py (100%) rename Examples/Tests/{PEC => pec}/inputs_field_PEC_3d (100%) rename Examples/Tests/{PEC => pec}/inputs_field_PEC_mr_3d (100%) rename Examples/Tests/{PEC => pec}/inputs_particle_PEC_3d (100%) rename Examples/Tests/{PerformanceTests => performance_tests}/automated_test_1_uniform_rest_32ppc (100%) rename Examples/Tests/{PerformanceTests => performance_tests}/automated_test_2_uniform_rest_1ppc (100%) rename Examples/Tests/{PerformanceTests => performance_tests}/automated_test_3_uniform_drift_4ppc (100%) rename Examples/Tests/{PerformanceTests => performance_tests}/automated_test_4_labdiags_2ppc (100%) rename Examples/Tests/{PerformanceTests => performance_tests}/automated_test_5_loadimbalance (100%) rename Examples/Tests/{PerformanceTests => performance_tests}/automated_test_6_output_2ppc (100%) rename Examples/Tests/{PML => pml}/analysis_pml_ckc.py (100%) rename Examples/Tests/{PML => pml}/analysis_pml_psatd.py (100%) rename Examples/Tests/{PML => pml}/analysis_pml_psatd_rz.py (100%) rename Examples/Tests/{PML => pml}/analysis_pml_yee.py (100%) rename Examples/Tests/{PML => pml}/inputs_2d (100%) rename Examples/Tests/{PML => pml}/inputs_3d (100%) rename Examples/Tests/{PML => pml}/inputs_rz (100%) rename Examples/Tests/{PythonWrappers => python_wrappers}/PICMI_inputs_2d.py (100%) rename Examples/{Modules => Tests}/qed/breit_wheeler/analysis_core.py (100%) rename Examples/{Modules => Tests}/qed/breit_wheeler/analysis_opmd.py (100%) rename Examples/{Modules => Tests}/qed/breit_wheeler/analysis_yt.py (100%) rename Examples/{Modules => Tests}/qed/breit_wheeler/inputs_2d (100%) rename Examples/{Modules => Tests}/qed/breit_wheeler/inputs_3d (100%) rename Examples/{Modules => Tests}/qed/quantum_synchrotron/analysis.py (100%) rename Examples/{Modules => Tests}/qed/quantum_synchrotron/inputs_2d (100%) rename Examples/{Modules => Tests}/qed/quantum_synchrotron/inputs_3d (100%) rename Examples/{Modules => Tests}/qed/schwinger/analysis_schwinger.py (100%) rename Examples/{Modules => Tests}/qed/schwinger/inputs_3d_schwinger (100%) rename Examples/{Modules => Tests}/relativistic_space_charge_initialization/analysis.py (100%) rename Examples/{Modules => Tests}/relativistic_space_charge_initialization/inputs_3d (100%) rename Examples/Tests/{RepellingParticles => repelling_particles}/analysis_repelling.py (100%) rename Examples/Tests/{RepellingParticles => repelling_particles}/inputs_2d (100%) rename Examples/{Modules => Tests}/resampling/analysis_leveling_thinning.py (100%) rename Examples/{Modules => Tests}/resampling/inputs_leveling_thinning (100%) rename Examples/{Modules/RigidInjection => Tests/rigid_injection}/analysis_rigid_injection_BoostedFrame.py 
(100%) rename Examples/{Modules/RigidInjection => Tests/rigid_injection}/analysis_rigid_injection_LabFrame.py (100%) rename Examples/{Modules/RigidInjection => Tests/rigid_injection}/inputs_2d_BoostedFrame (100%) rename Examples/{Modules/RigidInjection => Tests/rigid_injection}/inputs_2d_LabFrame (100%) rename Examples/Tests/{SilverMueller => silver_mueller}/analysis_silver_mueller.py (100%) rename Examples/Tests/{SilverMueller => silver_mueller}/inputs_2d_x (100%) rename Examples/Tests/{SilverMueller => silver_mueller}/inputs_2d_z (100%) rename Examples/Tests/{SilverMueller => silver_mueller}/inputs_rz_z (100%) rename Examples/Tests/{SingleParticle => single_particle}/analysis_bilinear_filter.py (100%) rename Examples/Tests/{SingleParticle => single_particle}/inputs_2d (100%) rename Examples/{Modules => Tests}/space_charge_initialization/analysis.py (100%) rename Examples/{Modules => Tests}/space_charge_initialization/inputs_3d (100%) rename Examples/Tests/{VayDeposition => vay_deposition}/analysis.py (100%) rename Examples/Tests/{VayDeposition => vay_deposition}/inputs_2d (100%) rename Examples/Tests/{VayDeposition => vay_deposition}/inputs_3d (100%) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index ee995ad54ac..3544954db46 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -43,7 +43,7 @@ jobs: cmake --build build --config Debug --target pip_install if(!$?) { Exit $LASTEXITCODE } - python3 Examples\Modules\gaussian_beam\PICMI_inputs_gaussian_beam.py + python3 Examples\Tests\gaussian_beam\PICMI_inputs_gaussian_beam.py # JSON writes are currently very slow (50min) with MSVC # --diagformat=openpmd @@ -90,5 +90,5 @@ jobs: cmake --build build --config Release --target pip_install if errorlevel 1 exit 1 - python3 Examples\Modules\gaussian_beam\PICMI_inputs_gaussian_beam.py --diagformat=openpmd + python3 Examples\Tests\gaussian_beam\PICMI_inputs_gaussian_beam.py --diagformat=openpmd if errorlevel 1 exit 1 diff --git a/Docs/source/developers/particles.rst b/Docs/source/developers/particles.rst index 988313079d4..e9ad7754771 100644 --- a/Docs/source/developers/particles.rst +++ b/Docs/source/developers/particles.rst @@ -147,7 +147,7 @@ Attribute name ``int``/``real`` Description Default when they were created. ================== ================ ================================= ============== -A Python example that adds runtime options can be found in :download:`Examples/Tests/ParticleDataPython <../../../Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py>` +A Python example that adds runtime options can be found in :download:`Examples/Tests/particle_data_python <../../../Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py>` .. note:: diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 06a4cf1ccb9..227c2fd97b0 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -90,8 +90,8 @@ Add a test to the suite There are three steps to follow to add a new automated test (illustrated here for PML boundary conditions): -* An input file for your test, in folder `Example/Tests/...`. For the PML test, the input file is at ``Examples/Tests/PML/inputs_2d``. You can also re-use an existing input file (even better!) and pass specific parameters at runtime (see below). -* A Python script that reads simulation output and tests correctness versus theory or calibrated results. For the PML test, see ``Examples/Tests/PML/analysis_pml_yee.py``. 
It typically ends with Python statement ``assert( error<0.01 )``. +* An input file for your test, in folder `Example/Tests/...`. For the PML test, the input file is at ``Examples/Tests/pml/inputs_2d``. You can also re-use an existing input file (even better!) and pass specific parameters at runtime (see below). +* A Python script that reads simulation output and tests correctness versus theory or calibrated results. For the PML test, see ``Examples/Tests/pml/analysis_pml_yee.py``. It typically ends with Python statement ``assert( error<0.01 )``. * If you need a new Python package dependency for testing, add it in ``Regression/requirements.txt`` * Add an entry to ``Regression/WarpX-tests.ini``, so that a WarpX simulation runs your test in the continuous integration process, and the Python script is executed to assess the correctness. For the PML test, the entry is @@ -99,7 +99,7 @@ There are three steps to follow to add a new automated test (illustrated here fo [pml_x_yee] buildDir = . - inputFile = Examples/Tests/PML/inputs2d + inputFile = Examples/Tests/pml/inputs2d runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=yee dim = 2 addToCompileString = @@ -111,7 +111,7 @@ There are three steps to follow to add a new automated test (illustrated here fo numthreads = 1 compileTest = 0 doVis = 0 - analysisRoutine = Examples/Tests/PML/analysis_pml_yee.py + analysisRoutine = Examples/Tests/pml/analysis_pml_yee.py If you re-use an existing input file, you can add arguments to ``runtime_params``, like ``runtime_params = amr.max_level=1 amr.n_cell=32 512 max_step=100 plasma_e.zmin=-200.e-6``. diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index cbaaa6bcb0e..23afa630ef1 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -106,10 +106,10 @@ Test cases PICMI (Python) test cases included that can be used as a reference: -* :download:`Gaussian beam <../../../Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py>` -* :download:`Langmuir plasma wave test in 3d <../../../Examples//Tests/Langmuir/PICMI_inputs_langmuir_rt.py>` -* :download:`Langmuir plasma wave test in RZ <../../../Examples//Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py>` -* :download:`Langmuir plasma wave test in 2D <../../../Examples//Tests/Langmuir/PICMI_inputs_langmuir2d.py>` +* :download:`Gaussian beam <../../../Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py>` +* :download:`Langmuir plasma wave test in 3d <../../../Examples/Tests/langmuir/PICMI_inputs_langmuir_rt.py>` +* :download:`Langmuir plasma wave test in RZ <../../../Examples/Tests/langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py>` +* :download:`Langmuir plasma wave test in 2D <../../../Examples/Tests/langmuir/PICMI_inputs_langmuir2d.py>` Manipulating fields via Python ------------------------------ @@ -120,4 +120,4 @@ An example of using Python to access the simulation charge density, solve the Po An example of initializing the fields by accessing their data through Python, advancing the simulation for a chosen number of time steps, and plotting the fields again through Python. The simulation runs with 128 regular cells, 8 guard cells, and 10 PML cells, in each direction. Moreover, it uses div(E) and div(B) cleaning both in the regular grid and in the PML and initializes all available electromagnetic fields (E,B,F,G) identically. 
-* :download:`Unit pulse with PML <../../../Examples/Tests/PythonWrappers/PICMI_inputs_2d.py>` +* :download:`Unit pulse with PML <../../../Examples/Tests/python_wrappers/PICMI_inputs_2d.py>` diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 309d3d1a30d..db7e5101cef 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -1163,7 +1163,7 @@ Laser initialization * field_data (double[nt * nx * ny], with nt being the slowest coordinate). - A file at this format can be generated from Python, see an example at ``Examples/Modules/laser_injection_from_file`` + A file at this format can be generated from Python, see an example at ``Examples/Tests/laser_injection_from_file`` * ``.profile_t_peak`` (`float`; in seconds) diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index afd8c5c0cac..f42a5cf577b 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -300,7 +300,7 @@ New components can be added via Python. Various diagnostics are also accessible from Python. This includes getting the deposited or total charge density from a given species as well as accessing the scraped particle buffer. See the example in -*Examples/Modules/ParticleBoudaryScrape* for a reference on how to interact +*Examples/Tests/ParticleBoudaryScrape* for a reference on how to interact with scraped particle data. .. autofunction:: pywarpx.picmi.Simulation.extension.get_species_charge_sum diff --git a/Examples/Modules/boosted_diags/analysis.py b/Examples/Tests/boosted_diags/analysis.py similarity index 100% rename from Examples/Modules/boosted_diags/analysis.py rename to Examples/Tests/boosted_diags/analysis.py diff --git a/Examples/Modules/boosted_diags/inputs_3d b/Examples/Tests/boosted_diags/inputs_3d similarity index 100% rename from Examples/Modules/boosted_diags/inputs_3d rename to Examples/Tests/boosted_diags/inputs_3d diff --git a/Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py b/Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py similarity index 100% rename from Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py rename to Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py diff --git a/Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD b/Examples/Tests/btd_rz/inputs_rz_z_boosted_BTD similarity index 100% rename from Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD rename to Examples/Tests/btd_rz/inputs_rz_z_boosted_BTD diff --git a/Examples/Modules/dive_cleaning/analysis.py b/Examples/Tests/dive_cleaning/analysis.py similarity index 100% rename from Examples/Modules/dive_cleaning/analysis.py rename to Examples/Tests/dive_cleaning/analysis.py diff --git a/Examples/Modules/dive_cleaning/inputs_3d b/Examples/Tests/dive_cleaning/inputs_3d similarity index 100% rename from Examples/Modules/dive_cleaning/inputs_3d rename to Examples/Tests/dive_cleaning/inputs_3d diff --git a/Examples/Tests/ElectrostaticDirichletBC/PICMI_inputs_2d.py b/Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py similarity index 100% rename from Examples/Tests/ElectrostaticDirichletBC/PICMI_inputs_2d.py rename to Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py diff --git a/Examples/Tests/ElectrostaticDirichletBC/analysis.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py similarity index 100% rename from Examples/Tests/ElectrostaticDirichletBC/analysis.py rename to Examples/Tests/electrostatic_dirichlet_bc/analysis.py diff --git a/Examples/Tests/ElectrostaticDirichletBC/inputs_2d 
b/Examples/Tests/electrostatic_dirichlet_bc/inputs_2d similarity index 100% rename from Examples/Tests/ElectrostaticDirichletBC/inputs_2d rename to Examples/Tests/electrostatic_dirichlet_bc/inputs_2d diff --git a/Examples/Tests/ElectrostaticSphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py similarity index 100% rename from Examples/Tests/ElectrostaticSphere/analysis_electrostatic_sphere.py rename to Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py diff --git a/Examples/Tests/ElectrostaticSphere/inputs_3d b/Examples/Tests/electrostatic_sphere/inputs_3d similarity index 100% rename from Examples/Tests/ElectrostaticSphere/inputs_3d rename to Examples/Tests/electrostatic_sphere/inputs_3d diff --git a/Examples/Tests/ElectrostaticSphere/inputs_rz b/Examples/Tests/electrostatic_sphere/inputs_rz similarity index 100% rename from Examples/Tests/ElectrostaticSphere/inputs_rz rename to Examples/Tests/electrostatic_sphere/inputs_rz diff --git a/Examples/Tests/ElectrostaticSphereEB/PICMI_inputs_3d.py b/Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/PICMI_inputs_3d.py rename to Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py diff --git a/Examples/Tests/ElectrostaticSphereEB/analysis.py b/Examples/Tests/electrostatic_sphere_eb/analysis.py similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/analysis.py rename to Examples/Tests/electrostatic_sphere_eb/analysis.py diff --git a/Examples/Tests/ElectrostaticSphereEB/analysis_rz.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/analysis_rz.py rename to Examples/Tests/electrostatic_sphere_eb/analysis_rz.py diff --git a/Examples/Tests/ElectrostaticSphereEB/inputs_3d b/Examples/Tests/electrostatic_sphere_eb/inputs_3d similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/inputs_3d rename to Examples/Tests/electrostatic_sphere_eb/inputs_3d diff --git a/Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs b/Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs rename to Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs diff --git a/Examples/Tests/ElectrostaticSphereEB/inputs_rz b/Examples/Tests/electrostatic_sphere_eb/inputs_rz similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/inputs_rz rename to Examples/Tests/electrostatic_sphere_eb/inputs_rz diff --git a/Examples/Tests/ElectrostaticSphereEB/inputs_rz_mr b/Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr similarity index 100% rename from Examples/Tests/ElectrostaticSphereEB/inputs_rz_mr rename to Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr diff --git a/Examples/Modules/embedded_boundary_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_cube/analysis_fields.py similarity index 100% rename from Examples/Modules/embedded_boundary_cube/analysis_fields.py rename to Examples/Tests/embedded_boundary_cube/analysis_fields.py diff --git a/Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py similarity index 100% rename from Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py rename to Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py diff --git a/Examples/Modules/embedded_boundary_cube/inputs_2d 
b/Examples/Tests/embedded_boundary_cube/inputs_2d similarity index 100% rename from Examples/Modules/embedded_boundary_cube/inputs_2d rename to Examples/Tests/embedded_boundary_cube/inputs_2d diff --git a/Examples/Modules/embedded_boundary_cube/inputs_3d b/Examples/Tests/embedded_boundary_cube/inputs_3d similarity index 100% rename from Examples/Modules/embedded_boundary_cube/inputs_3d rename to Examples/Tests/embedded_boundary_cube/inputs_3d diff --git a/Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py b/Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py similarity index 100% rename from Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py rename to Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py diff --git a/Examples/Modules/embedded_boundary_python_API/analysis.py b/Examples/Tests/embedded_boundary_python_api/analysis.py similarity index 100% rename from Examples/Modules/embedded_boundary_python_API/analysis.py rename to Examples/Tests/embedded_boundary_python_api/analysis.py diff --git a/Examples/Modules/embedded_boundary_rotated_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py similarity index 100% rename from Examples/Modules/embedded_boundary_rotated_cube/analysis_fields.py rename to Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py diff --git a/Examples/Modules/embedded_boundary_rotated_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py similarity index 100% rename from Examples/Modules/embedded_boundary_rotated_cube/analysis_fields_2d.py rename to Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py diff --git a/Examples/Modules/embedded_boundary_rotated_cube/inputs_2d b/Examples/Tests/embedded_boundary_rotated_cube/inputs_2d similarity index 100% rename from Examples/Modules/embedded_boundary_rotated_cube/inputs_2d rename to Examples/Tests/embedded_boundary_rotated_cube/inputs_2d diff --git a/Examples/Modules/embedded_boundary_rotated_cube/inputs_3d b/Examples/Tests/embedded_boundary_rotated_cube/inputs_3d similarity index 100% rename from Examples/Modules/embedded_boundary_rotated_cube/inputs_3d rename to Examples/Tests/embedded_boundary_rotated_cube/inputs_3d diff --git a/Examples/Tests/FieldProbe/analysis_field_probe.py b/Examples/Tests/field_probe/analysis_field_probe.py similarity index 100% rename from Examples/Tests/FieldProbe/analysis_field_probe.py rename to Examples/Tests/field_probe/analysis_field_probe.py diff --git a/Examples/Tests/FieldProbe/inputs_2d b/Examples/Tests/field_probe/inputs_2d similarity index 100% rename from Examples/Tests/FieldProbe/inputs_2d rename to Examples/Tests/field_probe/inputs_2d diff --git a/Examples/Tests/FluxInjection/analysis_flux_injection_3d.py b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py similarity index 100% rename from Examples/Tests/FluxInjection/analysis_flux_injection_3d.py rename to Examples/Tests/flux_injection/analysis_flux_injection_3d.py diff --git a/Examples/Tests/FluxInjection/analysis_flux_injection_rz.py b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py similarity index 100% rename from Examples/Tests/FluxInjection/analysis_flux_injection_rz.py rename to Examples/Tests/flux_injection/analysis_flux_injection_rz.py diff --git a/Examples/Tests/FluxInjection/inputs_3d b/Examples/Tests/flux_injection/inputs_3d similarity index 100% rename from Examples/Tests/FluxInjection/inputs_3d rename to 
Examples/Tests/flux_injection/inputs_3d diff --git a/Examples/Tests/FluxInjection/inputs_rz b/Examples/Tests/flux_injection/inputs_rz similarity index 100% rename from Examples/Tests/FluxInjection/inputs_rz rename to Examples/Tests/flux_injection/inputs_rz diff --git a/Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py b/Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py similarity index 100% rename from Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py rename to Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py diff --git a/Examples/Modules/ionization/analysis_ionization.py b/Examples/Tests/ionization/analysis_ionization.py similarity index 100% rename from Examples/Modules/ionization/analysis_ionization.py rename to Examples/Tests/ionization/analysis_ionization.py diff --git a/Examples/Modules/ionization/inputs_2d_bf_rt b/Examples/Tests/ionization/inputs_2d_bf_rt similarity index 100% rename from Examples/Modules/ionization/inputs_2d_bf_rt rename to Examples/Tests/ionization/inputs_2d_bf_rt diff --git a/Examples/Modules/ionization/inputs_2d_rt b/Examples/Tests/ionization/inputs_2d_rt similarity index 100% rename from Examples/Modules/ionization/inputs_2d_rt rename to Examples/Tests/ionization/inputs_2d_rt diff --git a/Examples/Tests/Langmuir/PICMI_inputs_langmuir2d.py b/Examples/Tests/langmuir/PICMI_inputs_langmuir2d.py similarity index 100% rename from Examples/Tests/Langmuir/PICMI_inputs_langmuir2d.py rename to Examples/Tests/langmuir/PICMI_inputs_langmuir2d.py diff --git a/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rt.py b/Examples/Tests/langmuir/PICMI_inputs_langmuir_rt.py similarity index 100% rename from Examples/Tests/Langmuir/PICMI_inputs_langmuir_rt.py rename to Examples/Tests/langmuir/PICMI_inputs_langmuir_rt.py diff --git a/Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py b/Examples/Tests/langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py similarity index 100% rename from Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py rename to Examples/Tests/langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py diff --git a/Examples/Tests/Langmuir/README.md b/Examples/Tests/langmuir/README.md similarity index 100% rename from Examples/Tests/Langmuir/README.md rename to Examples/Tests/langmuir/README.md diff --git a/Examples/Tests/Langmuir/analysis_langmuir_multi.py b/Examples/Tests/langmuir/analysis_langmuir_multi.py similarity index 100% rename from Examples/Tests/Langmuir/analysis_langmuir_multi.py rename to Examples/Tests/langmuir/analysis_langmuir_multi.py diff --git a/Examples/Tests/Langmuir/analysis_langmuir_multi_1d.py b/Examples/Tests/langmuir/analysis_langmuir_multi_1d.py similarity index 100% rename from Examples/Tests/Langmuir/analysis_langmuir_multi_1d.py rename to Examples/Tests/langmuir/analysis_langmuir_multi_1d.py diff --git a/Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py b/Examples/Tests/langmuir/analysis_langmuir_multi_2d.py similarity index 100% rename from Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py rename to Examples/Tests/langmuir/analysis_langmuir_multi_2d.py diff --git a/Examples/Tests/Langmuir/analysis_langmuir_multi_rz.py b/Examples/Tests/langmuir/analysis_langmuir_multi_rz.py similarity index 100% rename from Examples/Tests/Langmuir/analysis_langmuir_multi_rz.py rename to Examples/Tests/langmuir/analysis_langmuir_multi_rz.py diff --git a/Examples/Tests/Langmuir/inputs_1d_multi_rt b/Examples/Tests/langmuir/inputs_1d_multi_rt similarity index 100% rename from 
Examples/Tests/Langmuir/inputs_1d_multi_rt rename to Examples/Tests/langmuir/inputs_1d_multi_rt diff --git a/Examples/Tests/Langmuir/inputs_2d_multi_rt b/Examples/Tests/langmuir/inputs_2d_multi_rt similarity index 100% rename from Examples/Tests/Langmuir/inputs_2d_multi_rt rename to Examples/Tests/langmuir/inputs_2d_multi_rt diff --git a/Examples/Tests/Langmuir/inputs_2d_multi_rz_rt b/Examples/Tests/langmuir/inputs_2d_multi_rz_rt similarity index 100% rename from Examples/Tests/Langmuir/inputs_2d_multi_rz_rt rename to Examples/Tests/langmuir/inputs_2d_multi_rz_rt diff --git a/Examples/Tests/Langmuir/inputs_3d_multi_rt b/Examples/Tests/langmuir/inputs_3d_multi_rt similarity index 100% rename from Examples/Tests/Langmuir/inputs_3d_multi_rt rename to Examples/Tests/langmuir/inputs_3d_multi_rt diff --git a/Examples/Tests/Larmor/inputs_2d_mr b/Examples/Tests/larmor/inputs_2d_mr similarity index 100% rename from Examples/Tests/Larmor/inputs_2d_mr rename to Examples/Tests/larmor/inputs_2d_mr diff --git a/Examples/Modules/laser_injection/analysis_1d.py b/Examples/Tests/laser_injection/analysis_1d.py similarity index 100% rename from Examples/Modules/laser_injection/analysis_1d.py rename to Examples/Tests/laser_injection/analysis_1d.py diff --git a/Examples/Modules/laser_injection/analysis_2d.py b/Examples/Tests/laser_injection/analysis_2d.py similarity index 100% rename from Examples/Modules/laser_injection/analysis_2d.py rename to Examples/Tests/laser_injection/analysis_2d.py diff --git a/Examples/Modules/laser_injection/analysis_laser.py b/Examples/Tests/laser_injection/analysis_laser.py similarity index 100% rename from Examples/Modules/laser_injection/analysis_laser.py rename to Examples/Tests/laser_injection/analysis_laser.py diff --git a/Examples/Modules/laser_injection/inputs_1d_rt b/Examples/Tests/laser_injection/inputs_1d_rt similarity index 100% rename from Examples/Modules/laser_injection/inputs_1d_rt rename to Examples/Tests/laser_injection/inputs_1d_rt diff --git a/Examples/Modules/laser_injection/inputs_2d_rt b/Examples/Tests/laser_injection/inputs_2d_rt similarity index 100% rename from Examples/Modules/laser_injection/inputs_2d_rt rename to Examples/Tests/laser_injection/inputs_2d_rt diff --git a/Examples/Modules/laser_injection/inputs_3d_rt b/Examples/Tests/laser_injection/inputs_3d_rt similarity index 100% rename from Examples/Modules/laser_injection/inputs_3d_rt rename to Examples/Tests/laser_injection/inputs_3d_rt diff --git a/Examples/Modules/laser_injection_from_file/analysis.py b/Examples/Tests/laser_injection_from_file/analysis.py similarity index 100% rename from Examples/Modules/laser_injection_from_file/analysis.py rename to Examples/Tests/laser_injection_from_file/analysis.py diff --git a/Examples/Modules/laser_injection_from_file/inputs.2d_test_txye b/Examples/Tests/laser_injection_from_file/inputs.2d_test_txye similarity index 100% rename from Examples/Modules/laser_injection_from_file/inputs.2d_test_txye rename to Examples/Tests/laser_injection_from_file/inputs.2d_test_txye diff --git a/Examples/Tests/Maxwell_Hybrid_QED/analysis_Maxwell_QED_Hybrid.py b/Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py similarity index 100% rename from Examples/Tests/Maxwell_Hybrid_QED/analysis_Maxwell_QED_Hybrid.py rename to Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py diff --git a/Examples/Tests/Maxwell_Hybrid_QED/inputs_2d b/Examples/Tests/maxwell_hybrid_qed/inputs_2d similarity index 100% rename from Examples/Tests/Maxwell_Hybrid_QED/inputs_2d 
rename to Examples/Tests/maxwell_hybrid_qed/inputs_2d diff --git a/Examples/Tests/multi_J/inputs_rz b/Examples/Tests/multi_j/inputs_rz similarity index 100% rename from Examples/Tests/multi_J/inputs_rz rename to Examples/Tests/multi_j/inputs_rz diff --git a/Examples/Modules/nci_corrector/analysis_ncicorr.py b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py similarity index 100% rename from Examples/Modules/nci_corrector/analysis_ncicorr.py rename to Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py diff --git a/Examples/Modules/nci_corrector/inputs_2d b/Examples/Tests/nci_fdtd_stability/inputs_2d similarity index 100% rename from Examples/Modules/nci_corrector/inputs_2d rename to Examples/Tests/nci_fdtd_stability/inputs_2d diff --git a/Examples/Modules/nuclear_fusion/analysis_proton_boron_fusion.py b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py similarity index 100% rename from Examples/Modules/nuclear_fusion/analysis_proton_boron_fusion.py rename to Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py diff --git a/Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py similarity index 100% rename from Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py rename to Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d b/Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d similarity index 100% rename from Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d rename to Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d b/Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_3d similarity index 100% rename from Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d rename to Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_3d diff --git a/Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz b/Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_rz similarity index 100% rename from Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz rename to Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_rz diff --git a/Examples/Modules/nuclear_fusion/inputs_proton_boron_2d b/Examples/Tests/nuclear_fusion/inputs_proton_boron_2d similarity index 100% rename from Examples/Modules/nuclear_fusion/inputs_proton_boron_2d rename to Examples/Tests/nuclear_fusion/inputs_proton_boron_2d diff --git a/Examples/Modules/nuclear_fusion/inputs_proton_boron_3d b/Examples/Tests/nuclear_fusion/inputs_proton_boron_3d similarity index 100% rename from Examples/Modules/nuclear_fusion/inputs_proton_boron_3d rename to Examples/Tests/nuclear_fusion/inputs_proton_boron_3d diff --git a/Examples/Modules/ParticleBoundaryProcess/PICMI_inputs_reflection.py b/Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py similarity index 100% rename from Examples/Modules/ParticleBoundaryProcess/PICMI_inputs_reflection.py rename to Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py diff --git a/Examples/Modules/ParticleBoundaryProcess/analysis_absorption.py b/Examples/Tests/particle_boundary_process/analysis_absorption.py similarity index 100% rename from Examples/Modules/ParticleBoundaryProcess/analysis_absorption.py rename to Examples/Tests/particle_boundary_process/analysis_absorption.py diff --git a/Examples/Modules/ParticleBoundaryProcess/analysis_reflection.py 
b/Examples/Tests/particle_boundary_process/analysis_reflection.py similarity index 100% rename from Examples/Modules/ParticleBoundaryProcess/analysis_reflection.py rename to Examples/Tests/particle_boundary_process/analysis_reflection.py diff --git a/Examples/Modules/ParticleBoundaryProcess/inputs_absorption b/Examples/Tests/particle_boundary_process/inputs_absorption similarity index 100% rename from Examples/Modules/ParticleBoundaryProcess/inputs_absorption rename to Examples/Tests/particle_boundary_process/inputs_absorption diff --git a/Examples/Modules/ParticleBoundaryScrape/PICMI_inputs_scrape.py b/Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py similarity index 100% rename from Examples/Modules/ParticleBoundaryScrape/PICMI_inputs_scrape.py rename to Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py diff --git a/Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py b/Examples/Tests/particle_boundary_scrape/analysis_scrape.py similarity index 100% rename from Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py rename to Examples/Tests/particle_boundary_scrape/analysis_scrape.py diff --git a/Examples/Modules/ParticleBoundaryScrape/inputs_scrape b/Examples/Tests/particle_boundary_scrape/inputs_scrape similarity index 100% rename from Examples/Modules/ParticleBoundaryScrape/inputs_scrape rename to Examples/Tests/particle_boundary_scrape/inputs_scrape diff --git a/Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py b/Examples/Tests/particle_data_python/PICMI_inputs_2d.py similarity index 100% rename from Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py rename to Examples/Tests/particle_data_python/PICMI_inputs_2d.py diff --git a/Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py b/Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py similarity index 100% rename from Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py rename to Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py diff --git a/Examples/Tests/ParticleDataPython/analysis.py b/Examples/Tests/particle_data_python/analysis.py similarity index 100% rename from Examples/Tests/ParticleDataPython/analysis.py rename to Examples/Tests/particle_data_python/analysis.py diff --git a/Examples/Tests/particles_in_PML/analysis_particles_in_pml.py b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py similarity index 100% rename from Examples/Tests/particles_in_PML/analysis_particles_in_pml.py rename to Examples/Tests/particles_in_pml/analysis_particles_in_pml.py diff --git a/Examples/Tests/particles_in_PML/inputs_2d b/Examples/Tests/particles_in_pml/inputs_2d similarity index 100% rename from Examples/Tests/particles_in_PML/inputs_2d rename to Examples/Tests/particles_in_pml/inputs_2d diff --git a/Examples/Tests/particles_in_PML/inputs_3d b/Examples/Tests/particles_in_pml/inputs_3d similarity index 100% rename from Examples/Tests/particles_in_PML/inputs_3d rename to Examples/Tests/particles_in_pml/inputs_3d diff --git a/Examples/Tests/particles_in_PML/inputs_mr_2d b/Examples/Tests/particles_in_pml/inputs_mr_2d similarity index 100% rename from Examples/Tests/particles_in_PML/inputs_mr_2d rename to Examples/Tests/particles_in_pml/inputs_mr_2d diff --git a/Examples/Tests/particles_in_PML/inputs_mr_3d b/Examples/Tests/particles_in_pml/inputs_mr_3d similarity index 100% rename from Examples/Tests/particles_in_PML/inputs_mr_3d rename to Examples/Tests/particles_in_pml/inputs_mr_3d diff --git a/Examples/Tests/PEC/analysis_pec.py 
b/Examples/Tests/pec/analysis_pec.py similarity index 100% rename from Examples/Tests/PEC/analysis_pec.py rename to Examples/Tests/pec/analysis_pec.py diff --git a/Examples/Tests/PEC/analysis_pec_mr.py b/Examples/Tests/pec/analysis_pec_mr.py similarity index 100% rename from Examples/Tests/PEC/analysis_pec_mr.py rename to Examples/Tests/pec/analysis_pec_mr.py diff --git a/Examples/Tests/PEC/inputs_field_PEC_3d b/Examples/Tests/pec/inputs_field_PEC_3d similarity index 100% rename from Examples/Tests/PEC/inputs_field_PEC_3d rename to Examples/Tests/pec/inputs_field_PEC_3d diff --git a/Examples/Tests/PEC/inputs_field_PEC_mr_3d b/Examples/Tests/pec/inputs_field_PEC_mr_3d similarity index 100% rename from Examples/Tests/PEC/inputs_field_PEC_mr_3d rename to Examples/Tests/pec/inputs_field_PEC_mr_3d diff --git a/Examples/Tests/PEC/inputs_particle_PEC_3d b/Examples/Tests/pec/inputs_particle_PEC_3d similarity index 100% rename from Examples/Tests/PEC/inputs_particle_PEC_3d rename to Examples/Tests/pec/inputs_particle_PEC_3d diff --git a/Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc b/Examples/Tests/performance_tests/automated_test_1_uniform_rest_32ppc similarity index 100% rename from Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc rename to Examples/Tests/performance_tests/automated_test_1_uniform_rest_32ppc diff --git a/Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc b/Examples/Tests/performance_tests/automated_test_2_uniform_rest_1ppc similarity index 100% rename from Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc rename to Examples/Tests/performance_tests/automated_test_2_uniform_rest_1ppc diff --git a/Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc b/Examples/Tests/performance_tests/automated_test_3_uniform_drift_4ppc similarity index 100% rename from Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc rename to Examples/Tests/performance_tests/automated_test_3_uniform_drift_4ppc diff --git a/Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc b/Examples/Tests/performance_tests/automated_test_4_labdiags_2ppc similarity index 100% rename from Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc rename to Examples/Tests/performance_tests/automated_test_4_labdiags_2ppc diff --git a/Examples/Tests/PerformanceTests/automated_test_5_loadimbalance b/Examples/Tests/performance_tests/automated_test_5_loadimbalance similarity index 100% rename from Examples/Tests/PerformanceTests/automated_test_5_loadimbalance rename to Examples/Tests/performance_tests/automated_test_5_loadimbalance diff --git a/Examples/Tests/PerformanceTests/automated_test_6_output_2ppc b/Examples/Tests/performance_tests/automated_test_6_output_2ppc similarity index 100% rename from Examples/Tests/PerformanceTests/automated_test_6_output_2ppc rename to Examples/Tests/performance_tests/automated_test_6_output_2ppc diff --git a/Examples/Tests/PML/analysis_pml_ckc.py b/Examples/Tests/pml/analysis_pml_ckc.py similarity index 100% rename from Examples/Tests/PML/analysis_pml_ckc.py rename to Examples/Tests/pml/analysis_pml_ckc.py diff --git a/Examples/Tests/PML/analysis_pml_psatd.py b/Examples/Tests/pml/analysis_pml_psatd.py similarity index 100% rename from Examples/Tests/PML/analysis_pml_psatd.py rename to Examples/Tests/pml/analysis_pml_psatd.py diff --git a/Examples/Tests/PML/analysis_pml_psatd_rz.py b/Examples/Tests/pml/analysis_pml_psatd_rz.py similarity index 100% rename from 
Examples/Tests/PML/analysis_pml_psatd_rz.py rename to Examples/Tests/pml/analysis_pml_psatd_rz.py diff --git a/Examples/Tests/PML/analysis_pml_yee.py b/Examples/Tests/pml/analysis_pml_yee.py similarity index 100% rename from Examples/Tests/PML/analysis_pml_yee.py rename to Examples/Tests/pml/analysis_pml_yee.py diff --git a/Examples/Tests/PML/inputs_2d b/Examples/Tests/pml/inputs_2d similarity index 100% rename from Examples/Tests/PML/inputs_2d rename to Examples/Tests/pml/inputs_2d diff --git a/Examples/Tests/PML/inputs_3d b/Examples/Tests/pml/inputs_3d similarity index 100% rename from Examples/Tests/PML/inputs_3d rename to Examples/Tests/pml/inputs_3d diff --git a/Examples/Tests/PML/inputs_rz b/Examples/Tests/pml/inputs_rz similarity index 100% rename from Examples/Tests/PML/inputs_rz rename to Examples/Tests/pml/inputs_rz diff --git a/Examples/Tests/PythonWrappers/PICMI_inputs_2d.py b/Examples/Tests/python_wrappers/PICMI_inputs_2d.py similarity index 100% rename from Examples/Tests/PythonWrappers/PICMI_inputs_2d.py rename to Examples/Tests/python_wrappers/PICMI_inputs_2d.py diff --git a/Examples/Modules/qed/breit_wheeler/analysis_core.py b/Examples/Tests/qed/breit_wheeler/analysis_core.py similarity index 100% rename from Examples/Modules/qed/breit_wheeler/analysis_core.py rename to Examples/Tests/qed/breit_wheeler/analysis_core.py diff --git a/Examples/Modules/qed/breit_wheeler/analysis_opmd.py b/Examples/Tests/qed/breit_wheeler/analysis_opmd.py similarity index 100% rename from Examples/Modules/qed/breit_wheeler/analysis_opmd.py rename to Examples/Tests/qed/breit_wheeler/analysis_opmd.py diff --git a/Examples/Modules/qed/breit_wheeler/analysis_yt.py b/Examples/Tests/qed/breit_wheeler/analysis_yt.py similarity index 100% rename from Examples/Modules/qed/breit_wheeler/analysis_yt.py rename to Examples/Tests/qed/breit_wheeler/analysis_yt.py diff --git a/Examples/Modules/qed/breit_wheeler/inputs_2d b/Examples/Tests/qed/breit_wheeler/inputs_2d similarity index 100% rename from Examples/Modules/qed/breit_wheeler/inputs_2d rename to Examples/Tests/qed/breit_wheeler/inputs_2d diff --git a/Examples/Modules/qed/breit_wheeler/inputs_3d b/Examples/Tests/qed/breit_wheeler/inputs_3d similarity index 100% rename from Examples/Modules/qed/breit_wheeler/inputs_3d rename to Examples/Tests/qed/breit_wheeler/inputs_3d diff --git a/Examples/Modules/qed/quantum_synchrotron/analysis.py b/Examples/Tests/qed/quantum_synchrotron/analysis.py similarity index 100% rename from Examples/Modules/qed/quantum_synchrotron/analysis.py rename to Examples/Tests/qed/quantum_synchrotron/analysis.py diff --git a/Examples/Modules/qed/quantum_synchrotron/inputs_2d b/Examples/Tests/qed/quantum_synchrotron/inputs_2d similarity index 100% rename from Examples/Modules/qed/quantum_synchrotron/inputs_2d rename to Examples/Tests/qed/quantum_synchrotron/inputs_2d diff --git a/Examples/Modules/qed/quantum_synchrotron/inputs_3d b/Examples/Tests/qed/quantum_synchrotron/inputs_3d similarity index 100% rename from Examples/Modules/qed/quantum_synchrotron/inputs_3d rename to Examples/Tests/qed/quantum_synchrotron/inputs_3d diff --git a/Examples/Modules/qed/schwinger/analysis_schwinger.py b/Examples/Tests/qed/schwinger/analysis_schwinger.py similarity index 100% rename from Examples/Modules/qed/schwinger/analysis_schwinger.py rename to Examples/Tests/qed/schwinger/analysis_schwinger.py diff --git a/Examples/Modules/qed/schwinger/inputs_3d_schwinger b/Examples/Tests/qed/schwinger/inputs_3d_schwinger similarity index 100% rename from 
Examples/Modules/qed/schwinger/inputs_3d_schwinger rename to Examples/Tests/qed/schwinger/inputs_3d_schwinger diff --git a/Examples/Modules/relativistic_space_charge_initialization/analysis.py b/Examples/Tests/relativistic_space_charge_initialization/analysis.py similarity index 100% rename from Examples/Modules/relativistic_space_charge_initialization/analysis.py rename to Examples/Tests/relativistic_space_charge_initialization/analysis.py diff --git a/Examples/Modules/relativistic_space_charge_initialization/inputs_3d b/Examples/Tests/relativistic_space_charge_initialization/inputs_3d similarity index 100% rename from Examples/Modules/relativistic_space_charge_initialization/inputs_3d rename to Examples/Tests/relativistic_space_charge_initialization/inputs_3d diff --git a/Examples/Tests/RepellingParticles/analysis_repelling.py b/Examples/Tests/repelling_particles/analysis_repelling.py similarity index 100% rename from Examples/Tests/RepellingParticles/analysis_repelling.py rename to Examples/Tests/repelling_particles/analysis_repelling.py diff --git a/Examples/Tests/RepellingParticles/inputs_2d b/Examples/Tests/repelling_particles/inputs_2d similarity index 100% rename from Examples/Tests/RepellingParticles/inputs_2d rename to Examples/Tests/repelling_particles/inputs_2d diff --git a/Examples/Modules/resampling/analysis_leveling_thinning.py b/Examples/Tests/resampling/analysis_leveling_thinning.py similarity index 100% rename from Examples/Modules/resampling/analysis_leveling_thinning.py rename to Examples/Tests/resampling/analysis_leveling_thinning.py diff --git a/Examples/Modules/resampling/inputs_leveling_thinning b/Examples/Tests/resampling/inputs_leveling_thinning similarity index 100% rename from Examples/Modules/resampling/inputs_leveling_thinning rename to Examples/Tests/resampling/inputs_leveling_thinning diff --git a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py b/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py index a22a8130cb8..240283e2a2e 100755 --- a/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py +++ b/Examples/Tests/restart_eb/PICMI_inputs_restart_eb.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Mirroring Modules/ParticleBoundaryScrape setup, this tests that restarting +# Mirroring Tests/particle_boundary_scrape setup, this tests that restarting # with an EB works properly.
import sys diff --git a/Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py similarity index 100% rename from Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py rename to Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py diff --git a/Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py similarity index 100% rename from Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py rename to Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py diff --git a/Examples/Modules/RigidInjection/inputs_2d_BoostedFrame b/Examples/Tests/rigid_injection/inputs_2d_BoostedFrame similarity index 100% rename from Examples/Modules/RigidInjection/inputs_2d_BoostedFrame rename to Examples/Tests/rigid_injection/inputs_2d_BoostedFrame diff --git a/Examples/Modules/RigidInjection/inputs_2d_LabFrame b/Examples/Tests/rigid_injection/inputs_2d_LabFrame similarity index 100% rename from Examples/Modules/RigidInjection/inputs_2d_LabFrame rename to Examples/Tests/rigid_injection/inputs_2d_LabFrame diff --git a/Examples/Tests/SilverMueller/analysis_silver_mueller.py b/Examples/Tests/silver_mueller/analysis_silver_mueller.py similarity index 100% rename from Examples/Tests/SilverMueller/analysis_silver_mueller.py rename to Examples/Tests/silver_mueller/analysis_silver_mueller.py diff --git a/Examples/Tests/SilverMueller/inputs_2d_x b/Examples/Tests/silver_mueller/inputs_2d_x similarity index 100% rename from Examples/Tests/SilverMueller/inputs_2d_x rename to Examples/Tests/silver_mueller/inputs_2d_x diff --git a/Examples/Tests/SilverMueller/inputs_2d_z b/Examples/Tests/silver_mueller/inputs_2d_z similarity index 100% rename from Examples/Tests/SilverMueller/inputs_2d_z rename to Examples/Tests/silver_mueller/inputs_2d_z diff --git a/Examples/Tests/SilverMueller/inputs_rz_z b/Examples/Tests/silver_mueller/inputs_rz_z similarity index 100% rename from Examples/Tests/SilverMueller/inputs_rz_z rename to Examples/Tests/silver_mueller/inputs_rz_z diff --git a/Examples/Tests/SingleParticle/analysis_bilinear_filter.py b/Examples/Tests/single_particle/analysis_bilinear_filter.py similarity index 100% rename from Examples/Tests/SingleParticle/analysis_bilinear_filter.py rename to Examples/Tests/single_particle/analysis_bilinear_filter.py diff --git a/Examples/Tests/SingleParticle/inputs_2d b/Examples/Tests/single_particle/inputs_2d similarity index 100% rename from Examples/Tests/SingleParticle/inputs_2d rename to Examples/Tests/single_particle/inputs_2d diff --git a/Examples/Modules/space_charge_initialization/analysis.py b/Examples/Tests/space_charge_initialization/analysis.py similarity index 100% rename from Examples/Modules/space_charge_initialization/analysis.py rename to Examples/Tests/space_charge_initialization/analysis.py diff --git a/Examples/Modules/space_charge_initialization/inputs_3d b/Examples/Tests/space_charge_initialization/inputs_3d similarity index 100% rename from Examples/Modules/space_charge_initialization/inputs_3d rename to Examples/Tests/space_charge_initialization/inputs_3d diff --git a/Examples/Tests/VayDeposition/analysis.py b/Examples/Tests/vay_deposition/analysis.py similarity index 100% rename from Examples/Tests/VayDeposition/analysis.py rename to Examples/Tests/vay_deposition/analysis.py diff --git a/Examples/Tests/VayDeposition/inputs_2d 
b/Examples/Tests/vay_deposition/inputs_2d similarity index 100% rename from Examples/Tests/VayDeposition/inputs_2d rename to Examples/Tests/vay_deposition/inputs_2d diff --git a/Examples/Tests/VayDeposition/inputs_3d b/Examples/Tests/vay_deposition/inputs_3d similarity index 100% rename from Examples/Tests/VayDeposition/inputs_3d rename to Examples/Tests/vay_deposition/inputs_3d diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 6e378a81689..40525890d30 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -74,7 +74,7 @@ branch = 7b5449f92a4b30a095cc4a67f0a8b1fc69680e15 [pml_x_yee] buildDir = . -inputFile = Examples/Tests/PML/inputs_2d +inputFile = Examples/Tests/pml/inputs_2d runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=yee dim = 2 addToCompileString = USE_GPU=TRUE @@ -85,11 +85,11 @@ useOMP = 0 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/PML/analysis_pml_yee.py +analysisRoutine = Examples/Tests/pml/analysis_pml_yee.py [pml_x_ckc] buildDir = . -inputFile = Examples/Tests/PML/inputs_2d +inputFile = Examples/Tests/pml/inputs_2d runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=ckc dim = 2 addToCompileString = USE_GPU=TRUE @@ -100,11 +100,11 @@ useOMP = 0 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/PML/analysis_pml_ckc.py +analysisRoutine = Examples/Tests/pml/analysis_pml_ckc.py #[pml_x_psatd] #buildDir = . -#inputFile = Examples/Tests/PML/inputs_2d +#inputFile = Examples/Tests/pml/inputs_2d #runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 #dim = 2 #addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE @@ -115,11 +115,11 @@ analysisRoutine = Examples/Tests/PML/analysis_pml_ckc.py #numthreads = 1 #compileTest = 0 #doVis = 0 -#analysisRoutine = Examples/Tests/PML/analysis_pml_psatd.py +#analysisRoutine = Examples/Tests/pml/analysis_pml_psatd.py # [RigidInjection_lab] buildDir = . -inputFile = Examples/Modules/RigidInjection/inputs_2d_LabFrame +inputFile = Examples/Tests/rigid_injection/inputs_2d_LabFrame runtime_params = dim = 2 addToCompileString = USE_GPU=TRUE @@ -131,11 +131,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py +analysisRoutine = Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py [RigidInjection_boost_backtransformed] buildDir = . -inputFile = Examples/Modules/RigidInjection/inputs_2d_BoostedFrame +inputFile = Examples/Tests/rigid_injection/inputs_2d_BoostedFrame runtime_params = dim = 2 addToCompileString = USE_GPU=TRUE @@ -149,11 +149,11 @@ doVis = 0 compareParticles = 0 doComparison = 0 aux1File = Tools/PostProcessing/read_raw_data.py -analysisRoutine = Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py +analysisRoutine = Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py [nci_corrector] buildDir = . -inputFile = Examples/Modules/nci_corrector/inputs_2d +inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d runtime_params = amr.max_level=0 particles.use_fdtd_nci_corr=1 dim = 2 addToCompileString = USE_GPU=TRUE @@ -165,11 +165,11 @@ numthreads = 1 compileTest = 0 doVis = 0 doComparison = 0 -analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py +analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py # [nci_correctorMR] # buildDir = .
-# inputFile = Examples/Modules/nci_corrector/inputs_2d +# inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d # runtime_params = amr.max_level=1 particles.use_fdtd_nci_corr=1 # dim = 2 # addToCompileString = USE_GPU=TRUE @@ -181,11 +181,11 @@ analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py # compileTest = 0 # doVis = 0 # doComparison = 0 -# analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py +# analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py # # [ionization_lab] # buildDir = . -# inputFile = Examples/Modules/ionization/inputs_2d_rt +# inputFile = Examples/Tests/ionization/inputs_2d_rt # runtime_params = # dim = 2 # addToCompileString = USE_GPU=TRUE @@ -196,11 +196,11 @@ analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py # numthreads = 1 # compileTest = 0 # doVis = 0 -# analysisRoutine = Examples/Modules/ionization/analysis_ionization.py +# analysisRoutine = Examples/Tests/ionization/analysis_ionization.py # # [ionization_boost] # buildDir = . -# inputFile = Examples/Modules/ionization/inputs_2d_bf_rt +# inputFile = Examples/Tests/ionization/inputs_2d_bf_rt # runtime_params = # dim = 2 # addToCompileString = USE_GPU=TRUE @@ -211,11 +211,11 @@ analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py # numthreads = 1 # compileTest = 0 # doVis = 0 -# analysisRoutine = Examples/Modules/ionization/analysis_ionization.py +# analysisRoutine = Examples/Tests/ionization/analysis_ionization.py # [bilinear_filter] buildDir = . -inputFile = Examples/Tests/SingleParticle/inputs_2d +inputFile = Examples/Tests/single_particle/inputs_2d runtime_params = warpx.use_filter=1 warpx.filter_npass_each_dir=1 5 dim = 2 addToCompileString = USE_GPU=TRUE @@ -226,11 +226,11 @@ useOMP = 0 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/SingleParticle/analysis_bilinear_filter.py +analysisRoutine = Examples/Tests/single_particle/analysis_bilinear_filter.py [Langmuir_2d] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_rt +inputFile = Examples/Tests/langmuir/inputs_3d_rt dim = 2 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -243,12 +243,12 @@ doVis = 0 compareParticles = 0 particleTypes = electrons runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 diag1.fields_to_plot=Ex jx diag1.electrons.variables=w ux -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir2d.py analysisOutputImage = langmuir2d_analysis.png [Langmuir_2d_single_precision] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_rt +inputFile = Examples/Tests/langmuir/inputs_3d_rt runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 diag1.fields_to_plot=Ex jx diag1.electrons.variables=w ux dim = 2 addToCompileString = USE_GPU=TRUE PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE @@ -261,12 +261,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 particleTypes = electrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir2d.py analysisOutputImage = langmuir2d_analysis.png [Langmuir_2d_nompi] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_3d_rt +inputFile = Examples/Tests/langmuir/inputs_3d_rt dim = 2 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -279,12 +279,12 @@ doVis = 0 compareParticles = 0 particleTypes = electrons runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 diag1.fields_to_plot=Ex jx diag1.electrons.variables=w ux -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir2d.py analysisOutputImage = langmuir2d_analysis.png [Langmuir_x] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_rt +inputFile = Examples/Tests/langmuir/inputs_3d_rt dim = 3 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -297,12 +297,12 @@ doVis = 0 compareParticles = 0 particleTypes = electrons runtime_params = electrons.ux=0.01 electrons.xmax=0.e-6 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ex jx diag1.electrons.variables=w ux -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir.py analysisOutputImage = langmuir_x_analysis.png [Langmuir_y] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_rt +inputFile = Examples/Tests/langmuir/inputs_3d_rt dim = 3 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -315,12 +315,12 @@ doVis = 0 compareParticles = 0 particleTypes = electrons runtime_params = electrons.uy=0.01 electrons.ymax=0.e-6 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ey jy diag1.electrons.variables=w uy -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir.py analysisOutputImage = langmuir_y_analysis.png [Langmuir_z] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_rt +inputFile = Examples/Tests/langmuir/inputs_3d_rt dim = 3 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -333,12 +333,12 @@ doVis = 0 compareParticles = 0 particleTypes = electrons runtime_params = electrons.uz=0.01 electrons.zmax=0.e-6 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ez jz diag1.electrons.variables=w uz -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir.py analysisOutputImage = langmuir_z_analysis.png [Langmuir_multi] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt dim = 3 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -351,12 +351,12 @@ doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt dim = 3 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -369,12 +369,12 @@ doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE @@ -387,12 +387,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE @@ -405,12 +405,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_2d_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt dim = 2 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -423,12 +423,12 @@ doVis = 0 compareParticles = 0 runtime_params = warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 dim = 2 addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE @@ -441,12 +441,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png # [Langmuir_multi_2d_psatd_nodal] # buildDir = . -# inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +# inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt # runtime_params = algo.maxwell_solver=psatd warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell # dim = 2 # addToCompileString = USE_PSATD=TRUE USE_GPU=TRUE @@ -459,12 +459,12 @@ analysisOutputImage = langmuir_multi_2d_analysis.png # doVis = 0 # compareParticles = 0 # particleTypes = electrons positrons -# analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +# analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py # analysisOutputImage = langmuir_multi_2d_analysis.png # # [Langmuir_multi_rz] # buildDir = . 
-# inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rz_rt +# inputFile = Examples/Tests/langmuir/inputs_2d_multi_rz_rt # dim = 2 # addToCompileString = USE_RZ=TRUE USE_GPU=TRUE # restartTest = 0 @@ -477,12 +477,12 @@ analysisOutputImage = langmuir_multi_2d_analysis.png # runtime_params = diag1.electrons.variables=w ux uy uz diag1.ions.variables=w ux uy uz # compareParticles = 0 # particleTypes = electrons ions -# analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_rz.py +# analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_rz.py # analysisOutputImage = langmuir_multi_rz_analysis.png # # [Langmuir_rz_multimode] # buildDir = . -# inputFile = Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py +# inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py # customRunCmd = python PICMI_inputs_langmuir_rz_multimode_analyze.py # runtime_params = # dim = 2 @@ -500,7 +500,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png # [LaserInjection] buildDir = . -inputFile = Examples/Modules/laser_injection/inputs_3d_rt +inputFile = Examples/Tests/laser_injection/inputs_3d_rt dim = 3 runtime_params = max_step=20 addToCompileString = USE_GPU=TRUE @@ -512,12 +512,12 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/laser_injection/analysis_laser.py +analysisRoutine = Examples/Tests/laser_injection/analysis_laser.py analysisOutputImage = laser_analysis.png [LaserInjection_2d] buildDir = . -inputFile = Examples/Modules/laser_injection/inputs_2d_rt +inputFile = Examples/Tests/laser_injection/inputs_2d_rt dim = 2 addToCompileString = USE_GPU=TRUE restartTest = 0 @@ -596,7 +596,7 @@ particleTypes = beam driver plasma_e [Python_Langmuir] buildDir = . -inputFile = Examples/Tests/Langmuir/PICMI_inputs_langmuir_rt.py +inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir_rt.py customRunCmd = python PICMI_inputs_langmuir_rt.py runtime_params = dim = 3 @@ -631,7 +631,7 @@ particleTypes = electrons [particles_in_pml_2d] buildDir = . -inputFile = Examples/Tests/particles_in_PML/inputs_2d +inputFile = Examples/Tests/particles_in_pml/inputs_2d runtime_params = dim = 2 addToCompileString = USE_GPU=TRUE @@ -643,11 +643,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_PML/analysis_particles_in_pml.py +analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py [particles_in_pml] buildDir = . -inputFile = Examples/Tests/particles_in_PML/inputs_3d +inputFile = Examples/Tests/particles_in_pml/inputs_3d runtime_params = dim = 3 addToCompileString = USE_GPU=TRUE @@ -659,7 +659,7 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_PML/analysis_particles_in_pml.py +analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py [photon_pusher] buildDir = . diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 31838336670..c3b1ad67ad1 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -70,7 +70,7 @@ cmakeSetupOpts = -DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON -DWarpX_LIB=ON [pml_x_yee] buildDir = . 
-inputFile = Examples/Tests/PML/inputs_2d +inputFile = Examples/Tests/pml/inputs_2d runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=yee chk.file_prefix=pml_x_yee_chk chk.file_min_digits=5 dim = 2 addToCompileString = @@ -83,11 +83,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/PML/analysis_pml_yee.py +analysisRoutine = Examples/Tests/pml/analysis_pml_yee.py [pml_x_ckc] buildDir = . -inputFile = Examples/Tests/PML/inputs_2d +inputFile = Examples/Tests/pml/inputs_2d runtime_params = warpx.do_dynamic_scheduling=0 algo.maxwell_solver=ckc dim = 2 addToCompileString = @@ -99,11 +99,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/PML/analysis_pml_ckc.py +analysisRoutine = Examples/Tests/pml/analysis_pml_ckc.py [pml_x_psatd] buildDir = . -inputFile = Examples/Tests/PML/inputs_2d +inputFile = Examples/Tests/pml/inputs_2d runtime_params = algo.maxwell_solver=psatd psatd.update_with_rho=1 warpx.do_dynamic_scheduling=0 diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho divE warpx.cfl = 0.7071067811865475 warpx.do_pml_dive_cleaning=0 warpx.do_pml_divb_cleaning=0 chk.file_prefix=pml_x_psatd_chk chk.file_min_digits=5 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -116,11 +116,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/PML/analysis_pml_psatd.py +analysisRoutine = Examples/Tests/pml/analysis_pml_psatd.py [pml_psatd_dive_divb_cleaning] buildDir = . -inputFile = Examples/Tests/PML/inputs_3d +inputFile = Examples/Tests/pml/inputs_3d runtime_params = warpx.do_similar_dm_pml=0 warpx.abort_on_warning_threshold=medium ablastr.fillboundary_always_sync=1 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -136,7 +136,7 @@ analysisRoutine = Examples/analysis_default_regression.py [pml_psatd_rz] buildDir = . -inputFile = Examples/Tests/PML/inputs_rz +inputFile = Examples/Tests/pml/inputs_rz runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 warpx.cfl=0.7 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE @@ -148,11 +148,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/PML/analysis_pml_psatd_rz.py +analysisRoutine = Examples/Tests/pml/analysis_pml_psatd_rz.py [silver_mueller_2d_x] buildDir = . -inputFile = Examples/Tests/SilverMueller/inputs_2d_x +inputFile = Examples/Tests/silver_mueller/inputs_2d_x runtime_params = dim = 2 addToCompileString = @@ -164,11 +164,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/SilverMueller/analysis_silver_mueller.py +analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py [silver_mueller_2d_z] buildDir = . -inputFile = Examples/Tests/SilverMueller/inputs_2d_z +inputFile = Examples/Tests/silver_mueller/inputs_2d_z runtime_params = dim = 2 addToCompileString = @@ -180,11 +180,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/SilverMueller/analysis_silver_mueller.py +analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py [silver_mueller_rz_z] buildDir = . 
-inputFile = Examples/Tests/SilverMueller/inputs_rz_z +inputFile = Examples/Tests/silver_mueller/inputs_rz_z runtime_params = dim = 2 addToCompileString = USE_RZ=TRUE @@ -196,11 +196,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/SilverMueller/analysis_silver_mueller.py +analysisRoutine = Examples/Tests/silver_mueller/analysis_silver_mueller.py [RigidInjection_lab] buildDir = . -inputFile = Examples/Modules/RigidInjection/inputs_2d_LabFrame +inputFile = Examples/Tests/rigid_injection/inputs_2d_LabFrame runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 2 addToCompileString = @@ -213,11 +213,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/RigidInjection/analysis_rigid_injection_LabFrame.py +analysisRoutine = Examples/Tests/rigid_injection/analysis_rigid_injection_LabFrame.py [RigidInjection_BTD] buildDir = . -inputFile = Examples/Modules/RigidInjection/inputs_2d_BoostedFrame +inputFile = Examples/Tests/rigid_injection/inputs_2d_BoostedFrame runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 2 addToCompileString = USE_OPENPMD=TRUE @@ -231,11 +231,11 @@ compileTest = 0 doVis = 0 compareParticles = 0 doComparison = 0 -analysisRoutine = Examples/Modules/RigidInjection/analysis_rigid_injection_BoostedFrame.py +analysisRoutine = Examples/Tests/rigid_injection/analysis_rigid_injection_BoostedFrame.py [LaserAcceleration_BTD] buildDir = . -inputFile = Examples/Modules/boosted_diags/inputs_3d +inputFile = Examples/Tests/boosted_diags/inputs_3d runtime_params = dim = 3 addToCompileString = USE_OPENPMD=TRUE @@ -249,11 +249,11 @@ compileTest = 0 doVis = 0 compareParticles = 0 doComparison = 0 -analysisRoutine = Examples/Modules/boosted_diags/analysis.py +analysisRoutine = Examples/Tests/boosted_diags/analysis.py [nci_corrector] buildDir = . -inputFile = Examples/Modules/nci_corrector/inputs_2d +inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d runtime_params = amr.max_level=0 particles.use_fdtd_nci_corr=1 dim = 2 addToCompileString = @@ -266,11 +266,11 @@ numthreads = 1 compileTest = 0 doVis = 0 doComparison = 0 -analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py +analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py [nci_correctorMR] buildDir = . -inputFile = Examples/Modules/nci_corrector/inputs_2d +inputFile = Examples/Tests/nci_fdtd_stability/inputs_2d runtime_params = amr.max_level=1 particles.use_fdtd_nci_corr=1 amr.n_cell=64 64 warpx.fine_tag_lo=-20.e-6 -20.e-6 warpx.fine_tag_hi=20.e-6 20.e-6 dim = 2 addToCompileString = @@ -283,11 +283,11 @@ numthreads = 1 compileTest = 0 doVis = 0 doComparison = 0 -analysisRoutine = Examples/Modules/nci_corrector/analysis_ncicorr.py +analysisRoutine = Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py [ionization_lab] buildDir = . -inputFile = Examples/Modules/ionization/inputs_2d_rt +inputFile = Examples/Tests/ionization/inputs_2d_rt runtime_params = dim = 2 addToCompileString = @@ -299,11 +299,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/ionization/analysis_ionization.py +analysisRoutine = Examples/Tests/ionization/analysis_ionization.py [ionization_boost] buildDir = . 
-inputFile = Examples/Modules/ionization/inputs_2d_bf_rt +inputFile = Examples/Tests/ionization/inputs_2d_bf_rt runtime_params = dim = 2 addToCompileString = @@ -315,11 +315,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/ionization/analysis_ionization.py +analysisRoutine = Examples/Tests/ionization/analysis_ionization.py [bilinear_filter] buildDir = . -inputFile = Examples/Tests/SingleParticle/inputs_2d +inputFile = Examples/Tests/single_particle/inputs_2d runtime_params = warpx.use_filter=1 warpx.filter_npass_each_dir=1 5 dim = 2 addToCompileString = @@ -331,11 +331,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/SingleParticle/analysis_bilinear_filter.py +analysisRoutine = Examples/Tests/single_particle/analysis_bilinear_filter.py [Langmuir_multi] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = warpx.do_dynamic_scheduling=0 dim = 3 addToCompileString = @@ -349,12 +349,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_single_precision] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = warpx.do_dynamic_scheduling=0 dim = 3 addToCompileString = PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE @@ -368,12 +368,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct dim = 3 addToCompileString = @@ -387,12 +387,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -406,12 +406,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_multiJ] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE @@ -425,12 +425,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = Langmuir_multi_psatd_multiJ.png [Langmuir_multi_psatd_multiJ_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -444,12 +444,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = Langmuir_multi_psatd_multiJ_nodal.png [Langmuir_multi_psatd_div_cleaning] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258 psatd.update_with_rho = 1 algo.current_deposition = direct warpx.do_dive_cleaning = 1 warpx.do_divb_cleaning = 1 diag1.intervals = 0, 38:40:1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE F warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE @@ -463,12 +463,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_current_correction] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd algo.current_deposition=esirkepov psatd.periodic_single_box_fft=1 psatd.current_correction=1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -482,12 +482,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_current_correction_nodal] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.do_nodal=1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -501,12 +501,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_Vay_deposition] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd algo.current_deposition=vay diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -520,12 +520,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_Vay_deposition_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.do_nodal=1 algo.current_deposition=vay diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -539,12 +539,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_momentum_conserving] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd algo.field_gathering=momentum-conserving warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE @@ -558,12 +558,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.do_dynamic_scheduling=0 warpx.do_nodal=1 algo.current_deposition=direct warpx.cfl = 0.5773502691896258 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE @@ -577,12 +577,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_single_precision] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_3d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl = 0.5773502691896258 dim = 3 addToCompileString = USE_PSATD=TRUE PRECISION=FLOAT USE_SINGLE_PRECISION_PARTICLES=TRUE @@ -596,12 +596,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi.py analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_2d_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz dim = 2 addToCompileString = @@ -615,12 +615,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_MR] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver = ckc warpx.use_filter = 1 amr.max_level = 1 amr.ref_ratio = 4 warpx.fine_tag_lo = -10.e-6 -10.e-6 warpx.fine_tag_hi = 10.e-6 10.e-6 diag1.electrons.variables = w ux uy uz diag1.positrons.variables = w ux uy uz dim = 2 addToCompileString = @@ -634,12 +634,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = Langmuir_multi_2d_MR.png [Langmuir_multi_2d_MR_anisotropic] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver = ckc warpx.use_filter = 1 amr.max_level = 1 amr.ref_ratio_vect = 4 2 warpx.fine_tag_lo = -10.e-6 -10.e-6 warpx.fine_tag_hi = 10.e-6 10.e-6 diag1.electrons.variables = w ux uy uz diag1.positrons.variables = w ux uy uz dim = 2 addToCompileString = @@ -653,12 +653,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = Langmuir_multi_2d_MR.png [Langmuir_multi_2d_MR_psatd] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver = psatd warpx.use_filter = 1 amr.max_level = 1 amr.ref_ratio = 4 warpx.fine_tag_lo = -10.e-6 -10.e-6 warpx.fine_tag_hi = 10.e-6 10.e-6 diag1.electrons.variables = w ux uy uz diag1.positrons.variables = w ux uy uz psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -672,12 +672,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = Langmuir_multi_2d_MR_psatd.png [Langmuir_multi_2d_psatd] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -691,12 +691,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_multiJ] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -710,12 +710,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = Langmuir_multi_2d_psatd_multiJ.png [Langmuir_multi_2d_psatd_multiJ_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -729,12 +729,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = Langmuir_multi_2d_psatd_multiJ_nodal.png [Langmuir_multi_2d_psatd_momentum_conserving] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd algo.field_gathering=momentum-conserving diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -748,12 +748,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_current_correction] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=esirkepov psatd.periodic_single_box_fft=1 psatd.current_correction=1 diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot =Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -767,12 +767,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_current_correction_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=direct psatd.periodic_single_box_fft=1 psatd.current_correction=1 warpx.do_nodal=1 diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot =Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -786,12 +786,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_Vay_deposition] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 algo.current_deposition=vay diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -805,12 +805,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_Vay_deposition_nodal] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd amr.max_grid_size=128 warpx.do_nodal=1 algo.current_deposition=vay diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot = Ex Ey Ez jx jy jz part_per_cell rho divE warpx.cfl = 0.7071067811865475 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -824,12 +824,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_nodal] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt runtime_params = algo.maxwell_solver=psatd warpx.do_nodal=1 algo.current_deposition=direct diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz diag1.fields_to_plot=Ex Ey Ez jx jy jz part_per_cell warpx.cfl = 0.7071067811865475 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -843,12 +843,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_2d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_2d.py analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_1d] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_1d_multi_rt +inputFile = Examples/Tests/langmuir/inputs_1d_multi_rt runtime_params = algo.current_deposition=esirkepov diag1.electrons.variables=w ux uy uz diag1.positrons.variables=w ux uy uz dim = 1 addToCompileString = USE_OPENPMD=TRUE QED=FALSE @@ -862,12 +862,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons positrons -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_1d.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_1d.py analysisOutputImage = langmuir_multi_1d_analysis.png [Langmuir_multi_rz] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rz_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rz_rt runtime_params = diag1.electrons.variables=w ux uy uz diag1.ions.variables=w ux uy uz diag1.dump_rz_modes=0 dim = 2 addToCompileString = USE_RZ=TRUE @@ -881,13 +881,13 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_rz.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_rz.py analysisOutputImage = Langmuir_multi_rz_analysis.png aux1File = Regression/PostProcessingUtils/post_processing_utils.py [FluxInjection] buildDir = . -inputFile = Examples/Tests/FluxInjection/inputs_rz +inputFile = Examples/Tests/flux_injection/inputs_rz runtime_params = dim = 2 addToCompileString = USE_RZ=TRUE @@ -901,11 +901,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electron -analysisRoutine = Examples/Tests/FluxInjection/analysis_flux_injection_rz.py +analysisRoutine = Examples/Tests/flux_injection/analysis_flux_injection_rz.py [FluxInjection3D] buildDir = . -inputFile = Examples/Tests/FluxInjection/inputs_3d +inputFile = Examples/Tests/flux_injection/inputs_3d runtime_params = dim = 3 addToCompileString = @@ -918,11 +918,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 1 -analysisRoutine = Examples/Tests/FluxInjection/analysis_flux_injection_3d.py +analysisRoutine = Examples/Tests/flux_injection/analysis_flux_injection_3d.py [Langmuir_multi_rz_psatd] buildDir = . 
-inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rz_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rz_rt runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=w ux uy uz diag1.ions.variables=w ux uy uz diag1.dump_rz_modes=0 algo.current_deposition=direct warpx.do_dive_cleaning=0 psatd.update_with_rho=1 electrons.random_theta=0 ions.random_theta=0 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack @@ -936,13 +936,13 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_rz.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_rz.py analysisOutputImage = Langmuir_multi_rz_psatd_analysis.png aux1File = Regression/PostProcessingUtils/post_processing_utils.py [Langmuir_multi_rz_psatd_current_correction] buildDir = . -inputFile = Examples/Tests/Langmuir/inputs_2d_multi_rz_rt +inputFile = Examples/Tests/langmuir/inputs_2d_multi_rz_rt runtime_params = algo.maxwell_solver=psatd diag1.electrons.variables=w ux uy uz diag1.ions.variables=w ux uy uz diag1.dump_rz_modes=0 algo.current_deposition=direct warpx.do_dive_cleaning=0 amr.max_grid_size=128 psatd.periodic_single_box_fft=1 psatd.current_correction=1 diag1.fields_to_plot=jx jz Ex Ez By rho divE electrons.random_theta=0 ions.random_theta=0 dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack @@ -956,13 +956,13 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/Langmuir/analysis_langmuir_multi_rz.py +analysisRoutine = Examples/Tests/langmuir/analysis_langmuir_multi_rz.py analysisOutputImage = Langmuir_multi_rz_psatd_analysis.png aux1File = Regression/PostProcessingUtils/post_processing_utils.py [Python_Langmuir_rz_multimode] buildDir = . -inputFile = Examples/Tests/Langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py +inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir_rz_multimode_analyze.py runtime_params = customRunCmd = python3 PICMI_inputs_langmuir_rz_multimode_analyze.py dim = 2 @@ -1042,7 +1042,7 @@ analysisRoutine = Examples/Tests/restart/analysis_restart.py [LaserInjection] buildDir = . -inputFile = Examples/Modules/laser_injection/inputs_3d_rt +inputFile = Examples/Tests/laser_injection/inputs_3d_rt runtime_params = max_step=20 dim = 3 addToCompileString = @@ -1055,12 +1055,12 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/laser_injection/analysis_laser.py +analysisRoutine = Examples/Tests/laser_injection/analysis_laser.py analysisOutputImage = laser_analysis.png [LaserInjection_2d] buildDir = . -inputFile = Examples/Modules/laser_injection/inputs_2d_rt +inputFile = Examples/Tests/laser_injection/inputs_2d_rt runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 2 addToCompileString = @@ -1073,11 +1073,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/laser_injection/analysis_2d.py +analysisRoutine = Examples/Tests/laser_injection/analysis_2d.py [LaserInjection_1d] buildDir = . 
-inputFile = Examples/Modules/laser_injection/inputs_1d_rt +inputFile = Examples/Tests/laser_injection/inputs_1d_rt runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 1 addToCompileString = USE_OPENPMD=TRUE QED=FALSE @@ -1090,7 +1090,7 @@ numthreads = 2 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/laser_injection/analysis_1d.py +analysisRoutine = Examples/Tests/laser_injection/analysis_1d.py [LaserAcceleration] buildDir = . @@ -1279,7 +1279,7 @@ analysisRoutine = Examples/analysis_default_regression.py [Python_Langmuir] buildDir = . -inputFile = Examples/Tests/Langmuir/PICMI_inputs_langmuir_rt.py +inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir_rt.py runtime_params = customRunCmd = python3 PICMI_inputs_langmuir_rt.py dim = 3 @@ -1375,7 +1375,7 @@ analysisRoutine = Examples/Tests/restart/analysis_restart.py [space_charge_initialization_2d] buildDir = . -inputFile = Examples/Modules/space_charge_initialization/inputs_3d +inputFile = Examples/Tests/space_charge_initialization/inputs_3d dim = 2 addToCompileString = cmakeSetupOpts = -DWarpX_DIMS=2 @@ -1388,12 +1388,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 geometry.dims=2 -analysisRoutine = Examples/Modules/space_charge_initialization/analysis.py +analysisRoutine = Examples/Tests/space_charge_initialization/analysis.py analysisOutputImage = Comparison.png [space_charge_initialization] buildDir = . -inputFile = Examples/Modules/space_charge_initialization/inputs_3d +inputFile = Examples/Tests/space_charge_initialization/inputs_3d dim = 3 addToCompileString = cmakeSetupOpts = -DWarpX_DIMS=3 @@ -1406,12 +1406,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 -analysisRoutine = Examples/Modules/space_charge_initialization/analysis.py +analysisRoutine = Examples/Tests/space_charge_initialization/analysis.py analysisOutputImage = Comparison.png [relativistic_space_charge_initialization] buildDir = . -inputFile = Examples/Modules/relativistic_space_charge_initialization/inputs_3d +inputFile = Examples/Tests/relativistic_space_charge_initialization/inputs_3d dim = 3 addToCompileString = cmakeSetupOpts = -DWarpX_DIMS=3 @@ -1424,7 +1424,7 @@ compileTest = 0 doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 -analysisRoutine = Examples/Modules/relativistic_space_charge_initialization/analysis.py +analysisRoutine = Examples/Tests/relativistic_space_charge_initialization/analysis.py analysisOutputImage = Comparison.png [parabolic_channel_initialization_2d_single_precision] @@ -1463,7 +1463,7 @@ analysisRoutine = Examples/Tests/divb_cleaning/analysis.py [dive_cleaning_2d] buildDir = . -inputFile = Examples/Modules/dive_cleaning/inputs_3d +inputFile = Examples/Tests/dive_cleaning/inputs_3d dim = 2 addToCompileString = cmakeSetupOpts = -DWarpX_DIMS=2 @@ -1476,12 +1476,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 geometry.dims=2 -analysisRoutine = Examples/Modules/dive_cleaning/analysis.py +analysisRoutine = Examples/Tests/dive_cleaning/analysis.py analysisOutputImage = Comparison.png [dive_cleaning_3d] buildDir = . 
-inputFile = Examples/Modules/dive_cleaning/inputs_3d +inputFile = Examples/Tests/dive_cleaning/inputs_3d dim = 3 addToCompileString = cmakeSetupOpts = -DWarpX_DIMS=3 @@ -1494,12 +1494,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 runtime_params = warpx.do_dynamic_scheduling=0 -analysisRoutine = Examples/Modules/dive_cleaning/analysis.py +analysisRoutine = Examples/Tests/dive_cleaning/analysis.py analysisOutputImage = Comparison.png [particles_in_pml_2d] buildDir = . -inputFile = Examples/Tests/particles_in_PML/inputs_2d +inputFile = Examples/Tests/particles_in_pml/inputs_2d runtime_params = dim = 2 addToCompileString = @@ -1512,12 +1512,12 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_PML/analysis_particles_in_pml.py +analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py [particles_in_pml_2d_MR] buildDir = . -inputFile = Examples/Tests/particles_in_PML/inputs_mr_2d +inputFile = Examples/Tests/particles_in_pml/inputs_mr_2d runtime_params = dim = 2 addToCompileString = @@ -1530,11 +1530,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_PML/analysis_particles_in_pml.py +analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py [particles_in_pml] buildDir = . -inputFile = Examples/Tests/particles_in_PML/inputs_3d +inputFile = Examples/Tests/particles_in_pml/inputs_3d runtime_params = dim = 3 addToCompileString = @@ -1547,12 +1547,12 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_PML/analysis_particles_in_pml.py +analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py [particles_in_pml_3d_MR] buildDir = . -inputFile = Examples/Tests/particles_in_PML/inputs_mr_3d +inputFile = Examples/Tests/particles_in_pml/inputs_mr_3d runtime_params = dim = 3 addToCompileString = @@ -1565,7 +1565,7 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/particles_in_PML/analysis_particles_in_pml.py +analysisRoutine = Examples/Tests/particles_in_pml/analysis_particles_in_pml.py [photon_pusher] buildDir = . @@ -1603,8 +1603,8 @@ analysisRoutine = Examples/Tests/radiation_reaction/test_const_B_analytical/ana [qed_breit_wheeler_2d] buildDir = . -inputFile = Examples/Modules/qed/breit_wheeler/inputs_2d -aux1File = Examples/Modules/qed/breit_wheeler/analysis_core.py +inputFile = Examples/Tests/qed/breit_wheeler/inputs_2d +aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py runtime_params = warpx.abort_on_warning_threshold = high dim = 2 addToCompileString = QED=TRUE @@ -1617,12 +1617,12 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/qed/breit_wheeler/analysis_yt.py +analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_yt.py [qed_breit_wheeler_3d] buildDir = . 
-inputFile = Examples/Modules/qed/breit_wheeler/inputs_3d -aux1File = Examples/Modules/qed/breit_wheeler/analysis_core.py +inputFile = Examples/Tests/qed/breit_wheeler/inputs_3d +aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py runtime_params = warpx.abort_on_warning_threshold = high dim = 3 addToCompileString = QED=TRUE @@ -1635,12 +1635,12 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/qed/breit_wheeler/analysis_yt.py +analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_yt.py [qed_breit_wheeler_2d_opmd] buildDir = . -inputFile = Examples/Modules/qed/breit_wheeler/inputs_2d -aux1File = Examples/Modules/qed/breit_wheeler/analysis_core.py +inputFile = Examples/Tests/qed/breit_wheeler/inputs_2d +aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py runtime_params = diag1.format = openpmd diag1.openpmd_backend = h5 warpx.abort_on_warning_threshold = high dim = 2 addToCompileString = QED=TRUE USE_OPENPMD=TRUE @@ -1654,12 +1654,12 @@ compileTest = 0 doVis = 0 compareParticles = 0 outputFile = qed_breit_wheeler_2d_opmd_plt -analysisRoutine = Examples/Modules/qed/breit_wheeler/analysis_opmd.py +analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_opmd.py [qed_breit_wheeler_3d_opmd] buildDir = . -inputFile = Examples/Modules/qed/breit_wheeler/inputs_3d -aux1File = Examples/Modules/qed/breit_wheeler/analysis_core.py +inputFile = Examples/Tests/qed/breit_wheeler/inputs_3d +aux1File = Examples/Tests/qed/breit_wheeler/analysis_core.py runtime_params = diag1.format = openpmd diag1.openpmd_backend = h5 warpx.abort_on_warning_threshold = high dim = 3 addToCompileString = QED=TRUE USE_OPENPMD=TRUE @@ -1673,11 +1673,11 @@ compileTest = 0 doVis = 0 compareParticles = 0 outputFile = qed_breit_wheeler_3d_opmd_plt -analysisRoutine = Examples/Modules/qed/breit_wheeler/analysis_opmd.py +analysisRoutine = Examples/Tests/qed/breit_wheeler/analysis_opmd.py [qed_quantum_sync_2d] buildDir = . -inputFile = Examples/Modules/qed/quantum_synchrotron/inputs_2d +inputFile = Examples/Tests/qed/quantum_synchrotron/inputs_2d runtime_params = warpx.abort_on_warning_threshold = high dim = 2 addToCompileString = QED=TRUE @@ -1690,11 +1690,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/qed/quantum_synchrotron/analysis.py +analysisRoutine = Examples/Tests/qed/quantum_synchrotron/analysis.py [qed_quantum_sync_3d] buildDir = . -inputFile = Examples/Modules/qed/quantum_synchrotron/inputs_3d +inputFile = Examples/Tests/qed/quantum_synchrotron/inputs_3d runtime_params = warpx.abort_on_warning_threshold = high dim = 3 addToCompileString = QED=TRUE @@ -1707,11 +1707,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/qed/quantum_synchrotron/analysis.py +analysisRoutine = Examples/Tests/qed/quantum_synchrotron/analysis.py [qed_schwinger1] buildDir = . -inputFile = Examples/Modules/qed/schwinger/inputs_3d_schwinger +inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger runtime_params = warpx.E_external_grid = 1.e16 0 0 warpx.B_external_grid = 16792888.570516706 5256650.141557486 18363530.799561853 dim = 3 addToCompileString = QED=TRUE @@ -1723,11 +1723,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/qed/schwinger/analysis_schwinger.py +analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py [qed_schwinger2] buildDir = . 
-inputFile = Examples/Modules/qed/schwinger/inputs_3d_schwinger +inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger runtime_params = warpx.E_external_grid = 1.e18 0 0 warpx.B_external_grid = 1679288857.0516706 525665014.1557486 1836353079.9561853 qed_schwinger.xmin = -2.5e-7 qed_schwinger.xmax = 2.49e-7 dim = 3 addToCompileString = QED=TRUE @@ -1739,11 +1739,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/qed/schwinger/analysis_schwinger.py +analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py [qed_schwinger3] buildDir = . -inputFile = Examples/Modules/qed/schwinger/inputs_3d_schwinger +inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger runtime_params = warpx.E_external_grid = 0 1.090934525450495e+17 0 dim = 3 addToCompileString = QED=TRUE @@ -1755,11 +1755,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/qed/schwinger/analysis_schwinger.py +analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py [qed_schwinger4] buildDir = . -inputFile = Examples/Modules/qed/schwinger/inputs_3d_schwinger +inputFile = Examples/Tests/qed/schwinger/inputs_3d_schwinger runtime_params = warpx.E_external_grid = 0 0 2.5e+20 warpx.B_external_grid = 0 833910140000. 0 qed_schwinger.ymin = -2.5e-7 qed_schwinger.zmax = 2.49e-7 dim = 3 addToCompileString = QED=TRUE @@ -1771,7 +1771,7 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/qed/schwinger/analysis_schwinger.py +analysisRoutine = Examples/Tests/qed/schwinger/analysis_schwinger.py [particle_pusher] buildDir = . @@ -1792,7 +1792,7 @@ analysisRoutine = Examples/Tests/particle_pusher/analysis_pusher.py [Python_gaussian_beam] buildDir = . -inputFile = Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py +inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py customRunCmd = python3 PICMI_inputs_gaussian_beam.py runtime_params = dim = 3 @@ -1812,7 +1812,7 @@ analysisRoutine = Examples/analysis_default_regression.py [Python_gaussian_beam_opmd] buildDir = . -inputFile = Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py +inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py customRunCmd = python3 PICMI_inputs_gaussian_beam.py --diagformat=openpmd runtime_params = dim = 3 @@ -1832,7 +1832,7 @@ analysisRoutine = [Python_gaussian_beam_no_field_output] buildDir = . -inputFile = Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py +inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py customRunCmd = python3 PICMI_inputs_gaussian_beam.py --fields_to_plot none runtime_params = dim = 3 @@ -1852,7 +1852,7 @@ analysisRoutine = [Python_gaussian_beam_opmd_no_field_output] buildDir = . -inputFile = Examples/Modules/gaussian_beam/PICMI_inputs_gaussian_beam.py +inputFile = Examples/Tests/gaussian_beam/PICMI_inputs_gaussian_beam.py customRunCmd = python PICMI_inputs_gaussian_beam.py --diagformat=openpmd --fields_to_plot none runtime_params = dim = 3 @@ -2086,7 +2086,7 @@ analysisRoutine = Examples/analysis_default_regression.py [Python_Langmuir_2d] buildDir = . -inputFile = Examples/Tests/Langmuir/PICMI_inputs_langmuir2d.py +inputFile = Examples/Tests/langmuir/PICMI_inputs_langmuir2d.py runtime_params = customRunCmd = python3 PICMI_inputs_langmuir2d.py dim = 2 @@ -2122,7 +2122,7 @@ analysisRoutine = Examples/analysis_default_regression.py [RepellingParticles] buildDir = . 
-inputFile = Examples/Tests/RepellingParticles/inputs_2d +inputFile = Examples/Tests/repelling_particles/inputs_2d runtime_params = dim = 2 addToCompileString = @@ -2134,11 +2134,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/RepellingParticles/analysis_repelling.py +analysisRoutine = Examples/Tests/repelling_particles/analysis_repelling.py [Larmor] buildDir = . -inputFile = Examples/Tests/Larmor/inputs_2d_mr +inputFile = Examples/Tests/larmor/inputs_2d_mr runtime_params = max_step=10 dim = 2 addToCompileString = @@ -2186,8 +2186,8 @@ analysisRoutine = Examples/analysis_default_regression.py [LaserInjectionFromTXYEFile] buildDir = . -inputFile = Examples/Modules/laser_injection_from_file/analysis.py -aux1File = Examples/Modules/laser_injection_from_file/inputs.2d_test_txye +inputFile = Examples/Tests/laser_injection_from_file/analysis.py +aux1File = Examples/Tests/laser_injection_from_file/inputs.2d_test_txye customRunCmd = ./analysis.py runtime_params = warpx.do_dynamic_scheduling=0 dim = 2 @@ -2279,7 +2279,7 @@ aux1File = Regression/PostProcessingUtils/post_processing_utils.py [Proton_Boron_Fusion_3D] buildDir = . -inputFile = Examples/Modules/nuclear_fusion/inputs_proton_boron_3d +inputFile = Examples/Tests/nuclear_fusion/inputs_proton_boron_3d runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 3 addToCompileString = @@ -2291,11 +2291,11 @@ useOMP = 1 numthreads = 2 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_proton_boron_fusion.py +analysisRoutine = Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py [Proton_Boron_Fusion_2D] buildDir = . -inputFile = Examples/Modules/nuclear_fusion/inputs_proton_boron_2d +inputFile = Examples/Tests/nuclear_fusion/inputs_proton_boron_2d runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 2 addToCompileString = @@ -2307,11 +2307,11 @@ useOMP = 1 numthreads = 2 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_proton_boron_fusion.py +analysisRoutine = Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py [Deuterium_Tritium_Fusion_3D] buildDir = . -inputFile = Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_3d +inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_3d runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 3 addToCompileString = @@ -2323,11 +2323,11 @@ useOMP = 1 numthreads = 2 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py +analysisRoutine = Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py [Deuterium_Deuterium_Fusion_3D] buildDir = . -inputFile = Examples/Modules/nuclear_fusion/inputs_deuterium_deuterium_3d +inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_deuterium_3d runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 3 addToCompileString = @@ -2339,11 +2339,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py +analysisRoutine = Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py [Deuterium_Tritium_Fusion_RZ] buildDir = . 
-inputFile = Examples/Modules/nuclear_fusion/inputs_deuterium_tritium_rz +inputFile = Examples/Tests/nuclear_fusion/inputs_deuterium_tritium_rz runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 dim = 2 addToCompileString = USE_RZ=TRUE @@ -2355,11 +2355,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/nuclear_fusion/analysis_two_product_fusion.py +analysisRoutine = Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py [Maxwell_Hybrid_QED_solver] buildDir = . -inputFile = Examples/Tests/Maxwell_Hybrid_QED/inputs_2d +inputFile = Examples/Tests/maxwell_hybrid_qed/inputs_2d runtime_params = warpx.cfl=0.7071067811865475 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2371,7 +2371,7 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/Maxwell_Hybrid_QED/analysis_Maxwell_QED_Hybrid.py +analysisRoutine = Examples/Tests/maxwell_hybrid_qed/analysis_Maxwell_QED_Hybrid.py [reduced_diags] buildDir = . @@ -2787,7 +2787,7 @@ analysisRoutine = Examples/Tests/galilean/analysis.py [multi_J_rz_psatd] buildDir = . -inputFile = Examples/Tests/multi_J/inputs_rz +inputFile = Examples/Tests/multi_j/inputs_rz runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 warpx.abort_on_warning_threshold=medium psatd.J_in_time=linear dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE @@ -2805,7 +2805,7 @@ analysisRoutine = Examples/analysis_default_regression.py [ElectrostaticSphereEB] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphereEB/inputs_3d +inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_3d runtime_params = warpx.abort_on_warning_threshold = medium dim = 3 addToCompileString = USE_EB=TRUE @@ -2822,7 +2822,7 @@ analysisRoutine = Examples/analysis_default_regression.py [ElectrostaticSphereEB_RZ] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphereEB/inputs_rz +inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_rz runtime_params = warpx.abort_on_warning_threshold = medium dim = 2 addToCompileString = USE_EB=TRUE USE_RZ=TRUE @@ -2835,11 +2835,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticSphereEB/analysis_rz.py +analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis_rz.py [ElectrostaticSphereEB_RZ_MR] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphereEB/inputs_rz_mr +inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_rz_mr runtime_params = warpx.abort_on_warning_threshold = medium dim = 2 addToCompileString = USE_EB=TRUE USE_RZ=TRUE @@ -2852,11 +2852,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticSphereEB/analysis_rz.py +analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis_rz.py [Python_ElectrostaticSphereEB] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphereEB/PICMI_inputs_3d.py +inputFile = Examples/Tests/electrostatic_sphere_eb/PICMI_inputs_3d.py runtime_params = customRunCmd = python3 PICMI_inputs_3d.py dim = 3 @@ -2871,11 +2871,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticSphereEB/analysis.py +analysisRoutine = Examples/Tests/electrostatic_sphere_eb/analysis.py [ElectrostaticSphereEB_mixedBCs] buildDir = . 
-inputFile = Examples/Tests/ElectrostaticSphereEB/inputs_3d_mixed_BCs +inputFile = Examples/Tests/electrostatic_sphere_eb/inputs_3d_mixed_BCs runtime_params = warpx.abort_on_warning_threshold = medium dim = 3 addToCompileString = USE_EB=TRUE @@ -2892,7 +2892,7 @@ analysisRoutine = Examples/analysis_default_regression.py [ElectrostaticSphere] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphere/inputs_3d +inputFile = Examples/Tests/electrostatic_sphere/inputs_3d runtime_params = warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = @@ -2905,11 +2905,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticSphere/analysis_electrostatic_sphere.py +analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py [ElectrostaticSphereRZ] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphere/inputs_rz +inputFile = Examples/Tests/electrostatic_sphere/inputs_rz runtime_params = warpx.abort_on_warning_threshold = medium dim = 2 addToCompileString = USE_RZ=TRUE @@ -2922,11 +2922,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticSphere/analysis_electrostatic_sphere.py +analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py [ElectrostaticSphereLabFrame] buildDir = . -inputFile = Examples/Tests/ElectrostaticSphere/inputs_3d +inputFile = Examples/Tests/electrostatic_sphere/inputs_3d runtime_params = warpx.do_electrostatic=labframe dim = 3 addToCompileString = @@ -2939,11 +2939,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticSphere/analysis_electrostatic_sphere.py +analysisRoutine = Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py [FieldProbe] buildDir = . -inputFile = Examples/Tests/FieldProbe/inputs_2d +inputFile = Examples/Tests/field_probe/inputs_2d runtime_params = dim = 2 addToCompileString = USE_EB=TRUE @@ -2956,7 +2956,7 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/FieldProbe/analysis_field_probe.py +analysisRoutine = Examples/Tests/field_probe/analysis_field_probe.py [embedded_circle] buildDir = . @@ -2996,7 +2996,7 @@ aux1File = Tools/PostProcessing/read_raw_data.py [leveling_thinning] buildDir = . -inputFile = Examples/Modules/resampling/inputs_leveling_thinning +inputFile = Examples/Tests/resampling/inputs_leveling_thinning runtime_params = dim = 2 addToCompileString = @@ -3009,7 +3009,7 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/resampling/analysis_leveling_thinning.py +analysisRoutine = Examples/Tests/resampling/analysis_leveling_thinning.py [particle_boundaries_3d] buildDir = . @@ -3030,7 +3030,7 @@ analysisRoutine = Examples/Tests/boundaries/analysis.py [embedded_boundary_cube] buildDir = . -inputFile = Examples/Modules/embedded_boundary_cube/inputs_3d +inputFile = Examples/Tests/embedded_boundary_cube/inputs_3d runtime_params = dim = 3 addToCompileString = USE_EB=TRUE @@ -3043,11 +3043,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/embedded_boundary_cube/analysis_fields.py +analysisRoutine = Examples/Tests/embedded_boundary_cube/analysis_fields.py [embedded_boundary_cube_macroscopic] buildDir = . 
-inputFile = Examples/Modules/embedded_boundary_cube/inputs_3d +inputFile = Examples/Tests/embedded_boundary_cube/inputs_3d runtime_params = algo.em_solver_medium=macroscopic macroscopic.epsilon=1.5*8.8541878128e-12 macroscopic.sigma=0 macroscopic.mu=1.25663706212e-06 dim = 3 addToCompileString = USE_EB=TRUE @@ -3060,11 +3060,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/embedded_boundary_cube/analysis_fields.py +analysisRoutine = Examples/Tests/embedded_boundary_cube/analysis_fields.py [embedded_boundary_cube_2d] buildDir = . -inputFile = Examples/Modules/embedded_boundary_cube/inputs_2d +inputFile = Examples/Tests/embedded_boundary_cube/inputs_2d runtime_params = dim = 2 addToCompileString = USE_EB=TRUE @@ -3077,11 +3077,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/embedded_boundary_cube/analysis_fields_2d.py +analysisRoutine = Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py [embedded_boundary_rotated_cube] buildDir = . -inputFile = Examples/Modules/embedded_boundary_rotated_cube/inputs_3d +inputFile = Examples/Tests/embedded_boundary_rotated_cube/inputs_3d runtime_params = warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_EB=TRUE @@ -3094,11 +3094,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/embedded_boundary_rotated_cube/analysis_fields.py +analysisRoutine = Examples/Tests/embedded_boundary_rotated_cube/analysis_fields.py [embedded_boundary_rotated_cube_2d] buildDir = . -inputFile = Examples/Modules/embedded_boundary_rotated_cube/inputs_2d +inputFile = Examples/Tests/embedded_boundary_rotated_cube/inputs_2d runtime_params = warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_EB=TRUE @@ -3111,11 +3111,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/embedded_boundary_rotated_cube/analysis_fields_2d.py +analysisRoutine = Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py [dirichletbc] buildDir = . -inputFile = Examples/Tests/ElectrostaticDirichletBC/inputs_2d +inputFile = Examples/Tests/electrostatic_dirichlet_bc/inputs_2d runtime_params = warpx.abort_on_warning_threshold = medium dim = 2 addToCompileString = @@ -3128,11 +3128,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticDirichletBC/analysis.py +analysisRoutine = Examples/Tests/electrostatic_dirichlet_bc/analysis.py [Python_dirichletbc] buildDir = . -inputFile = Examples/Tests/ElectrostaticDirichletBC/PICMI_inputs_2d.py +inputFile = Examples/Tests/electrostatic_dirichlet_bc/PICMI_inputs_2d.py runtime_params = customRunCmd = python3 PICMI_inputs_2d.py dim = 2 @@ -3147,11 +3147,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/ElectrostaticDirichletBC/analysis.py +analysisRoutine = Examples/Tests/electrostatic_dirichlet_bc/analysis.py [PEC_field] buildDir = . -inputFile = Examples/Tests/PEC/inputs_field_PEC_3d +inputFile = Examples/Tests/pec/inputs_field_PEC_3d runtime_params = dim = 3 addToCompileString = @@ -3164,11 +3164,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/PEC/analysis_pec.py +analysisRoutine = Examples/Tests/pec/analysis_pec.py [PEC_field_mr] buildDir = . 
-inputFile = Examples/Tests/PEC/inputs_field_PEC_mr_3d +inputFile = Examples/Tests/pec/inputs_field_PEC_mr_3d runtime_params = dim = 3 addToCompileString = @@ -3181,11 +3181,11 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Tests/PEC/analysis_pec_mr.py +analysisRoutine = Examples/Tests/pec/analysis_pec_mr.py [PEC_particle] buildDir = . -inputFile = Examples/Tests/PEC/inputs_particle_PEC_3d +inputFile = Examples/Tests/pec/inputs_particle_PEC_3d runtime_params = dim = 3 addToCompileString = @@ -3390,7 +3390,7 @@ analysisRoutine = Examples/Physics_applications/capacitive_discharge/analysis_1d [particle_absorption] buildDir = . -inputFile = Examples/Modules/ParticleBoundaryProcess/inputs_absorption +inputFile = Examples/Tests/particle_boundary_process/inputs_absorption runtime_params = dim = 3 addToCompileString = USE_EB=TRUE @@ -3404,11 +3404,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons -analysisRoutine = Examples/Modules/ParticleBoundaryProcess/analysis_absorption.py +analysisRoutine = Examples/Tests/particle_boundary_process/analysis_absorption.py [particle_scrape] buildDir = . -inputFile = Examples/Modules/ParticleBoundaryScrape/inputs_scrape +inputFile = Examples/Tests/particle_boundary_scrape/inputs_scrape runtime_params = dim = 3 addToCompileString = USE_EB=TRUE @@ -3422,11 +3422,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons -analysisRoutine = Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py +analysisRoutine = Examples/Tests/particle_boundary_scrape/analysis_scrape.py [Python_particle_scrape] buildDir = . -inputFile = Examples/Modules/ParticleBoundaryScrape/PICMI_inputs_scrape.py +inputFile = Examples/Tests/particle_boundary_scrape/PICMI_inputs_scrape.py runtime_params = customRunCmd = python3 PICMI_inputs_scrape.py dim = 3 @@ -3442,11 +3442,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons -analysisRoutine = Examples/Modules/ParticleBoundaryScrape/analysis_scrape.py +analysisRoutine = Examples/Tests/particle_boundary_scrape/analysis_scrape.py [Python_particle_reflection] buildDir = . -inputFile = Examples/Modules/ParticleBoundaryProcess/PICMI_inputs_reflection.py +inputFile = Examples/Tests/particle_boundary_process/PICMI_inputs_reflection.py runtime_params = customRunCmd = python3 PICMI_inputs_reflection.py dim = 2 @@ -3460,11 +3460,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Modules/ParticleBoundaryProcess/analysis_reflection.py +analysisRoutine = Examples/Tests/particle_boundary_process/analysis_reflection.py [Python_particle_attr_access] buildDir = . -inputFile = Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py +inputFile = Examples/Tests/particle_data_python/PICMI_inputs_2d.py runtime_params = customRunCmd = python3 PICMI_inputs_2d.py dim = 2 @@ -3478,11 +3478,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/ParticleDataPython/analysis.py +analysisRoutine = Examples/Tests/particle_data_python/analysis.py [Python_particle_attr_access_unique] buildDir = . 
-inputFile = Examples/Tests/ParticleDataPython/PICMI_inputs_2d.py +inputFile = Examples/Tests/particle_data_python/PICMI_inputs_2d.py runtime_params = customRunCmd = python3 PICMI_inputs_2d.py --unique dim = 2 @@ -3496,11 +3496,11 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/ParticleDataPython/analysis.py +analysisRoutine = Examples/Tests/particle_data_python/analysis.py [Python_prev_positions] buildDir = . -inputFile = Examples/Tests/ParticleDataPython/PICMI_inputs_prev_pos_2d.py +inputFile = Examples/Tests/particle_data_python/PICMI_inputs_prev_pos_2d.py runtime_params = customRunCmd = python3 PICMI_inputs_prev_pos_2d.py dim = 2 @@ -3519,7 +3519,7 @@ analysisRoutine = Examples/analysis_default_regression.py [Performance_works_1_uniform_rest_32ppc] buildDir = . -inputFile = Examples/Tests/PerformanceTests/automated_test_1_uniform_rest_32ppc +inputFile = Examples/Tests/performance_tests/automated_test_1_uniform_rest_32ppc runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full dim = 3 addToCompileString = @@ -3537,7 +3537,7 @@ analysisRoutine = [Performance_works_2_uniform_rest_1ppc] buildDir = . -inputFile = Examples/Tests/PerformanceTests/automated_test_2_uniform_rest_1ppc +inputFile = Examples/Tests/performance_tests/automated_test_2_uniform_rest_1ppc runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full dim = 3 addToCompileString = @@ -3555,7 +3555,7 @@ analysisRoutine = [Performance_works_3_uniform_drift_4ppc] buildDir = . -inputFile = Examples/Tests/PerformanceTests/automated_test_3_uniform_drift_4ppc +inputFile = Examples/Tests/performance_tests/automated_test_3_uniform_drift_4ppc runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full dim = 3 addToCompileString = @@ -3573,7 +3573,7 @@ analysisRoutine = [Performance_works_4_labdiags_2ppc] buildDir = . -inputFile = Examples/Tests/PerformanceTests/automated_test_4_labdiags_2ppc +inputFile = Examples/Tests/performance_tests/automated_test_4_labdiags_2ppc runtime_params = amr.n_cell=64 64 64 max_step=10 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full dim = 3 addToCompileString = @@ -3591,7 +3591,7 @@ analysisRoutine = [Performance_works_5_loadimbalance] buildDir = . -inputFile = Examples/Tests/PerformanceTests/automated_test_5_loadimbalance +inputFile = Examples/Tests/performance_tests/automated_test_5_loadimbalance runtime_params = amr.max_grid_size=32 amr.n_cell=32 32 32 max_step=5 diagnostics.diags_names=diag1 diag1.intervals=0 diag1.diag_type=Full dim = 3 addToCompileString = @@ -3609,7 +3609,7 @@ analysisRoutine = [Performance_works_6_output_2ppc] buildDir = . -inputFile = Examples/Tests/PerformanceTests/automated_test_6_output_2ppc +inputFile = Examples/Tests/performance_tests/automated_test_6_output_2ppc runtime_params = amr.n_cell=64 64 64 max_step=10 dim = 3 addToCompileString = @@ -3627,7 +3627,7 @@ analysisRoutine = [Python_wrappers] buildDir = . -inputFile = Examples/Tests/PythonWrappers/PICMI_inputs_2d.py +inputFile = Examples/Tests/python_wrappers/PICMI_inputs_2d.py runtime_params = customRunCmd = python3 PICMI_inputs_2d.py dim = 2 @@ -3646,7 +3646,7 @@ analysisRoutine = Examples/analysis_default_regression.py [embedded_boundary_python_API] buildDir = . 
-inputFile = Examples/Modules/embedded_boundary_python_API/PICMI_inputs_EB_API.py +inputFile = Examples/Tests/embedded_boundary_python_api/PICMI_inputs_EB_API.py runtime_params = customRunCmd = python PICMI_inputs_EB_API.py dim = 3 @@ -3661,7 +3661,7 @@ numthreads = 1 compileTest = 0 doVis = 0 compareParticles = 0 -analysisRoutine = Examples/Modules/embedded_boundary_python_API/analysis.py +analysisRoutine = Examples/Tests/embedded_boundary_python_api/analysis.py [scraping] buildDir = . @@ -3699,7 +3699,7 @@ analysisRoutine = Examples/Tests/ion_stopping/analysis_ion_stopping.py [VayDeposition2D] buildDir = . -inputFile = Examples/Tests/VayDeposition/inputs_2d +inputFile = Examples/Tests/vay_deposition/inputs_2d runtime_params = dim = 2 addToCompileString = USE_PSATD=TRUE @@ -3713,11 +3713,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electron ion -analysisRoutine = Examples/Tests/VayDeposition/analysis.py +analysisRoutine = Examples/Tests/vay_deposition/analysis.py [VayDeposition3D] buildDir = . -inputFile = Examples/Tests/VayDeposition/inputs_3d +inputFile = Examples/Tests/vay_deposition/inputs_3d runtime_params = dim = 3 addToCompileString = USE_PSATD=TRUE @@ -3731,11 +3731,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electron ion -analysisRoutine = Examples/Tests/VayDeposition/analysis.py +analysisRoutine = Examples/Tests/vay_deposition/analysis.py [BTD_rz] buildDir = . -inputFile = Examples/Tests/BTD_rz/inputs_rz_z_boosted_BTD +inputFile = Examples/Tests/btd_rz/inputs_rz_z_boosted_BTD runtime_params = dim = 2 addToCompileString = USE_RZ=TRUE @@ -3747,4 +3747,4 @@ useOMP = 1 numthreads = 1 compileTest = 0 doVis = 0 -analysisRoutine = Examples/Tests/BTD_rz/analysis_BTD_laser_antenna.py +analysisRoutine = Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py From 6bc337f5f7100ba320098c4c4c92fa069e194772 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 2 Dec 2022 20:21:02 -0800 Subject: [PATCH 0178/1346] Check rho pointer in `if` conditions for charge deposition (#3544) --- Source/Evolve/WarpXEvolve.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index c5832275796..e8e6a025b56 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -548,7 +548,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) if (WarpX::fft_do_time_averaging) PSATDEraseAverageFields(); // 3) Deposit rho (in rho_new, since it will be moved during the loop) - if (WarpX::update_with_rho) + // (after checking that pointer to rho_fp on MR level 0 is not null) + if (rho_fp[0]) { // Deposit rho at relative time -dt // (dt[0] denotes the time step on mesh refinement level 0) @@ -612,7 +613,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) PSATDForwardTransformJ(current_fp, current_cp); // Deposit new rho - if (WarpX::update_with_rho) + // (after checking that pointer to rho_fp on MR level 0 is not null) + if (rho_fp[0]) { // Move rho deposited previously, from new to old PSATDMoveRhoNewToRhoOld(); From 3c6e064d9f40b2c4111f8cd2e7ea2af29336f270 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 5 Dec 2022 09:36:59 -0800 Subject: [PATCH 0179/1346] Update docutils version (#3546) * Update Docs/Doxyfile * Update Docs/requirements.txt versions * Update Docs/source/index.rst --- Docs/Doxyfile | 788 ++++++++++++++++++++++++++++-------------- Docs/requirements.txt | 8 +- Docs/source/index.rst | 14 +- 3 files changed, 
529 insertions(+), 281 deletions(-) diff --git a/Docs/Doxyfile b/Docs/Doxyfile index 4d20673fd7f..550ed9a7a01 100644 --- a/Docs/Doxyfile +++ b/Docs/Doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.13 +# Doxyfile 1.9.5 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -12,16 +12,26 @@ # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). +# +# Note: +# +# Use doxygen to compare the used configuration file with the template +# configuration file: +# doxygen -x [configFile] +# Use doxygen to compare the used configuration file with the template +# configuration file without replacing the environment variables or CMake type +# replacement variables: +# doxygen -x_noenv [configFile] #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv -# for the list of possible encodings. +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 @@ -32,7 +42,7 @@ DOXYFILE_ENCODING = UTF-8 # title of most generated pages and in a few other places. # The default value is: My Project. -PROJECT_NAME = "WarpX" +PROJECT_NAME = WarpX # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version @@ -60,16 +70,28 @@ PROJECT_LOGO = OUTPUT_DIRECTORY = -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096 +# sub-directories (in 2 levels) under the output directory of each output format +# and will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes -# performance problems for the file system. +# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to +# control the number of sub-directories. # The default value is: NO. CREATE_SUBDIRS = NO +# Controls the number of sub-directories that will be created when +# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every +# level increment doubles the number of directories, resulting in 4096 +# directories at level 8 which is the default and also the maximum value. The +# sub-directories are organized in 2 levels, the first level always has a fixed +# numer of 16 directories. +# Minimum value: 0, maximum value: 8, default value: 8. 
+# This tag requires that the tag CREATE_SUBDIRS is set to YES. + +CREATE_SUBDIRS_LEVEL = 8 + # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode @@ -81,14 +103,14 @@ ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian, +# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English +# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek, +# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with +# English messages), Korean, Korean-en (Korean with English messages), Latvian, +# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, +# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, +# Swedish, Turkish, Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English @@ -189,6 +211,16 @@ SHORT_NAMES = NO JAVADOC_AUTOBRIEF = NO +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus @@ -209,6 +241,14 @@ QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. @@ -232,20 +272,19 @@ TAB_SIZE = 4 # the documentation. An alias has the form: # name=value # For example adding -# "sideeffect=@par Side Effects:\n" +# "sideeffect=@par Side Effects:^^" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines. +# "Side Effects:". 
Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) ALIASES = -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all @@ -274,28 +313,40 @@ OPTIMIZE_FOR_FORTRAN = NO OPTIMIZE_OUTPUT_VHDL = NO +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. +# documentation. See https://daringfireball.net/projects/markdown/ for details. 
# The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. @@ -307,7 +358,7 @@ MARKDOWN_SUPPORT = YES # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 0. +# Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 0 @@ -328,7 +379,6 @@ AUTOLINK_SUPPORT = YES # diagrams that involve STL classes more complete and accurate. # The default value is: NO. -# TAGFILES += "cppreference-doxygen-web.tag.xml=http://en.cppreference.com/w/" BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to @@ -338,7 +388,7 @@ BUILTIN_STL_SUPPORT = YES CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. @@ -424,6 +474,19 @@ TYPEDEF_HIDES_STRUCT = NO LOOKUP_CACHE_SIZE = 0 +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- @@ -444,6 +507,12 @@ EXTRACT_ALL = YES EXTRACT_PRIVATE = YES +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. @@ -481,6 +550,13 @@ EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. 
If set to NO these # members will be included in the various overviews, but no documentation @@ -498,8 +574,8 @@ HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. +# declarations. If set to NO, these declarations will be included in the +# documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO @@ -518,12 +594,20 @@ HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. -# The default value is: system dependent. +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# Possible values are: SYSTEM, NO and YES. +# The default value is: SYSTEM. CASE_SENSE_NAMES = NO @@ -541,6 +625,12 @@ HIDE_SCOPE_NAMES = NO HIDE_COMPOUND_REFERENCE= NO +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. @@ -698,7 +788,8 @@ FILE_VERSION_FILTER = # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. +# will be used as the name of the layout file. See also section "Changing the +# layout of pages" for information. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE @@ -709,7 +800,7 @@ LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. 
To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. @@ -744,23 +835,35 @@ WARNINGS = YES WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. +# potential errors in the documentation, such as documenting some parameters in +# a documented function twice, or documenting parameters that don't exist or +# using markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = YES + # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC # The default value is: NO. WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO @@ -771,13 +874,27 @@ WARN_AS_ERROR = NO # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) +# See also: WARN_LINE_FORMAT # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" +# In the $text part of the WARN_FORMAT command it is possible that a reference +# to a more specific place is given. To make it easier to jump to this place +# (outside of doxygen) the user can define a custom "cut" / "paste" string. +# Example: +# WARN_LINE_FORMAT = "'vi $file +$line'" +# See also: WARN_FORMAT +# The default value is: at line $line of file $file. + +WARN_LINE_FORMAT = "at line $line of file $file" + # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard -# error (stderr). +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. When as file - is +# specified the warning and error messages are written to standard output +# (stdout). WARN_LOGFILE = @@ -791,18 +908,31 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. 
-INPUT = Doxygen/main.dox ../Source/ ../Tools/ ../Regression/Checksum/ -RECURSIVE = YES +INPUT = Doxygen/main.dox \ + ../Source/ \ + ../Tools/ \ + ../Regression/Checksum/ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: http://www.gnu.org/software/libiconv) for the list of -# possible encodings. +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# See also: INPUT_FILE_ENCODING # The default value is: UTF-8. INPUT_ENCODING = UTF-8 +# This tag can be used to specify the character encoding of the source files +# that doxygen parses The INPUT_FILE_ENCODING tag can be used to specify +# character encoding on a per file pattern basis. Doxygen will compare the file +# name with each pattern and apply the encoding instead of the default +# INPUT_ENCODING) if there is a match. The character encodings are a list of the +# form: pattern=encoding (like *.php=ISO-8859-1). See cfg_input_encoding +# "INPUT_ENCODING" for further information on supported encodings. + +INPUT_FILE_ENCODING = + # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. @@ -811,11 +941,15 @@ INPUT_ENCODING = UTF-8 # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, -# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. +# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, +# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C +# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.c \ *.cc \ @@ -863,7 +997,7 @@ EXCLUDE_PATTERNS = # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test +# ANamespace::AClass, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* @@ -911,6 +1045,11 @@ IMAGE_PATH = # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. # +# Note that doxygen will use the data processed and written to standard output +# for further processing, therefore nothing else, like debug statements or used +# commands (so in case of a Windows batch file always use @echo OFF), should be +# written to standard output. +# # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. 
@@ -952,6 +1091,15 @@ FILTER_SOURCE_PATTERNS = USE_MDFILE_AS_MAINPAGE = +# The Fortran standard specifies that for fixed formatted Fortran code all +# characters from position 72 are to be considered as comment. A common +# extension is to allow longer lines before the automatic comment starts. The +# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can +# be processed before the automatic comment starts. +# Minimum value: 7, maximum value: 10000, default value: 72. + +FORTRAN_COMMENT_AFTER = 72 + #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- @@ -979,7 +1127,7 @@ INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. +# entity all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO @@ -1011,12 +1159,12 @@ SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system -# (see http://www.gnu.org/software/global/global.html). You will need version +# (see https://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # @@ -1049,13 +1197,6 @@ VERBATIM_HEADERS = YES ALPHABETICAL_INDEX = YES -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored @@ -1153,10 +1294,27 @@ HTML_EXTRA_STYLESHEET = HTML_EXTRA_FILES = +# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output +# should be rendered with a dark or light theme. Default setting AUTO_LIGHT +# enables light output unless the user preference is dark output. Other options +# are DARK to always use dark mode, LIGHT to always use light mode, AUTO_DARK to +# default to dark mode unless the user prefers light mode, and TOGGLE to let the +# user toggle between dark and light mode via a button. +# Possible values are: LIGHT Always generate light output., DARK Always generate +# dark output., AUTO_LIGHT Automatically set the mode according to the user +# preference, use light mode if no preference is set (the default)., AUTO_DARK +# Automatically set the mode according to the user preference, use dark mode if +# no preference is set. and TOGGLE Allow to user to switch between light and +# dark mode via a button.. +# The default value is: AUTO_LIGHT. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE = AUTO_LIGHT + # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see -# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# this color. Hue is specified as an angle on a color-wheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. @@ -1165,7 +1323,7 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A +# in the HTML output. For a value of 0 the output will use gray-scales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1192,6 +1350,17 @@ HTML_COLORSTYLE_GAMMA = 80 HTML_TIMESTAMP = NO +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_MENUS = YES + # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. @@ -1215,13 +1384,14 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: http://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1235,6 +1405,13 @@ GENERATE_DOCSET = NO DOCSET_FEEDNAME = "Doxygen generated docs" +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. 
+ +DOCSET_FEEDURL = + # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. @@ -1260,8 +1437,12 @@ DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. +# on Windows. In the beginning of 2021 Microsoft took the original page, with +# a.o. the download links, offline the HTML help workshop was already many years +# in maintenance mode). You can download the HTML help workshop from the web +# archives at Installation executable (see: +# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo +# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe). # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML @@ -1291,7 +1472,7 @@ CHM_FILE = HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). +# (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. @@ -1336,7 +1517,8 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1344,8 +1526,8 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- -# folders). +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1353,30 +1535,30 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. 
Qt Help Project / Filter Attributes (see: -# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = @@ -1419,16 +1601,28 @@ DISABLE_INDEX = NO # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. +# further fine tune the look of the index (see "Fine-tuning the output"). As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. +# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # @@ -1453,6 +1647,24 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_FORMULA_FORMAT = png + # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML @@ -1462,19 +1674,14 @@ EXT_LINKS_IN_WINDOW = NO FORMULA_FONTSIZE = 10 -# Use the FORMULA_TRANPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. -FORMULA_TRANSPARENT = YES +FORMULA_MACROFILE = # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# http://www.mathjax.org) which uses client side Javascript for the rendering +# https://www.mathjax.org) which uses client side JavaScript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path @@ -1484,11 +1691,29 @@ FORMULA_TRANSPARENT = YES USE_MATHJAX = NO +# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. +# Note that the different versions of MathJax have different requirements with +# regards to the different settings, so it is possible that also other MathJax +# settings have to be changed when switching between the different MathJax +# versions. +# Possible values are: MathJax_2 and MathJax_3. +# The default value is: MathJax_2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_VERSION = MathJax_2 + # When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. +# the MathJax output. For more details about the output format see MathJax +# version 2 (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 +# (see: +# http://docs.mathjax.org/en/latest/web/components/output.html). # Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. +# compatibility. This is the name for Mathjax version 2, for MathJax version 3 +# this will be translated into chtml), NativeMML (i.e. MathML. Only supported +# for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This +# is the name for Mathjax version 3, for MathJax version 2 this will be +# translated into HTML-CSS) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1501,22 +1726,29 @@ MATHJAX_FORMAT = HTML-CSS # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of -# MathJax from http://www.mathjax.org before deployment. -# The default value is: http://cdn.mathjax.org/mathjax/latest. 
+# MathJax from https://www.mathjax.org before deployment. The default value is: +# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 +# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example +# for MathJax version 2 (see +# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# For example for MathJax version 3 (see +# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): +# MATHJAX_EXTENSIONS = ams # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1544,7 +1776,7 @@ MATHJAX_CODEFILE = SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. There +# implemented using a web server instead of a web client using JavaScript. There # are two flavors of web server based searching depending on the EXTERNAL_SEARCH # setting. When disabled, doxygen will generate a PHP script for searching and # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing @@ -1563,7 +1795,8 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). +# Xapian (see: +# https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1576,8 +1809,9 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: http://xapian.org/). See the section "External Indexing and -# Searching" for details. +# Xapian (see: +# https://xapian.org/). See the section "External Indexing and Searching" for +# details. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = @@ -1628,21 +1862,35 @@ LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. # -# Note that when enabling USE_PDFLATEX this option is only used for generating -# bitmaps for formulas in the HTML output, but not in the Makefile that is -# written to the output directory. -# The default file is: latex. +# Note that when not enabling USE_PDFLATEX the default is latex when enabling +# USE_PDFLATEX the default is pdflatex and when in the later case latex is +# chosen this is overwritten by pdflatex. For specific output languages the +# default can have been set differently, this depends on the implementation of +# the output language. # This tag requires that the tag GENERATE_LATEX is set to YES. 
LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate # index for LaTeX. +# Note: This tag is used in the Makefile / make.bat. +# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file +# (.tex). # The default file is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. MAKEINDEX_CMD_NAME = makeindex +# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to +# generate index for LaTeX. In case there is no backslash (\) as first character +# it will be automatically added in the LaTeX code. +# Note: This tag is used in the generated output file (.tex). +# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat. +# The default value is: makeindex. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_MAKEINDEX_CMD = makeindex + # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. @@ -1672,29 +1920,31 @@ PAPER_TYPE = a4 EXTRA_PACKAGES = -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the -# generated LaTeX document. The header should contain everything until the first -# chapter. If it is left blank doxygen will generate a standard header. See -# section "Doxygen usage" for information on how to let doxygen write the -# default header to a separate file. +# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for +# the generated LaTeX document. The header should contain everything until the +# first chapter. If it is left blank doxygen will generate a standard header. It +# is highly recommended to start with a default header using +# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty +# and then modify the file new_header.tex. See also section "Doxygen usage" for +# information on how to generate the default header that doxygen normally uses. # -# Note: Only use a user-defined header if you know what you are doing! The -# following commands have a special meaning inside the header: $title, -# $datetime, $date, $doxygenversion, $projectname, $projectnumber, -# $projectbrief, $projectlogo. Doxygen will replace $title with the empty -# string, for the replacement values of the other commands the user is referred -# to HTML_HEADER. +# Note: Only use a user-defined header if you know what you are doing! +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. The following +# commands have a special meaning inside the header (and footer): For a +# description of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the -# generated LaTeX document. The footer should contain everything after the last -# chapter. If it is left blank doxygen will generate a standard footer. See +# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for +# the generated LaTeX document. The footer should contain everything after the +# last chapter. If it is left blank doxygen will generate a standard footer. See # LATEX_HEADER for more information on how to generate a default footer and what -# special commands can be used inside the footer. -# -# Note: Only use a user-defined footer if you know what you are doing! 
+# special commands can be used inside the footer. See also section "Doxygen +# usage" for information on how to generate the default footer that doxygen +# normally uses. Note: Only use a user-defined footer if you know what you are +# doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = @@ -1727,9 +1977,11 @@ LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES -# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES, to get a -# higher quality PDF documentation. +# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as +# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX +# files. Set this option to YES, to get a higher quality PDF documentation. +# +# See also section LATEX_CMD_NAME for selecting the engine. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1737,8 +1989,7 @@ USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep running -# if errors occur, instead of asking the user for help. This option is also used -# when generating formulas in HTML. +# if errors occur, instead of asking the user for help. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1751,19 +2002,9 @@ LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO -# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source -# code with syntax highlighting in the LaTeX output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_SOURCE_CODE = NO - # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See -# http://en.wikipedia.org/wiki/BibTeX and \cite for more info. +# https://en.wikipedia.org/wiki/BibTeX and \cite for more info. # The default value is: plain. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1777,6 +2018,14 @@ LATEX_BIB_STYLE = plain LATEX_TIMESTAMP = NO +# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute) +# path from which the emoji images will be read. If a relative path is entered, +# it will be relative to the LATEX_OUTPUT directory. If left blank the +# LATEX_OUTPUT directory will be used. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EMOJI_DIRECTORY = + #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- @@ -1816,9 +2065,9 @@ COMPACT_RTF = NO RTF_HYPERLINKS = NO -# Load stylesheet definitions from file. Syntax is similar to doxygen's config -# file, i.e. a series of assignments. You only have to provide replacements, -# missing definitions are set to their default value. +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# configuration file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. # # See also section "Doxygen usage" for information on how to generate the # default style sheet that doxygen normally uses. 
@@ -1827,22 +2076,12 @@ RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an RTF document. Syntax is -# similar to doxygen's config file. A template extensions file can be generated -# using doxygen -e rtf extensionFile. +# similar to doxygen's configuration file. A template extensions file can be +# generated using doxygen -e rtf extensionFile. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_EXTENSIONS_FILE = -# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code -# with syntax highlighting in the RTF output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_SOURCE_CODE = NO - #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -1914,6 +2153,13 @@ XML_OUTPUT = doxyxml XML_PROGRAMLISTING = YES +# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include +# namespace members in file scope as well, matching the HTML output. +# The default value is: NO. +# This tag requires that the tag GENERATE_XML is set to YES. + +XML_NS_MEMB_FILE_SCOPE = NO + #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- @@ -1932,23 +2178,14 @@ GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the -# program listings (including syntax highlighting and cross-referencing -# information) to the DOCBOOK output. Note that enabling this will significantly -# increase the size of the DOCBOOK output. -# The default value is: NO. -# This tag requires that the tag GENERATE_DOCBOOK is set to YES. - -DOCBOOK_PROGRAMLISTING = NO - #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an -# AutoGen Definitions (see http://autogen.sf.net) file that captures the -# structure of the code including all documentation. Note that this feature is -# still experimental and incomplete at the moment. +# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures +# the structure of the code including all documentation. Note that this feature +# is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO @@ -2027,7 +2264,8 @@ SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by the -# preprocessor. +# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of +# RECURSIVE has no effect here. # This tag requires that the tag SEARCH_INCLUDES is set to YES. INCLUDE_PATH = @@ -2048,25 +2286,24 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. 
-PREDEFINED = AMREX_Linux=1 \ - AMREX_PARTICLES=1 \ - AMREX_USE_MPI=1 \ - AMREX_USE_OMP=1 \ - AMREX_SPACEDIM=3 \ - AMREX_TINY_PROFILING=1 \ - BL_Linux=1 \ - BL_USE_MPI=1 \ - BL_USE_OMP=1 \ +PREDEFINED = AMREX_Linux=1 \ + AMREX_PARTICLES=1 \ + AMREX_USE_MPI=1 \ + AMREX_USE_OMP=1 \ + AMREX_SPACEDIM=3 \ + AMREX_TINY_PROFILING=1 \ + BL_Linux=1 \ + BL_USE_MPI=1 \ + BL_USE_OMP=1 \ AMREX_USE_SENSEI_INSITU=1 \ - WARPX=1 \ - WARPX_DIM_RZ=1 \ - WARPX_DIM_XZ=1 \ - WARPX_USE_GPU=1 \ - WARPX_USE_OPENPMD=1 \ - WARPX_USE_PSATD=1 \ - WARPX_QED=1 \ - WARPX_QED_TABLE_GEN=1 \ - + WARPX=1 \ + WARPX_DIM_RZ=1 \ + WARPX_DIM_XZ=1 \ + WARPX_USE_GPU=1 \ + WARPX_USE_OPENPMD=1 \ + WARPX_USE_PSATD=1 \ + WARPX_QED=1 \ + WARPX_QED_TABLE_GEN=1 # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The @@ -2104,7 +2341,7 @@ SKIP_FUNCTION_MACROS = YES # the path). If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. -TAGFILES = amrex-doxygen-web.tag.xml=https://amrex-codes.github.io/amrex/doxygen/ \ +TAGFILES = amrex-doxygen-web.tag.xml=https://amrex-codes.github.io/amrex/doxygen/ \ openpmd-api-doxygen-web.tag.xml=https://openpmd-api.readthedocs.io/en/latest/_static/doxyhtml/ # When a file name is specified after GENERATE_TAGFILE, doxygen will create a @@ -2134,34 +2371,10 @@ EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). -# The default file (with absolute path) is: /usr/bin/perl. - -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- -# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram -# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to -# NO turns the diagrams off. Note that this option also works with HAVE_DOT -# disabled, but it is recommended to install and use dot, since it yields more -# powerful graphs. -# The default value is: YES. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. @@ -2194,35 +2407,50 @@ HAVE_DOT = NO DOT_NUM_THREADS = 0 -# When you want a differently looking font in the dot files that doxygen -# generates you can specify the font name using DOT_FONTNAME. You need to make -# sure dot is able to find the font, which can be done by putting it in a -# standard location or by setting the DOTFONTPATH environment variable or by -# setting DOT_FONTPATH to the directory containing the font. -# The default value is: Helvetica. +# DOT_COMMON_ATTR is common attributes for nodes, edges and labels of +# subgraphs. 
When you want a differently looking font in the dot files that +# doxygen generates you can specify fontname, fontcolor and fontsize attributes. +# For details please see Node, +# Edge and Graph Attributes specification You need to make sure dot is able +# to find the font, which can be done by putting it in a standard location or by +# setting the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the +# directory containing the font. Default graphviz fontsize is 14. +# The default value is: fontname=Helvetica,fontsize=10. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_COMMON_ATTR = "fontname=Helvetica,fontsize=10" + +# DOT_EDGE_ATTR is concatenated with DOT_COMMON_ATTR. For elegant style you can +# add 'arrowhead=open, arrowtail=open, arrowsize=0.5'. Complete documentation about +# arrows shapes. +# The default value is: labelfontname=Helvetica,labelfontsize=10. # This tag requires that the tag HAVE_DOT is set to YES. -DOT_FONTNAME = Helvetica +DOT_EDGE_ATTR = "labelfontname=Helvetica,labelfontsize=10" -# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of -# dot graphs. -# Minimum value: 4, maximum value: 24, default value: 10. +# DOT_NODE_ATTR is concatenated with DOT_COMMON_ATTR. For view without boxes +# around nodes set 'shape=plain' or 'shape=plaintext' Shapes specification +# The default value is: shape=box,height=0.2,width=0.4. # This tag requires that the tag HAVE_DOT is set to YES. -DOT_FONTSIZE = 10 +DOT_NODE_ATTR = "shape=box,height=0.2,width=0.4" -# By default doxygen will tell dot to use the default font as specified with -# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set -# the path where dot can find it using this tag. +# You can set the path where dot can find font specified with fontname in +# DOT_COMMON_ATTR and others dot attributes. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTPATH = -# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for -# each documented class showing the direct and indirect inheritance relations. -# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. +# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a +# graph for each documented class showing the direct and indirect inheritance +# relations. In case HAVE_DOT is set as well dot will be used to draw the graph, +# otherwise the built-in generator will be used. If the CLASS_GRAPH tag is set +# to TEXT the direct and indirect inheritance relations will be shown as texts / +# links. +# Possible values are: NO, YES, TEXT and GRAPH. # The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. CLASS_GRAPH = YES @@ -2236,7 +2464,8 @@ CLASS_GRAPH = YES COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for -# groups, showing the direct groups dependencies. +# groups, showing the direct groups dependencies. See also the chapter Grouping +# in the manual. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2259,10 +2488,32 @@ UML_LOOK = NO # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. +# This tag requires that the tag UML_LOOK is set to YES. 
UML_LIMIT_NUM_FIELDS = 10 +# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and +# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS +# tag is set to YES, doxygen will add type and arguments for attributes and +# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen +# will not generate fields with class member information in the UML graphs. The +# class diagrams will look similar to the default class diagrams but using UML +# notation for the relationships. +# Possible values are: NO, YES and NONE. +# The default value is: NO. +# This tag requires that the tag UML_LOOK is set to YES. + +DOT_UML_DETAILS = NO + +# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters +# to display on a single line. If the actual line length exceeds this threshold +# significantly it will wrapped across multiple lines. Some heuristics are apply +# to avoid ugly line breaks. +# Minimum value: 0, maximum value: 1000, default value: 17. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_WRAP_THRESHOLD = 17 + # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. @@ -2329,6 +2580,13 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES +# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels +# of child directories generated in directory dependency graphs by dot. +# Minimum value: 1, maximum value: 25, default value: 1. +# This tag requires that the tag DIRECTORY_GRAPH is set to YES. + +DIR_GRAPH_MAX_DEPTH = 1 + # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. For an explanation of the image formats see the section # output formats in the documentation of the dot tool (Graphviz (see: @@ -2382,10 +2640,10 @@ MSCFILE_DIRS = DIAFILE_DIRS = # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the -# path where java can find the plantuml.jar file. If left blank, it is assumed -# PlantUML is not used or called during a preprocessing step. Doxygen will -# generate a warning when it encounters a \startuml command in this case and -# will not generate output for the diagram. +# path where java can find the plantuml.jar file or to the filename of jar file +# to be used. If left blank, it is assumed PlantUML is not used or called during +# a preprocessing step. Doxygen will generate a warning when it encounters a +# \startuml command in this case and will not generate output for the diagram. PLANTUML_JAR_PATH = @@ -2423,18 +2681,6 @@ DOT_GRAPH_MAX_NODES = 50 MAX_DOT_GRAPH_DEPTH = 0 -# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent -# background. This is disabled by default, because dot on Windows does not seem -# to support this out of the box. -# -# Warning: Depending on the platform used, enabling this option may lead to -# badly anti-aliased labels on the edges of a graph (i.e. they become hard to -# read). -# The default value is: NO. -# This tag requires that the tag HAVE_DOT is set to YES. - -DOT_TRANSPARENT = NO - # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). 
This # makes dot run faster, but since only newer versions of dot (>1.8.10) support @@ -2447,14 +2693,18 @@ DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page # explaining the meaning of the various boxes and arrows in the dot generated # graphs. +# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal +# graphical representation for inheritance and collaboration diagrams is used. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GENERATE_LEGEND = YES -# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot +# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate # files that are used to generate the various graphs. +# +# Note: This setting is not only used for dot files but also for msc temporary +# files. # The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. DOT_CLEANUP = YES diff --git a/Docs/requirements.txt b/Docs/requirements.txt index 0ce759b3171..1a4068a8e23 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -7,9 +7,7 @@ # WarpX PICMI bindings w/o C++ component (used for autoclass docs) -e Python breathe -# docutils 0.17 breaks HTML tags & RTD theme -# https://github.com/sphinx-doc/sphinx/issues/9001 -docutils<=0.16 +docutils>=0.17.1 # PICMI API docs # note: keep in sync with version in ../requirements.txt @@ -19,7 +17,7 @@ picmistandard==0.0.22 pygments recommonmark -sphinx>=2.0 +sphinx>=5.3 sphinx-design -sphinx_rtd_theme>=0.3.1 +sphinx_rtd_theme>=1.1.1 sphinxcontrib-napoleon diff --git a/Docs/source/index.rst b/Docs/source/index.rst index f01b5144f7e..80c307b1462 100644 --- a/Docs/source/index.rst +++ b/Docs/source/index.rst @@ -35,13 +35,13 @@ We also have a `discussion page /* front page: hide chapter titles * needed for consistent HTML-PDF-EPUB chapters */ - div#installation.section, - div#usage.section, - div#theory.section, - div#data-analysis.section, - div#development.section, - div#maintenance.section, - div#epilogue.section { + section#installation, + section#usage, + section#theory, + section#data-analysis, + section#development, + section#maintenance, + section#epilogue { display:none; } From 4921e89306f84dc674f1a3bc0dc0150a8f11aa0f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Dec 2022 09:04:50 -0800 Subject: [PATCH 0180/1346] [pre-commit.ci] pre-commit autoupdate (#3547) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/mgedmin/check-manifest: 0.48 → 0.49](https://github.com/mgedmin/check-manifest/compare/0.48...0.49) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b97538cae67..a11f4c3e2f4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -105,7 +105,7 @@ repos: # Checks the manifest for missing files (native support) - repo: https://github.com/mgedmin/check-manifest - rev: "0.48" + rev: "0.49" hooks: - id: check-manifest # This is a slow hook, so only run this if --hook-stage manual is passed From 4306b449c2afc7d09b7a1094331d60597fdb31f5 Mon Sep 17 00:00:00 2001 From: kngott Date: Tue, 6 Dec 2022 17:17:45 -0800 Subject: [PATCH 0181/1346] Fix the CUDA Aware part of the Perlmutter script. 
(#3549) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a Gordon Bell winner, I take no responsibility for this bug. But, I'll fix it. And it gives solid performance enhancement, so I will take responsibility for that. 😄 --- Tools/machines/perlmutter-nersc/perlmutter.sbatch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tools/machines/perlmutter-nersc/perlmutter.sbatch b/Tools/machines/perlmutter-nersc/perlmutter.sbatch index 1495d8b9f74..bef40942ed6 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter.sbatch @@ -30,7 +30,7 @@ export MPICH_OFI_NIC_POLICY=GPU export SRUN_CPUS_PER_TASK=32 # depends on https://github.com/ECP-WarpX/WarpX/issues/2009 -#GPU_AWARE_MPI="amrex.the_arena_is_managed=1 amrex.use_gpu_aware_mpi=1" +#GPU_AWARE_MPI="amrex.the_arena_is_managed=0 amrex.use_gpu_aware_mpi=1" GPU_AWARE_MPI="" # CUDA visible devices are ordered inverse to local task IDs From 02447ce0f59e729865a8cbe9502bf6ca0c91e2cd Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 6 Dec 2022 17:33:15 -0800 Subject: [PATCH 0182/1346] Zenodo: Move Gunther to Contributors --- .zenodo.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 6aa723ea0e3..7d5505c9849 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -148,11 +148,6 @@ "name": "Scherpelz, Peter", "orcid": "0000-0001-8185-3387" }, - { - "affiliation": "Lawrence Berkeley National Laboratory", - "name": "Weber, Gunther H.", - "orcid": "0000-0002-1794-1398" - }, { "affiliation": "Lawrence Berkeley National Laboratory", "name": "Yang, Eloise", @@ -185,6 +180,11 @@ } ], "contributors": [ + { + "affiliation": "Lawrence Berkeley National Laboratory", + "name": "Weber, Gunther H.", + "orcid": "0000-0002-1794-1398" + }, { "type": "Other", "affiliation": "Intel", From 4073384c7b66b1848bcc94e6c986f7d532c7da11 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 7 Dec 2022 15:40:02 -0800 Subject: [PATCH 0183/1346] PSATD: Implement First-Order Equations (#3466) * Implement First-Order PSATD Equations * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix Unused Parameter Warning * Fix RZ Build * Fix Normalization of G to Match PML * Add CI Test: 3D Uniform Plasma * Cleaning * Update 2D CI Checksums * Update 3D CI Checksums * Add F,G to CI Checksums of `uniform_plasma_multiJ` * Allow User to Choose First-Order v. 
Second-Order * Add WARPX_ALWAYS_ASSERT_WITH_MESSAGE * Rename New Class `PsatdAlgorithmFirstOrder` * Remove Inline Comment * Update RZ CI Checksums * Fix inline comment * Use auxiliary variables to avoid divisions * Use auxiliary variables to avoid divisions * Make `nci_psatd_stability` dir and merge inputs * Move all Galilean tests to `nci_psatd_stability` * Fix CI * Fix index for backward FFT of J Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../analysis_galilean.py} | 0 .../nci_psatd_stability/analysis_multiJ.py | 49 +++ .../inputs_2d | 0 .../inputs_2d_hybrid | 0 .../inputs_3d | 117 +++--- .../inputs_avg_2d | 0 .../inputs_avg_3d | 0 .../inputs_rz | 0 .../Langmuir_multi_2d_psatd_multiJ.json | 24 +- .../Langmuir_multi_2d_psatd_multiJ_nodal.json | 26 +- .../Langmuir_multi_psatd_multiJ.json | 36 +- .../Langmuir_multi_psatd_multiJ_nodal.json | 28 +- .../benchmarks_json/multi_J_rz_psatd.json | 40 +- .../uniform_plasma_multiJ.json | 35 ++ Regression/WarpX-tests.ini | 86 ++-- Source/BoundaryConditions/PML.H | 2 +- Source/BoundaryConditions/PML.cpp | 8 +- Source/Evolve/WarpXEvolve.cpp | 21 +- .../SpectralAlgorithms/CMakeLists.txt | 1 + .../SpectralAlgorithms/Make.package | 1 + .../PsatdAlgorithmComoving.cpp | 24 +- .../PsatdAlgorithmFirstOrder.H | 100 +++++ .../PsatdAlgorithmFirstOrder.cpp | 375 ++++++++++++++++++ .../PsatdAlgorithmGalileanRZ.cpp | 12 +- .../PsatdAlgorithmJConstantInTime.cpp | 42 +- .../PsatdAlgorithmJLinearInTime.cpp | 6 +- .../SpectralAlgorithms/PsatdAlgorithmRZ.cpp | 27 +- .../SpectralSolver/SpectralFieldData.H | 11 +- .../SpectralSolver/SpectralFieldData.cpp | 20 +- .../SpectralSolver/SpectralSolver.H | 3 + .../SpectralSolver/SpectralSolver.cpp | 35 +- Source/FieldSolver/WarpXPushFieldsEM.cpp | 66 ++- Source/Initialization/WarpXInitData.cpp | 4 +- Source/Utils/WarpXAlgorithmSelection.H | 11 +- Source/Utils/WarpXAlgorithmSelection.cpp | 10 +- Source/WarpX.H | 7 +- Source/WarpX.cpp | 22 +- 37 files changed, 957 insertions(+), 292 deletions(-) rename Examples/Tests/{galilean/analysis.py => nci_psatd_stability/analysis_galilean.py} (100%) create mode 100755 Examples/Tests/nci_psatd_stability/analysis_multiJ.py rename Examples/Tests/{galilean => nci_psatd_stability}/inputs_2d (100%) rename Examples/Tests/{galilean => nci_psatd_stability}/inputs_2d_hybrid (100%) rename Examples/Tests/{galilean => nci_psatd_stability}/inputs_3d (63%) rename Examples/Tests/{averaged_galilean => nci_psatd_stability}/inputs_avg_2d (100%) rename Examples/Tests/{averaged_galilean => nci_psatd_stability}/inputs_avg_3d (100%) rename Examples/Tests/{galilean => nci_psatd_stability}/inputs_rz (100%) create mode 100644 Regression/Checksum/benchmarks_json/uniform_plasma_multiJ.json create mode 100644 Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H create mode 100644 Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp diff --git a/Examples/Tests/galilean/analysis.py b/Examples/Tests/nci_psatd_stability/analysis_galilean.py similarity index 100% rename from Examples/Tests/galilean/analysis.py rename to Examples/Tests/nci_psatd_stability/analysis_galilean.py diff --git a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py new file mode 100755 index 00000000000..1c68b114c1a --- /dev/null +++ b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +""" +This script is used to test the results of the multi-J 
PSATD +first-order equations, with one J deposition. It compares the +energy of the electric field with a given reference energy. + +The reference energy is computed by running the same test with J constant +in time, rho linear in time, and without divergence cleaning. The reference +energy corresponds to unstable results due to NCI (suppressed by the use of +both J and rho constant in time, and with divergence cleaning). +""" +import os +import sys + +import numpy as np +import scipy.constants as scc + +import yt ; yt.funcs.mylog.setLevel(0) +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +filename = sys.argv[1] + +ds = yt.load(filename) + +# yt 4.0+ has rounding issues with our domain data: +# RuntimeError: yt attempted to read outside the boundaries +# of a non-periodic domain along dimension 0. +if 'force_periodicity' in dir(ds): ds.force_periodicity() + +all_data = ds.covering_grid(level=0, left_edge=ds.domain_left_edge, dims=ds.domain_dimensions) +Ex = all_data['boxlib', 'Ex'].squeeze().v +Ey = all_data['boxlib', 'Ey'].squeeze().v +Ez = all_data['boxlib', 'Ez'].squeeze().v + +# Set reference energy values, and tolerances for numerical stability and charge conservation +tol_energy = 1e-8 +energy_ref = 66e6 + +# Check numerical stability by comparing electric field energy to reference energy +energy = np.sum(scc.epsilon_0/2*(Ex**2+Ey**2+Ez**2)) +err_energy = energy / energy_ref +print('\nCheck numerical stability:') +print(f'err_energy = {err_energy}') +print(f'tol_energy = {tol_energy}') +assert(err_energy < tol_energy) + +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/galilean/inputs_2d b/Examples/Tests/nci_psatd_stability/inputs_2d similarity index 100% rename from Examples/Tests/galilean/inputs_2d rename to Examples/Tests/nci_psatd_stability/inputs_2d diff --git a/Examples/Tests/galilean/inputs_2d_hybrid b/Examples/Tests/nci_psatd_stability/inputs_2d_hybrid similarity index 100% rename from Examples/Tests/galilean/inputs_2d_hybrid rename to Examples/Tests/nci_psatd_stability/inputs_2d_hybrid diff --git a/Examples/Tests/galilean/inputs_3d b/Examples/Tests/nci_psatd_stability/inputs_3d similarity index 63% rename from Examples/Tests/galilean/inputs_3d rename to Examples/Tests/nci_psatd_stability/inputs_3d index 7a0d807486b..53305af12fe 100644 --- a/Examples/Tests/galilean/inputs_3d +++ b/Examples/Tests/nci_psatd_stability/inputs_3d @@ -1,84 +1,81 @@ -################################# -####### GENERAL PARAMETERS ###### -################################# -max_step = 300 - -amr.n_cell = 32 32 32 -warpx.numprocs = 1 1 2 -amr.max_level = 0 -psatd.v_galilean = 0. 0. 0.99498743710662 - -geometry.dims = 3 -geometry.prob_lo = -9.67 -9.67 -9.67 -geometry.prob_hi = 9.67 9.67 9.67 - -################################# -###### Boundary condition ####### -################################# -boundary.field_lo = periodic periodic periodic -boundary.field_hi = periodic periodic periodic - -################################# -############ NUMERICS ########### -################################# -warpx.verbose = 1 - +# algo algo.current_deposition = direct algo.maxwell_solver = psatd algo.particle_pusher = vay - -warpx.cfl = 1. 
algo.particle_shape = 3 -################################# -############ PLASMA ############# -################################# -particles.species_names = electrons ions +# amr +amr.max_level = 0 +amr.n_cell = 32 32 32 -warpx.do_nodal = 1 -warpx.use_filter = 1 +# boundary +boundary.field_hi = periodic periodic periodic +boundary.field_lo = periodic periodic periodic -psatd.nox = 8 -psatd.noy = 8 -psatd.noz = 8 +# diag1 +diag1.diag_type = Full +diag1.intervals = 300 +# diagnostics +diagnostics.diags_names = diag1 + +# electrons electrons.charge = -q_e -electrons.mass = m_e +electrons.density = 282197938148984.7 electrons.injection_style = "NUniformPerCell" +electrons.mass = m_e +electrons.momentum_distribution_type = "gaussian" electrons.num_particles_per_cell_each_dim = 1 1 1 electrons.profile = constant -electrons.density = 282197938148984.7 -electrons.momentum_distribution_type = "gaussian" -electrons.uz_m = 9.9498743710661994 -electrons.xmin = -9.67 -electrons.xmax = 9.67 -electrons.ymin = -9.67 -electrons.ymax = 9.67 -electrons.zmin = -9.67 -electrons.zmax = 9.67 electrons.ux_th = 0.0001 electrons.uy_th = 0.0001 +electrons.uz_m = 9.9498743710661994 electrons.uz_th = 0.0001 +electrons.xmax = 9.67 +electrons.xmin = -9.67 +electrons.ymax = 9.67 +electrons.ymin = -9.67 +electrons.zmax = 9.67 +electrons.zmin = -9.67 + +# geometry +geometry.dims = 3 +geometry.prob_hi = 9.67 9.67 9.67 +geometry.prob_lo = -9.67 -9.67 -9.67 +# ions ions.charge = q_e -ions.mass = m_p +ions.density = 282197938148984.7 ions.injection_style = "NUniformPerCell" +ions.mass = m_p +ions.momentum_distribution_type = "gaussian" ions.num_particles_per_cell_each_dim = 1 1 1 ions.profile = constant -ions.density = 282197938148984.7 -ions.momentum_distribution_type = "gaussian" -ions.uz_m = 9.9498743710661994 -ions.xmin = -9.67 -ions.xmax = 9.67 -ions.ymin = -9.67 -ions.ymax = 9.67 -ions.zmin = -9.67 -ions.zmax = 9.67 ions.ux_th = 0.0001 ions.uy_th = 0.0001 +ions.uz_m = 9.9498743710661994 ions.uz_th = 0.0001 +ions.xmax = 9.67 +ions.xmin = -9.67 +ions.ymax = 9.67 +ions.ymin = -9.67 +ions.zmax = 9.67 +ions.zmin = -9.67 -# Diagnostics -diagnostics.diags_names = diag1 -diag1.intervals = 100 -diag1.diag_type = Full +# max_step +max_step = 300 + +# particles +particles.species_names = electrons ions + +# psatd +psatd.nox = 8 +psatd.noy = 8 +psatd.noz = 8 + +# warpx +warpx.cfl = 1. 
+warpx.do_nodal = 1 +warpx.numprocs = 1 1 2 +warpx.use_filter = 1 +warpx.verbose = 1 diff --git a/Examples/Tests/averaged_galilean/inputs_avg_2d b/Examples/Tests/nci_psatd_stability/inputs_avg_2d similarity index 100% rename from Examples/Tests/averaged_galilean/inputs_avg_2d rename to Examples/Tests/nci_psatd_stability/inputs_avg_2d diff --git a/Examples/Tests/averaged_galilean/inputs_avg_3d b/Examples/Tests/nci_psatd_stability/inputs_avg_3d similarity index 100% rename from Examples/Tests/averaged_galilean/inputs_avg_3d rename to Examples/Tests/nci_psatd_stability/inputs_avg_3d diff --git a/Examples/Tests/galilean/inputs_rz b/Examples/Tests/nci_psatd_stability/inputs_rz similarity index 100% rename from Examples/Tests/galilean/inputs_rz rename to Examples/Tests/nci_psatd_stability/inputs_rz diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json index b0c362f0fb6..c342d9dcaeb 100644 --- a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ.json @@ -1,29 +1,29 @@ { "electrons": { - "particle_momentum_x": 5.664739488600762e-20, + "particle_momentum_x": 5.663705657675969e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 5.664739488600764e-20, - "particle_position_x": 0.6553599999999999, + "particle_momentum_z": 5.663705657675969e-20, + "particle_position_x": 0.65536, "particle_position_y": 0.65536, "particle_weight": 3200000000000000.5 }, "lev=0": { "Bx": 0.0, - "By": 3.4900393205053586, + "By": 3.4892704618136277, "Bz": 0.0, - "Ex": 3771422651410.755, + "Ex": 3771082786646.7104, "Ey": 0.0, - "Ez": 3771422651410.742, - "jx": 1.0095457953459832e+16, + "Ez": 3771082786646.702, + "jx": 1.0093631772735916e+16, "jy": 0.0, - "jz": 1.0095457953459836e+16 + "jz": 1.0093631772735912e+16 }, "positrons": { - "particle_momentum_x": 5.664739488600754e-20, + "particle_momentum_x": 5.663705657675971e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 5.664739488600756e-20, - "particle_position_x": 0.6553599999999999, - "particle_position_y": 0.6553599999999999, + "particle_momentum_z": 5.663705657675969e-20, + "particle_position_x": 0.65536, + "particle_position_y": 0.65536, "particle_weight": 3200000000000000.5 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json index 66c8e3e8035..2aaa9e0f25d 100644 --- a/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_2d_psatd_multiJ_nodal.json @@ -1,29 +1,29 @@ { "electrons": { - "particle_momentum_x": 5.668522616618711e-20, + "particle_momentum_x": 5.668409581183492e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 5.668522616618715e-20, - "particle_position_x": 0.6553600000002356, - "particle_position_y": 0.6553600000002355, + "particle_momentum_z": 5.668409581183495e-20, + "particle_position_x": 0.6553600000004717, + "particle_position_y": 0.6553600000004718, "particle_weight": 3200000000000000.5 }, "lev=0": { "Bx": 0.0, - "By": 5.6351165293218966, + "By": 5.634624433795568, "Bz": 0.0, - "Ex": 3747153697353.926, + "Ex": 3747068321640.6587, "Ey": 0.0, - "Ez": 3747153697353.9287, - "jx": 1.0088631639558242e+16, + "Ez": 3747068321640.659, + "jx": 1.0088430505673088e+16, "jy": 0.0, - "jz": 1.0088631639558248e+16 + "jz": 
1.0088430505673096e+16 }, "positrons": { - "particle_momentum_x": 5.66852261661871e-20, + "particle_momentum_x": 5.668409581183492e-20, "particle_momentum_y": 0.0, - "particle_momentum_z": 5.668522616618714e-20, - "particle_position_x": 0.6553600000002356, - "particle_position_y": 0.6553600000002356, + "particle_momentum_z": 5.668409581183495e-20, + "particle_position_x": 0.6553600000004718, + "particle_position_y": 0.6553600000004717, "particle_weight": 3200000000000000.5 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json index c76d7cfc5c5..487d4a89828 100644 --- a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ.json @@ -1,28 +1,28 @@ { "electrons": { - "particle_momentum_x": 9.629015522300135e-20, - "particle_position_x": 2.621440000009873, - "particle_position_y": 2.621440000009873, - "particle_position_z": 2.6214399999999998, + "particle_momentum_x": 9.633869745818886e-20, + "particle_position_x": 2.621440000001177, + "particle_position_y": 2.6214400000011784, + "particle_position_z": 2.6214400000000007, "particle_weight": 128000000000.00002 }, "lev=0": { - "Bx": 79.96476923345703, - "By": 79.96476923350225, - "Bz": 79.96690317049361, - "Ex": 84753887916472.72, - "Ey": 84753887916472.66, - "Ez": 84753877853695.67, - "jx": 6.081254778189634e+16, - "jy": 6.081254778189637e+16, - "jz": 6.081251943036953e+16, + "Bx": 80.96035538655111, + "By": 80.96035538657691, + "Bz": 80.96271445263956, + "Ex": 84777489275096.88, + "Ey": 84777489275096.88, + "Ez": 84777485856239.4, + "jx": 6.08447015360442e+16, + "jy": 6.084470153604425e+16, + "jz": 6.084470085554113e+16, "part_per_cell": 524288.0, - "rho": 703417424.2676101 + "rho": 703546536.8089281 }, "positrons": { - "particle_momentum_z": 9.629011306229332e-20, - "particle_position_x": 2.621440000009873, - "particle_position_y": 2.621440000009873, - "particle_position_z": 2.6214399999999998 + "particle_momentum_z": 9.63386961193585e-20, + "particle_position_x": 2.621440000001177, + "particle_position_y": 2.621440000001179, + "particle_position_z": 2.6214400000000007 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json index 1f89c4dcb14..e97556e5501 100644 --- a/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json +++ b/Regression/Checksum/benchmarks_json/Langmuir_multi_psatd_multiJ_nodal.json @@ -1,28 +1,28 @@ { "electrons": { - "particle_momentum_x": 9.3282651765877e-20, + "particle_momentum_x": 9.320515980761094e-20, "particle_position_x": 2.6214400000000015, "particle_position_y": 2.621440000000001, - "particle_position_z": 2.621440000000001, + "particle_position_z": 2.6214400000000007, "particle_weight": 128000000000.00002 }, "lev=0": { - "Bx": 17.338468210649435, - "By": 17.338468210679473, - "Bz": 17.338468210708463, - "Ex": 86130544037694.12, - "Ey": 86130544037694.16, - "Ez": 86130544037694.16, - "jx": 5.808322546347548e+16, - "jy": 5.80832254634755e+16, - "jz": 5.8083225463475464e+16, + "Bx": 17.319790986722108, + "By": 17.319790986747606, + "Bz": 17.319790986759728, + "Ex": 86079985956930.31, + "Ey": 86079985956930.3, + "Ez": 86079985956930.31, + "jx": 5.8033630197761816e+16, + "jy": 5.803363019776185e+16, + "jz": 5.803363019776182e+16, "part_per_cell": 
524288.0, - "rho": 721143170.1131016 + "rho": 720717041.7116292 }, "positrons": { - "particle_momentum_z": 9.328265176587699e-20, + "particle_momentum_z": 9.320515980761093e-20, "particle_position_x": 2.6214400000000015, "particle_position_y": 2.621440000000001, - "particle_position_z": 2.621440000000001 + "particle_position_z": 2.6214400000000007 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json b/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json index 458117917be..2a962090349 100644 --- a/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json +++ b/Regression/Checksum/benchmarks_json/multi_J_rz_psatd.json @@ -9,31 +9,31 @@ "particle_weight": 6241484108.424456 }, "lev=0": { - "By": 24912.66260587033, - "Ex": 4667306677660.305, - "Ez": 4307437890419.4253, - "jx": 362735950033724.2, - "jz": 1937267340131275.2, - "rho": 5308546.075566203, - "rho_driver": 6288266.101815152, - "rho_plasma_e": 49569864.00850832, - "rho_plasma_p": 50769174.61530346 + "By": 24045.34926330333, + "Ex": 4530500183998.0205, + "Ez": 4297045713383.818, + "jx": 361149490291532.8, + "jz": 1805428826325930.2, + "rho": 4895064.444869195, + "rho_driver": 6288266.101815153, + "rho_plasma_e": 49569825.13450765, + "rho_plasma_p": 50769176.974483095 }, "plasma_e": { - "particle_momentum_x": 6.65811141013141e-20, - "particle_momentum_y": 6.738987045495091e-20, - "particle_momentum_z": 2.846571109435123e-20, - "particle_position_x": 1.1423367495493797, - "particle_position_y": 0.6139715590509269, - "particle_theta": 20188.939948727297, + "particle_momentum_x": 6.649609416554171e-20, + "particle_momentum_y": 6.724424134488497e-20, + "particle_momentum_z": 2.81433599356851e-20, + "particle_position_x": 1.14233458508442, + "particle_position_y": 0.6140029351346352, + "particle_theta": 20188.939948727293, "particle_weight": 1002457942911.3788 }, "plasma_p": { - "particle_momentum_x": 6.640192720118765e-20, - "particle_momentum_y": 6.767588557043428e-20, - "particle_momentum_z": 5.58476124317102e-20, - "particle_position_x": 1.1365201600226575, - "particle_position_y": 0.6152066982817419, + "particle_momentum_x": 6.635739630451454e-20, + "particle_momentum_y": 6.761235868190358e-20, + "particle_momentum_z": 5.475884783890083e-20, + "particle_position_x": 1.1365201524804165, + "particle_position_y": 0.6152066555308389, "particle_theta": 20286.92798337582, "particle_weight": 1002457942911.3788 } diff --git a/Regression/Checksum/benchmarks_json/uniform_plasma_multiJ.json b/Regression/Checksum/benchmarks_json/uniform_plasma_multiJ.json new file mode 100644 index 00000000000..e215428fe45 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/uniform_plasma_multiJ.json @@ -0,0 +1,35 @@ +{ + "electrons": { + "particle_momentum_x": 2.4258140600451355e-21, + "particle_momentum_y": 2.4115944515722047e-21, + "particle_momentum_z": 8.903860561145549e-17, + "particle_position_x": 158433.52093622342, + "particle_position_y": 158432.7590925775, + "particle_position_z": 158433.27723231513, + "particle_weight": 2.041377132710917e+18 + }, + "ions": { + "particle_momentum_x": 1.3150385606388975e-18, + "particle_momentum_y": 1.3042988334363742e-18, + "particle_momentum_z": 1.6348805659682514e-13, + "particle_position_x": 158433.5805512471, + "particle_position_y": 158432.80155515939, + "particle_position_z": 158433.27779468897, + "particle_weight": 2.041377132710917e+18 + }, + "lev=0": { + "Bx": 0.054117593911342535, + "By": 0.053964421360199535, + "Bz": 0.0005552030959216721, + "divE": 
18542936.02490211, + "Ex": 16260950.356941158, + "Ey": 16306834.079553638, + "Ez": 5784750.2181154955, + "F": 2.003020981926516e-02, + "G": 1.349588434641251e+03, + "jx": 984.2101557440694, + "jy": 1030.1973073214422, + "jz": 20803.708075753144, + "rho": 8.4139043087009e-05 + } +} diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index c3b1ad67ad1..57930ea44ba 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -412,7 +412,7 @@ analysisOutputImage = langmuir_multi_analysis.png [Langmuir_multi_psatd_multiJ] buildDir = . inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -431,7 +431,7 @@ analysisOutputImage = Langmuir_multi_psatd_multiJ.png [Langmuir_multi_psatd_multiJ_nodal] buildDir = . inputFile = Examples/Tests/langmuir/inputs_3d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.5773502691896258 algo.current_deposition=direct psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -697,7 +697,7 @@ analysisOutputImage = langmuir_multi_2d_analysis.png [Langmuir_multi_2d_psatd_multiJ] buildDir = . inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON @@ -716,7 +716,7 @@ analysisOutputImage = Langmuir_multi_2d_psatd_multiJ.png [Langmuir_multi_2d_psatd_multiJ_nodal] buildDir = . 
inputFile = Examples/Tests/langmuir/inputs_2d_multi_rt -runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 +runtime_params = algo.maxwell_solver=psatd warpx.cfl=0.7071067811865475 psatd.update_with_rho=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=2 psatd.solution_type=first-order psatd.J_in_time=linear warpx.abort_on_warning_threshold=medium warpx.do_nodal=1 dim = 2 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=2 -DWarpX_PSATD=ON @@ -2517,7 +2517,7 @@ analysisRoutine = Examples/Tests/particle_fields_diags/analysis_particle_diags_s [galilean_2d_psatd] buildDir = . -inputFile = Examples/Tests/galilean/inputs_2d +inputFile = Examples/Tests/nci_psatd_stability/inputs_2d runtime_params = warpx.do_nodal=1 algo.current_deposition=direct psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2531,11 +2531,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_2d_psatd_current_correction_psb] buildDir = . -inputFile = Examples/Tests/galilean/inputs_2d +inputFile = Examples/Tests/nci_psatd_stability/inputs_2d runtime_params = psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2549,11 +2549,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_2d_psatd_current_correction] buildDir = . -inputFile = Examples/Tests/galilean/inputs_2d +inputFile = Examples/Tests/nci_psatd_stability/inputs_2d runtime_params = psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE amr.max_grid_size=64 amr.blocking_factor=64 dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2567,11 +2567,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_2d_psatd_hybrid] buildDir = . -inputFile = Examples/Tests/galilean/inputs_2d_hybrid +inputFile = Examples/Tests/nci_psatd_stability/inputs_2d_hybrid runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2607,7 +2607,7 @@ analysisRoutine = Examples/analysis_default_regression.py [galilean_rz_psatd] buildDir = . 
-inputFile = Examples/Tests/galilean/inputs_rz +inputFile = Examples/Tests/nci_psatd_stability/inputs_rz runtime_params = warpx.do_dynamic_scheduling=0 warpx.serialize_initial_conditions=1 electrons.random_theta=0 ions.random_theta=0 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack @@ -2621,11 +2621,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_rz_psatd_current_correction_psb] buildDir = . -inputFile = Examples/Tests/galilean/inputs_rz +inputFile = Examples/Tests/nci_psatd_stability/inputs_rz runtime_params = psatd.periodic_single_box_fft=1 psatd.current_correction=1 electrons.random_theta=0 ions.random_theta=0 dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack @@ -2639,11 +2639,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_rz_psatd_current_correction] buildDir = . -inputFile = Examples/Tests/galilean/inputs_rz +inputFile = Examples/Tests/nci_psatd_stability/inputs_rz runtime_params = psatd.periodic_single_box_fft=0 psatd.current_correction=1 electrons.random_theta=0 ions.random_theta=0 amr.max_grid_size=32 amr.blocking_factor=32 dim = 2 addToCompileString = USE_RZ=TRUE USE_PSATD=TRUE BLAS_LIB=-lblas LAPACK_LIB=-llapack @@ -2657,12 +2657,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_3d_psatd] buildDir = . -inputFile = Examples/Tests/galilean/inputs_3d -runtime_params = warpx.do_nodal=1 algo.current_deposition=direct psatd.current_correction=0 warpx.abort_on_warning_threshold=medium +inputFile = Examples/Tests/nci_psatd_stability/inputs_3d +runtime_params = psatd.v_galilean=0. 0. 0.99498743710662 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -2675,12 +2675,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_3d_psatd_current_correction_psb] buildDir = . -inputFile = Examples/Tests/galilean/inputs_3d -runtime_params = warpx.numprocs=1 1 1 psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE +inputFile = Examples/Tests/nci_psatd_stability/inputs_3d +runtime_params = psatd.v_galilean=0. 0. 0.99498743710662 warpx.numprocs=1 1 1 psatd.periodic_single_box_fft=1 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -2693,12 +2693,12 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [galilean_3d_psatd_current_correction] buildDir = . 
-inputFile = Examples/Tests/galilean/inputs_3d -runtime_params = warpx.numprocs=1 1 2 psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE +inputFile = Examples/Tests/nci_psatd_stability/inputs_3d +runtime_params = psatd.v_galilean=0. 0. 0.99498743710662 warpx.numprocs=1 1 2 psatd.periodic_single_box_fft=0 psatd.update_with_rho=0 psatd.current_correction=1 diag1.fields_to_plot=Ex Ey Ez Bx By Bz jx jy jz rho divE dim = 3 addToCompileString = USE_PSATD=TRUE cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON @@ -2711,11 +2711,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [averaged_galilean_2d_psatd] buildDir = . -inputFile = Examples/Tests/averaged_galilean/inputs_avg_2d +inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_2d runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2729,11 +2729,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [averaged_galilean_2d_psatd_hybrid] buildDir = . -inputFile = Examples/Tests/averaged_galilean/inputs_avg_2d +inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_2d runtime_params = amr.max_grid_size_x = 128 amr.max_grid_size_y = 64 warpx.do_nodal = 0 algo.field_gathering = momentum-conserving interpolation.field_centering_nox = 8 interpolation.field_centering_noz = 8 warpx.do_current_centering = 1 interpolation.current_centering_nox = 8 interpolation.current_centering_noz = 8 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 2 addToCompileString = USE_PSATD=TRUE @@ -2747,11 +2747,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [averaged_galilean_3d_psatd] buildDir = . -inputFile = Examples/Tests/averaged_galilean/inputs_avg_3d +inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_3d runtime_params = psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE @@ -2765,11 +2765,11 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [averaged_galilean_3d_psatd_hybrid] buildDir = . 
-inputFile = Examples/Tests/averaged_galilean/inputs_avg_3d +inputFile = Examples/Tests/nci_psatd_stability/inputs_avg_3d runtime_params = warpx.do_nodal = 0 algo.field_gathering = momentum-conserving interpolation.field_centering_nox = 8 interpolation.field_centering_noy = 8 interpolation.field_centering_noz = 8 warpx.do_current_centering = 1 interpolation.current_centering_nox = 8 interpolation.current_centering_noy = 8 interpolation.current_centering_noz = 8 psatd.current_correction=0 warpx.abort_on_warning_threshold=medium dim = 3 addToCompileString = USE_PSATD=TRUE @@ -2783,7 +2783,7 @@ compileTest = 0 doVis = 0 compareParticles = 1 particleTypes = electrons ions -analysisRoutine = Examples/Tests/galilean/analysis.py +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_galilean.py [multi_J_rz_psatd] buildDir = . @@ -3748,3 +3748,21 @@ numthreads = 1 compileTest = 0 doVis = 0 analysisRoutine = Examples/Tests/btd_rz/analysis_BTD_laser_antenna.py + +[uniform_plasma_multiJ] +buildDir = . +inputFile = Examples/Tests/nci_psatd_stability/inputs_3d +runtime_params = psatd.solution_type=first-order psatd.J_in_time=constant psatd.rho_in_time=constant warpx.do_dive_cleaning=1 warpx.do_divb_cleaning=1 warpx.do_multi_J=1 warpx.do_multi_J_n_depositions=1 diag1.fields_to_plot=Bx By Bz divE Ex Ey Ez F G jx jy jz rho warpx.abort_on_warning_threshold=medium +dim = 3 +addToCompileString = USE_PSATD=TRUE +cmakeSetupOpts = -DWarpX_DIMS=3 -DWarpX_PSATD=ON +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons ions +analysisRoutine = Examples/Tests/nci_psatd_stability/analysis_multiJ.py diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index a1ce21a9140..e519298cae3 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -133,7 +133,7 @@ public: int ncell, int delta, amrex::IntVect ref_ratio, amrex::Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal, int do_moving_window, int pml_has_particles, int do_pml_in_domain, - const int J_in_time, const int rho_in_time, + const int psatd_solution_type, const int J_in_time, const int rho_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index c7b773123d2..5e03822f120 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -548,7 +548,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri int ncell, int delta, amrex::IntVect ref_ratio, Real dt, int nox_fft, int noy_fft, int noz_fft, bool do_nodal, int do_moving_window, int /*pml_has_particles*/, int do_pml_in_domain, - const int J_in_time, const int rho_in_time, + const int psatd_solution_type, const int J_in_time, const int rho_in_time, const bool do_pml_dive_cleaning, const bool do_pml_divb_cleaning, const amrex::IntVect& fill_guards_fields, const amrex::IntVect& fill_guards_current, @@ -724,7 +724,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { #ifndef WARPX_USE_PSATD - amrex::ignore_unused(lev, dt, J_in_time, rho_in_time); + amrex::ignore_unused(lev, dt, psatd_solution_type, J_in_time, rho_in_time); # if(AMREX_SPACEDIM!=3) amrex::ignore_unused(noy_fft); # endif @@ -745,7 +745,7 
@@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri spectral_solver_fp = std::make_unique(lev, realspace_ba, dm, nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, dx, dt, in_pml, periodic_single_box, update_with_rho, - fft_do_time_averaging, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning); + fft_do_time_averaging, psatd_solution_type, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning); #endif } @@ -852,7 +852,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, const DistributionMapping& gri spectral_solver_cp = std::make_unique(lev, realspace_cba, cdm, nox_fft, noy_fft, noz_fft, do_nodal, v_galilean_zero, v_comoving_zero, cdx, dt, in_pml, periodic_single_box, update_with_rho, - fft_do_time_averaging, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning); + fft_do_time_averaging, psatd_solution_type, J_in_time, rho_in_time, m_dive_cleaning, m_divb_cleaning); #endif } } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index e8e6a025b56..8abfa0e7f9b 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -549,14 +549,14 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // 3) Deposit rho (in rho_new, since it will be moved during the loop) // (after checking that pointer to rho_fp on MR level 0 is not null) - if (rho_fp[0]) + if (rho_fp[0] && rho_in_time == RhoInTime::Linear) { // Deposit rho at relative time -dt // (dt[0] denotes the time step on mesh refinement level 0) mypc->DepositCharge(rho_fp, -dt[0]); // Filter, exchange boundary, and interpolate across levels SyncRho(); - // Forward FFT of rho_new + // Forward FFT of rho PSATDForwardTransformRho(rho_fp, rho_cp, 0, 1); } @@ -587,17 +587,14 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Loop over multi-J depositions for (int i_depose = 0; i_depose < n_loop; i_depose++) { - // Move J deposited previously, from new to old - if (J_in_time == JInTime::Linear) - { - PSATDMoveJNewToJOld(); - } + // Move J from new to old if J is linear in time + if (J_in_time == JInTime::Linear) PSATDMoveJNewToJOld(); const amrex::Real t_depose_current = (J_in_time == JInTime::Linear) ? (i_depose-n_depose+1)*sub_dt : (i_depose-n_depose+0.5_rt)*sub_dt; - // TODO Update this when rho quadratic in time is implemented - const amrex::Real t_depose_charge = (i_depose-n_depose+1)*sub_dt; + const amrex::Real t_depose_charge = (rho_in_time == RhoInTime::Linear) ? 
+ (i_depose-n_depose+1)*sub_dt : (i_depose-n_depose+0.5_rt)*sub_dt; // Deposit new J at relative time t_depose_current with time step dt // (dt[0] denotes the time step on mesh refinement level 0) @@ -616,14 +613,14 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // (after checking that pointer to rho_fp on MR level 0 is not null) if (rho_fp[0]) { - // Move rho deposited previously, from new to old - PSATDMoveRhoNewToRhoOld(); + // Move rho from new to old if rho is linear in time + if (rho_in_time == RhoInTime::Linear) PSATDMoveRhoNewToRhoOld(); // Deposit rho at relative time t_depose_charge mypc->DepositCharge(rho_fp, t_depose_charge); // Filter, exchange boundary, and interpolate across levels SyncRho(); - // Forward FFT of rho_new + // Forward FFT of rho PSATDForwardTransformRho(rho_fp, rho_cp, 0, 1); } diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt index a370d4b2d8d..912ed47c458 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/CMakeLists.txt @@ -1,5 +1,6 @@ target_sources(WarpX PRIVATE + PsatdAlgorithmFirstOrder.cpp PsatdAlgorithmJConstantInTime.cpp PsatdAlgorithmJLinearInTime.cpp PsatdAlgorithmPml.cpp diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package index c798ffb01f5..40f9d0e9a19 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/Make.package @@ -1,4 +1,5 @@ CEXE_sources += SpectralBaseAlgorithm.cpp +CEXE_sources += PsatdAlgorithmFirstOrder.cpp CEXE_sources += PsatdAlgorithmJConstantInTime.cpp CEXE_sources += PsatdAlgorithmJLinearInTime.cpp CEXE_sources += PsatdAlgorithmPml.cpp diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp index 1d6248f8d76..dfd7ffe9261 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmComoving.cpp @@ -103,9 +103,9 @@ PsatdAlgorithmComoving::pushSpectralFields (SpectralFieldData& f) const const Complex Bz_old = fields(i,j,k,Idx.Bz); // Shortcuts for the values of J and rho - const Complex Jx = fields(i,j,k,Idx.Jx); - const Complex Jy = fields(i,j,k,Idx.Jy); - const Complex Jz = fields(i,j,k,Idx.Jz); + const Complex Jx = fields(i,j,k,Idx.Jx_mid); + const Complex Jy = fields(i,j,k,Idx.Jy_mid); + const Complex Jz = fields(i,j,k,Idx.Jz_mid); const Complex rho_old = fields(i,j,k,Idx.rho_old); const Complex rho_new = fields(i,j,k,Idx.rho_new); @@ -447,9 +447,9 @@ void PsatdAlgorithmComoving::CurrentCorrection (SpectralFieldData& field_data) amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { // Shortcuts for the values of J and rho - const Complex Jx = fields(i,j,k,Idx.Jx); - const Complex Jy = fields(i,j,k,Idx.Jy); - const Complex Jz = fields(i,j,k,Idx.Jz); + const Complex Jx = fields(i,j,k,Idx.Jx_mid); + const Complex Jy = fields(i,j,k,Idx.Jy_mid); + const Complex Jz = fields(i,j,k,Idx.Jz_mid); const Complex rho_old = fields(i,j,k,Idx.rho_old); const Complex rho_new = fields(i,j,k,Idx.rho_new); @@ -482,15 +482,15 @@ void PsatdAlgorithmComoving::CurrentCorrection (SpectralFieldData& field_data) const Complex theta = amrex::exp(- 
I * k_dot_v * dt * 0.5_rt); const Complex den = 1._rt - theta * theta; - fields(i,j,k,Idx.Jx) = Jx - (kmod_dot_J + k_dot_v * theta * (rho_new - rho_old) / den) * kx_mod / (knorm_mod * knorm_mod); - fields(i,j,k,Idx.Jy) = Jy - (kmod_dot_J + k_dot_v * theta * (rho_new - rho_old) / den) * ky_mod / (knorm_mod * knorm_mod); - fields(i,j,k,Idx.Jz) = Jz - (kmod_dot_J + k_dot_v * theta * (rho_new - rho_old) / den) * kz_mod / (knorm_mod * knorm_mod); + fields(i,j,k,Idx.Jx_mid) = Jx - (kmod_dot_J + k_dot_v * theta * (rho_new - rho_old) / den) * kx_mod / (knorm_mod * knorm_mod); + fields(i,j,k,Idx.Jy_mid) = Jy - (kmod_dot_J + k_dot_v * theta * (rho_new - rho_old) / den) * ky_mod / (knorm_mod * knorm_mod); + fields(i,j,k,Idx.Jz_mid) = Jz - (kmod_dot_J + k_dot_v * theta * (rho_new - rho_old) / den) * kz_mod / (knorm_mod * knorm_mod); } else { - fields(i,j,k,Idx.Jx) = Jx - (kmod_dot_J - I * (rho_new - rho_old) / dt) * kx_mod / (knorm_mod * knorm_mod); - fields(i,j,k,Idx.Jy) = Jy - (kmod_dot_J - I * (rho_new - rho_old) / dt) * ky_mod / (knorm_mod * knorm_mod); - fields(i,j,k,Idx.Jz) = Jz - (kmod_dot_J - I * (rho_new - rho_old) / dt) * kz_mod / (knorm_mod * knorm_mod); + fields(i,j,k,Idx.Jx_mid) = Jx - (kmod_dot_J - I * (rho_new - rho_old) / dt) * kx_mod / (knorm_mod * knorm_mod); + fields(i,j,k,Idx.Jy_mid) = Jy - (kmod_dot_J - I * (rho_new - rho_old) / dt) * ky_mod / (knorm_mod * knorm_mod); + fields(i,j,k,Idx.Jz_mid) = Jz - (kmod_dot_J - I * (rho_new - rho_old) / dt) * kz_mod / (knorm_mod * knorm_mod); } } }); diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H new file mode 100644 index 00000000000..c90b19e1a59 --- /dev/null +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.H @@ -0,0 +1,100 @@ +/* Copyright 2019 + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_PSATD_ALGORITHM_FIRST_ORDER_H_ +#define WARPX_PSATD_ALGORITHM_FIRST_ORDER_H_ + +#include "FieldSolver/SpectralSolver/SpectralFieldData.H" +#include "FieldSolver/SpectralSolver/SpectralKSpace.H" +#include "SpectralBaseAlgorithm.H" + +#include +#include +#include + +#include + +#include +#include + +#if WARPX_USE_PSATD +/* + * \brief Class that updates the fields in spectral space according to the first-order PSATD equations. 
+ */ +class PsatdAlgorithmFirstOrder : public SpectralBaseAlgorithm +{ + public: + + /** + * \brief Constructor of the class PsatdAlgorithmFirstOrder + * + * \param[in] spectral_kspace spectral space + * \param[in] dm distribution mapping + * \param[in] spectral_index object containing indices to access data in spectral space + * \param[in] norder_x order of the spectral solver along x + * \param[in] norder_y order of the spectral solver along y + * \param[in] norder_z order of the spectral solver along z + * \param[in] nodal whether the E and B fields are defined on a fully nodal grid or a Yee grid + * \param[in] dt time step of the simulation + * \param[in] div_cleaning whether to use divergence correction for both E and B (thus, F and G) + * \param[in] J_in_time time dependency of J (currently supported: constant, linear) + * \param[in] rho_in_time time dependency of rho (currently supported: constant, linear) + */ + PsatdAlgorithmFirstOrder ( + const SpectralKSpace& spectral_kspace, + const amrex::DistributionMapping& dm, + const SpectralFieldIndex& spectral_index, + const int norder_x, + const int norder_y, + const int norder_z, + const bool nodal, + const amrex::Real dt, + const bool div_cleaning, + const int J_in_time, + const int rho_in_time); + + /** + * \brief Updates E, B, F, and G fields in spectral space, + * according to the first-order PSATD equations + * + * \param[in,out] f all the fields in spectral space + */ + virtual void pushSpectralFields (SpectralFieldData& f) const override final; + + /** + * \brief Virtual function for current correction in Fourier space + * ( Vay et al, 2013). + * This function overrides the virtual function \c CurrentCorrection in the + * base class \c SpectralBaseAlgorithm and cannot be overridden by further + * derived classes. + * + * \param[in,out] field_data All fields in Fourier space + */ + virtual void CurrentCorrection (SpectralFieldData& field_data) override final; + + /** + * \brief Virtual function for Vay current deposition in Fourier space + * ( Vay et al, 2013). + * This function overrides the virtual function \c VayDeposition in the + * base class \c SpectralBaseAlgorithm and cannot be overridden by further + * derived classes. + * + * \param[in,out] field_data All fields in Fourier space + */ + virtual void VayDeposition (SpectralFieldData& field_data) override final; + + private: + + SpectralFieldIndex m_spectral_index; + + // Other member variables + amrex::Real m_dt; + bool m_div_cleaning; + int m_J_in_time; + int m_rho_in_time; +}; +#endif // WARPX_USE_PSATD +#endif // WARPX_PSATD_ALGORITHM_FIRST_ORDER_H_ diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp new file mode 100644 index 00000000000..d32604760dc --- /dev/null +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmFirstOrder.cpp @@ -0,0 +1,375 @@ +/* Copyright 2019 + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "PsatdAlgorithmFirstOrder.H" + +#include "Utils/TextMsg.H" +#include "Utils/WarpXAlgorithmSelection.H" +#include "Utils/WarpXConst.H" +#include "Utils/WarpX_Complex.H" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if WARPX_USE_PSATD + +using namespace amrex::literals; + +PsatdAlgorithmFirstOrder::PsatdAlgorithmFirstOrder( + const SpectralKSpace& spectral_kspace, + const amrex::DistributionMapping& dm, + const SpectralFieldIndex& spectral_index, + const int norder_x, + const int norder_y, + const int norder_z, + const bool nodal, + const amrex::Real dt, + const bool div_cleaning, + const int J_in_time, + const int rho_in_time) + // Initializer list + : SpectralBaseAlgorithm(spectral_kspace, dm, spectral_index, norder_x, norder_y, norder_z, nodal), + m_spectral_index(spectral_index), + m_dt(dt), + m_div_cleaning(div_cleaning), + m_J_in_time(J_in_time), + m_rho_in_time(rho_in_time) +{} + +void +PsatdAlgorithmFirstOrder::pushSpectralFields (SpectralFieldData& f) const +{ + const bool div_cleaning = m_div_cleaning; + + const bool J_constant = (m_J_in_time == JInTime::Constant) ? true : false; + const bool J_linear = (m_J_in_time == JInTime::Linear ) ? true : false; + const bool rho_constant = (m_rho_in_time == RhoInTime::Constant) ? true : false; + const bool rho_linear = (m_rho_in_time == RhoInTime::Linear ) ? true : false; + + const amrex::Real dt = m_dt; + const amrex::Real dt2 = dt*dt; + + const SpectralFieldIndex& Idx = m_spectral_index; + + // Loop over boxes + for (amrex::MFIter mfi(f.fields); mfi.isValid(); ++mfi) + { + const amrex::Box& bx = f.fields[mfi].box(); + + // Extract arrays for the fields to be updated + amrex::Array4 fields = f.fields[mfi].array(); + + // Extract pointers for the k vectors + const amrex::Real* modified_kx_arr = modified_kx_vec[mfi].dataPtr(); +#if defined(WARPX_DIM_3D) + const amrex::Real* modified_ky_arr = modified_ky_vec[mfi].dataPtr(); +#endif + const amrex::Real* modified_kz_arr = modified_kz_vec[mfi].dataPtr(); + + // Loop over indices within one box + amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept + { + // Record old values of the fields to be updated + const Complex Ex_old = fields(i,j,k,Idx.Ex); + const Complex Ey_old = fields(i,j,k,Idx.Ey); + const Complex Ez_old = fields(i,j,k,Idx.Ez); + const Complex Bx_old = fields(i,j,k,Idx.Bx); + const Complex By_old = fields(i,j,k,Idx.By); + const Complex Bz_old = fields(i,j,k,Idx.Bz); + + // Shortcuts for the values of J and rho + const Complex Jx_mid = (J_constant) ? fields(i,j,k,Idx.Jx_mid) : 0._rt; + const Complex Jy_mid = (J_constant) ? fields(i,j,k,Idx.Jy_mid) : 0._rt; + const Complex Jz_mid = (J_constant) ? fields(i,j,k,Idx.Jz_mid) : 0._rt; + const Complex Jx_old = (J_linear ) ? fields(i,j,k,Idx.Jx_old) : 0._rt; + const Complex Jy_old = (J_linear ) ? fields(i,j,k,Idx.Jy_old) : 0._rt; + const Complex Jz_old = (J_linear ) ? fields(i,j,k,Idx.Jz_old) : 0._rt; + const Complex Jx_new = (J_linear ) ? fields(i,j,k,Idx.Jx_new) : 0._rt; + const Complex Jy_new = (J_linear ) ? fields(i,j,k,Idx.Jy_new) : 0._rt; + const Complex Jz_new = (J_linear ) ? fields(i,j,k,Idx.Jz_new) : 0._rt; + + const Complex Jx_c0 = (J_constant) ? Jx_mid : Jx_old; + const Complex Jy_c0 = (J_constant) ? Jy_mid : Jy_old; + const Complex Jz_c0 = (J_constant) ? Jz_mid : Jz_old; + const Complex Jx_c1 = (J_linear ) ? (Jx_new-Jx_old)/dt : 0._rt; + const Complex Jy_c1 = (J_linear ) ? 
(Jy_new-Jy_old)/dt : 0._rt; + const Complex Jz_c1 = (J_linear ) ? (Jz_new-Jz_old)/dt : 0._rt; + + Complex rho_mid, rho_old, rho_new, F_old, G_old; + Complex rho_c0, rho_c1; + if (div_cleaning) + { + rho_mid = (rho_constant) ? fields(i,j,k,Idx.rho_mid) : 0._rt; + rho_old = (rho_linear ) ? fields(i,j,k,Idx.rho_old) : 0._rt; + rho_new = (rho_linear ) ? fields(i,j,k,Idx.rho_new) : 0._rt; + + F_old = fields(i,j,k,Idx.F); + G_old = fields(i,j,k,Idx.G); + + rho_c0 = (rho_constant) ? rho_mid : rho_old; + rho_c1 = (rho_linear ) ? (rho_new-rho_old)/dt : 0._rt; + } + + // k vector values + const amrex::Real kx = modified_kx_arr[i]; +#if defined(WARPX_DIM_3D) + const amrex::Real ky = modified_ky_arr[j]; + const amrex::Real kz = modified_kz_arr[k]; +#else + constexpr amrex::Real ky = 0._rt; + const amrex::Real kz = modified_kz_arr[j]; +#endif + // Physical constants and imaginary unit + constexpr amrex::Real c = PhysConst::c; + constexpr amrex::Real c2 = c*c; + constexpr amrex::Real inv_c = 1._rt/c; + constexpr amrex::Real mu0 = PhysConst::mu0; + constexpr Complex I = Complex{0._rt, 1._rt}; + + const amrex::Real kx2 = kx*kx; + const amrex::Real ky2 = ky*ky; + const amrex::Real kz2 = kz*kz; + + const amrex::Real knorm = std::sqrt(kx2 + ky2 + kz2); + const amrex::Real knorm2 = knorm*knorm; + const amrex::Real knorm4 = knorm2*knorm2; + + // Auxiliary variables + const amrex::Real inv_knorm = 1._rt/knorm; + const amrex::Real inv_knorm2 = 1._rt/knorm2; + const amrex::Real inv_knorm4 = 1._rt/knorm4; + + const amrex::Real C = std::cos(c*knorm*dt); + const amrex::Real S = std::sin(c*knorm*dt); + + // Update equations + + if (knorm == 0._rt) + { + fields(i,j,k,Idx.Ex) = Ex_old - mu0*c2*dt*Jx_c0 - 0.5_rt*mu0*c2*dt2*Jx_c1; + fields(i,j,k,Idx.Ey) = Ey_old - mu0*c2*dt*Jy_c0 - 0.5_rt*mu0*c2*dt2*Jy_c1; + fields(i,j,k,Idx.Ez) = Ez_old - mu0*c2*dt*Jz_c0 - 0.5_rt*mu0*c2*dt2*Jz_c1; + + if (div_cleaning) + { + fields(i,j,k,Idx.F) = F_old - mu0*c2*dt*rho_c0 - 0.5_rt*mu0*c2*dt2*rho_c1; + } + } + else // knorm != 0 + { + Complex C01, C02, C03, C04, C05, C06, C07, C08, + C09, C10, C11, C12, C13, C14, C15, C16; + + // Ex + C01 = (div_cleaning) ? C : (kx2+ky2*C+kz2*C)*inv_knorm2; + C02 = (div_cleaning) ? 0._rt : kx*ky*(1._rt-C)*inv_knorm2; + C03 = (div_cleaning) ? 0._rt : kx*kz*(1._rt-C)*inv_knorm2; + C04 = 0._rt; + C05 = -I*c*kz*S*inv_knorm; + C06 = I*c*ky*S*inv_knorm; + C07 = (div_cleaning) ? I*c*kx*S*inv_knorm : 0._rt; + C09 = (div_cleaning) ? -mu0*c*S*inv_knorm : -mu0*c*(dt*c*kx2*knorm2+ky2*knorm*S+kz2*knorm*S)*inv_knorm4; + C10 = (div_cleaning) ? 0._rt : mu0*c*kx*ky*(knorm*S-dt*c*knorm2)*inv_knorm4; + C11 = (div_cleaning) ? 0._rt : mu0*c*kx*kz*(knorm*S-dt*c*knorm2)*inv_knorm4; + C12 = 0._rt; // This is not redundant, do not remove this + if (J_linear) C12 = (div_cleaning) ? mu0*(C-1._rt)*inv_knorm2 : mu0*(2._rt*ky2*(C-1._rt)+2._rt*kz2*(C-1._rt)-dt2*c2*kx2*knorm2)*inv_knorm4*0.5_rt; + C13 = (J_linear && !div_cleaning) ? mu0*kx*ky*(2._rt*(1._rt-C)-dt2*c2*knorm2)*inv_knorm4*0.5_rt : 0._rt; + C14 = (J_linear && !div_cleaning) ? mu0*kx*kz*(2._rt*(1._rt-C)-dt2*c2*knorm2)*inv_knorm4*0.5_rt : 0._rt; + C15 = (div_cleaning) ? I*mu0*c2*kx*(C-1._rt)*inv_knorm2 : 0._rt; + C16 = (div_cleaning && rho_linear) ? 
I*mu0*c*kx*(knorm*S-dt*c*knorm2)*inv_knorm4 : 0._rt; + + fields(i,j,k,Idx.Ex) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C04*Bx_old + C05*By_old + C06*Bz_old + + C07*F_old // only with div cleaning + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1 // only with J linear in time + + C15*rho_c0 // only with div cleaning + + C16*rho_c1; // only with div cleaning and rho linear in time + + // Ey + C01 = (div_cleaning) ? 0._rt : kx*ky*(1._rt-C)*inv_knorm2; + C02 = (div_cleaning) ? C : (kx2*C+ky2+kz2*C)*inv_knorm2; + C03 = (div_cleaning) ? 0._rt : ky*kz*(1._rt-C)*inv_knorm2; + C04 = I*c*kz*S*inv_knorm; + C05 = 0._rt; + C06 = -I*c*kx*S*inv_knorm; + C07 = (div_cleaning) ? I*c*ky*S*inv_knorm : 0._rt; + C09 = (div_cleaning) ? 0._rt : mu0*c*kx*ky*(knorm*S-dt*c*knorm2)*inv_knorm4; + C10 = (div_cleaning) ? -mu0*c*S*inv_knorm : -mu0*c*(dt*c*ky2*knorm2+kx2*knorm*S+kz2*knorm*S)*inv_knorm4; + C11 = (div_cleaning) ? 0._rt : mu0*c*ky*kz*(knorm*S-dt*c*knorm2)*inv_knorm4; + C12 = (J_linear && !div_cleaning) ? mu0*kx*ky*(2._rt*(1._rt-C)-dt2*c2*knorm2)*inv_knorm4*0.5_rt : 0._rt; + C13 = 0._rt; // This is not redundant, do not remove this + if (J_linear) C13 = (div_cleaning) ? mu0*(C-1._rt)*inv_knorm2 : mu0*(2._rt*kx2*(C-1._rt)+2._rt*kz2*(C-1._rt)-dt2*c2*ky2*knorm2)*inv_knorm4*0.5_rt; + C14 = (J_linear && !div_cleaning) ? mu0*ky*kz*(2._rt*(1._rt-C)-dt2*c2*knorm2)*inv_knorm4*0.5_rt : 0._rt; + C15 = (div_cleaning) ? I*mu0*c2*ky*(C-1._rt)*inv_knorm2 : 0._rt; + C16 = (div_cleaning && rho_linear) ? I*mu0*c*ky*(knorm*S-dt*c*knorm2)*inv_knorm4 : 0._rt; + + fields(i,j,k,Idx.Ey) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C04*Bx_old + C05*By_old + C06*Bz_old + + C07*F_old // only with div cleaning + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1 // only with J linear in time + + C15*rho_c0 // only with div cleaning + + C16*rho_c1; // only with div cleaning and rho linear in time + + // Ez + C01 = (div_cleaning) ? 0._rt : kx*kz*(1._rt-C)*inv_knorm2; + C02 = (div_cleaning) ? 0._rt : ky*kz*(1._rt-C)*inv_knorm2; + C03 = (div_cleaning) ? C : (kx2*C+ky2*C+kz2)*inv_knorm2; + C04 = -I*c*ky*S*inv_knorm; + C05 = I*c*kx*S*inv_knorm; + C06 = 0._rt; + C07 = (div_cleaning) ? I*c*kz*S*inv_knorm : 0._rt; + C09 = (div_cleaning) ? 0._rt : mu0*c*kx*kz*(knorm*S-dt*c*knorm2)*inv_knorm4; + C10 = (div_cleaning) ? 0._rt : mu0*c*ky*kz*(knorm*S-dt*c*knorm2)*inv_knorm4; + C11 = (div_cleaning) ? -mu0*c*S*inv_knorm : -mu0*c*(dt*c*kz2*knorm2+kx2*knorm*S+ky2*knorm*S)*inv_knorm4; + C12 = (J_linear && !div_cleaning) ? mu0*kx*kz*(2._rt*(1._rt-C)-dt2*c2*knorm2)*inv_knorm4*0.5_rt : 0._rt; + C13 = (J_linear && !div_cleaning) ? mu0*ky*kz*(2._rt*(1._rt-C)-dt2*c2*knorm2)*inv_knorm4*0.5_rt : 0._rt; + C14 = 0._rt; // This is not redundant, do not remove this + if (J_linear) C14 = (div_cleaning) ? mu0*(C-1._rt)*inv_knorm2 : mu0*(2._rt*kx2*(C-1._rt)+2._rt*ky2*(C-1._rt)-dt2*c2*kz2*knorm2)*inv_knorm4*0.5_rt; + C15 = (div_cleaning) ? I*mu0*c2*kz*(C-1._rt)*inv_knorm2 : 0._rt; + C16 = (div_cleaning && rho_linear) ? 
I*mu0*c*kz*(knorm*S-dt*c*knorm2)*inv_knorm4 : 0._rt; + + fields(i,j,k,Idx.Ez) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C04*Bx_old + C05*By_old + C06*Bz_old + + C07*F_old // only with div cleaning + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1 // only with J linear in time + + C15*rho_c0 // only with div cleaning + + C16*rho_c1; // only with div cleaning and rho linear in time + + // Bx + C01 = 0._rt; + C02 = I*kz*S*inv_knorm*inv_c; + C03 = -I*ky*S*inv_knorm*inv_c; + C04 = (div_cleaning) ? C : (kx2+ky2*C+kz2*C)*inv_knorm2; + C05 = (div_cleaning) ? 0._rt : kx*ky*(1._rt-C)*inv_knorm2; + C06 = (div_cleaning) ? 0._rt : kx*kz*(1._rt-C)*inv_knorm2; + C08 = (div_cleaning) ? I*kx*S*inv_knorm*inv_c : 0._rt; + C09 = 0._rt; + C10 = I*mu0*kz*(C-1._rt)*inv_knorm2; + C11 = -I*mu0*ky*(C-1._rt)*inv_knorm2; + C12 = 0._rt; + C13 = (J_linear) ? I*mu0*kz*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C14 = (J_linear) ? -I*mu0*ky*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + + fields(i,j,k,Idx.Bx) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C04*Bx_old + C05*By_old + C06*Bz_old + + C08*G_old // only with div cleaning + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1; // only with J linear in time + + // By + C01 = -I*kz*S*inv_knorm*inv_c; + C02 = 0._rt; + C03 = I*kx*S*inv_knorm*inv_c; + C04 = (div_cleaning) ? 0._rt : kx*ky*(1._rt-C)*inv_knorm2; + C05 = (div_cleaning) ? C : (kx2*C+ky2+kz2*C)*inv_knorm2; + C06 = (div_cleaning) ? 0._rt : ky*kz*(1._rt-C)*inv_knorm2; + C08 = (div_cleaning) ? I*ky*S*inv_knorm*inv_c : 0._rt; + C09 = -I*mu0*kz*(C-1._rt)*inv_knorm2; + C10 = 0._rt; + C11 = I*mu0*kx*(C-1._rt)*inv_knorm2; + C12 = (J_linear) ? -I*mu0*kz*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C13 = 0._rt; + C14 = (J_linear) ? I*mu0*kx*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + + fields(i,j,k,Idx.By) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C04*Bx_old + C05*By_old + C06*Bz_old + + C08*G_old // only with div cleaning + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1; // only with J linear in time + + // Bz + C01 = I*ky*S*inv_knorm*inv_c; + C02 = -I*kx*S*inv_knorm*inv_c; + C03 = 0._rt; + C04 = (div_cleaning) ? 0._rt : kx*kz*(1._rt-C)*inv_knorm2; + C05 = (div_cleaning) ? 0._rt : ky*kz*(1._rt-C)*inv_knorm2; + C06 = (div_cleaning) ? C : (kx2*C+ky2*C+kz2)*inv_knorm2; + C08 = (div_cleaning) ? I*kz*S*inv_knorm*inv_c : 0._rt; + C09 = I*mu0*ky*(C-1._rt)*inv_knorm2; + C10 = -I*mu0*kx*(C-1._rt)*inv_knorm2; + C11 = 0._rt; + C12 = (J_linear) ? I*mu0*ky*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C13 = (J_linear) ? -I*mu0*kx*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C14 = 0._rt; + + fields(i,j,k,Idx.Bz) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C04*Bx_old + C05*By_old + C06*Bz_old + + C08*G_old // only with div cleaning + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1; // only with J linear in time + + if (div_cleaning) + { + // F + C01 = I*kx*S*inv_knorm*inv_c; + C02 = I*ky*S*inv_knorm*inv_c; + C03 = I*kz*S*inv_knorm*inv_c; + C07 = C; + C09 = I*mu0*kx*(C-1._rt)*inv_knorm2; + C10 = I*mu0*ky*(C-1._rt)*inv_knorm2; + C11 = I*mu0*kz*(C-1._rt)*inv_knorm2; + C12 = (J_linear) ? I*mu0*kx*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C13 = (J_linear) ? I*mu0*ky*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C14 = (J_linear) ? I*mu0*kz*(knorm*S-dt*c*knorm2)*inv_knorm4*inv_c : 0._rt; + C15 = -mu0*c*S*inv_knorm; + C16 = (rho_linear) ? 
mu0*(C-1._rt)*inv_knorm2 : 0._rt; + + fields(i,j,k,Idx.F) = C01*Ex_old + C02*Ey_old + C03*Ez_old + + C07*F_old + + C09*Jx_c0 + C10*Jy_c0 + C11*Jz_c0 + + C12*Jx_c1 + C13*Jy_c1 + C14*Jz_c1 // only with J linear in time + + C15*rho_c0 + + C16*rho_c1; // only with rho linear in time + + // G + C04 = I*c*kx*S*inv_knorm; + C05 = I*c*ky*S*inv_knorm; + C06 = I*c*kz*S*inv_knorm; + C08 = C; + + fields(i,j,k,Idx.G) = C04*Bx_old + C05*By_old + C06*Bz_old + + C08*G_old; + } + } + }); + } +} + +void PsatdAlgorithmFirstOrder::CurrentCorrection (SpectralFieldData& field_data) +{ + // Profiling + BL_PROFILE("PsatdAlgorithmFirstOrder::CurrentCorrection"); + + amrex::ignore_unused(field_data); + amrex::Abort(Utils::TextMsg::Err( + "Current correction not implemented for first-order PSATD equations")); +} + +void +PsatdAlgorithmFirstOrder::VayDeposition (SpectralFieldData& field_data) +{ + // Profiling + BL_PROFILE("PsatdAlgorithmFirstOrder::VayDeposition()"); + + amrex::ignore_unused(field_data); + amrex::Abort(Utils::TextMsg::Err( + "Vay deposition not implemented for first-order PSATD equations")); +} + +#endif // WARPX_USE_PSATD diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp index be850e25296..af3e2e33614 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmGalileanRZ.cpp @@ -103,9 +103,9 @@ PsatdAlgorithmGalileanRZ::pushSpectralFields (SpectralFieldDataRZ & f) auto const Bp_m = Idx.Bx + Idx.n_fields*mode; auto const Bm_m = Idx.By + Idx.n_fields*mode; auto const Bz_m = Idx.Bz + Idx.n_fields*mode; - auto const Jp_m = Idx.Jx + Idx.n_fields*mode; - auto const Jm_m = Idx.Jy + Idx.n_fields*mode; - auto const Jz_m = Idx.Jz + Idx.n_fields*mode; + auto const Jp_m = Idx.Jx_mid + Idx.n_fields*mode; + auto const Jm_m = Idx.Jy_mid + Idx.n_fields*mode; + auto const Jz_m = Idx.Jz_mid + Idx.n_fields*mode; auto const rho_old_m = Idx.rho_old + Idx.n_fields*mode; auto const rho_new_m = Idx.rho_new + Idx.n_fields*mode; @@ -323,9 +323,9 @@ PsatdAlgorithmGalileanRZ::CurrentCorrection (SpectralFieldDataRZ& field_data) [=] AMREX_GPU_DEVICE(int i, int j, int k, int mode) noexcept { // All of the fields of each mode are grouped together - auto const Jp_m = Idx.Jx + Idx.n_fields*mode; - auto const Jm_m = Idx.Jy + Idx.n_fields*mode; - auto const Jz_m = Idx.Jz + Idx.n_fields*mode; + auto const Jp_m = Idx.Jx_mid + Idx.n_fields*mode; + auto const Jm_m = Idx.Jy_mid + Idx.n_fields*mode; + auto const Jz_m = Idx.Jz_mid + Idx.n_fields*mode; auto const rho_old_m = Idx.rho_old + Idx.n_fields*mode; auto const rho_new_m = Idx.rho_new + Idx.n_fields*mode; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp index 9d1fbf3e158..c52437bf877 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJConstantInTime.cpp @@ -190,9 +190,9 @@ PsatdAlgorithmJConstantInTime::pushSpectralFields (SpectralFieldData& f) const const Complex Bz_old = fields(i,j,k,Idx.Bz); // Shortcuts for the values of J - const Complex Jx = fields(i,j,k,Idx.Jx); - const Complex Jy = fields(i,j,k,Idx.Jy); - const Complex Jz = fields(i,j,k,Idx.Jz); + const Complex Jx = 
fields(i,j,k,Idx.Jx_mid); + const Complex Jy = fields(i,j,k,Idx.Jy_mid); + const Complex Jz = fields(i,j,k,Idx.Jz_mid); Complex F_old; if (dive_cleaning) @@ -751,9 +751,9 @@ void PsatdAlgorithmJConstantInTime::CurrentCorrection (SpectralFieldData& field_ ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { // Shortcuts for the values of J and rho - const Complex Jx = fields(i,j,k,Idx.Jx); - const Complex Jy = fields(i,j,k,Idx.Jy); - const Complex Jz = fields(i,j,k,Idx.Jz); + const Complex Jx = fields(i,j,k,Idx.Jx_mid); + const Complex Jy = fields(i,j,k,Idx.Jy_mid); + const Complex Jz = fields(i,j,k,Idx.Jz_mid); const Complex rho_old = fields(i,j,k,Idx.rho_old); const Complex rho_new = fields(i,j,k,Idx.rho_new); @@ -787,25 +787,25 @@ void PsatdAlgorithmJConstantInTime::CurrentCorrection (SpectralFieldData& field_ const Complex rho_old_mod = rho_old * amrex::exp(I * k_dot_vg * dt); const Complex den = 1._rt - amrex::exp(I * k_dot_vg * dt); - fields(i,j,k,Idx.Jx) = Jx - (k_dot_J - k_dot_vg * (rho_new - rho_old_mod) / den) + fields(i,j,k,Idx.Jx_mid) = Jx - (k_dot_J - k_dot_vg * (rho_new - rho_old_mod) / den) * kx / (k_norm * k_norm); - fields(i,j,k,Idx.Jy) = Jy - (k_dot_J - k_dot_vg * (rho_new - rho_old_mod) / den) + fields(i,j,k,Idx.Jy_mid) = Jy - (k_dot_J - k_dot_vg * (rho_new - rho_old_mod) / den) * ky / (k_norm * k_norm); - fields(i,j,k,Idx.Jz) = Jz - (k_dot_J - k_dot_vg * (rho_new - rho_old_mod) / den) + fields(i,j,k,Idx.Jz_mid) = Jz - (k_dot_J - k_dot_vg * (rho_new - rho_old_mod) / den) * kz / (k_norm * k_norm); } else { - fields(i,j,k,Idx.Jx) = Jx - (k_dot_J - I * (rho_new - rho_old) / dt) + fields(i,j,k,Idx.Jx_mid) = Jx - (k_dot_J - I * (rho_new - rho_old) / dt) * kx / (k_norm * k_norm); - fields(i,j,k,Idx.Jy) = Jy - (k_dot_J - I * (rho_new - rho_old) / dt) + fields(i,j,k,Idx.Jy_mid) = Jy - (k_dot_J - I * (rho_new - rho_old) / dt) * ky / (k_norm * k_norm); - fields(i,j,k,Idx.Jz) = Jz - (k_dot_J - I * (rho_new - rho_old) / dt) + fields(i,j,k,Idx.Jz_mid) = Jz - (k_dot_J - I * (rho_new - rho_old) / dt) * kz / (k_norm * k_norm); } } @@ -840,11 +840,11 @@ PsatdAlgorithmJConstantInTime::VayDeposition (SpectralFieldData& field_data) ParallelFor(bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept { // Shortcuts for the values of D - const Complex Dx = fields(i,j,k,Idx.Jx); + const Complex Dx = fields(i,j,k,Idx.Jx_mid); #if defined(WARPX_DIM_3D) - const Complex Dy = fields(i,j,k,Idx.Jy); + const Complex Dy = fields(i,j,k,Idx.Jy_mid); #endif - const Complex Dz = fields(i,j,k,Idx.Jz); + const Complex Dz = fields(i,j,k,Idx.Jz_mid); // Imaginary unit constexpr Complex I = Complex{0._rt, 1._rt}; @@ -859,18 +859,18 @@ PsatdAlgorithmJConstantInTime::VayDeposition (SpectralFieldData& field_data) #endif // Compute Jx - if (kx_mod != 0._rt) fields(i,j,k,Idx.Jx) = I * Dx / kx_mod; - else fields(i,j,k,Idx.Jx) = 0._rt; + if (kx_mod != 0._rt) fields(i,j,k,Idx.Jx_mid) = I * Dx / kx_mod; + else fields(i,j,k,Idx.Jx_mid) = 0._rt; #if defined(WARPX_DIM_3D) // Compute Jy - if (ky_mod != 0._rt) fields(i,j,k,Idx.Jy) = I * Dy / ky_mod; - else fields(i,j,k,Idx.Jy) = 0._rt; + if (ky_mod != 0._rt) fields(i,j,k,Idx.Jy_mid) = I * Dy / ky_mod; + else fields(i,j,k,Idx.Jy_mid) = 0._rt; #endif // Compute Jz - if (kz_mod != 0._rt) fields(i,j,k,Idx.Jz) = I * Dz / kz_mod; - else fields(i,j,k,Idx.Jz) = 0._rt; + if (kz_mod != 0._rt) fields(i,j,k,Idx.Jz_mid) = I * Dz / kz_mod; + else fields(i,j,k,Idx.Jz_mid) = 0._rt; }); } } diff --git 
a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp index bd9df977e0b..861001c7825 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmJLinearInTime.cpp @@ -120,9 +120,9 @@ PsatdAlgorithmJLinearInTime::pushSpectralFields (SpectralFieldData& f) const const Complex Bz_old = fields(i,j,k,Idx.Bz); // Shortcuts for the values of J and rho - const Complex Jx_old = fields(i,j,k,Idx.Jx); - const Complex Jy_old = fields(i,j,k,Idx.Jy); - const Complex Jz_old = fields(i,j,k,Idx.Jz); + const Complex Jx_old = fields(i,j,k,Idx.Jx_old); + const Complex Jy_old = fields(i,j,k,Idx.Jy_old); + const Complex Jz_old = fields(i,j,k,Idx.Jz_old); const Complex Jx_new = fields(i,j,k,Idx.Jx_new); const Complex Jy_new = fields(i,j,k,Idx.Jy_new); const Complex Jz_new = fields(i,j,k,Idx.Jz_new); diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp index 55b58821ce9..effb1cc2b31 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/PsatdAlgorithmRZ.cpp @@ -83,7 +83,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) const bool update_with_rho = m_update_with_rho; const bool time_averaging = m_time_averaging; - const bool J_in_time_linear = (m_J_in_time == JInTime::Linear) ? true : false; + const bool J_linear = (m_J_in_time == JInTime::Linear) ? true : false; const bool dive_cleaning = m_dive_cleaning; const bool divb_cleaning = m_divb_cleaning; @@ -112,7 +112,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) amrex::Array4 X5_arr; amrex::Array4 X6_arr; - if (time_averaging && J_in_time_linear) + if (time_averaging && J_linear) { X5_arr = X5_coef[mfi].array(); X6_arr = X6_coef[mfi].array(); @@ -131,6 +131,9 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) amrex::ParallelFor(bx, modes, [=] AMREX_GPU_DEVICE(int i, int j, int k, int mode) noexcept { + int idx_jx = (J_linear) ? static_cast(Idx.Jx_old) : static_cast(Idx.Jx_mid); + int idx_jy = (J_linear) ? static_cast(Idx.Jy_old) : static_cast(Idx.Jy_mid); + int idx_jz = (J_linear) ? 
static_cast(Idx.Jz_old) : static_cast(Idx.Jz_mid); // All of the fields of each mode are grouped together int const Ep_m = Idx.Ex + Idx.n_fields*mode; @@ -139,9 +142,9 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) int const Bp_m = Idx.Bx + Idx.n_fields*mode; int const Bm_m = Idx.By + Idx.n_fields*mode; int const Bz_m = Idx.Bz + Idx.n_fields*mode; - int const Jp_m = Idx.Jx + Idx.n_fields*mode; - int const Jm_m = Idx.Jy + Idx.n_fields*mode; - int const Jz_m = Idx.Jz + Idx.n_fields*mode; + int const Jp_m = idx_jx + Idx.n_fields*mode; + int const Jm_m = idx_jy + Idx.n_fields*mode; + int const Jz_m = idx_jz + Idx.n_fields*mode; int const rho_old_m = Idx.rho_old + Idx.n_fields*mode; int const rho_new_m = Idx.rho_new + Idx.n_fields*mode; @@ -238,7 +241,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) G_old = fields(i,j,k,G_m); } - if (J_in_time_linear) + if (J_linear) { const int Jp_m_new = Idx.Jx_new + Idx.n_fields*mode; const int Jm_m_new = Idx.Jy_new + Idx.n_fields*mode; @@ -335,7 +338,7 @@ PsatdAlgorithmRZ::pushSpectralFields(SpectralFieldDataRZ & f) void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const & f) { const bool time_averaging = m_time_averaging; - const bool J_in_time_linear = (m_J_in_time == JInTime::Linear) ? true : false; + const bool J_linear = (m_J_in_time == JInTime::Linear) ? true : false; // Fill them with the right values: // Loop over boxes and allocate the corresponding coefficients @@ -356,7 +359,7 @@ void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const amrex::Array4 X5; amrex::Array4 X6; - if (time_averaging && J_in_time_linear) + if (time_averaging && J_linear) { X5 = X5_coef[mfi].array(); X6 = X6_coef[mfi].array(); @@ -395,7 +398,7 @@ void PsatdAlgorithmRZ::InitializeSpectralCoefficients (SpectralFieldDataRZ const X3(i,j,k,mode) = - c*c * dt*dt / (3._rt*ep0); } - if (time_averaging && J_in_time_linear) + if (time_averaging && J_linear) { constexpr amrex::Real c2 = PhysConst::c; const amrex::Real dt3 = dt * dt * dt; @@ -450,9 +453,9 @@ PsatdAlgorithmRZ::CurrentCorrection (SpectralFieldDataRZ& field_data) [=] AMREX_GPU_DEVICE(int i, int j, int k, int mode) noexcept { // All of the fields of each mode are grouped together - auto const Jp_m = Idx.Jx + Idx.n_fields*mode; - auto const Jm_m = Idx.Jy + Idx.n_fields*mode; - auto const Jz_m = Idx.Jz + Idx.n_fields*mode; + auto const Jp_m = Idx.Jx_mid + Idx.n_fields*mode; + auto const Jm_m = Idx.Jy_mid + Idx.n_fields*mode; + auto const Jz_m = Idx.Jz_mid + Idx.n_fields*mode; auto const rho_old_m = Idx.rho_old + Idx.n_fields*mode; auto const rho_new_m = Idx.rho_new + Idx.n_fields*mode; diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index 4ab88f1a378..c7848d73120 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -83,18 +83,19 @@ class SpectralFieldIndex // Always int Ex = -1, Ey = -1, Ez = -1; int Bx = -1, By = -1, Bz = -1; - int Jx = -1, Jy = -1, Jz = -1; - int rho_old = -1, rho_new = -1, divE = -1; + int divE = -1; // Time averaging int Ex_avg = -1, Ey_avg = -1, Ez_avg = -1; int Bx_avg = -1, By_avg = -1, Bz_avg = -1; - // J linear in time + // J + int Jx_old = -1, Jy_old = -1, Jz_old = -1; + int Jx_mid = -1, Jy_mid = -1, Jz_mid = -1; int Jx_new = -1, Jy_new = -1, Jz_new = -1; - // rho quadratic in time - int rho_mid = -1; + // rho + int rho_old = -1, rho_mid = -1, rho_new = -1; // 
div(E) and div(B) cleaning int F = -1, G = -1; diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp index 56189e0ff06..e1db4e5abab 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.cpp @@ -51,10 +51,8 @@ SpectralFieldIndex::SpectralFieldIndex (const bool update_with_rho, { Ex = c++; Ey = c++; Ez = c++; Bx = c++; By = c++; Bz = c++; - Jx = c++; Jy = c++; Jz = c++; // TODO Allocate rho_old and rho_new only when needed - rho_old = c++; rho_new = c++; // Reuse data corresponding to index Bx = 3 to avoid storing extra memory divE = 3; @@ -69,17 +67,25 @@ SpectralFieldIndex::SpectralFieldIndex (const bool update_with_rho, if (divb_cleaning) G = c++; - if (J_in_time == JInTime::Linear) + if (J_in_time == JInTime::Constant) { - Jx_new = c++; - Jy_new = c++; - Jz_new = c++; + Jx_mid = c++; Jy_mid = c++; Jz_mid = c++; + } + else if (J_in_time == JInTime::Linear) + { + Jx_old = c++; Jy_old = c++; Jz_old = c++; + Jx_new = c++; Jy_new = c++; Jz_new = c++; } - if (rho_in_time == RhoInTime::Quadratic) + if (rho_in_time == RhoInTime::Constant) { rho_mid = c++; } + else if (rho_in_time == RhoInTime::Linear) + { + rho_old = c++; + rho_new = c++; + } if (pml_rz) { diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index 38b2420105a..da4b9687b86 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -58,6 +58,8 @@ class SpectralSolver * (no domain decomposition) * \param[in] update_with_rho whether rho is used in the field update equations * \param[in] fft_do_time_averaging whether the time averaging algorithm is used + * \param[in] psatd_solution_type whether the PSATD equations are derived + * from a first-order or second-order model * \param[in] J_in_time integer that corresponds to the time dependency of J * (constant, linear) for the PSATD algorithm * \param[in] rho_in_time integer that corresponds to the time dependency of rho @@ -80,6 +82,7 @@ class SpectralSolver const bool periodic_single_box, const bool update_with_rho, const bool fft_do_time_averaging, + const int psatd_solution_type, const int J_in_time, const int rho_in_time, const bool dive_cleaning, diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp index c0d7b89412b..f29a0bd0325 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp @@ -8,10 +8,12 @@ #include "FieldSolver/SpectralSolver/SpectralFieldData.H" #include "SpectralAlgorithms/PsatdAlgorithmComoving.H" #include "SpectralAlgorithms/PsatdAlgorithmPml.H" +#include "SpectralAlgorithms/PsatdAlgorithmFirstOrder.H" #include "SpectralAlgorithms/PsatdAlgorithmJConstantInTime.H" #include "SpectralAlgorithms/PsatdAlgorithmJLinearInTime.H" #include "SpectralKSpace.H" #include "SpectralSolver.H" +#include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" #include @@ -30,6 +32,7 @@ SpectralSolver::SpectralSolver( const bool pml, const bool periodic_single_box, const bool update_with_rho, const bool fft_do_time_averaging, + const int psatd_solution_type, const int J_in_time, const int rho_in_time, const bool dive_cleaning, @@ -49,13 +52,13 @@ SpectralSolver::SpectralSolver( // - Select the algorithm depending on the input parameters // Initialize the corresponding 
coefficients over k space - if (pml) // PSATD equations in the PML grids + if (pml) // PSATD equations in the PML region { algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, dt, dive_cleaning, divb_cleaning); } - else // PSATD equations in the regulard grids + else // PSATD equations in the regular domain { // Comoving PSATD algorithm if (v_comoving[0] != 0. || v_comoving[1] != 0. || v_comoving[2] != 0.) @@ -64,7 +67,31 @@ SpectralSolver::SpectralSolver( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, v_comoving, dt, update_with_rho); } - else // PSATD algorithms: standard, Galilean, averaged Galilean, multi-J + // Galilean PSATD algorithm (only J constant in time) + else if (v_galilean[0] != 0. || v_galilean[1] != 0. || v_galilean[2] != 0.) + { + algorithm = std::make_unique( + k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, + v_galilean, dt, update_with_rho, fft_do_time_averaging, + dive_cleaning, divb_cleaning); + } + else if (psatd_solution_type == PSATDSolutionType::FirstOrder) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !fft_do_time_averaging, + "psatd.do_time_averaging=1 not supported when psatd.solution_type=first-order"); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (!dive_cleaning && !divb_cleaning) || (dive_cleaning && divb_cleaning), + "warpx.do_dive_cleaning and warpx.do_divb_cleaning must be equal when psatd.solution_type=first-order"); + + const bool div_cleaning = (dive_cleaning && divb_cleaning); + + algorithm = std::make_unique( + k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, + dt, div_cleaning, J_in_time, rho_in_time); + } + else if (psatd_solution_type == PSATDSolutionType::SecondOrder) { if (J_in_time == JInTime::Constant) { @@ -73,7 +100,7 @@ SpectralSolver::SpectralSolver( v_galilean, dt, update_with_rho, fft_do_time_averaging, dive_cleaning, divb_cleaning); } - else // J linear in time + else if (J_in_time == JInTime::Linear) { algorithm = std::make_unique( k_space, dm, m_spectral_index, norder_x, norder_y, norder_z, nodal, diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 3d68e8e52ed..9df4bb21b29 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -280,9 +280,9 @@ void WarpX::PSATDForwardTransformJ ( { Idx = spectral_solver_fp[lev]->m_spectral_index; - idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx); - idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy); - idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz); + idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx_mid); + idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); + idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); ForwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], idx_jx, idx_jy, idx_jz); @@ -290,9 +290,9 @@ void WarpX::PSATDForwardTransformJ ( { Idx = spectral_solver_cp[lev]->m_spectral_index; - idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx); - idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy); - idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz); + idx_jx = (J_in_time == JInTime::Linear) ? 
static_cast(Idx.Jx_new) : static_cast(Idx.Jx_mid); + idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); + idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); ForwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], idx_jx, idx_jy, idx_jz); } @@ -304,11 +304,23 @@ void WarpX::PSATDForwardTransformJ ( { for (int lev = 0; lev <= finest_level; ++lev) { - spectral_solver_fp[lev]->ApplyFilter(lev, Idx.Jx, Idx.Jy, Idx.Jz); + Idx = spectral_solver_fp[lev]->m_spectral_index; + + idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx_mid); + idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); + idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); + + spectral_solver_fp[lev]->ApplyFilter(lev, idx_jx, idx_jy, idx_jz); if (spectral_solver_cp[lev]) { - spectral_solver_cp[lev]->ApplyFilter(lev, Idx.Jx, Idx.Jy, Idx.Jz); + Idx = spectral_solver_cp[lev]->m_spectral_index; + + idx_jx = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jx_new) : static_cast(Idx.Jx_mid); + idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); + idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); + + spectral_solver_cp[lev]->ApplyFilter(lev, idx_jx, idx_jy, idx_jz); } } } @@ -328,9 +340,11 @@ void WarpX::PSATDBackwardTransformJ ( { Idx = spectral_solver_fp[lev]->m_spectral_index; - idx_jx = static_cast(Idx.Jx); - idx_jy = static_cast(Idx.Jy); - idx_jz = static_cast(Idx.Jz); + // Note that these backward FFTs are currently called only + // with algorithms that do not support J linear in time + idx_jx = static_cast(Idx.Jx_mid); + idx_jy = static_cast(Idx.Jy_mid); + idx_jz = static_cast(Idx.Jz_mid); BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], idx_jx, idx_jy, idx_jz, m_fill_guards_current); @@ -339,9 +353,11 @@ void WarpX::PSATDBackwardTransformJ ( { Idx = spectral_solver_cp[lev]->m_spectral_index; - idx_jx = static_cast(Idx.Jx); - idx_jy = static_cast(Idx.Jy); - idx_jz = static_cast(Idx.Jz); + // Note that these backward FFTs are currently called only + // with algorithms that do not support J linear in time + idx_jx = static_cast(Idx.Jx_mid); + idx_jy = static_cast(Idx.Jy_mid); + idx_jz = static_cast(Idx.Jz_mid); BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], idx_jx, idx_jy, idx_jz, m_fill_guards_current); @@ -359,7 +375,15 @@ void WarpX::PSATDForwardTransformRho ( const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; // Select index in k space - const int dst_comp = (dcomp == 0) ? Idx.rho_old : Idx.rho_new; + int dst_comp; + if (rho_in_time == RhoInTime::Constant) + { + dst_comp = Idx.rho_mid; + } + else // rho_in_time == RhoInTime::Linear + { + dst_comp = (dcomp == 0) ? 
Idx.rho_old : Idx.rho_new; + } for (int lev = 0; lev <= finest_level; ++lev) { @@ -568,15 +592,15 @@ WarpX::PSATDMoveJNewToJOld () for (int lev = 0; lev <= finest_level; ++lev) { - spectral_solver_fp[lev]->CopySpectralDataComp(Idx.Jx_new, Idx.Jx); - spectral_solver_fp[lev]->CopySpectralDataComp(Idx.Jy_new, Idx.Jy); - spectral_solver_fp[lev]->CopySpectralDataComp(Idx.Jz_new, Idx.Jz); + spectral_solver_fp[lev]->CopySpectralDataComp(Idx.Jx_new, Idx.Jx_old); + spectral_solver_fp[lev]->CopySpectralDataComp(Idx.Jy_new, Idx.Jy_old); + spectral_solver_fp[lev]->CopySpectralDataComp(Idx.Jz_new, Idx.Jz_old); if (spectral_solver_cp[lev]) { - spectral_solver_cp[lev]->CopySpectralDataComp(Idx.Jx_new, Idx.Jx); - spectral_solver_cp[lev]->CopySpectralDataComp(Idx.Jy_new, Idx.Jy); - spectral_solver_cp[lev]->CopySpectralDataComp(Idx.Jz_new, Idx.Jz); + spectral_solver_cp[lev]->CopySpectralDataComp(Idx.Jx_new, Idx.Jx_old); + spectral_solver_cp[lev]->CopySpectralDataComp(Idx.Jy_new, Idx.Jy_old); + spectral_solver_cp[lev]->CopySpectralDataComp(Idx.Jz_new, Idx.Jz_old); } } } diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 0d3e919a040..32cd11dfd3c 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -480,7 +480,7 @@ WarpX::InitPML () pml_ncell, pml_delta, amrex::IntVect::TheZeroVector(), dt[0], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, - J_in_time, rho_in_time, + psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), guard_cells.ng_FieldSolver.max(), @@ -517,7 +517,7 @@ WarpX::InitPML () pml_ncell, pml_delta, refRatio(lev-1), dt[lev], nox_fft, noy_fft, noz_fft, do_nodal, do_moving_window, pml_has_particles, do_pml_in_domain, - J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, + psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), guard_cells.ng_FieldSolver.max(), v_particle_pml, diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index c3160cfad60..936fd3d2b55 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -84,6 +84,13 @@ struct GatheringAlgo { }; }; +struct PSATDSolutionType { + enum { + FirstOrder = 0, + SecondOrder = 1 + }; +}; + struct JInTime { enum { Constant = 0, @@ -93,8 +100,8 @@ struct JInTime { struct RhoInTime { enum { - Linear = 1, - Quadratic = 2 + Constant = 0, + Linear = 1 }; }; diff --git a/Source/Utils/WarpXAlgorithmSelection.cpp b/Source/Utils/WarpXAlgorithmSelection.cpp index b99459b465c..c5ef16743c1 100644 --- a/Source/Utils/WarpXAlgorithmSelection.cpp +++ b/Source/Utils/WarpXAlgorithmSelection.cpp @@ -63,6 +63,12 @@ const std::map gathering_algo_to_int = { {"default", GatheringAlgo::EnergyConserving } }; +const std::map psatd_solution_type_to_int = { + {"first-order", PSATDSolutionType::FirstOrder}, + {"second-order", PSATDSolutionType::SecondOrder}, + {"default", PSATDSolutionType::SecondOrder} +}; + const std::map J_in_time_to_int = { {"constant", JInTime::Constant}, {"linear", JInTime::Linear}, @@ -70,8 +76,8 @@ const std::map J_in_time_to_int = { }; const std::map rho_in_time_to_int = { + {"constant", RhoInTime::Constant}, {"linear", RhoInTime::Linear}, - {"quadratic", RhoInTime::Quadratic}, {"default", RhoInTime::Linear} }; @@ -145,6 +151,8 @@ GetAlgorithmInteger( amrex::ParmParse& pp, 
const char* pp_search_key ){ algo_to_int = charge_deposition_algo_to_int; } else if (0 == std::strcmp(pp_search_key, "field_gathering")) { algo_to_int = gathering_algo_to_int; + } else if (0 == std::strcmp(pp_search_key, "solution_type")) { + algo_to_int = psatd_solution_type_to_int; } else if (0 == std::strcmp(pp_search_key, "J_in_time")) { algo_to_int = J_in_time_to_int; } else if (0 == std::strcmp(pp_search_key, "rho_in_time")) { diff --git a/Source/WarpX.H b/Source/WarpX.H index 20d4e4210d6..2d1124c06e7 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -187,6 +187,11 @@ public: */ static amrex::Vector particle_boundary_hi; + //! Integer that corresponds to the order of the PSATD solution + //! (whether the PSATD equations are derived from first-order or + //! second-order solution) + static short psatd_solution_type; + //! Integers that correspond to the time dependency of J (constant, linear) //! and rho (linear, quadratic) for the PSATD algorithm static short J_in_time; @@ -1639,7 +1644,7 @@ private: const int icomp, const int dcomp, const bool apply_kspace_filter=true); /** - * \brief Copy rho_new to rho_old in spectral space + * \brief Copy rho_new to rho_old in spectral space (when rho is linear in time) */ void PSATDMoveRhoNewToRhoOld (); diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 5793874334a..e5119708941 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -120,6 +120,7 @@ short WarpX::charge_deposition_algo; short WarpX::field_gathering_algo; short WarpX::particle_pusher_algo; short WarpX::electromagnetic_solver_id; +short WarpX::psatd_solution_type; short WarpX::J_in_time; short WarpX::rho_in_time; short WarpX::load_balance_costs_update_algo; @@ -1148,6 +1149,11 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE(noz_fft > 0, "PSATD order must be finite unless psatd.periodic_single_box_fft is used"); } + // Integer that corresponds to the order of the PSATD solution + // (whether the PSATD equations are derived from first-order or + // second-order solution) + psatd_solution_type = GetAlgorithmInteger(pp_psatd, "solution_type"); + // Integers that correspond to the time dependency of J (constant, linear) // and rho (linear, quadratic) for the PSATD algorithm J_in_time = GetAlgorithmInteger(pp_psatd, "J_in_time"); @@ -1311,13 +1317,6 @@ WarpX::ReadParameters () ); } - if (J_in_time == JInTime::Constant) - { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - rho_in_time == RhoInTime::Linear, - "psatd.J_in_time=constant supports only psatd.rho_in_time=linear"); - } - if (J_in_time == JInTime::Linear) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -1331,6 +1330,14 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE( v_comoving_is_zero, "psatd.J_in_time=linear not implemented with comoving PSATD"); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !current_correction, + "psatd.current_correction=1 not implemented with psatd.J_in_time=linear"); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + current_deposition_algo != CurrentDepositionAlgo::Vay, + "algo.current_deposition=vay not implemented with psatd.J_in_time=linear"); } for (int dir = 0; dir < AMREX_SPACEDIM; dir++) @@ -2334,6 +2341,7 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector Date: Thu, 8 Dec 2022 11:37:28 -0800 Subject: [PATCH 0184/1346] Highlights & References: Gordon Bell (#3552) * Highlights & References: Gordon-Bell Add our award-winning Gordon Bell paper in documentation :) * Update Landing Pages --- Docs/source/acknowledge_us.rst | 12 +++++++++--- Docs/source/highlights.rst | 10 ++++++++++ Docs/source/index.rst | 
3 ++- README.md | 5 ++++- 4 files changed, 25 insertions(+), 5 deletions(-) diff --git a/Docs/source/acknowledge_us.rst b/Docs/source/acknowledge_us.rst index 3101d81637e..49df08eb7fb 100644 --- a/Docs/source/acknowledge_us.rst +++ b/Docs/source/acknowledge_us.rst @@ -41,9 +41,10 @@ Latest WarpX reference If your project leads to a scientific publication, please consider citing the paper below. -- Myers A, Almgren A, Amorim LD, Bell J, Fedeli L, Ge L, Gott K, Grote DP, Hogan M, Huebl A, Jambunathan R, Lehe R, Ng C, Rowan M, Shapoval O, Thevenet M, Vay JL, Vincenti H, Yang E, Zaim N, Zhang W, Zhao Y, Zoni E. - **Porting WarpX to GPU-accelerated platforms**. *Parallel Computing*. 2021 Sep, 108:102833. - https://doi.org/10.1016/j.parco.2021.102833 +- Fedeli L, Huebl A, Boillod-Cerneux F, Clark T, Gott K, Hillairet C, Jaure S, Leblanc A, Lehe R, Myers A, Piechurski C, Sato M, Zaim N, Zhang W, Vay J-L, Vincenti H. + **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. + *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. + https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc .. _acknowledge_warpx_all_refs: @@ -66,6 +67,11 @@ If your project uses the specific algorithms, please consider citing the respect **A hybrid nodal-staggered pseudo-spectral electromagnetic particle-in-cell method with finite-order centering**. *Computer Physics Communications* **279**, 2022. `DOI:10.1016/j.cpc.2022.108457 `__ +- Myers A, Almgren A, Amorim LD, Bell J, Fedeli L, Ge L, Gott K, Grote DP, Hogan M, Huebl A, Jambunathan R, Lehe R, Ng C, Rowan M, Shapoval O, Thevenet M, Vay JL, Vincenti H, Yang E, Zaim N, Zhang W, Zhao Y, Zoni E. + **Porting WarpX to GPU-accelerated platforms**. + *Parallel Computing*. 2021 Sep, 108:102833. + `DOI:10.1016/j.parco.2021.102833 `__ + - Shapoval O, Lehe R, Thevenet M, Zoni E, Zhao Y, Vay JL. **Overcoming timestep limitations in boosted-frame Particle-In-Cell simulations of plasma-based acceleration**. *Phys. Rev. E* Nov 2021, 104:055311. diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 49a7b1122ca..c18b91cf1ba 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -14,6 +14,11 @@ Plasma-Based Acceleration Scientific works in laser-plasma and beam-plasma acceleration. +#. Fedeli L, Huebl A, Boillod-Cerneux F, Clark T, Gott K, Hillairet C, Jaure S, Leblanc A, Lehe R, Myers A, Piechurski C, Sato M, Zaim N, Zhang W, Vay J-L, Vincenti H. + **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. + *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. + https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc + #. Wang J, Zeng M, Li D, Wang X, Lu W, Gao J. **Injection induced by coaxial laser interference in laser wakefield accelerators**. Matter and Radiation at Extremes 7, 054001, 2022. @@ -35,6 +40,11 @@ Laser-Plasma Interaction Scientific works in laser-ion acceleration and laser-matter interaction. +#. 
Fedeli L, Huebl A, Boillod-Cerneux F, Clark T, Gott K, Hillairet C, Jaure S, Leblanc A, Lehe R, Myers A, Piechurski C, Sato M, Zaim N, Zhang W, Vay J-L, Vincenti H. + **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. + *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. + https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc + #. Hakimi S, Obst-Huebl L, Huebl A, Nakamura K, Bulanov SS, Steinke S, Leemans WP, Kober Z, Ostermayr TM, Schenkel T, Gonsalves AJ, Vay J-L, Tilborg Jv, Toth C, Schroeder CB, Esarey E, Geddes CGR. **Laser-solid interaction studies enabled by the new capabilities of the iP2 BELLA PW beamline**. Physics of Plasmas **29**, 083102, 2022. diff --git a/Docs/source/index.rst b/Docs/source/index.rst index 80c307b1462..d34bd6788de 100644 --- a/Docs/source/index.rst +++ b/Docs/source/index.rst @@ -3,7 +3,7 @@ WarpX ----- -WarpX is an advanced **electromagnetic Particle-In-Cell** code. +WarpX is an advanced, time-based, **electromagnetic & electrostatic Particle-In-Cell** code. It supports many features including: @@ -14,6 +14,7 @@ It supports many features including: For details on the algorithms that WarpX implements, see the :ref:`theory section `. WarpX is a *highly-parallel and highly-optimized code*, which can run on GPUs and multi-core CPUs, and includes load balancing capabilities. +WarpX scales to the world's largest supercomputers and was awarded the `2022 ACM Gordon Bell Prize `__. In addition, WarpX is also a *multi-platform code* and runs on Linux, macOS and Windows. .. _contact: diff --git a/README.md b/README.md index ef97291026d..235e619c6b7 100644 --- a/README.md +++ b/README.md @@ -17,9 +17,12 @@ ## Overview -WarpX is an advanced electromagnetic Particle-In-Cell code. +WarpX is an advanced **electromagnetic & electrostatic Particle-In-Cell** code. It supports many features including Perfectly-Matched Layers (PML), mesh refinement, and the boosted-frame technique. +WarpX is a *highly-parallel and highly-optimized code*, which can run on GPUs and multi-core CPUs, and includes load balancing capabilities. +WarpX scales to the world's largest supercomputers and was awarded the [2022 ACM Gordon Bell Prize](https://www.exascaleproject.org/ecp-supported-collaborative-teams-win-the-2022-acm-gordon-bell-prize-and-special-prize/). + ## Documentation [![PICMI](https://img.shields.io/static/v1?label="works%20with"&message="PICMI"&color="blueviolet")](https://picmi-standard.github.io) From 202b93c3eafd590773c893ee4cc924899b452a33 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 8 Dec 2022 13:08:07 -0800 Subject: [PATCH 0185/1346] ABLASTR: Coarsen Functions (#3433) * ABLASTR: Coarsen Functions Move coarsen functions to ABLASTR. Rename by property of the coarsening function. 
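  The new spellings, in a minimal sketch (the two wrapper functions and their argument
  names are illustrative only; the header paths follow the renamed/created files in the
  diffstat below, and the call signatures mirror the call sites updated in this patch):

      #include <AMReX_IntVect.H>
      #include <AMReX_MultiFab.H>
      #include <ablastr/coarsen/average.H>
      #include <ablastr/coarsen/sample.H>

      // Sampling-based coarsening (formerly CoarsenIO), e.g. for diagnostics output:
      // interpolate fine values onto the coarse, possibly differently staggered, destination.
      void coarsen_for_output (amrex::MultiFab& mf_dst, amrex::MultiFab const& mf_src,
                               amrex::IntVect const& crse_ratio)
      {
          ablastr::coarsen::sample::Coarsen(mf_dst, mf_src, 0, 0, mf_src.nComp(), 0, crse_ratio);
      }

      // Averaging-based coarsening (formerly CoarsenMR), e.g. to restrict J or rho
      // from a fine mesh-refinement level onto the coarse patch below it.
      void coarsen_for_mr (amrex::MultiFab& mf_crse, amrex::MultiFab const& mf_fine,
                           amrex::IntVect const& ref_ratio)
      {
          ablastr::coarsen::average::Coarsen(mf_crse, mf_fine, ref_ratio);
      }

  As the new names indicate, the distinction is the property of the coarsening operation
  (pointwise sampling versus averaging) rather than the place where it happens to be used.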
* Remove unused imports Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Source/Diagnostics/BTDiagnostics.cpp | 6 +- .../ComputeDiagFunctors/CellCenterFunctor.cpp | 9 +- .../ComputeDiagFunctors/DivBFunctor.cpp | 11 +- .../ComputeDiagFunctors/DivEFunctor.cpp | 9 +- .../PartPerCellFunctor.cpp | 5 +- .../PartPerGridFunctor.cpp | 5 +- .../ParticleReductionFunctor.cpp | 5 +- .../ComputeDiagFunctors/RhoFunctor.cpp | 9 +- Source/Diagnostics/FieldIO.cpp | 11 +- .../Diagnostics/ReducedDiags/FieldMaximum.cpp | 51 ++--- .../ReducedDiags/FieldMomentum.cpp | 15 +- .../FieldProbeParticleContainer.cpp | 1 - .../Diagnostics/ReducedDiags/FieldReduction.H | 27 +-- Source/Diagnostics/WarpXIO.cpp | 1 - .../FieldAccessorFunctors.H | 3 +- .../MacroscopicEvolveE.cpp | 29 +-- Source/Parallelization/WarpXComm.cpp | 18 +- Source/Particles/WarpXParticleContainer.cpp | 4 +- Source/Utils/CMakeLists.txt | 2 - Source/Utils/CoarsenIO.cpp | 148 -------------- Source/Utils/CoarsenMR.H | 154 -------------- Source/Utils/CoarsenMR.cpp | 104 ---------- Source/Utils/Make.package | 2 - .../Utils/check_interp_points_and_weights.py | 3 +- Source/ablastr/CMakeLists.txt | 1 + Source/ablastr/Make.package | 1 + Source/ablastr/coarsen/CMakeLists.txt | 5 + Source/ablastr/coarsen/Make.package | 4 + Source/ablastr/coarsen/average.H | 191 ++++++++++++++++++ Source/ablastr/coarsen/average.cpp | 114 +++++++++++ .../CoarsenIO.H => ablastr/coarsen/sample.H} | 78 ++++--- Source/ablastr/coarsen/sample.cpp | 160 +++++++++++++++ 32 files changed, 640 insertions(+), 546 deletions(-) delete mode 100644 Source/Utils/CoarsenIO.cpp delete mode 100644 Source/Utils/CoarsenMR.H delete mode 100644 Source/Utils/CoarsenMR.cpp create mode 100644 Source/ablastr/coarsen/CMakeLists.txt create mode 100644 Source/ablastr/coarsen/Make.package create mode 100644 Source/ablastr/coarsen/average.H create mode 100644 Source/ablastr/coarsen/average.cpp rename Source/{Utils/CoarsenIO.H => ablastr/coarsen/sample.H} (77%) create mode 100644 Source/ablastr/coarsen/sample.cpp diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 2b4efcb5ed8..84a0501b532 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -15,12 +15,12 @@ #include "Diagnostics/FlushFormats/FlushFormat.H" #include "ComputeDiagFunctors/BackTransformParticleFunctor.H" #include "Utils/Algorithms/IsIn.H" -#include "Utils/CoarsenIO.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include #include @@ -772,8 +772,8 @@ BTDiagnostics::PrepareFieldDataForOutput () // Flattening out MF over levels for (int lev = warpx.finestLevel(); lev > 0; --lev) { - CoarsenIO::Coarsen( *m_cell_centered_data[lev-1], *m_cell_centered_data[lev], 0, 0, - m_cellcenter_varnames.size(), 0, WarpX::RefRatio(lev-1) ); + ablastr::coarsen::sample::Coarsen(*m_cell_centered_data[lev - 1], *m_cell_centered_data[lev], 0, 0, + m_cellcenter_varnames.size(), 0, WarpX::RefRatio(lev-1) ); } int num_BT_functors = 1; diff --git a/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp index 2dac5fb0069..f43714a9c04 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.cpp @@ -1,11 +1,12 @@ #include "CellCenterFunctor.H" -#include 
"Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #ifdef WARPX_DIM_RZ # include "WarpX.H" #endif +#include + #include #include #include @@ -35,14 +36,14 @@ CellCenterFunctor::operator()(amrex::MultiFab& mf_dst, int dcomp, const int /*i_ // All modes > 0 amrex::MultiFab::Add(mf_dst_stag, *m_mf_src, ic, 0, 1, m_mf_src->nGrowVect()); } - CoarsenIO::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); } else { - CoarsenIO::Coarsen( mf_dst, *m_mf_src, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen( mf_dst, *m_mf_src, dcomp, 0, nComp(), 0, m_crse_ratio); } #else // In cartesian geometry, coarsen and interpolate from simulation MultiFab, m_mf_src, // to output diagnostic MultiFab, mf_dst. - CoarsenIO::Coarsen( mf_dst, *m_mf_src, dcomp, 0, nComp(), mf_dst.nGrowVect(), m_crse_ratio); + ablastr::coarsen::sample::Coarsen(mf_dst, *m_mf_src, dcomp, 0, nComp(), mf_dst.nGrowVect(), m_crse_ratio); amrex::ignore_unused(m_lev, m_convertRZmodes2cartesian); #endif } diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp index 2dd401a283c..093b4960edf 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp @@ -1,8 +1,9 @@ #include "DivBFunctor.H" -#include "Utils/CoarsenIO.H" #include "WarpX.H" +#include + #include #include @@ -25,7 +26,7 @@ DivBFunctor::operator()(amrex::MultiFab& mf_dst, int dcomp, const int /*i_buffer amrex::MultiFab divB( warpx.boxArray(m_lev), warpx.DistributionMap(m_lev), warpx.ncomps, ng ); warpx.ComputeDivB(divB, 0, m_arr_mf_src, WarpX::CellSize(m_lev) ); // // Coarsen and Interpolate from divB to coarsened/reduced_domain mf_dst - // CoarsenIO::Coarsen( mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); + // ablastr::coarsen::sample::Coarsen( mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); #ifdef WARPX_DIM_RZ if (m_convertRZmodes2cartesian) { // In cylindrical geometry, sum real part of all modes of divE in @@ -41,15 +42,15 @@ DivBFunctor::operator()(amrex::MultiFab& mf_dst, int dcomp, const int /*i_buffer amrex::MultiFab::Add(mf_dst_stag, divB, ic, 0, 1, divB.nGrowVect()); } // TODO check if coarsening is needed, otherwise copy - CoarsenIO::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); } else { // TODO check if coarsening is needed, otherwise copy - CoarsenIO::Coarsen( mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen( mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); } #else // In cartesian geometry, coarsen and interpolate from simulation MultiFab, divE, // to output diagnostic MultiFab, mf_dst. 
- CoarsenIO::Coarsen( mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen(mf_dst, divB, dcomp, 0, nComp(), 0, m_crse_ratio); amrex::ignore_unused(m_convertRZmodes2cartesian); #endif } diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp index 3859b859ce8..0fd525d4f1e 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp @@ -1,12 +1,13 @@ #include "DivEFunctor.H" -#include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #ifdef WARPX_DIM_RZ # include "Utils/WarpXAlgorithmSelection.H" #endif #include "WarpX.H" +#include + #include #include #include @@ -55,14 +56,14 @@ DivEFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp, const int /*i_ // Real part of all modes > 0 amrex::MultiFab::Add(mf_dst_stag, divE, ic, 0, 1, divE.nGrowVect()); } - CoarsenIO::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio); } else { - CoarsenIO::Coarsen( mf_dst, divE, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen( mf_dst, divE, dcomp, 0, nComp(), 0, m_crse_ratio); } #else // In cartesian geometry, coarsen and interpolate from simulation MultiFab, divE, // to output diagnostic MultiFab, mf_dst. - CoarsenIO::Coarsen( mf_dst, divE, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen(mf_dst, divE, dcomp, 0, nComp(), 0, m_crse_ratio); amrex::ignore_unused(m_convertRZmodes2cartesian); #endif } diff --git a/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp index dd663cfb31f..493c52e2e7a 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.cpp @@ -2,9 +2,10 @@ #include "Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor.H" #include "Particles/MultiParticleContainer.H" -#include "Utils/CoarsenIO.H" #include "WarpX.H" +#include + #include #include #include @@ -36,5 +37,5 @@ PartPerCellFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp, const i // Compute ppc which includes a summation over all species. warpx.GetPartContainer().Increment(ppc_mf, m_lev); // Coarsen and interpolate from ppc_mf to the output diagnostic MultiFab, mf_dst. - CoarsenIO::Coarsen(mf_dst, ppc_mf, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen(mf_dst, ppc_mf, dcomp, 0, nComp(), 0, m_crse_ratio); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp index 45a0ad11585..5c637f2c7ef 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.cpp @@ -2,9 +2,10 @@ #include "Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor.H" #include "Particles/MultiParticleContainer.H" -#include "Utils/CoarsenIO.H" #include "WarpX.H" +#include + #include #include #include @@ -48,5 +49,5 @@ PartPerGridFunctor::operator()(amrex::MultiFab& mf_dst, const int dcomp, const i } // Coarsen and interpolate from ppg_mf to the output diagnostic MultiFab, mf_dst. 
- CoarsenIO::Coarsen(mf_dst, ppg_mf, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen(mf_dst, ppg_mf, dcomp, 0, nComp(), 0, m_crse_ratio); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp index 52952a37339..bdc5248980b 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp @@ -4,9 +4,10 @@ #include "Diagnostics/ComputeDiagFunctors/ComputeDiagFunctor.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/CoarsenIO.H" #include "WarpX.H" +#include + #include #include #include @@ -153,5 +154,5 @@ ParticleReductionFunctor::operator() (amrex::MultiFab& mf_dst, const int dcomp, } // Coarsen and interpolate from ppc_mf to the output diagnostic MultiFab, mf_dst. - CoarsenIO::Coarsen(mf_dst, red_mf, dcomp, 0, nComp(), 0, m_crse_ratio); + ablastr::coarsen::sample::Coarsen(mf_dst, red_mf, dcomp, 0, nComp(), 0, m_crse_ratio); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp index de92577227c..6f4fcda4d70 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp @@ -8,10 +8,11 @@ #endif #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" -#include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #include "WarpX.H" +#include + #include #include #include @@ -83,14 +84,14 @@ RhoFunctor::operator() ( amrex::MultiFab& mf_dst, const int dcomp, const int /*i // Real part of all modes > 0 amrex::MultiFab::Add( mf_dst_stag, *rho, ic, 0, 1, rho->nGrowVect() ); } - CoarsenIO::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio ); + ablastr::coarsen::sample::Coarsen( mf_dst, mf_dst_stag, dcomp, 0, nComp(), 0, m_crse_ratio ); } else { - CoarsenIO::Coarsen( mf_dst, *rho, dcomp, 0, nComp(), 0, m_crse_ratio ); + ablastr::coarsen::sample::Coarsen( mf_dst, *rho, dcomp, 0, nComp(), 0, m_crse_ratio ); } #else // In Cartesian geometry, coarsen and interpolate from temporary MultiFab rho // to output diagnostic MultiFab mf_dst - CoarsenIO::Coarsen( mf_dst, *rho, dcomp, 0, nComp(), mf_dst.nGrowVect(), m_crse_ratio ); + ablastr::coarsen::sample::Coarsen(mf_dst, *rho, dcomp, 0, nComp(), mf_dst.nGrowVect(), m_crse_ratio ); amrex::ignore_unused(m_convertRZmodes2cartesian); #endif } diff --git a/Source/Diagnostics/FieldIO.cpp b/Source/Diagnostics/FieldIO.cpp index 0f2b8605007..0b7cc935614 100644 --- a/Source/Diagnostics/FieldIO.cpp +++ b/Source/Diagnostics/FieldIO.cpp @@ -7,9 +7,10 @@ */ #include "FieldIO.H" -#include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" +#include + #include #include #include @@ -183,9 +184,9 @@ AverageAndPackVectorField( MultiFab& mf_avg, const std::array,3> &vector_total = vector_field; #endif - CoarsenIO::Coarsen( mf_avg, *(vector_total[0]), dcomp , 0, 1, ngrow ); - CoarsenIO::Coarsen( mf_avg, *(vector_total[1]), dcomp+1, 0, 1, ngrow ); - CoarsenIO::Coarsen( mf_avg, *(vector_total[2]), dcomp+2, 0, 1, ngrow ); + ablastr::coarsen::sample::Coarsen(mf_avg, *(vector_total[0]), dcomp , 0, 1, ngrow ); + ablastr::coarsen::sample::Coarsen(mf_avg, *(vector_total[1]), dcomp + 1, 0, 1, ngrow ); + ablastr::coarsen::sample::Coarsen(mf_avg, *(vector_total[2]), dcomp + 2, 0, 1, ngrow ); } /** \brief Take a MultiFab `scalar_field` @@ -220,7 
+221,7 @@ AverageAndPackScalarField (MultiFab& mf_avg, MultiFab::Copy( mf_avg, *scalar_total, 0, dcomp, 1, ngrow); } else if ( scalar_total->is_nodal() ){ // - Fully nodal - CoarsenIO::Coarsen( mf_avg, *scalar_total, dcomp, 0, 1, ngrow ); + ablastr::coarsen::sample::Coarsen(mf_avg, *scalar_total, dcomp, 0, 1, ngrow ); } else { amrex::Abort(Utils::TextMsg::Err("Unknown staggering.")); } diff --git a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp index 804d1641abc..a36c349ddcb 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp @@ -7,10 +7,11 @@ #include "FieldMaximum.H" -#include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #include "WarpX.H" +#include + #include #include #include @@ -192,65 +193,65 @@ void FieldMaximum::ComputeDiags (int step) reduceEx_op.eval(box, reduceEx_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Ex_interp = CoarsenIO::Interp(arrEx, Extype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Ex_interp = ablastr::coarsen::sample::Interp(arrEx, Extype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return amrex::Math::abs(Ex_interp); }); reduceEy_op.eval(box, reduceEy_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Ey_interp = CoarsenIO::Interp(arrEy, Eytype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Ey_interp = ablastr::coarsen::sample::Interp(arrEy, Eytype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return amrex::Math::abs(Ey_interp); }); reduceEz_op.eval(box, reduceEz_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Ez_interp = CoarsenIO::Interp(arrEz, Eztype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Ez_interp = ablastr::coarsen::sample::Interp(arrEz, Eztype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return amrex::Math::abs(Ez_interp); }); reduceBx_op.eval(box, reduceBx_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Bx_interp = CoarsenIO::Interp(arrBx, Bxtype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Bx_interp = ablastr::coarsen::sample::Interp(arrBx, Bxtype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return amrex::Math::abs(Bx_interp); }); reduceBy_op.eval(box, reduceBy_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real By_interp = CoarsenIO::Interp(arrBy, Bytype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real By_interp = ablastr::coarsen::sample::Interp(arrBy, Bytype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return amrex::Math::abs(By_interp); }); reduceBz_op.eval(box, reduceBz_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Bz_interp = CoarsenIO::Interp(arrBz, Bztype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Bz_interp = ablastr::coarsen::sample::Interp(arrBz, Bztype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return amrex::Math::abs(Bz_interp); }); reduceE_op.eval(box, reduceE_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Ex_interp = CoarsenIO::Interp(arrEx, Extype, cellCenteredtype, - 
reduction_coarsening_ratio, i, j, k, reduction_comp); - const Real Ey_interp = CoarsenIO::Interp(arrEy, Eytype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const Real Ez_interp = CoarsenIO::Interp(arrEz, Eztype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Ex_interp = ablastr::coarsen::sample::Interp(arrEx, Extype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Ey_interp = ablastr::coarsen::sample::Interp(arrEy, Eytype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Ez_interp = ablastr::coarsen::sample::Interp(arrEz, Eztype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return Ex_interp*Ex_interp + Ey_interp*Ey_interp + Ez_interp*Ez_interp; }); reduceB_op.eval(box, reduceB_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> ReduceTuple { - const Real Bx_interp = CoarsenIO::Interp(arrBx, Bxtype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const Real By_interp = CoarsenIO::Interp(arrBy, Bytype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const Real Bz_interp = CoarsenIO::Interp(arrBz, Bztype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Bx_interp = ablastr::coarsen::sample::Interp(arrBx, Bxtype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real By_interp = ablastr::coarsen::sample::Interp(arrBy, Bytype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const Real Bz_interp = ablastr::coarsen::sample::Interp(arrBz, Bztype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return Bx_interp*Bx_interp + By_interp*By_interp + Bz_interp*Bz_interp; }); } diff --git a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp index 45a5cc6cb7a..8ae51b0e6cb 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp @@ -7,11 +7,12 @@ #include "FieldMomentum.H" -#include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include + #include #include #include @@ -162,13 +163,13 @@ void FieldMomentum::ComputeDiags (int step) reduce_ops.eval(box, reduce_data, [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::GpuTuple { - const amrex::Real Ex_cc = CoarsenIO::Interp(Ex_arr, Ex_stag, cc, cr, i, j, k, comp); - const amrex::Real Ey_cc = CoarsenIO::Interp(Ey_arr, Ey_stag, cc, cr, i, j, k, comp); - const amrex::Real Ez_cc = CoarsenIO::Interp(Ez_arr, Ez_stag, cc, cr, i, j, k, comp); + const amrex::Real Ex_cc = ablastr::coarsen::sample::Interp(Ex_arr, Ex_stag, cc, cr, i, j, k, comp); + const amrex::Real Ey_cc = ablastr::coarsen::sample::Interp(Ey_arr, Ey_stag, cc, cr, i, j, k, comp); + const amrex::Real Ez_cc = ablastr::coarsen::sample::Interp(Ez_arr, Ez_stag, cc, cr, i, j, k, comp); - const amrex::Real Bx_cc = CoarsenIO::Interp(Bx_arr, Bx_stag, cc, cr, i, j, k, comp); - const amrex::Real By_cc = CoarsenIO::Interp(By_arr, By_stag, cc, cr, i, j, k, comp); - const amrex::Real Bz_cc = CoarsenIO::Interp(Bz_arr, Bz_stag, cc, cr, i, j, k, comp); + const amrex::Real Bx_cc = ablastr::coarsen::sample::Interp(Bx_arr, Bx_stag, cc, cr, i, j, k, comp); + const amrex::Real By_cc = ablastr::coarsen::sample::Interp(By_arr, By_stag, cc, cr, i, j, k, comp); + const amrex::Real Bz_cc = 
ablastr::coarsen::sample::Interp(Bz_arr, Bz_stag, cc, cr, i, j, k, comp); return {Ey_cc * Bz_cc - Ez_cc * By_cc, Ez_cc * Bx_cc - Ex_cc * Bz_cc, diff --git a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp index 35e25fb4b2f..54d31ed5a4f 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbeParticleContainer.cpp @@ -12,7 +12,6 @@ #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/Pusher/UpdatePosition.H" #include "Particles/ParticleBoundaries_K.H" -#include "Utils/CoarsenMR.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" diff --git a/Source/Diagnostics/ReducedDiags/FieldReduction.H b/Source/Diagnostics/ReducedDiags/FieldReduction.H index b6b9189465d..ca1b766d360 100644 --- a/Source/Diagnostics/ReducedDiags/FieldReduction.H +++ b/Source/Diagnostics/ReducedDiags/FieldReduction.H @@ -9,9 +9,10 @@ #define WARPX_DIAGNOSTICS_REDUCEDDIAGS_FIELDREDUCTION_H_ #include "ReducedDiags.H" -#include "Utils/CoarsenIO.H" #include "WarpX.H" +#include + #include #include #include @@ -157,18 +158,18 @@ public: const amrex::Real y = (j + 0.5_rt)*dx[1] + real_box.lo(1); const amrex::Real z = (k + 0.5_rt)*dx[2] + real_box.lo(2); #endif - const amrex::Real Ex_interp = CoarsenIO::Interp(arrEx, Extype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const amrex::Real Ey_interp = CoarsenIO::Interp(arrEy, Eytype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const amrex::Real Ez_interp = CoarsenIO::Interp(arrEz, Eztype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const amrex::Real Bx_interp = CoarsenIO::Interp(arrBx, Bxtype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const amrex::Real By_interp = CoarsenIO::Interp(arrBy, Bytype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); - const amrex::Real Bz_interp = CoarsenIO::Interp(arrBz, Bztype, cellCenteredtype, - reduction_coarsening_ratio, i, j, k, reduction_comp); + const amrex::Real Ex_interp = ablastr::coarsen::sample::Interp(arrEx, Extype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const amrex::Real Ey_interp = ablastr::coarsen::sample::Interp(arrEy, Eytype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const amrex::Real Ez_interp = ablastr::coarsen::sample::Interp(arrEz, Eztype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const amrex::Real Bx_interp = ablastr::coarsen::sample::Interp(arrBx, Bxtype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const amrex::Real By_interp = ablastr::coarsen::sample::Interp(arrBy, Bytype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); + const amrex::Real Bz_interp = ablastr::coarsen::sample::Interp(arrBz, Bztype, cellCenteredtype, + reduction_coarsening_ratio, i, j, k, reduction_comp); return reduction_function_parser(x, y, z, Ex_interp, Ey_interp, Ez_interp, Bx_interp, By_interp, Bz_interp); }); diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 1785a04c158..65d71442ff0 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -13,7 +13,6 @@ #endif #include "FieldIO.H" #include "Particles/MultiParticleContainer.H" -#include "Utils/CoarsenIO.H" #include 
"Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H index 58c0837a89b..d4fdf207e52 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H @@ -9,10 +9,11 @@ #define WARPX_FIELD_ACCESSOR_FUNCTORS_H #include "WarpX.H" -#include "Utils/CoarsenIO.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" + #include + /** * \brief Functor that returns the division of the source m_field Array4 value by macroparameter obtained using m_parameter, at the respective (i,j,k). diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index ab2e250142e..22a22272658 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -8,11 +8,12 @@ # include "FiniteDifferenceAlgorithms/FieldAccessorFunctors.H" #endif #include "MacroscopicProperties/MacroscopicProperties.H" -#include "Utils/CoarsenIO.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" +#include + #include #include #include @@ -112,7 +113,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( amrex::MultiFab& epsilon_mf = macroscopic_properties->getepsilon_mf(); amrex::MultiFab& mu_mf = macroscopic_properties->getmu_mf(); - // Index type required for calling CoarsenIO::Interp to interpolate macroscopic + // Index type required for calling ablastr::coarsen::sample::Interp to interpolate macroscopic // properties from their respective staggering to the Ex, Ey, Ez locations amrex::GpuArray const& sigma_stag = macroscopic_properties->sigma_IndexType; amrex::GpuArray const& epsilon_stag = macroscopic_properties->epsilon_IndexType; @@ -178,11 +179,11 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( if (lx(i, j, k) <= 0) return; #endif // Interpolate conductivity, sigma, to Ex position on the grid - amrex::Real const sigma_interp = CoarsenIO::Interp( sigma_arr, sigma_stag, - Ex_stag, macro_cr, i, j, k, scomp); + amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, + Ex_stag, macro_cr, i, j, k, scomp); // Interpolated permittivity, epsilon, to Ex position on the grid - amrex::Real const epsilon_interp = CoarsenIO::Interp( eps_arr, epsilon_stag, - Ex_stag, macro_cr, i, j, k, scomp); + amrex::Real const epsilon_interp = ablastr::coarsen::sample::Interp(eps_arr, epsilon_stag, + Ex_stag, macro_cr, i, j, k, scomp); amrex::Real alpha = T_MacroAlgo::alpha( sigma_interp, epsilon_interp, dt); amrex::Real beta = T_MacroAlgo::beta( sigma_interp, epsilon_interp, dt); Ex(i, j, k) = alpha * Ex(i, j, k) @@ -202,11 +203,11 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( #endif #endif // Interpolate conductivity, sigma, to Ey position on the grid - amrex::Real const sigma_interp = CoarsenIO::Interp( sigma_arr, sigma_stag, - Ey_stag, macro_cr, i, j, k, scomp); + amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, + Ey_stag, macro_cr, i, j, k, scomp); // Interpolated permittivity, epsilon, to Ey position on the grid - amrex::Real const 
epsilon_interp = CoarsenIO::Interp( eps_arr, epsilon_stag, - Ey_stag, macro_cr, i, j, k, scomp); + amrex::Real const epsilon_interp = ablastr::coarsen::sample::Interp(eps_arr, epsilon_stag, + Ey_stag, macro_cr, i, j, k, scomp); amrex::Real alpha = T_MacroAlgo::alpha( sigma_interp, epsilon_interp, dt); amrex::Real beta = T_MacroAlgo::beta( sigma_interp, epsilon_interp, dt); @@ -222,11 +223,11 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( if (lz(i,j,k) <= 0) return; #endif // Interpolate conductivity, sigma, to Ez position on the grid - amrex::Real const sigma_interp = CoarsenIO::Interp( sigma_arr, sigma_stag, - Ez_stag, macro_cr, i, j, k, scomp); + amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, + Ez_stag, macro_cr, i, j, k, scomp); // Interpolated permittivity, epsilon, to Ez position on the grid - amrex::Real const epsilon_interp = CoarsenIO::Interp( eps_arr, epsilon_stag, - Ez_stag, macro_cr, i, j, k, scomp); + amrex::Real const epsilon_interp = ablastr::coarsen::sample::Interp(eps_arr, epsilon_stag, + Ez_stag, macro_cr, i, j, k, scomp); amrex::Real alpha = T_MacroAlgo::alpha( sigma_interp, epsilon_interp, dt); amrex::Real beta = T_MacroAlgo::beta( sigma_interp, epsilon_interp, dt); diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index fed81f2fce6..8585da632ec 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -13,13 +13,13 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "Filter/BilinearFilter.H" -#include "Utils/CoarsenMR.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpXComm_K.H" #include "WarpXSumGuardCells.H" +#include #include #include @@ -887,9 +887,9 @@ WarpX::SyncCurrent ( std::array< MultiFab*,3> crse { J_cp[lev][0].get(), J_cp[lev][1].get(), J_cp[lev][2].get() }; - CoarsenMR::Coarsen( *crse[0], *fine[0], refinement_ratio ); - CoarsenMR::Coarsen( *crse[1], *fine[1], refinement_ratio ); - CoarsenMR::Coarsen( *crse[2], *fine[2], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*crse[0], *fine[0], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*crse[1], *fine[1], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*crse[2], *fine[2], refinement_ratio ); } // For each level @@ -915,7 +915,7 @@ WarpX::SyncRho () { rho_cp[lev]->setVal(0.0); const IntVect& refinement_ratio = refRatio(lev-1); - CoarsenMR::Coarsen( *rho_cp[lev], *rho_fp[lev], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*rho_cp[lev], *rho_fp[lev], refinement_ratio ); } // For each level @@ -947,9 +947,9 @@ void WarpX::RestrictCurrentFromFineToCoarsePatch ( std::array< MultiFab*,3> crse { J_cp[lev][0].get(), J_cp[lev][1].get(), J_cp[lev][2].get() }; - CoarsenMR::Coarsen( *crse[0], *fine[0], refinement_ratio ); - CoarsenMR::Coarsen( *crse[1], *fine[1], refinement_ratio ); - CoarsenMR::Coarsen( *crse[2], *fine[2], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*crse[0], *fine[0], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*crse[1], *fine[1], refinement_ratio ); + ablastr::coarsen::average::Coarsen(*crse[2], *fine[2], refinement_ratio ); } void WarpX::ApplyFilterJ ( @@ -1126,7 +1126,7 @@ void WarpX::RestrictRhoFromFineToCoarsePatch ( if (charge_fp[lev]) { charge_cp[lev]->setVal(0.0); const IntVect& refinement_ratio = refRatio(lev-1); - CoarsenMR::Coarsen( *charge_cp[lev], *charge_fp[lev], refinement_ratio ); + 
ablastr::coarsen::average::Coarsen(*charge_cp[lev], *charge_fp[lev], refinement_ratio ); } } diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 34e25d1a205..e56c94583f6 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -15,13 +15,13 @@ #include "Pusher/GetAndSetPosition.H" #include "Pusher/UpdatePosition.H" #include "ParticleBoundaries_K.H" -#include "Utils/CoarsenMR.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include #include #include @@ -714,7 +714,7 @@ WarpXParticleContainer::DepositCharge (amrex::VectornComp(), ngrow ); coarsened_fine_data.setVal(0.0); - CoarsenMR::Coarsen( coarsened_fine_data, *rho[lev+1], m_gdb->refRatio(lev) ); + ablastr::coarsen::average::Coarsen(coarsened_fine_data, *rho[lev + 1], m_gdb->refRatio(lev) ); ablastr::utils::communication::ParallelAdd(*rho[lev], coarsened_fine_data, 0, 0, rho[lev]->nComp(), amrex::IntVect::TheZeroVector(), diff --git a/Source/Utils/CMakeLists.txt b/Source/Utils/CMakeLists.txt index 19fe6bc3460..0cbd987802e 100644 --- a/Source/Utils/CMakeLists.txt +++ b/Source/Utils/CMakeLists.txt @@ -1,7 +1,5 @@ target_sources(WarpX PRIVATE - CoarsenIO.cpp - CoarsenMR.cpp Interpolate.cpp MPIInitHelpers.cpp ParticleUtils.cpp diff --git a/Source/Utils/CoarsenIO.cpp b/Source/Utils/CoarsenIO.cpp deleted file mode 100644 index 7357dc923e6..00000000000 --- a/Source/Utils/CoarsenIO.cpp +++ /dev/null @@ -1,148 +0,0 @@ -#include "CoarsenIO.H" - -#include "Utils/TextMsg.H" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace amrex; - -void -CoarsenIO::Loop ( MultiFab& mf_dst, - const MultiFab& mf_src, - const int dcomp, - const int scomp, - const int ncomp, - const IntVect ngrowvect, - const IntVect crse_ratio ) -{ - // Staggering of source fine MultiFab and destination coarse MultiFab - const IntVect stag_src = mf_src.boxArray().ixType().toIntVect(); - const IntVect stag_dst = mf_dst.boxArray().ixType().toIntVect(); - - if ( crse_ratio > IntVect(1) ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ngrowvect == IntVect(0), - "option of filling guard cells of destination MultiFab with coarsening not supported for this interpolation" ); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( mf_src.nGrowVect() >= stag_dst-stag_src+ngrowvect, - "source fine MultiFab does not have enough guard cells for this interpolation" ); - - // Auxiliary integer arrays (always 3D) - GpuArray sf; // staggering of source fine MultiFab - GpuArray sc; // staggering of destination coarse MultiFab - GpuArray cr; // coarsening ratio - - sf[0] = stag_src[0]; -#if defined(WARPX_DIM_1D_Z) - sf[1] = 0; -#else - sf[1] = stag_src[1]; -#endif -#if (AMREX_SPACEDIM <= 2) - sf[2] = 0; -#elif defined(WARPX_DIM_3D) - sf[2] = stag_src[2]; -#endif - - sc[0] = stag_dst[0]; -#if defined(WARPX_DIM_1D_Z) - sc[1] = 0; -#else - sc[1] = stag_dst[1]; -#endif -#if (AMREX_SPACEDIM <= 2) - sc[2] = 0; -#elif defined(WARPX_DIM_3D) - sc[2] = stag_dst[2]; -#endif - - cr[0] = crse_ratio[0]; -#if defined(WARPX_DIM_1D_Z) - cr[1] = 1; -#else - cr[1] = crse_ratio[1]; -#endif -#if (AMREX_SPACEDIM <= 2) - cr[2] = 1; -#elif defined(WARPX_DIM_3D) - cr[2] = crse_ratio[2]; -#endif - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - // Loop over boxes (or tiles if not on GPU) - for (MFIter 
mfi( mf_dst, TilingIfNotGPU() ); mfi.isValid(); ++mfi) - { - // Tiles defined at the coarse level - const Box& bx = mfi.growntilebox( ngrowvect ); - Array4 const& arr_dst = mf_dst.array( mfi ); - Array4 const& arr_src = mf_src.const_array( mfi ); - ParallelFor( bx, ncomp, - [=] AMREX_GPU_DEVICE( int i, int j, int k, int n ) - { - arr_dst(i,j,k,n+dcomp) = CoarsenIO::Interp( - arr_src, sf, sc, cr, i, j, k, n+scomp ); - } ); - } -} - -void -CoarsenIO::Coarsen ( MultiFab& mf_dst, - const MultiFab& mf_src, - const int dcomp, - const int scomp, - const int ncomp, - const int ngrow, - const IntVect crse_ratio ) -{ - amrex::IntVect ngrowvect(ngrow); - Coarsen(mf_dst, - mf_src, - dcomp, - scomp, - ncomp, - ngrowvect, - crse_ratio); -} - -void -CoarsenIO::Coarsen ( MultiFab& mf_dst, - const MultiFab& mf_src, - const int dcomp, - const int scomp, - const int ncomp, - const IntVect ngrowvect, - const IntVect crse_ratio ) -{ - BL_PROFILE("CoarsenIO::Coarsen()"); - - // Convert BoxArray of source MultiFab to staggering of destination MultiFab and coarsen it - BoxArray ba_tmp = amrex::convert( mf_src.boxArray(), mf_dst.ixType().toIntVect() ); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ba_tmp.coarsenable( crse_ratio ), - "source MultiFab converted to staggering of destination MultiFab is not coarsenable" ); - ba_tmp.coarsen( crse_ratio ); - - if ( ba_tmp == mf_dst.boxArray() and mf_src.DistributionMap() == mf_dst.DistributionMap() ) - CoarsenIO::Loop( mf_dst, mf_src, dcomp, scomp, ncomp, ngrowvect, crse_ratio ); - else - { - // Cannot coarsen into MultiFab with different BoxArray or DistributionMapping: - // 1) create temporary MultiFab on coarsened version of source BoxArray with same DistributionMapping - MultiFab mf_tmp( ba_tmp, mf_src.DistributionMap(), ncomp, ngrowvect, MFInfo(), FArrayBoxFactory() ); - // 2) interpolate from mf_src to mf_tmp (start writing into component 0) - CoarsenIO::Loop( mf_tmp, mf_src, 0, scomp, ncomp, ngrowvect, crse_ratio ); - // 3) copy from mf_tmp to mf_dst (with different BoxArray or DistributionMapping) - mf_dst.ParallelCopy( mf_tmp, 0, dcomp, ncomp ); - } -} diff --git a/Source/Utils/CoarsenMR.H b/Source/Utils/CoarsenMR.H deleted file mode 100644 index 1191b0c7e1d..00000000000 --- a/Source/Utils/CoarsenMR.H +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef WARPX_COARSEN_MR_H_ -#define WARPX_COARSEN_MR_H_ - -#include -#include -#include -#include -#include - -#include - -#include - -namespace CoarsenMR{ - - using namespace amrex; - - /** - * \brief Interpolates the floating point data contained in the source Array4 - * \c arr_src, extracted from a fine MultiFab, with weights defined in - * such a way that the total charge is preserved. 
- * - * \param[in] arr_src floating point data to be interpolated - * \param[in] sf staggering of the source fine MultiFab - * \param[in] sc staggering of the destination coarsened MultiFab - * \param[in] cr coarsening ratio along each spatial direction - * \param[in] i index along x of the coarsened Array4 to be filled - * \param[in] j index along y of the coarsened Array4 to be filled - * \param[in] k index along z of the coarsened Array4 to be filled - * \param[in] comp index along the fourth component of the Array4 \c arr_src - * containing the data to be interpolated - * - * \return interpolated field at cell (i,j,k) of a coarsened Array4 - */ - AMREX_GPU_DEVICE - AMREX_FORCE_INLINE - Real Interp ( Array4 const& arr_src, - GpuArray const& sf, - GpuArray const& sc, - GpuArray const& cr, - const int i, - const int j, - const int k, - const int comp ) - { - // Indices of destination array (coarse) - const int ic[3] = { i, j, k }; - - // Number of points and starting indices of source array (fine) - int np[3], idx_min[3]; - - // Compute number of points - for ( int l = 0; l < 3; ++l ) { - if ( cr[l] == 1 ) np[l] = 1; // no coarsening - else np[l] = cr[l]*(1-sf[l])*(1-sc[l]) // cell-centered - +(2*(cr[l]-1)+1)*sf[l]*sc[l]; // nodal - } - - // Compute starting indices of source array (fine) - for ( int l = 0; l < 3; ++l ) { - if ( cr[l] == 1 ) idx_min[l] = ic[l]; // no coarsening - else idx_min[l] = ic[l]*cr[l]*(1-sf[l])*(1-sc[l]) // cell-centered - +(ic[l]*cr[l]-cr[l]+1)*sf[l]*sc[l]; // nodal - } - - // Auxiliary integer variables - const int numx = np[0]; - const int numy = np[1]; - const int numz = np[2]; - const int imin = idx_min[0]; - const int jmin = idx_min[1]; - const int kmin = idx_min[2]; - const int sfx = sf[0]; - const int sfy = sf[1]; - const int sfz = sf[2]; - const int scx = sc[0]; - const int scy = sc[1]; - const int scz = sc[2]; - const int crx = cr[0]; - const int cry = cr[1]; - const int crz = cr[2]; - int ii, jj, kk; - Real wx, wy, wz; - - // Add neutral elements (=0) beyond guard cells in source array (fine) - auto const arr_src_safe = [arr_src] - AMREX_GPU_DEVICE (int const ix, int const iy, int const iz, int const n) noexcept - { - return arr_src.contains( ix, iy, iz ) ? arr_src(ix,iy,iz,n) : 0.0_rt; - }; - - // Interpolate over points computed above. Weights are computed in order - // to guarantee total charge conservation for both cell-centered data - // (equal weights) and nodal data (weights depend on distance between - // points on fine and coarse grids). Terms multiplied by (1-sf)*(1-sc) - // are ON for cell-centered data and OFF for nodal data, while terms - // multiplied by sf*sc are ON for nodal data and OFF for cell-centered data. - // Python script Source/Utils/check_interp_points_and_weights.py can be - // used to check interpolation points and weights in 1D. 
- Real c = 0.0_rt; - for (int kref = 0; kref < numz; ++kref) { - for (int jref = 0; jref < numy; ++jref) { - for (int iref = 0; iref < numx; ++iref) { - ii = imin+iref; - jj = jmin+jref; - kk = kmin+kref; - wx = (1.0_rt/static_cast(numx))*(1-sfx)*(1-scx) // if cell-centered - +((amrex::Math::abs(crx-amrex::Math::abs(ii-i*crx)))/static_cast(crx*crx))*sfx*scx; // if nodal - wy = (1.0_rt/static_cast(numy))*(1-sfy)*(1-scy) // if cell-centered - +((amrex::Math::abs(cry-amrex::Math::abs(jj-j*cry)))/static_cast(cry*cry))*sfy*scy; // if nodal - wz = (1.0_rt/static_cast(numz))*(1-sfz)*(1-scz) // if cell-centered - +((amrex::Math::abs(crz-amrex::Math::abs(kk-k*crz)))/static_cast(crz*crz))*sfz*scz; // if nodal - c += wx*wy*wz*arr_src_safe(ii,jj,kk,comp); - } - } - } - return c; - } - - /** - * \brief Loops over the boxes of the coarsened MultiFab \c mf_dst and fills - * them by interpolating the data contained in the fine MultiFab \c mf_src. - * - * \param[in,out] mf_dst coarsened MultiFab containing the floating point data - * to be filled by interpolating the source fine MultiFab - * \param[in] mf_src fine MultiFab containing the floating point data to be interpolated - * \param[in] ncomp number of components to loop over for the coarsened - * Array4 extracted from the coarsened MultiFab \c mf_dst - * \param[in] ngrow number of guard cells to fill along each spatial direction - * \param[in] crse_ratio coarsening ratio between the fine MultiFab \c mf_src - * and the coarsened MultiFab \c mf_dst along each spatial direction - */ - void Loop ( MultiFab& mf_dst, - const MultiFab& mf_src, - const int ncomp, - const IntVect ngrow, - const IntVect crse_ratio ); - - /** - * \brief Stores in the coarsened MultiFab \c mf_dst the values obtained by - * interpolating the data contained in the fine MultiFab \c mf_src. 
- * - * \param[in,out] mf_dst coarsened MultiFab containing the floating point data - * to be filled by interpolating the fine MultiFab \c mf_src - * \param[in] mf_src fine MultiFab containing the floating point data to be interpolated - * \param[in] crse_ratio coarsening ratio between the fine MultiFab \c mf_src - * and the coarsened MultiFab \c mf_dst along each spatial direction - */ - void Coarsen ( MultiFab& mf_dst, - const MultiFab& mf_src, - const IntVect crse_ratio ); -} - -#endif // WARPX_COARSEN_MR_H_ diff --git a/Source/Utils/CoarsenMR.cpp b/Source/Utils/CoarsenMR.cpp deleted file mode 100644 index 549ff6ea2b0..00000000000 --- a/Source/Utils/CoarsenMR.cpp +++ /dev/null @@ -1,104 +0,0 @@ -#include "CoarsenMR.H" - -#include "Utils/TextMsg.H" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace amrex; - -void -CoarsenMR::Loop ( MultiFab& mf_dst, - const MultiFab& mf_src, - const int ncomp, - const IntVect ngrow, - const IntVect crse_ratio ) -{ - // Staggering of source fine MultiFab and destination coarse MultiFab - const IntVect stag_src = mf_src.boxArray().ixType().toIntVect(); - const IntVect stag_dst = mf_dst.boxArray().ixType().toIntVect(); - - // Auxiliary integer arrays (always 3D) - GpuArray sf; // staggering of source fine MultiFab - GpuArray sc; // staggering of destination coarse MultiFab - GpuArray cr; // coarsening ratio - - sf[0] = stag_src[0]; -#if defined(WARPX_DIM_1D_Z) - sf[1] = 0; -#else - sf[1] = stag_src[1]; -#endif -#if (AMREX_SPACEDIM <= 2) - sf[2] = 0; -#elif defined(WARPX_DIM_3D) - sf[2] = stag_src[2]; -#endif - - sc[0] = stag_dst[0]; -#if defined(WARPX_DIM_1D_Z) - sc[1] = 0; -#else - sc[1] = stag_dst[1]; -#endif -#if (AMREX_SPACEDIM <= 2) - sc[2] = 0; -#elif defined(WARPX_DIM_3D) - sc[2] = stag_dst[2]; -#endif - - cr[0] = crse_ratio[0]; -#if defined(WARPX_DIM_1D_Z) - cr[1] = 1; -#else - cr[1] = crse_ratio[1]; -#endif -#if (AMREX_SPACEDIM <= 2) - cr[2] = 1; -#elif defined(WARPX_DIM_3D) - cr[2] = crse_ratio[2]; -#endif - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - // Loop over boxes (or tiles if not on GPU) - for (MFIter mfi( mf_dst, TilingIfNotGPU() ); mfi.isValid(); ++mfi) - { - // Tiles defined at the coarse level - const Box& bx = mfi.growntilebox( ngrow ); - Array4 const& arr_dst = mf_dst.array( mfi ); - Array4 const& arr_src = mf_src.const_array( mfi ); - ParallelFor( bx, ncomp, - [=] AMREX_GPU_DEVICE( int i, int j, int k, int n ) - { - arr_dst(i,j,k,n) = CoarsenMR::Interp( - arr_src, sf, sc, cr, i, j, k, n ); - } ); - } -} - -void -CoarsenMR::Coarsen ( MultiFab& mf_dst, - const MultiFab& mf_src, - const IntVect crse_ratio ) -{ - BL_PROFILE("CoarsenMR::Coarsen()"); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( mf_src.ixType() == mf_dst.ixType(), - "source MultiFab and destination MultiFab have different IndexType" ); - - // Number of guard cells to fill on coarse patch and number of components - const IntVect ngrow = ( mf_src.nGrowVect() + 1 ) / crse_ratio; - const int ncomp = mf_src.nComp(); - - CoarsenMR::Loop( mf_dst, mf_src, ncomp, ngrow, crse_ratio ); -} diff --git a/Source/Utils/Make.package b/Source/Utils/Make.package index f4395372057..3d6da0de90e 100644 --- a/Source/Utils/Make.package +++ b/Source/Utils/Make.package @@ -3,8 +3,6 @@ CEXE_sources += WarpXTagging.cpp CEXE_sources += WarpXUtil.cpp CEXE_sources += WarpXVersion.cpp CEXE_sources += WarpXAlgorithmSelection.cpp -CEXE_sources += CoarsenIO.cpp -CEXE_sources += CoarsenMR.cpp CEXE_sources += 
Interpolate.cpp CEXE_sources += IntervalsParser.cpp CEXE_sources += MPIInitHelpers.cpp diff --git a/Source/Utils/check_interp_points_and_weights.py b/Source/Utils/check_interp_points_and_weights.py index 4d54af01cf0..a1d17c8dd3d 100644 --- a/Source/Utils/check_interp_points_and_weights.py +++ b/Source/Utils/check_interp_points_and_weights.py @@ -21,7 +21,8 @@ # For MR applications only the cases sc=sf=0 and sc=sf=1 are considered. Terms # multiplied by (1-sf)*(1-sc) are ON for cell-centered data and OFF for nodal data, # while terms multiplied by sf*sc are ON for nodal data and OFF for cell-centered -# data. C++ implementation in Source/Utils/CoarsenMR.H/.cpp and Source/Utils/CoarsenIO.H/.cpp +# data. C++ implementation in Source/ablastr/coarsen/average.(H/.cpp) and +# Source/ablastr/coarsen/sample.(H/.cpp) #------------------------------------------------------------------------------- import sys diff --git a/Source/ablastr/CMakeLists.txt b/Source/ablastr/CMakeLists.txt index 224145a9843..0224f1a8bb7 100644 --- a/Source/ablastr/CMakeLists.txt +++ b/Source/ablastr/CMakeLists.txt @@ -1,4 +1,5 @@ #add_subdirectory(fields) +add_subdirectory(coarsen) #add_subdirectory(particles) #add_subdirectory(profiler) add_subdirectory(utils) diff --git a/Source/ablastr/Make.package b/Source/ablastr/Make.package index 46b3f185845..b10d9f629e1 100644 --- a/Source/ablastr/Make.package +++ b/Source/ablastr/Make.package @@ -1,5 +1,6 @@ #CEXE_sources += ParticleBoundaries.cpp +include $(WARPX_HOME)/Source/ablastr/coarsen/Make.package include $(WARPX_HOME)/Source/ablastr/particles/Make.package include $(WARPX_HOME)/Source/ablastr/utils/Make.package include $(WARPX_HOME)/Source/ablastr/warn_manager/Make.package diff --git a/Source/ablastr/coarsen/CMakeLists.txt b/Source/ablastr/coarsen/CMakeLists.txt new file mode 100644 index 00000000000..7396c8d8a90 --- /dev/null +++ b/Source/ablastr/coarsen/CMakeLists.txt @@ -0,0 +1,5 @@ +target_sources(ablastr + PRIVATE + average.cpp + sample.cpp +) diff --git a/Source/ablastr/coarsen/Make.package b/Source/ablastr/coarsen/Make.package new file mode 100644 index 00000000000..4fcd0e2ec2d --- /dev/null +++ b/Source/ablastr/coarsen/Make.package @@ -0,0 +1,4 @@ +CEXE_sources += average.cpp +CEXE_sources += sample.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/ablastr/coarsen diff --git a/Source/ablastr/coarsen/average.H b/Source/ablastr/coarsen/average.H new file mode 100644 index 00000000000..269403f7b2c --- /dev/null +++ b/Source/ablastr/coarsen/average.H @@ -0,0 +1,191 @@ +/* Copyright 2022 Edoardo Zoni, Remi Lehe, Prabhat Kumar, Axel Huebl + * + * This file is part of ABLASTR. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef ABLASTR_COARSEN_AVERAGE_H_ +#define ABLASTR_COARSEN_AVERAGE_H_ + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + + +/** Mesh Coarsening by Averaging + * + * These methods are mostly used for mesh-refinement. + */ +namespace ablastr::coarsen::average +{ + /** + * \brief Interpolates the floating point data contained in the source Array4 + * \c arr_src, extracted from a fine MultiFab, with weights defined in + * such a way that the total charge is preserved. + * + * The input (sf) and output (sc) staggering need to be the same. 
+ * + * \param[in] arr_src floating point data to be interpolated + * \param[in] sf staggering of the source fine MultiFab + * \param[in] sc staggering of the destination coarsened MultiFab + * \param[in] cr coarsening ratio along each spatial direction + * \param[in] i index along x of the coarsened Array4 to be filled + * \param[in] j index along y of the coarsened Array4 to be filled + * \param[in] k index along z of the coarsened Array4 to be filled + * \param[in] comp index along the fourth component of the Array4 \c arr_src + * containing the data to be interpolated + * + * \return interpolated field at cell (i,j,k) of a coarsened Array4 + */ + AMREX_GPU_DEVICE + AMREX_FORCE_INLINE + amrex::Real + Interp ( + amrex::Array4 const &arr_src, + amrex::GpuArray const &sf, + amrex::GpuArray const &sc, + amrex::GpuArray const &cr, + int const i, + int const j, + int const k, + int const comp + ) + { + using namespace amrex::literals; + + AMREX_ASSERT_WITH_MESSAGE(sf[0] == sc[0], "Interp: Staggering for component 0 does not match!"); + AMREX_ASSERT_WITH_MESSAGE(sf[1] == sc[1], "Interp: Staggering for component 1 does not match!"); + AMREX_ASSERT_WITH_MESSAGE(sf[2] == sc[2], "Interp: Staggering for component 2 does not match!"); + + // Indices of destination array (coarse) + int const ic[3] = {i, j, k}; + + // Number of points and starting indices of source array (fine) + int np[3], idx_min[3]; + + // Compute number of points + for (int l = 0; l < 3; ++l) { + if (cr[l] == 1) + np[l] = 1; // no coarsening + else + np[l] = cr[l] * (1 - sf[l]) * (1 - sc[l]) // cell-centered + + (2 * (cr[l] - 1) + 1) * sf[l] * sc[l]; // nodal + } + + // Compute starting indices of source array (fine) + for (int l = 0; l < 3; ++l) { + if (cr[l] == 1) + idx_min[l] = ic[l]; // no coarsening + else + idx_min[l] = ic[l] * cr[l] * (1 - sf[l]) * (1 - sc[l]) // cell-centered + + (ic[l] * cr[l] - cr[l] + 1) * sf[l] * sc[l]; // nodal + } + + // Auxiliary integer variables + int const numx = np[0]; + int const numy = np[1]; + int const numz = np[2]; + int const imin = idx_min[0]; + int const jmin = idx_min[1]; + int const kmin = idx_min[2]; + int const sfx = sf[0]; + int const sfy = sf[1]; + int const sfz = sf[2]; + int const scx = sc[0]; + int const scy = sc[1]; + int const scz = sc[2]; + int const crx = cr[0]; + int const cry = cr[1]; + int const crz = cr[2]; + int ii, jj, kk; + amrex::Real wx, wy, wz; + + // Add neutral elements (=0) beyond guard cells in source array (fine) + auto const arr_src_safe = [arr_src] + AMREX_GPU_DEVICE(int const ix, int const iy, int const iz, int const n) noexcept { + return arr_src.contains(ix, iy, iz) ? arr_src(ix, iy, iz, n) : 0.0_rt; + }; + + // Interpolate over points computed above. Weights are computed in order + // to guarantee total charge conservation for both cell-centered data + // (equal weights) and nodal data (weights depend on distance between + // points on fine and coarse grids). Terms multiplied by (1-sf)*(1-sc) + // are ON for cell-centered data and OFF for nodal data, while terms + // multiplied by sf*sc are ON for nodal data and OFF for cell-centered data. + // Python script Source/Utils/check_interp_points_and_weights.py can be + // used to check interpolation points and weights in 1D. 
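To make the weight rule above concrete, the same formulas can be written as a minimal, self-contained 1D sketch (assuming identical source and destination staggering s, which is the case average::Interp asserts; the helper name print_1d_weights and the standalone main are illustrative only and are not part of ABLASTR):

// 1D sketch of the charge-conserving coarsening weights used above.
// s = 0: cell-centered data, s = 1: nodal data; cr is the coarsening ratio.
#include <cmath>
#include <cstdio>

void print_1d_weights (int ic, int cr, int s)
{
    // number of fine points contributing to coarse index ic (same rule as np[] above)
    const int np = (cr == 1) ? 1 : cr*(1 - s) + (2*(cr - 1) + 1)*s;
    // first contributing fine index (same rule as idx_min[] above)
    const int imin = (cr == 1) ? ic : ic*cr*(1 - s) + (ic*cr - cr + 1)*s;
    double sum = 0.0;
    for (int iref = 0; iref < np; ++iref) {
        const int ii = imin + iref;
        // equal weights for cell-centered data, distance-based weights for nodal data
        const double w = (1.0/np)*(1 - s)
                       + (std::abs(cr - std::abs(ii - ic*cr))/double(cr*cr))*s;
        std::printf("fine index %d -> weight %g\n", ii, w);
        sum += w;
    }
    std::printf("sum of weights = %g\n", sum); // 1 in both cases: total charge is preserved
}

int main ()
{
    print_1d_weights(3, 2, 0); // cell-centered, cr=2: two points with weight 1/2 each
    print_1d_weights(3, 2, 1); // nodal, cr=2: three points with weights 1/4, 1/2, 1/4
    return 0;
}

For cr = 2 the nodal case reproduces the 1/4, 1/2, 1/4 stencil that can be cross-checked with Source/Utils/check_interp_points_and_weights.py.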
+ amrex::Real c = 0.0_rt; + for (int kref = 0; kref < numz; ++kref) { + for (int jref = 0; jref < numy; ++jref) { + for (int iref = 0; iref < numx; ++iref) { + ii = imin + iref; + jj = jmin + jref; + kk = kmin + kref; + wx = (1.0_rt / static_cast(numx)) * (1 - sfx) * (1 - scx) // if cell-centered + + ((amrex::Math::abs(crx - amrex::Math::abs(ii - i * crx))) / + static_cast(crx * crx)) * sfx * scx; // if nodal + wy = (1.0_rt / static_cast(numy)) * (1 - sfy) * (1 - scy) // if cell-centered + + ((amrex::Math::abs(cry - amrex::Math::abs(jj - j * cry))) / + static_cast(cry * cry)) * sfy * scy; // if nodal + wz = (1.0_rt / static_cast(numz)) * (1 - sfz) * (1 - scz) // if cell-centered + + ((amrex::Math::abs(crz - amrex::Math::abs(kk - k * crz))) / + static_cast(crz * crz)) * sfz * scz; // if nodal + c += wx * wy * wz * arr_src_safe(ii, jj, kk, comp); + } + } + } + return c; + } + + /** + * \brief Loops over the boxes of the coarsened MultiFab \c mf_dst and fills + * them by interpolating the data contained in the fine MultiFab \c mf_src. + * + * \param[in,out] mf_dst coarsened MultiFab containing the floating point data + * to be filled by interpolating the source fine MultiFab + * \param[in] mf_src fine MultiFab containing the floating point data to be interpolated + * \param[in] ncomp number of components to loop over for the coarsened + * Array4 extracted from the coarsened MultiFab \c mf_dst + * \param[in] ngrow number of guard cells to fill along each spatial direction + * \param[in] crse_ratio coarsening ratio between the fine MultiFab \c mf_src + * and the coarsened MultiFab \c mf_dst along each spatial direction + */ + void + Loop ( + amrex::MultiFab & mf_dst, + amrex::MultiFab const & mf_src, + int const ncomp, + amrex::IntVect const ngrow, + amrex::IntVect const crse_ratio + ); + + /** + * \brief Stores in the coarsened MultiFab \c mf_dst the values obtained by + * interpolating the data contained in the fine MultiFab \c mf_src. + * + * \param[in,out] mf_dst coarsened MultiFab containing the floating point data + * to be filled by interpolating the fine MultiFab \c mf_src + * \param[in] mf_src fine MultiFab containing the floating point data to be interpolated + * \param[in] crse_ratio coarsening ratio between the fine MultiFab \c mf_src + * and the coarsened MultiFab \c mf_dst along each spatial direction + */ + void + Coarsen ( + amrex::MultiFab & mf_dst, + amrex::MultiFab const & mf_src, + amrex::IntVect const crse_ratio + ); + +} // namespace ablastr::coarsen::average + +#endif // ABLASTR_COARSEN_AVERAGE_H_ diff --git a/Source/ablastr/coarsen/average.cpp b/Source/ablastr/coarsen/average.cpp new file mode 100644 index 00000000000..2e333867d7a --- /dev/null +++ b/Source/ablastr/coarsen/average.cpp @@ -0,0 +1,114 @@ +/* Copyright 2022 Edoardo Zoni, Remi Lehe, Prabhat Kumar + * + * This file is part of ABLASTR. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "average.H" + +#include "ablastr/utils/TextMsg.H" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace ablastr::coarsen::average +{ + void + Loop ( + amrex::MultiFab & mf_dst, + amrex::MultiFab const & mf_src, + int const ncomp, + amrex::IntVect const ngrow, + amrex::IntVect const crse_ratio + ) + { + // Staggering of source fine MultiFab and destination coarse MultiFab + amrex::IntVect const stag_src = mf_src.boxArray().ixType().toIntVect(); + amrex::IntVect const stag_dst = mf_dst.boxArray().ixType().toIntVect(); + + // Auxiliary integer arrays (always 3D) + amrex::GpuArray sf; // staggering of source fine MultiFab + amrex::GpuArray sc; // staggering of destination coarse MultiFab + amrex::GpuArray cr; // coarsening ratio + + sf[0] = stag_src[0]; +#if defined(WARPX_DIM_1D_Z) + sf[1] = 0; +#else + sf[1] = stag_src[1]; +#endif +#if (AMREX_SPACEDIM <= 2) + sf[2] = 0; +#elif defined(WARPX_DIM_3D) + sf[2] = stag_src[2]; +#endif + + sc[0] = stag_dst[0]; +#if defined(WARPX_DIM_1D_Z) + sc[1] = 0; +#else + sc[1] = stag_dst[1]; +#endif +#if (AMREX_SPACEDIM <= 2) + sc[2] = 0; +#elif defined(WARPX_DIM_3D) + sc[2] = stag_dst[2]; +#endif + + cr[0] = crse_ratio[0]; +#if defined(WARPX_DIM_1D_Z) + cr[1] = 1; +#else + cr[1] = crse_ratio[1]; +#endif +#if (AMREX_SPACEDIM <= 2) + cr[2] = 1; +#elif defined(WARPX_DIM_3D) + cr[2] = crse_ratio[2]; +#endif + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + // Loop over boxes (or tiles if not on GPU) + for (amrex::MFIter mfi(mf_dst, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + // Tiles defined at the coarse level + amrex::Box const & bx = mfi.growntilebox(ngrow); + amrex::Array4 const &arr_dst = mf_dst.array(mfi); + amrex::Array4 const &arr_src = mf_src.const_array(mfi); + ParallelFor(bx, ncomp, + [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) { + arr_dst(i, j, k, n) = Interp( + arr_src, sf, sc, cr, i, j, k, n); + }); + } + } + + void + Coarsen ( + amrex::MultiFab & mf_dst, + amrex::MultiFab const & mf_src, + amrex::IntVect const crse_ratio + ) + { + BL_PROFILE("ablastr::coarsen::Coarsen()"); + + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(mf_src.ixType() == mf_dst.ixType(), + "source MultiFab and destination MultiFab have different IndexType"); + + // Number of guard cells to fill on coarse patch and number of components + const amrex::IntVect ngrow = (mf_src.nGrowVect() + 1) / crse_ratio; + const int ncomp = mf_src.nComp(); + + Loop(mf_dst, mf_src, ncomp, ngrow, crse_ratio); + } + +} // namespace ablastr::coarsen::average diff --git a/Source/Utils/CoarsenIO.H b/Source/ablastr/coarsen/sample.H similarity index 77% rename from Source/Utils/CoarsenIO.H rename to Source/ablastr/coarsen/sample.H index 0b53831e6a4..1390cbebb3c 100644 --- a/Source/Utils/CoarsenIO.H +++ b/Source/ablastr/coarsen/sample.H @@ -1,21 +1,33 @@ -#ifndef WARPX_COARSEN_IO_H_ -#define WARPX_COARSEN_IO_H_ +/* Copyright 2022 Edoardo Zoni, Remi Lehe, David Grote, Axel Huebl + * + * This file is part of ABLASTR. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef ABLASTR_COARSEN_SAMPLE_H_ +#define ABLASTR_COARSEN_SAMPLE_H_ + #include #include +#include #include #include #include +#include #include #include #include -namespace CoarsenIO{ - - using namespace amrex; +/** Mesh Coarsening by Sampling + * + * These methods are mostly used for I/O. 
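The two new namespaces serve distinct purposes: ablastr::coarsen::average restricts fine data onto a coarse patch with charge-conserving weights and requires matching staggering (used for mesh refinement, as in the SyncCurrent and SyncRho hunks above), whereas ablastr::coarsen::sample sub-samples with equal weights and is intended for output (as in the FieldReduction.H hunk above). A brief usage sketch follows; only the two Coarsen signatures are taken from the new headers, while the function name, variable names and include paths are illustrative assumptions:

// Illustrative sketch: charge-conserving restriction for MR vs. down-sampling for output.
#include <ablastr/coarsen/average.H>   // assumed include path for the new ablastr layout
#include <ablastr/coarsen/sample.H>    // assumed include path for the new ablastr layout
#include <AMReX_MultiFab.H>
#include <AMReX_IntVect.H>

void coarsen_usage_sketch (amrex::MultiFab& rho_coarse,      // coarse-patch charge, same staggering as rho_fine
                           amrex::MultiFab const& rho_fine,
                           amrex::MultiFab& diag_cc,         // cell-centered diagnostic output
                           amrex::MultiFab const& field_fine,
                           amrex::IntVect const& ref_ratio)
{
    // Mesh refinement: charge-conserving restriction of the fine patch onto the coarse patch.
    ablastr::coarsen::average::Coarsen(rho_coarse, rho_fine, ref_ratio);

    // I/O / reduced diagnostics: sample one component of a (possibly staggered) fine field
    // onto a cell-centered MultiFab; here dcomp = 0, scomp = 0, ncomp = 1, ngrow = 0.
    ablastr::coarsen::sample::Coarsen(diag_cc, field_fine, 0, 0, 1, 0, ref_ratio);
}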
+ */ +namespace ablastr::coarsen::sample +{ /** * \brief Interpolates the floating point data contained in the source Array4 * \c arr_src, extracted from a fine MultiFab, by averaging over either @@ -35,15 +47,19 @@ namespace CoarsenIO{ */ AMREX_GPU_DEVICE AMREX_FORCE_INLINE - Real Interp ( Array4 const& arr_src, - GpuArray const& sf, - GpuArray const& sc, - GpuArray const& cr, - const int i, - const int j, - const int k, - const int comp ) + amrex::Real Interp ( + amrex::Array4 const& arr_src, + amrex::GpuArray const& sf, + amrex::GpuArray const& sc, + amrex::GpuArray const& cr, + const int i, + const int j, + const int k, + const int comp + ) { + using namespace amrex::literals; + // Indices of destination array (coarse) const int ic[3] = { i, j, k }; @@ -70,19 +86,18 @@ namespace CoarsenIO{ const int jmin = idx_min[1]; const int kmin = idx_min[2]; int ii, jj, kk; - Real wx, wy, wz; + amrex::Real const wx = 1.0_rt / static_cast(numx); + amrex::Real const wy = 1.0_rt / static_cast(numy); + amrex::Real const wz = 1.0_rt / static_cast(numz); // Interpolate over points computed above - Real c = 0.0_rt; + amrex::Real c = 0.0_rt; for (int kref = 0; kref < numz; ++kref) { for (int jref = 0; jref < numy; ++jref) { for (int iref = 0; iref < numx; ++iref) { ii = imin+iref; jj = jmin+jref; kk = kmin+kref; - wx = 1.0_rt/static_cast(numx); - wy = 1.0_rt/static_cast(numy); - wz = 1.0_rt/static_cast(numz); c += wx*wy*wz*arr_src(ii,jj,kk,comp); } } @@ -109,13 +124,13 @@ namespace CoarsenIO{ * \param[in] crse_ratio coarsening ratio between the fine MultiFab \c mf_src * and the coarsened MultiFab \c mf_dst along each spatial direction */ - void Loop ( MultiFab& mf_dst, - const MultiFab& mf_src, + void Loop ( amrex::MultiFab& mf_dst, + const amrex::MultiFab& mf_src, const int dcomp, const int scomp, const int ncomp, - const IntVect ngrow, - const IntVect crse_ratio=IntVect(1) ); + const amrex::IntVect ngrow, + const amrex::IntVect crse_ratio=amrex::IntVect(1) ); /** * \brief Stores in the coarsened MultiFab \c mf_dst the values obtained by @@ -136,20 +151,21 @@ namespace CoarsenIO{ * \param[in] crse_ratio coarsening ratio between the fine MultiFab \c mf_src * and the coarsened MultiFab \c mf_dst along each spatial direction */ - void Coarsen ( MultiFab& mf_dst, - const MultiFab& mf_src, + void Coarsen ( amrex::MultiFab& mf_dst, + const amrex::MultiFab& mf_src, const int dcomp, const int scomp, const int ncomp, const int ngrow, - const IntVect crse_ratio=IntVect(1) ); - void Coarsen ( MultiFab& mf_dst, - const MultiFab& mf_src, + const amrex::IntVect crse_ratio=amrex::IntVect(1) ); + void Coarsen ( amrex::MultiFab& mf_dst, + const amrex::MultiFab& mf_src, const int dcomp, const int scomp, const int ncomp, - const IntVect ngrowvect, - const IntVect crse_ratio=IntVect(1) ); -} + const amrex::IntVect ngrowvect, + const amrex::IntVect crse_ratio=amrex::IntVect(1) ); + +} // namespace ablastr::coarsen::sample -#endif // WARPX_COARSEN_IO_H_ +#endif // ABLASTR_COARSEN_SAMPLE_H_ diff --git a/Source/ablastr/coarsen/sample.cpp b/Source/ablastr/coarsen/sample.cpp new file mode 100644 index 00000000000..e77869017a5 --- /dev/null +++ b/Source/ablastr/coarsen/sample.cpp @@ -0,0 +1,160 @@ +/* Copyright 2022 Edoardo Zoni, Remi Lehe, David Grote, Axel Huebl + * + * This file is part of ABLASTR. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "sample.H" + +#include "ablastr/utils/TextMsg.H" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace ablastr::coarsen::sample +{ + void + Loop ( + amrex::MultiFab& mf_dst, + const amrex::MultiFab& mf_src, + const int dcomp, + const int scomp, + const int ncomp, + const amrex::IntVect ngrowvect, + const amrex::IntVect crse_ratio + ) + { + // Staggering of source fine MultiFab and destination coarse MultiFab + const amrex::IntVect stag_src = mf_src.boxArray().ixType().toIntVect(); + const amrex::IntVect stag_dst = mf_dst.boxArray().ixType().toIntVect(); + + if ( crse_ratio > amrex::IntVect(1) ) + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( ngrowvect == amrex::IntVect(0), + "option of filling guard cells of destination MultiFab with coarsening not supported for this interpolation" ); + + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( mf_src.nGrowVect() >= stag_dst-stag_src+ngrowvect, + "source fine MultiFab does not have enough guard cells for this interpolation" ); + + // Auxiliary integer arrays (always 3D) + amrex::GpuArray sf; // staggering of source fine MultiFab + amrex::GpuArray sc; // staggering of destination coarse MultiFab + amrex::GpuArray cr; // coarsening ratio + + sf[0] = stag_src[0]; +#if defined(WARPX_DIM_1D_Z) + sf[1] = 0; +#else + sf[1] = stag_src[1]; +#endif +#if (AMREX_SPACEDIM <= 2) + sf[2] = 0; +#elif defined(WARPX_DIM_3D) + sf[2] = stag_src[2]; +#endif + + sc[0] = stag_dst[0]; +#if defined(WARPX_DIM_1D_Z) + sc[1] = 0; +#else + sc[1] = stag_dst[1]; +#endif +#if (AMREX_SPACEDIM <= 2) + sc[2] = 0; +#elif defined(WARPX_DIM_3D) + sc[2] = stag_dst[2]; +#endif + + cr[0] = crse_ratio[0]; +#if defined(WARPX_DIM_1D_Z) + cr[1] = 1; +#else + cr[1] = crse_ratio[1]; +#endif +#if (AMREX_SPACEDIM <= 2) + cr[2] = 1; +#elif defined(WARPX_DIM_3D) + cr[2] = crse_ratio[2]; +#endif + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + // Loop over boxes (or tiles if not on GPU) + for (amrex::MFIter mfi( mf_dst, amrex::TilingIfNotGPU() ); mfi.isValid(); ++mfi) + { + // Tiles defined at the coarse level + const amrex::Box& bx = mfi.growntilebox( ngrowvect ); + amrex::Array4 const& arr_dst = mf_dst.array( mfi ); + amrex::Array4 const& arr_src = mf_src.const_array( mfi ); + ParallelFor( bx, ncomp, + [=] AMREX_GPU_DEVICE( int i, int j, int k, int n ) + { + arr_dst(i,j,k,n+dcomp) = Interp( + arr_src, sf, sc, cr, i, j, k, n+scomp ); + } ); + } + } + + void + Coarsen ( + amrex::MultiFab& mf_dst, + const amrex::MultiFab& mf_src, + const int dcomp, + const int scomp, + const int ncomp, + const int ngrow, + const amrex::IntVect crse_ratio + ) + { + amrex::IntVect ngrowvect(ngrow); + Coarsen(mf_dst, + mf_src, + dcomp, + scomp, + ncomp, + ngrowvect, + crse_ratio); + } + + void + Coarsen ( + amrex::MultiFab& mf_dst, + const amrex::MultiFab& mf_src, + const int dcomp, + const int scomp, + const int ncomp, + const amrex::IntVect ngrowvect, + const amrex::IntVect crse_ratio + ) + { + BL_PROFILE("sample::Coarsen()"); + + // Convert BoxArray of source MultiFab to staggering of destination MultiFab and coarsen it + amrex::BoxArray ba_tmp = amrex::convert( mf_src.boxArray(), mf_dst.ixType().toIntVect() ); + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( ba_tmp.coarsenable( crse_ratio ), + "source MultiFab converted to staggering of destination MultiFab is not coarsenable" ); + ba_tmp.coarsen( crse_ratio ); + + if ( ba_tmp == mf_dst.boxArray() and mf_src.DistributionMap() == mf_dst.DistributionMap() 
) + Loop( mf_dst, mf_src, dcomp, scomp, ncomp, ngrowvect, crse_ratio ); + else + { + // Cannot coarsen into MultiFab with different BoxArray or DistributionMapping: + // 1) create temporary MultiFab on coarsened version of source BoxArray with same DistributionMapping + amrex::MultiFab mf_tmp( ba_tmp, mf_src.DistributionMap(), ncomp, ngrowvect, amrex::MFInfo(), amrex::FArrayBoxFactory() ); + // 2) interpolate from mf_src to mf_tmp (start writing into component 0) + Loop( mf_tmp, mf_src, 0, scomp, ncomp, ngrowvect, crse_ratio ); + // 3) copy from mf_tmp to mf_dst (with different BoxArray or DistributionMapping) + mf_dst.ParallelCopy( mf_tmp, 0, dcomp, ncomp ); + } + } + +} // namespace ablastr::coarsen::sample From 763692919adc29d0e0f5ab7b0fcf434e7744ecec Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 8 Dec 2022 13:13:43 -0800 Subject: [PATCH 0186/1346] Docs: Repo Vis Link (#3553) --- Docs/source/developers/repo_organization.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/developers/repo_organization.rst b/Docs/source/developers/repo_organization.rst index 85bf784a802..b8a964f9839 100644 --- a/Docs/source/developers/repo_organization.rst +++ b/Docs/source/developers/repo_organization.rst @@ -10,7 +10,7 @@ All the WarpX source code is located in ``Source/``. All sub-directories have a pretty straightforward name. The PIC loop is part of the WarpX class, in function ``WarpX::EvolveEM`` implemented in ``Source/WarpXEvolveEM.cpp``. The core of the PIC loop (i.e., without diagnostics etc.) is in ``WarpX::OneStep_nosub`` (when subcycling is OFF) or ``WarpX::OneStep_sub1`` (when subcycling is ON, with method 1). -Here is a `visual representation `__ of the repository structure. +Here is a `visual representation `__ of the repository structure. 
Code organization From 18b986e444e0b19eebda1fd9d709174b3e3a849a Mon Sep 17 00:00:00 2001 From: "lgtm-com[bot]" <43144390+lgtm-com[bot]@users.noreply.github.com> Date: Thu, 8 Dec 2022 15:06:34 -0800 Subject: [PATCH 0187/1346] Add CodeQL workflow for GitHub code scanning (#3555) * Add CodeQL workflow for GitHub code scanning * Remove: old LGTM config * Fix: CodeQL Deps Co-authored-by: LGTM Migrator Co-authored-by: Axel Huebl --- .github/workflows/codeql.yml | 63 ++++++++++++++++++++++++++++++++++++ .lgtm.yml | 29 ----------------- 2 files changed, 63 insertions(+), 29 deletions(-) create mode 100644 .github/workflows/codeql.yml delete mode 100644 .lgtm.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 00000000000..fc6d5e5fd78 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,63 @@ +name: "CodeQL" + +on: + push: + branches: [ "development" ] + pull_request: + branches: [ "development" ] + schedule: + - cron: "27 3 * * 0" + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ python, cpp ] + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install Packages (C++) + if: ${{ matrix.language == 'cpp' }} + run: | + sudo apt-get update + sudo apt-get install --yes cmake openmpi-bin libopenmpi-dev libhdf5-openmpi-dev libadios-openmpi-dev + + python -m pip install --upgrade pip + python -m pip install --upgrade wheel + python -m pip install --upgrade cmake + export CMAKE="$HOME/.local/bin/cmake" && echo "CMAKE=$CMAKE" >> $GITHUB_ENV + + - name: Configure (C++) + if: ${{ matrix.language == 'cpp' }} + run: | + $CMAKE -S . -B build -DWarpX_OPENPMD=ON + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + queries: +security-and-quality + + - name: Build (py) + uses: github/codeql-action/autobuild@v2 + if: ${{ matrix.language == 'python' }} + + - name: Build (C++) + if: ${{ matrix.language == 'cpp' }} + run: | + $CMAKE --build build -j 2 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{ matrix.language }}" diff --git a/.lgtm.yml b/.lgtm.yml deleted file mode 100644 index efaaf9307ce..00000000000 --- a/.lgtm.yml +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2019-2021 Axel Huebl -# -# This file is part of WarpX. -# -# License: BSD-3-Clause-LBNL - -# docs: -# https://help.semmle.com/lgtm-enterprise/user/help/lgtm.yml-configuration-file.html#example-of-complete-lgtmyml-file - -extraction: - cpp: - prepare: - packages: - - cmake - - openmpi-bin - - libopenmpi-dev - - libhdf5-openmpi-dev - - libadios-openmpi-dev - after_prepare: - - python -m pip install --upgrade pip - - python -m pip install --upgrade wheel - - python -m pip install --upgrade cmake - - export CMAKE="$HOME/.local/bin/cmake" - configure: - command: - - $CMAKE -S . -B build -DWarpX_OPENPMD=ON - index: - build_command: - - $CMAKE --build build -j 2 From 412e194b83bf0adb66fcabbaa047396cc5700b0c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 8 Dec 2022 17:21:50 -0800 Subject: [PATCH 0188/1346] CI: Concurrency Limit CodeQL Same as other runners: aborts when new updates are pushed to safe CI time. 
--- .github/workflows/codeql.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index fc6d5e5fd78..112c683bc1c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -8,6 +8,10 @@ on: schedule: - cron: "27 3 * * 0" +concurrency: + group: ${{ github.ref }}-${{ github.head_ref }}-codeql + cancel-in-progress: true + jobs: analyze: name: Analyze From 7960683cc0665f1492e00393f631bbc8726b20b7 Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Fri, 9 Dec 2022 10:30:32 -0800 Subject: [PATCH 0189/1346] Renamed currents in Vay deposition from `j` to `D` (#3557) * Renamed Vay currents from j to D * [pre-commit.ci] auto fixes from pre-commit.com hooks --- .../Particles/Deposition/CurrentDeposition.H | 70 +++++++++---------- Source/Particles/WarpXParticleContainer.cpp | 1 + 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/Source/Particles/Deposition/CurrentDeposition.H b/Source/Particles/Deposition/CurrentDeposition.H index d394345dcb8..d478d838ef2 100644 --- a/Source/Particles/Deposition/CurrentDeposition.H +++ b/Source/Particles/Deposition/CurrentDeposition.H @@ -713,7 +713,7 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, * \brief Vay current deposition * ( Vay et al, 2013) * for thread \c thread_num: deposit \c D in real space and store the result in - * \c jx_fab, \c jy_fab, \c jz_fab + * \c Dx_fab, \c Dy_fab, \c Dz_fab * * \tparam depos_order deposition order * \param[in] GetPosition Functor that returns the particle position @@ -722,10 +722,10 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, * \param[in] ion_lev Pointer to array of particle ionization level. This is required to have the charge of each macroparticle since \c q is a scalar. For non-ionizable species, \c ion_lev is \c null - * \param[in,out] jx_fab,jy_fab,jz_fab FArrayBox of current density, either full array or tile + * \param[in,out] Dx_fab,Dy_fab,Dz_fab FArrayBox of Vay current density, either full array or tile * \param[in] np_to_depose Number of particles for which current is deposited * \param[in] dt Time step for particle level - * \param[in] relative_time Time at which to deposit J, relative to the time of the + * \param[in] relative_time Time at which to deposit D, relative to the time of the * current positions of the particles. When different than 0, * the particle position will be temporarily modified to match * the time of the deposition. 
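Note on the renamed quantity: the FArrayBoxes filled by this routine hold the Vay quantity D, not the physical current density j (the one-line comment added to WarpXParticleContainer.cpp below makes the same point at the call site); the physical current is meant to be reconstructed from D in a separate, spectral-space step, following Vay et al. (2013). As a worked-out form of the (t_a +/- t_b)/2 combination in the 2D (x-z) branch below, for a particle whose old and new cell indices coincide the deposited contributions reduce to

  D_x += (t_a + t_b)/2 = wq/(2*V*dt) * (Sx_new - Sx_old) * (Sz_new + Sz_old)
  D_z += (t_a - t_b)/2 = wq/(2*V*dt) * (Sx_new + Sx_old) * (Sz_new - Sz_old)

where Sx and Sz are the particle shape factors at the new and old positions, wq is the macroparticle charge times weight, V the cell volume (invvol = 1/V) and dt the time step (invdt = 1/dt).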
@@ -745,9 +745,9 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, const amrex::ParticleReal* const uyp, const amrex::ParticleReal* const uzp, const int* const ion_lev, - amrex::FArrayBox& jx_fab, - amrex::FArrayBox& jy_fab, - amrex::FArrayBox& jz_fab, + amrex::FArrayBox& Dx_fab, + amrex::FArrayBox& Dy_fab, + amrex::FArrayBox& Dz_fab, const long np_to_depose, const amrex::Real dt, const amrex::Real relative_time, @@ -761,14 +761,14 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, { #if defined(WARPX_DIM_RZ) amrex::ignore_unused(GetPosition, - wp, uxp, uyp, uzp, ion_lev, jx_fab, jy_fab, jz_fab, + wp, uxp, uyp, uzp, ion_lev, Dx_fab, Dy_fab, Dz_fab, np_to_depose, dt, relative_time, dx, xyzmin, lo, q, n_rz_azimuthal_modes); amrex::Abort("Vay deposition not implemented in RZ geometry"); #endif #if defined(WARPX_DIM_1D_Z) amrex::ignore_unused(GetPosition, - wp, uxp, uyp, uzp, ion_lev, jx_fab, jy_fab, jz_fab, + wp, uxp, uyp, uzp, ion_lev, Dx_fab, Dy_fab, Dz_fab, np_to_depose, dt, relative_time, dx, xyzmin, lo, q, n_rz_azimuthal_modes); amrex::Abort("Vay deposition not implemented in cartesian 1D geometry"); #endif @@ -809,11 +809,11 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, // Allocate temporary arrays #if defined(WARPX_DIM_3D) - AMREX_ALWAYS_ASSERT(jx_fab.box() == jy_fab.box() && jx_fab.box() == jz_fab.box()); - amrex::FArrayBox temp_fab{jx_fab.box(), 4}; + AMREX_ALWAYS_ASSERT(Dx_fab.box() == Dy_fab.box() && Dx_fab.box() == Dz_fab.box()); + amrex::FArrayBox temp_fab{Dx_fab.box(), 4}; #elif defined(WARPX_DIM_XZ) - AMREX_ALWAYS_ASSERT(jx_fab.box() == jz_fab.box()); - amrex::FArrayBox temp_fab{jx_fab.box(), 2}; + AMREX_ALWAYS_ASSERT(Dx_fab.box() == Dz_fab.box()); + amrex::FArrayBox temp_fab{Dx_fab.box(), 2}; #endif temp_fab.setVal(0._rt); amrex::Array4 const& temp_arr = temp_fab.array(); @@ -822,11 +822,11 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, const amrex::Real invcsq = 1._rt / (PhysConst::c * PhysConst::c); // Arrays where D will be stored - amrex::Array4 const& jx_arr = jx_fab.array(); - amrex::Array4 const& jy_arr = jy_fab.array(); - amrex::Array4 const& jz_arr = jz_fab.array(); + amrex::Array4 const& Dx_arr = Dx_fab.array(); + amrex::Array4 const& Dy_arr = Dy_fab.array(); + amrex::Array4 const& Dz_arr = Dz_fab.array(); - // Loop over particles and deposit (Dx,Dy,Dz) into jx_fab, jy_fab and jz_fab + // Loop over particles and deposit (Dx,Dy,Dz) into Dx_fab, Dy_fab and Dz_fab #if defined(WARPX_USE_GPUCLOCK) amrex::Real* cost_real = nullptr; if( load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::GpuClock) { @@ -923,7 +923,7 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, // sz_old shape factor along z for the centering of each current const int k_old = compute_shape_factor(sz_old, z_old); - // Deposit current into jx_arr, jy_arr and jz_arr + // Deposit current into Dx_arr, Dy_arr and Dz_arr #if defined(WARPX_DIM_XZ) for (int k=0; k<=depos_order; k++) { @@ -937,18 +937,18 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, auto const sxo_szo = static_cast(sx_old[i] * sz_old[k]); if (i_new == i_old && k_new == k_old) { - // temp arrays for Jx and Jz + // temp arrays for Dx and Dz amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), wq * invvol * invdt * (sxn_szn - sxo_szo)); amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 1), wq * invvol * invdt * (sxn_szo - sxo_szn)); - // Jy - 
amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), + // Dy + amrex::Gpu::Atomic::AddNoRet(&Dy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), wqy * 0.25_rt * (sxn_szn + sxn_szo + sxo_szn + sxo_szo)); } else { - // temp arrays for Jx and Jz + // temp arrays for Dx and Dz amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), wq * invvol * invdt * sxn_szn); @@ -961,17 +961,17 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 1), - wq * invvol * invdt * sxo_szn); - // Jy - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), + // Dy + amrex::Gpu::Atomic::AddNoRet(&Dy_arr(lo.x + i_new + i, lo.y + k_new + k, 0, 0), wqy * 0.25_rt * sxn_szn); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_new + i, lo.y + k_old + k, 0, 0), + amrex::Gpu::Atomic::AddNoRet(&Dy_arr(lo.x + i_new + i, lo.y + k_old + k, 0, 0), wqy * 0.25_rt * sxn_szo); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 0), + amrex::Gpu::Atomic::AddNoRet(&Dy_arr(lo.x + i_old + i, lo.y + k_new + k, 0, 0), wqy * 0.25_rt * sxo_szn); - amrex::Gpu::Atomic::AddNoRet(&jy_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), + amrex::Gpu::Atomic::AddNoRet(&Dy_arr(lo.x + i_old + i, lo.y + k_old + k, 0, 0), wqy * 0.25_rt * sxo_szo); } @@ -1000,7 +1000,7 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, auto const sxo_syo_szo = static_cast(sx_old[i]) * syo_szo; if (i_new == i_old && j_new == j_old && k_new == k_old) { - // temp arrays for Jx, Jy and Jz + // temp arrays for Dx, Dy and Dz amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 0), wq * invvol * invdt * (sxn_syn_szn - sxo_syo_szo)); @@ -1013,7 +1013,7 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 3), wq * invvol * invdt * (sxo_syn_szn - sxn_syo_szo)); } else { - // temp arrays for Jx, Jy and Jz + // temp arrays for Dx, Dy and Dz amrex::Gpu::Atomic::AddNoRet(&temp_arr(lo.x + i_new + i, lo.y + j_new + j, lo.z + k_new + k, 0), wq * invvol * invdt * sxn_syn_szn); @@ -1045,23 +1045,23 @@ void doVayDepositionShapeN (const GetParticlePosition& GetPosition, } ); #if defined(WARPX_DIM_3D) - amrex::ParallelFor(jx_fab.box(), [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept + amrex::ParallelFor(Dx_fab.box(), [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept { const amrex::Real t_a = temp_arr(i,j,k,0); const amrex::Real t_b = temp_arr(i,j,k,1); const amrex::Real t_c = temp_arr(i,j,k,2); const amrex::Real t_d = temp_arr(i,j,k,3); - jx_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a + t_b + t_c - 2._rt*t_d); - jy_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a + t_b - 2._rt*t_c + t_d); - jz_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a - 2._rt*t_b + t_c + t_d); + Dx_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a + t_b + t_c - 2._rt*t_d); + Dy_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a + t_b - 2._rt*t_c + t_d); + Dz_arr(i,j,k) += (1._rt/6._rt)*(2_rt*t_a - 2._rt*t_b + t_c + t_d); }); #elif defined(WARPX_DIM_XZ) - amrex::ParallelFor(jx_fab.box(), [=] AMREX_GPU_DEVICE (int i, int j, int) noexcept + amrex::ParallelFor(Dx_fab.box(), [=] AMREX_GPU_DEVICE (int i, int j, int) noexcept { const amrex::Real t_a = temp_arr(i,j,0,0); const amrex::Real t_b = temp_arr(i,j,0,1); - jx_arr(i,j,0) += (0.5_rt)*(t_a + t_b); - jz_arr(i,j,0) += (0.5_rt)*(t_a - t_b); + Dx_arr(i,j,0) += 
(0.5_rt)*(t_a + t_b); + Dz_arr(i,j,0) += (0.5_rt)*(t_a - t_b); }); #endif // Synchronize so that temp_fab can be safely deallocated in its destructor diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index e56c94583f6..6eae0d0b643 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -463,6 +463,7 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, WarpX::load_balance_costs_update_algo); } } else if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { + // jx_fab, jy_fab and jz_fab are Vay currents (D), not physical currents (j) if (WarpX::nox == 1){ doVayDepositionShapeN<1>( GetPosition, wp.dataPtr() + offset, uxp.dataPtr() + offset, From 7fbfd20121ea8726d0f6a91bcbd9b88dd1500f81 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 13:01:13 -0800 Subject: [PATCH 0190/1346] CI: Source Checks on ubuntu 22.04 (#3558) Updated Doxygen from 1.8.17 to 1.9.1 --- .github/workflows/source.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index 52acbf82d61..2945c6c74f7 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ -15,7 +15,7 @@ concurrency: jobs: style: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 From e6fae8b2b338dd765eae60bdabb4233b709b7b13 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Fri, 9 Dec 2022 14:43:50 -0800 Subject: [PATCH 0191/1346] Improve memory footprint for ParticleInitialize (#3392) --- Examples/Tests/nci_fdtd_stability/inputs_2d | 16 ++++---- .../Tests/resampling/inputs_leveling_thinning | 8 ++-- .../benchmarks_json/LaserIonAcc2d.json | 40 +++++++++---------- .../benchmarks_json/leveling_thinning.json | 12 +++--- .../Particles/PhysicalParticleContainer.cpp | 21 ++++++++++ 5 files changed, 59 insertions(+), 38 deletions(-) diff --git a/Examples/Tests/nci_fdtd_stability/inputs_2d b/Examples/Tests/nci_fdtd_stability/inputs_2d index 4fcd95a313a..9359fba74be 100644 --- a/Examples/Tests/nci_fdtd_stability/inputs_2d +++ b/Examples/Tests/nci_fdtd_stability/inputs_2d @@ -48,10 +48,10 @@ electrons.ux_th = 1.e-5 electrons.uy_th = 1.e-5 electrons.uz_th = 1.e-5 electrons.uz_m = 1000. # Mean momentum along z (unitless) -electrons.xmin = -10.e-6 -electrons.xmax = +10.e-6 -electrons.ymin = -10.e-6 -electrons.ymax = +10.e-6 +electrons.xmin = -10.e-6 - 1e-7 # adding buffer of fraction of cell-size +electrons.xmax = +10.e-6 + 1e-7 # adding buffer of fraction of cell-size +electrons.ymin = -10.e-6 - 1e-7 # adding buffer of fraction of cell-size +electrons.ymax = +10.e-6 + 1e-7 # adding buffer of fraction of cell-size ions.charge = q_e ions.mass = m_p @@ -61,10 +61,10 @@ ions.profile = constant ions.density = 1.e29 # number of electrons per m^3 ions.momentum_distribution_type = "constant" ions.uz = 1000. 
# Momentum along z (unitless) -ions.xmin = -10.e-6 -ions.xmax = +10.e-6 -ions.ymin = -10.e-6 -ions.ymax = +10.e-6 +ions.xmin = -10.e-6 - 1e-7 # adding buffer of fraction of cell-size +ions.xmax = +10.e-6 + 1e-7 # adding buffer of fraction of cell-size +ions.ymin = -10.e-6 - 1e-7 # adding buffer of fraction of cell-size +ions.ymax = +10.e-6 + 1e-7 # adding buffer of fraction of cell-size # Diagnostics diagnostics.diags_names = diag1 diff --git a/Examples/Tests/resampling/inputs_leveling_thinning b/Examples/Tests/resampling/inputs_leveling_thinning index 674b37d7e01..9ed49ae358e 100644 --- a/Examples/Tests/resampling/inputs_leveling_thinning +++ b/Examples/Tests/resampling/inputs_leveling_thinning @@ -40,10 +40,10 @@ resampled_part1.resampling_trigger_max_avg_ppc = 395 resampled_part2.species_type = electron resampled_part2.injection_style = NRandomPerCell resampled_part2.num_particles_per_cell = 100000 -resampled_part2.zmin = 0. -resampled_part2.zmax = 1. -resampled_part2.xmin = 0. -resampled_part2.xmax = 1. +resampled_part2.zmin = 0.001 +resampled_part2.zmax = 0.999 +resampled_part2.xmin = 0.001 +resampled_part2.xmax = 0.999 resampled_part2.profile = parse_density_function # Trick to get a Gaussian weight distribution is to do a Box-Muller transform using the position # within the cell as the two random variables. Here, we have a distribution with standard deviation diff --git a/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json b/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json index 6b9d29fbbe1..15b0a8dbec0 100644 --- a/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json +++ b/Regression/Checksum/benchmarks_json/LaserIonAcc2d.json @@ -1,33 +1,33 @@ { "electrons": { - "particle_momentum_x": 3.7558265697785297e-19, + "particle_momentum_x": 3.924978793639722e-19, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.6241045337016777e-18, - "particle_position_x": 0.008080139452222582, - "particle_position_y": 0.030470786164249836, - "particle_weight": 2.6527193922723818e+17 + "particle_momentum_z": 1.6531781161630182e-18, + "particle_position_x": 0.008174406825176781, + "particle_position_y": 0.030854054377836164, + "particle_weight": 2.6494574815747686e+17 }, "hydrogen": { - "particle_momentum_x": 2.230242228305449e-18, - "particle_momentum_z": 1.087276856218956e-18, - "particle_orig_x": 0.008248212890625, - "particle_orig_z": 0.0368645947265625, - "particle_position_x": 0.008247833494376897, - "particle_position_y": 0.03686279813152423, - "particle_weight": 2.6934893377423152e+17 + "particle_momentum_x": 2.2282828780834146e-18, + "particle_momentum_z": 1.0851862321717955e-18, + "particle_orig_x": 0.008197167968750002, + "particle_orig_z": 0.036522314453125, + "particle_position_x": 0.008196791928459133, + "particle_position_y": 0.03652051811944703, + "particle_weight": 2.7152730477070458e+17 }, "lev=0": { "Bx": 0.0, - "By": 11411806.976599155, + "By": 11404616.733041402, "Bz": 0.0, - "Ex": 2035695789467976.2, + "Ex": 2032977227921656.0, "Ey": 0.0, - "Ez": 323118235034526.9, - "jx": 1.656704421803856e+19, + "Ez": 317987205247174.75, + "jx": 1.6377970880819999e+19, "jy": 0.0, - "jz": 8.846078579875918e+18, - "rho": 61752907894.83176, - "rho_electrons": 17451375232572.703, - "rho_hydrogen": 17441818436520.373 + "jz": 8.817456212655565e+18, + "rho": 60944064977.941765, + "rho_electrons": 17448235212566.305, + "rho_hydrogen": 17441805684524.002 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/leveling_thinning.json 
b/Regression/Checksum/benchmarks_json/leveling_thinning.json index 57e4b872b60..f0ebdf5137e 100644 --- a/Regression/Checksum/benchmarks_json/leveling_thinning.json +++ b/Regression/Checksum/benchmarks_json/leveling_thinning.json @@ -14,16 +14,16 @@ "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, - "particle_position_x": 485368.303735276, - "particle_position_y": 484224.66261508386, - "particle_weight": 255.7814999999979 + "particle_position_x": 485879.18021793937, + "particle_position_y": 484466.34922379535, + "particle_weight": 256.2546999999979 }, "resampled_part2": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, "particle_momentum_z": 0.0, - "particle_position_x": 38419.04911639948, - "particle_position_y": 38054.448891953645, - "particle_weight": 2.836692166357862 + "particle_position_x": 38089.14806219578, + "particle_position_y": 37818.512934148945, + "particle_weight": 2.8175309517949136 } } \ No newline at end of file diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 62d4df594e9..cbb0a693203 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -990,6 +990,27 @@ PhysicalParticleContainer::AddPlasma (int lev, RealBox part_realbox) r = 1; } pcounts[index] = num_ppc*r; + // update pcount by checking if cell-corners or cell-center + // has non-zero density + const auto xlim = GpuArray{lo.x,(lo.x+hi.x)/2._rt,hi.x}; + const auto ylim = GpuArray{lo.y,(lo.y+hi.y)/2._rt,hi.y}; + const auto zlim = GpuArray{lo.z,(lo.z+hi.z)/2._rt,hi.z}; + + const auto checker = [&](){ + for (const auto& x : xlim) + for (const auto& y : ylim) + for (const auto& z : zlim) + if (inj_pos->insideBounds(x,y,z) and (inj_rho->getDensity(x,y,z) > 0) ) { + return 1; + } + return 0; + }; + const int flag_pcount = checker(); + if (flag_pcount == 1) { + pcounts[index] = num_ppc*r; + } else { + pcounts[index] = 0; + } } #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::ignore_unused(k); From 3a88fd92b320d649e6d2a797c4bcd4cfa5b787be Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 17:24:21 -0800 Subject: [PATCH 0192/1346] =?UTF-8?q?CI:=20CodeQL=20w/=20=F0=9F=94=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/codeql.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 112c683bc1c..83fbdb6dd19 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,4 +1,4 @@ -name: "CodeQL" +name: 🔍 CodeQL on: push: From 008ddca89d9f694fef05dc1beb4d2d448c05b1a1 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 13 Dec 2022 02:05:27 +0100 Subject: [PATCH 0193/1346] Update documentation to compile and run on LUMI supercomputer (#3562) * update example profile * update documentation for LUMI * Fix minor typo Co-authored-by: Axel Huebl --- Docs/source/install/hpc/lumi.rst | 23 ++++++++++++-- .../lumi-csc/lumi_warpx.profile.example | 12 ++++---- Tools/machines/lumi-csc/submit.sh | 30 +++++++++++++++++++ 3 files changed, 57 insertions(+), 8 deletions(-) create mode 100644 Tools/machines/lumi-csc/submit.sh diff --git a/Docs/source/install/hpc/lumi.rst b/Docs/source/install/hpc/lumi.rst index 8933ff279d3..64effa1fb6a 100644 --- a/Docs/source/install/hpc/lumi.rst +++ b/Docs/source/install/hpc/lumi.rst @@ -66,10 +66,11 @@ Running MI250X GPUs (2x64 GB) ^^^^^^^^^^^^^^^^^^^^^ -.. 
note:: - - TODO: Add batch script template. +In non-interactive runs: +.. literalinclude:: ../../../../Tools/machines/lumi-csc/submit.sh + :language: bash + :caption: You can copy this file from ``Tools/machines/lumi-csc/submit.sh``. .. _post-processing-lumi: @@ -79,3 +80,19 @@ Post-Processing .. note:: TODO: Document any Jupyter or data services. + +Known System Issues +------------------- + +.. warning:: + + December 12th, 2022: + There is a caching bug in libFabric that causes WarpX simulations to occasionally hang on LUMI on more than 1 node. + + As a work-around, please export the following environment variable in your job scripts until the issue is fixed: + + .. code-block:: bash + + #export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching + # or, less invasive: + export FI_MR_CACHE_MONITOR=memhooks # alternative cache monitor diff --git a/Tools/machines/lumi-csc/lumi_warpx.profile.example b/Tools/machines/lumi-csc/lumi_warpx.profile.example index c85d50731af..e8e0669b46b 100644 --- a/Tools/machines/lumi-csc/lumi_warpx.profile.example +++ b/Tools/machines/lumi-csc/lumi_warpx.profile.example @@ -2,15 +2,14 @@ #export proj= # optional: just an additional text editor -module load nano +# module load nano # required dependencies -module load LUMI/22.08 partition/G -module load buildtools +module load CrayEnv module load craype-accel-amd-gfx90a -module load rocm/5.0.2 module load cray-mpich -module load cce/14.0.2 +module load rocm +module load buildtools/22.08 # optional: faster re-builds #module load ccache @@ -42,6 +41,9 @@ fi # usage: nrun #alias runNode="..." +# GPU-aware MPI +export MPICH_GPU_SUPPORT_ENABLED=1 + # optimize ROCm compilation for MI250X export AMREX_AMD_ARCH=gfx90a diff --git a/Tools/machines/lumi-csc/submit.sh b/Tools/machines/lumi-csc/submit.sh new file mode 100644 index 00000000000..c421983397f --- /dev/null +++ b/Tools/machines/lumi-csc/submit.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +#SBATCH -A +#SBATCH -J warpx +#SBATCH -o %x-%j.out +#SBATCH -t 00:10:00 +# Early access to the GPU partition +#SBATCH -p eap +#SBATCH --nodes=2 +#SBATCH --ntasks-per-node=8 +#SBATCH --cpus-per-task=1 +#SBATCH --gpus-per-node=8 +#SBATCH --gpu-bind=closest + +export MPICH_GPU_SUPPORT_ENABLED=1 + +# note (12-12-22) +# this environment setting is currently needed on LUMI to work-around a +# known issue with Libfabric +#export FI_MR_CACHE_MAX_COUNT=0 # libfabric disable caching +# or, less invasive: +export FI_MR_CACHE_MONITOR=memhooks # alternative cache monitor + +# note (9-2-22, OLCFDEV-1079) +# this environment setting is needed to avoid that rocFFT writes a cache in +# the home directory, which does not scale. 
+export ROCFFT_RTC_CACHE_PATH=/dev/null + +export OMP_NUM_THREADS=1 +srun ../warpx inputs > outputs From 33abd3da434a08ec7ea3cc5aafdadedc289d0719 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 13 Dec 2022 02:32:46 +0000 Subject: [PATCH 0194/1346] [pre-commit.ci] pre-commit autoupdate (#3564) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pycqa/isort: 5.10.1 → 5.11.1](https://github.com/pycqa/isort/compare/5.10.1...5.11.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a11f4c3e2f4..ec5824a3aa9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -75,7 +75,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: 5.10.1 + rev: 5.11.1 hooks: - id: isort name: isort (python) From 028f9a076b484a358c47daac05102cde50ec7cf6 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 12 Dec 2022 19:26:34 -0800 Subject: [PATCH 0195/1346] CI CodeQL: Filter Third Party (#3563) Filter out third party warnings, i.e., from AMReX, PICSAR, and openPMD. --- .github/codeql/warpx-codeql.yml | 6 ++++++ .github/workflows/codeql.yml | 21 +++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 .github/codeql/warpx-codeql.yml diff --git a/.github/codeql/warpx-codeql.yml b/.github/codeql/warpx-codeql.yml new file mode 100644 index 00000000000..c75a5d0ea54 --- /dev/null +++ b/.github/codeql/warpx-codeql.yml @@ -0,0 +1,6 @@ +name: "WarpX CodeQL config" + +# ignore AMReX, pyAMReX, PICSAR, openPMD et al. 
+# note: not yet suppored, thus doing post-analysis SARIF filtering +paths-ignore: + - build/_deps diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 83fbdb6dd19..53fd59e73bd 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -49,6 +49,7 @@ jobs: - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: + config-file: ./.github/codeql/warpx-codeql.yml languages: ${{ matrix.language }} queries: +security-and-quality @@ -65,3 +66,23 @@ jobs: uses: github/codeql-action/analyze@v2 with: category: "/language:${{ matrix.language }}" + upload: False + output: sarif-results + + - name: filter-sarif + uses: advanced-security/filter-sarif@v1 + with: + patterns: | + -build/_deps/*/* + -build/_deps/*/*/* + -build/_deps/*/*/*/* + -build/_deps/*/*/*/*/* + -build/_deps/*/*/*/*/*/* + -build/_deps/*/*/*/*/*/*/* + input: sarif-results/${{ matrix.language }}.sarif + output: sarif-results/${{ matrix.language }}.sarif + + - name: Upload SARIF + uses: github/codeql-action/upload-sarif@v2 + with: + sarif_file: sarif-results/${{ matrix.language }}.sarif From 269c2c5af3bee9ffcf43a9b172a465b5a65c593b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 12 Dec 2022 21:12:25 -0800 Subject: [PATCH 0196/1346] CodeQL Filter: One Level Deeper --- .github/workflows/codeql.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 53fd59e73bd..ee436dc9c4b 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -79,6 +79,7 @@ jobs: -build/_deps/*/*/*/*/* -build/_deps/*/*/*/*/*/* -build/_deps/*/*/*/*/*/*/* + -build/_deps/*/*/*/*/*/*/*/* input: sarif-results/${{ matrix.language }}.sarif output: sarif-results/${{ matrix.language }}.sarif From ad0dd5c34047df4eb1ca24f136463968fe218829 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 13 Dec 2022 10:23:39 -0800 Subject: [PATCH 0197/1346] Release 22.12 (#3565) * AMReX: 22.12 * PICSAR: 22.12 * WarpX: 22.12 --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- LICENSE.txt | 2 +- Python/setup.py | 2 +- Regression/WarpX-GPU-tests.ini | 2 +- Regression/WarpX-tests.ini | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/PICSAR.cmake | 4 ++-- run_test.sh | 2 +- setup.py | 2 +- 11 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 5a06bf79395..fadc7853a9e 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -110,7 +110,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd amrex && git checkout --detach 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 && cd - + cd amrex && git checkout --detach 22.12 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_PSATD=TRUE USE_CCACHE=TRUE -j 2 build_nvhpc21-11-nvcc: diff --git a/CMakeLists.txt b/CMakeLists.txt index 3633ee43321..834cb221450 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.20.0) -project(WarpX VERSION 22.11) +project(WarpX VERSION 22.12) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index a26c8f4db46..775b86355e8 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -73,9 +73,9 @@ # built documents. # # The short X.Y version. 
-version = u'22.11' +version = u'22.12' # The full version, including alpha/beta/rc tags. -release = u'22.11' +release = u'22.12' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/LICENSE.txt b/LICENSE.txt index 16647b93187..61fbae2ffab 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -WarpX v22.11 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. +WarpX v22.12 Copyright (c) 2018-2022, The Regents of the University of California, through Lawrence Berkeley National Laboratory, and Lawrence Livermore National Security, LLC, for the operation of Lawrence Livermore National Laboratory (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/Python/setup.py b/Python/setup.py index 57fca38ad72..5f631c0f717 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -54,7 +54,7 @@ package_data = {} setup(name = 'pywarpx', - version = '22.11', + version = '22.12', packages = ['pywarpx'], package_dir = {'pywarpx': 'pywarpx'}, description = """Wrapper of WarpX""", diff --git a/Regression/WarpX-GPU-tests.ini b/Regression/WarpX-GPU-tests.ini index 40525890d30..98355a120f4 100644 --- a/Regression/WarpX-GPU-tests.ini +++ b/Regression/WarpX-GPU-tests.ini @@ -60,7 +60,7 @@ emailBody = Check https://ccse.lbl.gov/pub/GpuRegressionTesting/WarpX/ for more [AMReX] dir = /home/regtester/git/amrex/ -branch = 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 +branch = 22.12 [source] dir = /home/regtester/git/WarpX diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index 57930ea44ba..b0b6b4d2eb3 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -59,7 +59,7 @@ emailBody = Check https://ccse.lbl.gov/pub/RegressionTesting/WarpX/ for more det [AMReX] dir = /home/regtester/AMReX_RegTesting/amrex/ -branch = 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 +branch = 22.12 [source] dir = /home/regtester/AMReX_RegTesting/warpx diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index dfe5b27d175..096b26acbb2 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -226,7 +226,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 22.11 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) + find_package(AMReX 22.12 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_DIM} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} TINYP LSOLVERS) message(STATUS "AMReX: Found version '${AMReX_VERSION}'") endif() endmacro() @@ -240,7 +240,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "4d6413c45fa0e1aa6f366a02d75a9e2382c73850" +set(WarpX_amrex_branch "22.12" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git 
a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index 600f5c128cb..3f1bb0f8968 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -82,7 +82,7 @@ function(find_picsar) #message(STATUS "PICSAR: Using version '${PICSAR_VERSION}'") else() # not supported by PICSAR (yet) - #find_package(PICSAR 22.11 CONFIG REQUIRED QED) + #find_package(PICSAR 22.12 CONFIG REQUIRED QED) #message(STATUS "PICSAR: Found version '${PICSAR_VERSION}'") message(FATAL_ERROR "PICSAR: Cannot be used as externally installed " "library yet. " @@ -103,7 +103,7 @@ if(WarpX_QED) set(WarpX_picsar_repo "https://github.com/ECP-WarpX/picsar.git" CACHE STRING "Repository URI to pull and build PICSAR from if(WarpX_picsar_internal)") - set(WarpX_picsar_branch "4252e567089fce30d2a3a82d78998e8d3d8220c2" + set(WarpX_picsar_branch "006f65f332681f13de47c16fbd41c11f1e346764" CACHE STRING "Repository branch for WarpX_picsar_repo if(WarpX_picsar_internal)") diff --git a/run_test.sh b/run_test.sh index ff31025e6ae..b64d7ae5c01 100755 --- a/run_test.sh +++ b/run_test.sh @@ -71,7 +71,7 @@ python3 -m pip install --upgrade -r warpx/Regression/requirements.txt # Clone AMReX and warpx-data git clone https://github.com/AMReX-Codes/amrex.git -cd amrex && git checkout --detach 4d6413c45fa0e1aa6f366a02d75a9e2382c73850 && cd - +cd amrex && git checkout --detach 22.12 && cd - # warpx-data contains various required data sets git clone --depth 1 https://github.com/ECP-WarpX/warpx-data.git diff --git a/setup.py b/setup.py index 54959d9eda5..a5272377faa 100644 --- a/setup.py +++ b/setup.py @@ -272,7 +272,7 @@ def build_extension(self, ext): setup( name='pywarpx', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version = '22.11', + version = '22.12', packages = ['pywarpx'], package_dir = {'pywarpx': 'Python/pywarpx'}, author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.', From 6e2fec794e708b5b1b218c7a35115f60fcffcb12 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 13 Dec 2022 13:55:47 -0800 Subject: [PATCH 0198/1346] Typo: Fewer Ranks (Perf. Hint) (#3569) * Typo: Fewer Ranks (Perf. Hint) * fix warning message (space) Co-authored-by: lucafedeli88 --- Source/Initialization/WarpXInitData.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 32cd11dfd3c..2c93c163083 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1043,8 +1043,8 @@ WarpX::PerformanceHints () << "each GPU's memory sufficiently. 
If you do not rely on dynamic " << "load-balancing, then one large box per GPU is ideal.\n" #endif - << "Consider decreasing the amr.blocking_factor and" - << "amr.max_grid_size parameters and/or using less MPI ranks.\n" + << "Consider decreasing the amr.blocking_factor and " + << "amr.max_grid_size parameters and/or using fewer MPI ranks.\n" << " More information:\n" << " https://warpx.readthedocs.io/en/latest/usage/workflows/parallelization.html\n"; From b1fef21cfee3a8a8f9fd668f477af7dfacb6c14c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 13 Dec 2022 15:35:12 -0800 Subject: [PATCH 0199/1346] Fix Syntax in .zenodo.json follow-up to last commit updating contributors --- .zenodo.json | 1 + 1 file changed, 1 insertion(+) diff --git a/.zenodo.json b/.zenodo.json index 7d5505c9849..1b3e152a39e 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -181,6 +181,7 @@ ], "contributors": [ { + "type": "Other", "affiliation": "Lawrence Berkeley National Laboratory", "name": "Weber, Gunther H.", "orcid": "0000-0002-1794-1398" From 9782feb22784b6f79a773c66e20c890aacf7ac1d Mon Sep 17 00:00:00 2001 From: David Grote Date: Thu, 15 Dec 2022 15:42:00 -0800 Subject: [PATCH 0200/1346] Allow surface flux injection from the domain boundary (#3554) * Fix to AddPlasmaFlux to allow flux surface to be on the domain boundary * Update FluxInjection3D.json since the particle positions changed * Update comments in AddPlasmaFlux --- .../analysis_flux_injection_3d.py | 10 +++++++--- Examples/Tests/flux_injection/inputs_3d | 6 +++--- .../benchmarks_json/FluxInjection3D.json | 8 ++++---- Source/Initialization/InjectorPosition.H | 20 ++++++++++++++++++- .../Particles/PhysicalParticleContainer.cpp | 17 +++++++++------- Source/Utils/ParticleUtils.H | 16 +++++++++++++++ 6 files changed, 59 insertions(+), 18 deletions(-) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py index 804cf95eb48..048ef70f9cc 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py @@ -57,8 +57,12 @@ def gaussian_dist(u, u_th): return 1./((2*np.pi)**.5*u_th) * np.exp(-u**2/(2*u_th**2) ) def gaussian_flux_dist(u, u_th, u_m): - normalization_factor = u_th**2 * np.exp(-u_m**2/(2*u_th**2)) + (np.pi/2)**.5*u_m*u_th * (1 + erf(u_m/(2**.5*u_th))) - return 1./normalization_factor * np.where( u>0, u * np.exp(-(u-u_m)**2/(2*u_th**2)), 0 ) + au_m = np.abs(u_m) + normalization_factor = u_th**2 * np.exp(-au_m**2/(2*u_th**2)) + (np.pi/2)**.5*au_m*u_th * (1 + erf(au_m/(2**.5*u_th))) + result = 1./normalization_factor * np.where( u>0, u * np.exp(-(u-au_m)**2/(2*u_th**2)), 0 ) + if u_m < 0.: + result = result[::-1] + return result def compare_gaussian(u, w, u_th, label=''): du = (hist_range[1]-hist_range[0])/hist_bins @@ -103,7 +107,7 @@ def compare_gaussian_flux(u, w, u_th, u_m, label=''): uz = ad['proton','particle_momentum_z'].to_ndarray()/(m_p*c) w = ad['proton', 'particle_weight'].to_ndarray() -compare_gaussian_flux(ux, w, u_th=0.1, u_m=0.05, label='u_x') +compare_gaussian_flux(ux, w, u_th=0.1, u_m=-0.05, label='u_x') compare_gaussian(uy, w, u_th=0.1, label='u_y') compare_gaussian(uz, w, u_th=0.1, label='u_z') plt.legend(loc=0) diff --git a/Examples/Tests/flux_injection/inputs_3d b/Examples/Tests/flux_injection/inputs_3d index f7cb807160c..e3de9dc3660 100644 --- a/Examples/Tests/flux_injection/inputs_3d +++ b/Examples/Tests/flux_injection/inputs_3d @@ -35,7 +35,7 @@ electron.charge = -q_e 
electron.mass = m_e electron.injection_style = NFluxPerCell electron.num_particles_per_cell = 100 -electron.surface_flux_pos = -1. +electron.surface_flux_pos = -4. electron.flux_normal_axis = y electron.flux_direction = +1 electron.profile = constant @@ -50,9 +50,9 @@ proton.charge = +q_e proton.mass = m_p proton.injection_style = NFluxPerCell proton.num_particles_per_cell = 100 -proton.surface_flux_pos = 1. +proton.surface_flux_pos = 4. proton.flux_normal_axis = x -proton.flux_direction = +1 +proton.flux_direction = -1 proton.profile = constant proton.density = 1. proton.momentum_distribution_type = gaussianflux diff --git a/Regression/Checksum/benchmarks_json/FluxInjection3D.json b/Regression/Checksum/benchmarks_json/FluxInjection3D.json index 953fdb64405..b2b3733737e 100644 --- a/Regression/Checksum/benchmarks_json/FluxInjection3D.json +++ b/Regression/Checksum/benchmarks_json/FluxInjection3D.json @@ -2,9 +2,9 @@ "electron": { "particle_momentum_x": 1.1192116199394354e-18, "particle_momentum_y": 2.238114590066897e-18, - "particle_momentum_z": 1.1156457989239728e-18, + "particle_momentum_z": 1.1156457989239732e-18, "particle_position_x": 102495.14197173176, - "particle_position_y": 34752.73800291744, + "particle_position_y": 188132.22608016344, "particle_position_z": 102423.13701045913, "particle_weight": 8.959999999999998e-07 }, @@ -13,9 +13,9 @@ "particle_momentum_x": 3.835423016604918e-15, "particle_momentum_y": 2.0468371931479925e-15, "particle_momentum_z": 2.055186547721331e-15, - "particle_position_x": 66743.84539580689, + "particle_position_x": 189256.1546041931, "particle_position_y": 102293.00576740496, "particle_position_z": 102314.93877691089, "particle_weight": 8.959999999999998e-07 } -} \ No newline at end of file +} diff --git a/Source/Initialization/InjectorPosition.H b/Source/Initialization/InjectorPosition.H index 0595909795c..012ea0ffc19 100644 --- a/Source/Initialization/InjectorPosition.H +++ b/Source/Initialization/InjectorPosition.H @@ -175,7 +175,11 @@ struct InjectorPosition }; } - // bool: whether position specified is within bounds. 
+ /* \brief Flags whether the point (x, y, z) is inside the plasma region + * or on the lower boundary + * \param x, y, z the point to check + * \returns bool flag + */ AMREX_GPU_HOST_DEVICE bool insideBounds (amrex::Real x, amrex::Real y, amrex::Real z) const noexcept @@ -185,6 +189,20 @@ struct InjectorPosition z < zmax and z >= zmin); } + /* \brief Flags whether the point (x, y, z) is inside the plasma region + * or on the lower or upper boundary + * \param x, y, z the point to check + * \returns bool flag + */ + AMREX_GPU_HOST_DEVICE + bool + insideBoundsInclusive (amrex::Real x, amrex::Real y, amrex::Real z) const noexcept + { + return (x <= xmax and x >= xmin and + y <= ymax and y >= ymin and + z <= zmax and z >= zmin); + } + // bool: whether the region defined by lo and hi overaps with the plasma region AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index cbb0a693203..18cf1716295 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -32,6 +32,7 @@ #include "Particles/SpeciesPhysicalProperties.H" #include "Particles/WarpXParticleContainer.H" #include "Utils/Parser/ParserUtils.H" +#include "Utils/ParticleUtils.H" #include "Utils/Physics/IonizationEnergiesTable.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -1682,29 +1683,31 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) pu.y *= PhysConst::c; pu.z *= PhysConst::c; + // The containsInclusive is used to allow the case of the flux surface + // being on the boundary of the domain. After the UpdatePosition below, + // the particles will be within the domain. #if defined(WARPX_DIM_3D) - if (!tile_realbox.contains(XDim3{ppos.x,ppos.y,ppos.z})) { + if (!ParticleUtils::containsInclusive(tile_realbox, XDim3{ppos.x,ppos.y,ppos.z})) { p.id() = -1; continue; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::ignore_unused(k); - if (!tile_realbox.contains(XDim3{ppos.x,ppos.z,0.0_prt})) { + if (!ParticleUtils::containsInclusive(tile_realbox, XDim3{ppos.x,ppos.z,0.0_prt})) { p.id() = -1; continue; } #else amrex::ignore_unused(j,k); - if (!tile_realbox.contains(XDim3{ppos.z,0.0_prt,0.0_prt})) { + if (!ParticleUtils::containsInclusive(tile_realbox, XDim3{ppos.z,0.0_prt,0.0_prt})) { p.id() = -1; continue; } #endif // Lab-frame simulation - // If the particle is not within the species's - // xmin, xmax, ymin, ymax, zmin, zmax, go to - // the next generated particle. - if (!inj_pos->insideBounds(ppos.x, ppos.y, ppos.z)) { + // If the particle's initial position is not within or on the species's + // xmin, xmax, ymin, ymax, zmin, zmax, go to the next generated particle. + if (!inj_pos->insideBoundsInclusive(ppos.x, ppos.y, ppos.z)) { p.id() = -1; continue; } diff --git a/Source/Utils/ParticleUtils.H b/Source/Utils/ParticleUtils.H index b9d8aa0ecc6..a997412f691 100644 --- a/Source/Utils/ParticleUtils.H +++ b/Source/Utils/ParticleUtils.H @@ -169,6 +169,22 @@ namespace ParticleUtils { uy = y * vp; uz = z * vp; } + + /* \brief Determines whether the point is within the tilebox, inclusive of the boundaries. + * Note that this routine is needed since tilebox.contains excludes the boundaries. 
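// Illustrative sketch (not part of this patch; it assumes a hypothetical
// tile spanning [0,1] along each axis): a particle generated exactly on the
// upper face shows the difference between the two checks used here,
//
//     amrex::XDim3 const on_face{1._rt, 0.5_rt, 0.5_rt};
//     tile_realbox.contains(on_face);                          // false: boundaries excluded
//     ParticleUtils::containsInclusive(tile_realbox, on_face); // true:  boundaries included
//
// and insideBoundsInclusive() below plays the analogous role for the
// user-specified species bounds (xmin/xmax, ymin/ymax, zmin/zmax).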
+ * \param[in] tilebox The tilebox being checked + * \param[in] point The point being checked + * \result true if the point with within the boundary, otherwise false + */ + AMREX_GPU_HOST_DEVICE AMREX_INLINE + bool containsInclusive (amrex::RealBox const& tilebox, amrex::XDim3 const point) { + const auto xlo = tilebox.lo(); + const auto xhi = tilebox.hi(); + return AMREX_D_TERM((xlo[0] <= point.x) && (point.x <= xhi[0]), + && (xlo[1] <= point.y) && (point.y <= xhi[1]), + && (xlo[2] <= point.z) && (point.z <= xhi[2])); + } + } #endif // WARPX_PARTICLE_UTILS_H_ From cab21ead1b5140e9f8a9374fa0991fd07d444ae0 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 15 Dec 2022 16:58:36 -0800 Subject: [PATCH 0201/1346] Refactor update of auxiliary data for electrostatic solver (#3573) * Refactor update of auxiliary data for electrostatic solver * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Source/Evolve/WarpXEvolve.cpp | 8 ++++---- Source/FieldSolver/ElectrostaticSolver.cpp | 6 ------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 8abfa0e7f9b..ed70cec31a4 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -125,9 +125,9 @@ WarpX::Evolve (int numsteps) // Not called at each iteration, so exchange all guard cells FillBoundaryE(guard_cells.ng_alloc_EB); FillBoundaryB(guard_cells.ng_alloc_EB); - UpdateAuxilaryData(); - FillBoundaryAux(guard_cells.ng_UpdateAux); } + UpdateAuxilaryData(); + FillBoundaryAux(guard_cells.ng_UpdateAux); // on first step, push p by -0.5*dt for (int lev = 0; lev <= finest_level; ++lev) { @@ -154,9 +154,9 @@ WarpX::Evolve (int numsteps) // TODO Remove call to FillBoundaryAux before UpdateAuxilaryData? if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) FillBoundaryAux(guard_cells.ng_UpdateAux); - UpdateAuxilaryData(); - FillBoundaryAux(guard_cells.ng_UpdateAux); } + UpdateAuxilaryData(); + FillBoundaryAux(guard_cells.ng_UpdateAux); } // Run multi-physics modules: diff --git a/Source/FieldSolver/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolver.cpp index c227506f6bd..b352bacfa24 100644 --- a/Source/FieldSolver/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolver.cpp @@ -89,12 +89,6 @@ WarpX::ComputeSpaceChargeField (bool const reset_fields) AddBoundaryField(); } } - // Transfer fields from 'fp' array to 'aux' array. - // This is needed when using momentum conservation - // since they are different arrays in that case. - UpdateAuxilaryData(); - FillBoundaryAux(guard_cells.ng_UpdateAux); - } /* Compute the potential `phi` by solving the Poisson equation with the From 1362124f1673fcca76b3ad4bed9df484aa11f57d Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Fri, 16 Dec 2022 09:37:18 -0800 Subject: [PATCH 0202/1346] Docs: add ``photo_species`` input param and fix typo (#3575) * Docs: add photo_species input param and fix typo This PR makes the following changes to the _Input Parameters_ page of the documentation: - Adds the input parameter ``particles.photon_species`` to the Particle Initialization section. 
- Fixes a typo in the section _Lookup tables and other settings for QED modules_ under the bullet point about ``qed_qs.lookup_table_mode`` (from ``qed_bw.save_table_in`` to ``qed_qs.save_table_in``) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index db7e5101cef..bb82c28b5b3 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -546,6 +546,10 @@ Particle initialization The name of each species. This is then used in the rest of the input deck ; in this documentation we use `` as a placeholder. +* ``particles.photon_species`` (`strings`, separated by spaces) + List of species that are photon species, if any. + **This is required when compiling with QED=TRUE.** + * ``particles.use_fdtd_nci_corr`` (`0` or `1`) optional (default `0`) Whether to activate the FDTD Numerical Cherenkov Instability corrector. Not currently available in the RZ configuration. @@ -2637,7 +2641,7 @@ Lookup tables store pre-computed values for functions used by the QED modules. * ``qed_qs.tab_em_frac_min`` (`float`): minimum value to be considered for the second axis of lookup table 2 - * ``qed_bw.save_table_in`` (`string`): where to save the lookup table + * ``qed_qs.save_table_in`` (`string`): where to save the lookup table * ``load``: a lookup table is loaded from a pre-generated binary file. The following parameter must be specified: From e26f3e7ed7438e8e3e53a9f01c92bf18757fa345 Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Fri, 16 Dec 2022 15:36:23 -0800 Subject: [PATCH 0203/1346] Add a new section on domain decomposition to the docs. (#3572) * Add a new section on domain decomposition to the docs. * Remove old parallelization section and migrate more info --- Docs/source/index.rst | 1 + Docs/source/usage/domain_decomposition.rst | 84 ++++++++++++++++++ Docs/source/usage/parameters.rst | 2 +- Docs/source/usage/workflows.rst | 1 - .../usage/workflows/parallelization.rst | 88 ------------------- 5 files changed, 86 insertions(+), 90 deletions(-) create mode 100644 Docs/source/usage/domain_decomposition.rst delete mode 100644 Docs/source/usage/workflows/parallelization.rst diff --git a/Docs/source/index.rst b/Docs/source/index.rst index d34bd6788de..c1755f44f4e 100644 --- a/Docs/source/index.rst +++ b/Docs/source/index.rst @@ -75,6 +75,7 @@ Usage :hidden: usage/how_to_run + usage/domain_decomposition usage/parameters usage/python usage/examples diff --git a/Docs/source/usage/domain_decomposition.rst b/Docs/source/usage/domain_decomposition.rst new file mode 100644 index 00000000000..d3df5b78f5c --- /dev/null +++ b/Docs/source/usage/domain_decomposition.rst @@ -0,0 +1,84 @@ +.. _usage_domain_decomposition: + +Domain Decomposition +==================== + +WarpX relies on a spatial domain decomposition for MPI parallelization. It provides two different ways for users to specify this decomposition, a `simple` way recommended for most users, and a `flexible` way recommended if more control is desired. The `flexible` method is required for dynamic load balancing to be useful. + +1. Simple Method +---------------- + +The first and simplest method is to provide the ``warpx.numprocs = nx ny nz`` parameter, either at the command line or somewhere in your inputs deck. 
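For instance, a hypothetical 3D run on 16 MPI ranks could set ``warpx.numprocs = 2 2 4`` (2 x 2 x 4 = 16).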
In this case, WarpX will split up the overall problem domain into exactly the specified number of subdomains, or `Boxes `__ in the AMReX terminology, with the data defined on each `Box` having its own guard cells. The product of ``nx, ny, and nz`` must be exactly the desired number of MPI ranks. Note that, because there is exactly one `Box` per MPI rank when run this way, dynamic load balancing will not be possible, as there is no way of shifting `Boxes` around to achieve a more even load. This is the approach recommended for new users as it is the easiest to use. + +.. note:: + + If ``warpx.numprocs`` is *not* specified, WarpX will fall back on using the ``amr.max_grid_size`` and ``amr.blocking_factor`` parameters, described below. + +2. More General Method +---------------------- + +The second way of specifying the domain decomposition provides greater flexibility and enables dynamic load balancing, but is not as easy to use. In this method, the user specifies inputs parameters ``amr.max_grid_size`` and ``amr.blocking_factor``, which can be thought of as the maximum and minimum allowed `Box` sizes. Now, the overall problem domain (specified by the ``amr.ncell`` input parameter) will be broken up into some number of `Boxes` with the specified characteristics. By default, WarpX will make the `Boxes` as big as possible given the constraints. + +For example, if ``amr.ncell = 768 768 768``, ``amr.max_grid_size = 128``, and ``amr.blocking_factor = 32``, then AMReX will make 6 `Boxes` in each direction, for a total of 216 (the ``amr.blocking_factor`` does not factor in yet; however, see the section on mesh refinement below). If this problem is then run on 54 MPI ranks, there will be 4 boxes per rank initially. This problem could be run on as many as 216 ranks without performing any splitting. + +.. note:: + + Both ``amr.ncell`` and ``amr.max_grid_size`` must be divisible by ``amr.blocking_factor``, in each direction. + +When WarpX is run using this approach to domain decomposition, the number of MPI ranks does not need to be exactly equal to the number of ``Boxes``. Note also that if you run WarpX with more MPI ranks than there are boxes on the base level, WarpX will attempt to split the available ``Boxes`` until there is at least one for each rank to work on; this may cause it violate the constraints of ``amr.max_grid_size`` and ``amr.blocking_factor``. + +.. note:: + + The AMReX documentation on `Grid Creation `__ may also be helpful. + +You can also specify a separate `max_grid_size` and `blocking_factor` for each direction, using the parameters ``amr.max_grid_size_x``, ``amr.max_grid_size_y``, etc... . This allows you to request, for example, a "pencil" type domain decomposition that is long in one direction. Note that, in RZ geometry, the parameters corresponding to the longitudinal direction are ``amr.max_grid_size_y`` and ``amr.blocking_factor_y``. + +3. Performance Considerations +----------------------------- + +In terms of performance, in general there is a trade off. Having many small boxes provides flexibility in terms of load balancing; however, the cost is increased time spent in communication due to surface-to-volume effects and increased kernel launch overhead when running on the GPUs. The ideal number of boxes per rank depends on how important dynamic load balancing is on your problem. If your problem is intrinsically well-balanced, like in a uniform plasma, then having a few, large boxes is best. 
But, if the problem is non-uniform and achieving a good load balance is critical for performance, having more, smaller `Boxes` can be worth it. In general, we find that running with something in the range of 4-8 `Boxes` per process is a good compromise for most problems. + +.. note:: + + For specific information on the dynamic load balancer used in WarpX, visit the + `Load Balancing `__ + page on the AMReX documentation. + +The best values for these parameters can also depend strongly on a number of +numerical parameters: + +* Algorithms used (Maxwell/spectral field solver, filters, order of the + particle shape factor) + +* Number of guard cells (that depends on the particle shape factor and + the type and order of the Maxwell solver, the filters used, `etc.`) + +* Number of particles per cell, and the number of species + +and the details of the on-node parallelization and computer architecture used for the run: + +* GPU or CPU + +* Number of OpenMP threads + +* Amount of high-bandwidth memory. + +Because these parameters put additional constraints on the domain size for a +simulation, it can be cumbersome to calculate the number of cells and the +physical size of the computational domain for a given resolution. This +:download:`Python script <../../../Tools/DevUtils/compute_domain.py>` does it +automatically. + +When using the RZ spectral solver, the values of ``amr.max_grid_size`` and ``amr.blocking_factor`` are constrained since the solver +requires that the full radial extent be within a each block. +For the radial values, any input is ignored and the max grid size and blocking factor are both set equal to the number of radial cells. +For the longitudinal values, the blocking factor has a minimum size of 8, allowing the computational domain of each block to be large enough relative to the guard cells for reasonable performance, but the max grid size and blocking factor must also be small enough so that there will be at least one block per processor. +If max grid size and/or blocking factor are too large, they will be silently reduced as needed. +If there are too many processors so that there is not enough blocks for the number processors, WarpX will abort. + +4. Mesh Refinement +------------------ + +With mesh refinement, the above picture is more complicated, as in general the number of boxes can not be predicted at the start of the simulation. The decomposition of the base level will proceed as outlined above. The refined region, however, will be covered by some number of Boxes whose sizes are consistent with ``amr.max_grid_size`` and ``amr.blocking_factor``. With mesh refinement, the blocking factor is important, as WarpX may decide to use `Boxes` smaller than ``amr.max_grid_size`` so as not to over-refine outside of the requested area. Note that you can specify a vector of values to make these parameters vary by level. For example, ``amr.max_grid_size = 128 64`` will make the max grid size be 128 on level 0 and 64 on level 1. + +In general, the above performance considerations apply - varying these values such that there are 4-8 Boxes per rank on each level is a good guideline. diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index bb82c28b5b3..36855a52dde 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2067,7 +2067,7 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a Reduce size of the field output by this ratio in each dimension. 
(This is done by averaging the field over 1 or 2 points along each direction, depending on the staggering). If ``blocking_factor`` and ``max_grid_size`` are used for the domain decomposition, as detailed in - the :ref:`parallelization ` section, ``coarsening_ratio`` should be an integer + the :ref:`domain decomposition ` section, ``coarsening_ratio`` should be an integer divisor of ``blocking_factor``. If ``warpx.numprocs`` is used instead, the total number of cells in a given dimension must be a multiple of the ``coarsening_ratio`` multiplied by ``numprocs`` in that dimension. diff --git a/Docs/source/usage/workflows.rst b/Docs/source/usage/workflows.rst index 74999c9df13..6fefcd9a243 100644 --- a/Docs/source/usage/workflows.rst +++ b/Docs/source/usage/workflows.rst @@ -8,7 +8,6 @@ This section collects typical user workflows and best practices for WarpX. .. toctree:: :maxdepth: 2 - workflows/parallelization workflows/debugging workflows/libensemble workflows/plot_timestep_duration diff --git a/Docs/source/usage/workflows/parallelization.rst b/Docs/source/usage/workflows/parallelization.rst deleted file mode 100644 index baef9385855..00000000000 --- a/Docs/source/usage/workflows/parallelization.rst +++ /dev/null @@ -1,88 +0,0 @@ -.. _parallelization_warpx: - -Parallelization in WarpX -======================== - -When running a simulation, the domain is split into independent -rectangular sub-domains (called **grids**). This is the way AMReX, a core -component of WarpX, handles parallelization and/or mesh refinement. Furthermore, -this decomposition makes load balancing possible: each MPI rank typically computes -a few grids, and a rank with a lot of work can transfer one or several **grids** -to their neighbors. - -A user -does not specify this decomposition explicitly. Instead, the user gives hints to -the code, and the actual decomposition is determined at runtime, depending on -the parallelization. The main user-defined parameters are -``amr.max_grid_size`` and ``amr.blocking_factor``. - -AMReX ``max_grid_size`` and ``blocking_factor`` ------------------------------------------------ - -* ``amr.max_grid_size`` is the maximum number of cells per **grid** along each - direction (default ``amr.max_grid_size=32`` in 3D). - -* ``amr.blocking_factor``: is the minimum number of cells per **grid** along each - direction (default ``amr.blocking_factor=8``). - Note that both the domain (at each level) and ``max_grid_size`` must be divisible by ``blocking_factor``. - - .. note:: - - You can use the parameters above if you want the same number of cells in all directions. - Or you can set ``amr.max_grid_size_x``, ``amr.max_grid_size_y`` and ``amr.max_grid_size_z``; -  ``amr.blocking_factor_x``, ``amr.blocking_factor_y`` and ``amr.blocking_factor_z`` to different numbers of cells. - Note that, in RZ geometry, the parameters corresponding to the longitudinal direction are ``amr.max_grid_size_y`` and ``amr.blocking_factor_y``. - -The total number of **grids** is determined using those two restrictions and the number of -ranks used to run the simulation. You can visit `AMReX `_ -documentation for more information on the two parameters. - -These parameters can have a dramatic impact on the code performance. Each -**grid** in the decomposition is surrounded by guard cells, thus increasing the -amount of data, computation and communication. Hence having a too small -``max_grid_size``, may ruin the code performance. 
- -On the other hand, a too-large ``max_grid_size`` is likely to result in a single -grid per MPI rank, thus preventing load balancing. By setting these two -parameters, the user wants to give some flexibility to the code while avoiding -pathological behaviors. - -For more information on this decomposition, see the -`Gridding and Load Balancing `__ -page on AMReX documentation. - -For specific information on the dynamic load balancer used in WarpX, visit the -`Load Balancing `__ -page on the AMReX documentation. - -The best values for these parameters strongly depends on a number of parameters, -among which numerical parameters: - -* Algorithms used (Maxwell/spectral field solver, filters, order of the - particle shape factor) - -* Number of guard cells (that depends on the particle shape factor and - the type and order of the Maxwell solver, the filters used, `etc.`) - -* Number of particles per cell, and the number of species - -and MPI decomposition and computer architecture used for the run: - -* GPU or CPU - -* Number of OpenMP threads - -* Amount of high-bandwidth memory. - -Because these parameters put additional constraints on the domain size for a -simulation, it can be cumbersome to calculate the number of cells and the -physical size of the computational domain for a given resolution. This -:download:`Python script <../../../../Tools/DevUtils/compute_domain.py>` does it -automatically. - -When using the RZ spectral solver, the values of ``amr.max_grid_size`` and ``amr.blocking_factor`` are constrained since the solver -requires that the full radial extent be within a each block. -For the radial values, any input is ignored and the max grid size and blocking factor are both set equal to the number of radial cells. -For the longitudinal values, the blocking factor has a minimum size of 8, allowing the computational domain of each block to be large enough relative to the guard cells for reasonable performance, but the max grid size and blocking factor must also be small enough so that there will be at least one block per processor. -If max grid size and/or blocking factor are too large, they will be silently reduced as needed. -If there are too many processors so that there is not enough blocks for the number processors, WarpX will abort. From 4913d07348eeed38263745a47ffe3a9f9a680076 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 19 Dec 2022 13:12:52 +0100 Subject: [PATCH 0204/1346] Docs: rst Formatting Issue in GB Paper --- Docs/source/highlights.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index c18b91cf1ba..97fde2405ce 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -15,9 +15,9 @@ Plasma-Based Acceleration Scientific works in laser-plasma and beam-plasma acceleration. #. Fedeli L, Huebl A, Boillod-Cerneux F, Clark T, Gott K, Hillairet C, Jaure S, Leblanc A, Lehe R, Myers A, Piechurski C, Sato M, Zaim N, Zhang W, Vay J-L, Vincenti H. - **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. - *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. 
- https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc + **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. + *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. + https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc #. Wang J, Zeng M, Li D, Wang X, Lu W, Gao J. **Injection induced by coaxial laser interference in laser wakefield accelerators**. @@ -41,9 +41,9 @@ Laser-Plasma Interaction Scientific works in laser-ion acceleration and laser-matter interaction. #. Fedeli L, Huebl A, Boillod-Cerneux F, Clark T, Gott K, Hillairet C, Jaure S, Leblanc A, Lehe R, Myers A, Piechurski C, Sato M, Zaim N, Zhang W, Vay J-L, Vincenti H. - **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. - *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. - https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc + **Pushing the Frontier in the Design of Laser-Based Electron Accelerators with Groundbreaking Mesh-Refined Particle-In-Cell Simulations on Exascale-Class Supercomputers**. + *SC22: International Conference for High Performance Computing, Networking, Storage and Analysis (SC)*. ISSN:2167-4337, pp. 25-36, Dallas, TX, US, 2022. + https://www.computer.org/csdl/proceedings-article/sc/2022/544400a025/1I0bSKaoECc #. Hakimi S, Obst-Huebl L, Huebl A, Nakamura K, Bulanov SS, Steinke S, Leemans WP, Kober Z, Ostermayr TM, Schenkel T, Gonsalves AJ, Vay J-L, Tilborg Jv, Toth C, Schroeder CB, Esarey E, Geddes CGR. **Laser-solid interaction studies enabled by the new capabilities of the iP2 BELLA PW beamline**. 
From c3eb6ea1efeff3910e8520d94e260d94367d68f7 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 19 Dec 2022 05:43:16 -0800 Subject: [PATCH 0205/1346] Clean up documentation (#3542) * Fix comments for documentation * More fixes to comments for documentation * Fixes in the doc files * Fix typo in parameters.rst --- Docs/source/developers/fields.rst | 4 +- Docs/source/index.rst | 1 - Docs/source/usage/parameters.rst | 2 +- Docs/source/usage/python.rst | 12 ---- Source/BoundaryConditions/WarpX_PEC.H | 2 +- Source/Diagnostics/BTD_Plotfile_Header_Impl.H | 13 ++-- .../BackTransformFunctor.H | 1 + .../BackTransformParticleFunctor.H | 10 ++- .../ComputeParticleDiagFunctor.H | 4 +- .../ComputeDiagFunctors/DivBFunctor.H | 2 + .../FlushFormats/FlushFormatAscent.H | 3 +- .../FlushFormats/FlushFormatPlotfile.H | 1 + Source/Diagnostics/FullDiagnostics.H | 2 +- Source/Diagnostics/WarpXOpenPMD.H | 5 +- Source/Diagnostics/WarpXOpenPMD.cpp | 10 +-- .../FiniteDifferenceSolver.H | 2 +- .../SpectralSolver/SpectralFieldData.H | 5 +- .../SpectralSolver/SpectralSolver.H | 4 -- Source/Laser/LaserProfiles.H | 1 - Source/Parallelization/GuardCellManager.H | 2 + .../BinaryCollision/BinaryCollision.H | 4 +- .../BoschHaleFusionCrossSection.H | 2 + .../Particles/Collision/CollisionHandler.cpp | 1 + .../Particles/Deposition/CurrentDeposition.H | 1 - Source/Particles/Pusher/PushSelector.H | 2 +- Source/Particles/WarpXParticleContainer.H | 5 +- Source/Particles/WarpXParticleContainer.cpp | 12 ++-- Source/Utils/Parser/IntervalsParser.H | 1 + Source/Utils/ParticleUtils.H | 5 +- Source/WarpX.H | 69 ++++++++++--------- Source/ablastr/constant.H | 4 +- 31 files changed, 100 insertions(+), 92 deletions(-) diff --git a/Docs/source/developers/fields.rst b/Docs/source/developers/fields.rst index b40bb750ecf..af834354dcd 100644 --- a/Docs/source/developers/fields.rst +++ b/Docs/source/developers/fields.rst @@ -112,7 +112,9 @@ Bilinear filter The multi-pass bilinear filter (applied on the current density) is implemented in ``Source/Filter/``, and class ``WarpX`` holds an instance of this class in member variable ``WarpX::bilinear_filter``. For performance reasons (to avoid creating too many guard cells), this filter is directly applied in communication routines, see ``WarpX::AddCurrentFromFineLevelandSumBoundary`` above and -.. doxygenfunction:: WarpX::ApplyFilterandSumBoundaryJ +.. doxygenfunction:: WarpX::ApplyFilterJ(const amrex::Vector, 3>> ¤t, const int lev, const int idim) + +.. 
doxygenfunction:: WarpX::SumBoundaryJ(const amrex::Vector, 3>> ¤t, const int lev, const int idim, const amrex::Periodicity &period) Godfrey's anti-NCI filter for FDTD simulations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Docs/source/index.rst b/Docs/source/index.rst index c1755f44f4e..ad9719d90e8 100644 --- a/Docs/source/index.rst +++ b/Docs/source/index.rst @@ -98,7 +98,6 @@ Data Analysis dataanalysis/visit dataanalysis/visualpic dataanalysis/picviewer - dataanalysis/backtransformed_diags dataanalysis/reduced_diags Theory diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 36855a52dde..22f01196a4f 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -766,7 +766,7 @@ Particle initialization * ``maxwell_boltzmann``: Maxwell-Boltzmann distribution that takes a dimensionless temperature parameter :math:`\theta` as an input, where :math:`\theta = \frac{k_\mathrm{B} \cdot T}{m \cdot c^2}`, - "math:`T` is the temperature in Kelvin, :math:`k_\mathrm{B}` is the Boltzmann constant, :math:`c` is the speed of light, and :math:`m` is the mass of the species. + :math:`T` is the temperature in Kelvin, :math:`k_\mathrm{B}` is the Boltzmann constant, :math:`c` is the speed of light, and :math:`m` is the mass of the species. Theta is specified by a combination of ``.theta_distribution_type``, ``.theta``, and ``.theta_function(x,y,z)`` (see below). For values of :math:`\theta > 0.01`, errors due to ignored relativistic terms exceed 1%. Temperatures less than zero are not allowed. diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index f42a5cf577b..d5af350b76e 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -102,10 +102,6 @@ ElectrostaticFieldDiagnostic Lab-frame diagnostics diagnostics are used when running boosted-frame simulations. -LabFrameParticleDiagnostic -"""""""""""""""""""""""""" -.. autoclass:: pywarpx.picmi.LabFrameParticleDiagnostic - LabFrameFieldDiagnostic """"""""""""""""""""""" .. autoclass:: pywarpx.picmi.LabFrameFieldDiagnostic @@ -258,12 +254,6 @@ simulation objects (particles, fields and memory buffers) as well as general pro .. autofunction:: pywarpx.picmi.Simulation.extension.finalize -.. autofunction:: pywarpx.picmi.Simulation.extension.getistep - -.. autofunction:: pywarpx.picmi.Simulation.extension.gett_new - -.. autofunction:: pywarpx.picmi.Simulation.extension.evolve - .. autofunction:: pywarpx.picmi.Simulation.extension.getProbLo .. autofunction:: pywarpx.picmi.Simulation.extension.getProbHi @@ -309,8 +299,6 @@ with scraped particle data. .. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer_size -.. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer_size - .. autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer_structs .. 
autofunction:: pywarpx.picmi.Simulation.extension.get_particle_boundary_buffer diff --git a/Source/BoundaryConditions/WarpX_PEC.H b/Source/BoundaryConditions/WarpX_PEC.H index 7fb9a882128..fe182feb7a5 100644 --- a/Source/BoundaryConditions/WarpX_PEC.H +++ b/Source/BoundaryConditions/WarpX_PEC.H @@ -26,7 +26,7 @@ using namespace amrex; * \param[in] fboundary Value containing boundary type * \param[in] dir direction * - * \param[out] 1 if the boundary type is PEC else 0 + * \returns 1 if the boundary type is PEC else 0 */ AMREX_GPU_DEVICE AMREX_FORCE_INLINE bool is_boundary_PEC (amrex::GpuArray const& fboundary, int dir) { diff --git a/Source/Diagnostics/BTD_Plotfile_Header_Impl.H b/Source/Diagnostics/BTD_Plotfile_Header_Impl.H index a7fc9fd1e48..c4f9d52b485 100644 --- a/Source/Diagnostics/BTD_Plotfile_Header_Impl.H +++ b/Source/Diagnostics/BTD_Plotfile_Header_Impl.H @@ -245,7 +245,8 @@ class BTDSpeciesHeaderImpl { public: /** Constructor. - * \param[in] string containing path of Headerfile + * \param[in] Headerfile_path string containing path of Headerfile + * \param[in] species_name string containing species name */ BTDSpeciesHeaderImpl (std::string const& Headerfile_path, std::string const& species_name); ~BTDSpeciesHeaderImpl () = default; @@ -256,7 +257,7 @@ public: /** Set data Index of the data-file, DATAXXXXX, that the particles belong to*/ void set_DataIndex (const int lev, const int box_id, const int data_index); /** Add new_particles to existing to obtain the total number of particles of the species. - \param[in] new_particles, total particles in the new buffer + \param[in] new_particles total particles in the new buffer */ void AddTotalParticles (const int new_particles) { m_total_particles += new_particles;} /** Increment number of boxes in a box array by 1, with every flush. */ @@ -310,7 +311,7 @@ class BTDParticleDataHeaderImpl { public: /** Constructor. - * \param[in] string containing path of Headerfile + * \param[in] Headerfile_path containing path of Headerfile */ BTDParticleDataHeaderImpl (std::string const& Headerfile_path); /** Destructor */ @@ -326,14 +327,14 @@ public: */ void IncreaseBoxArraySize ( const int add_size) { m_ba_size += add_size;} /** Returns box corresponding to the ith box in the BoxArray, m_ba. - * \param[in] int ibox, index of the box in the BoxArray. + * \param[in] ibox index of the box in the BoxArray. */ amrex::Box ba_box (int ibox) {return m_ba[ibox]; } /** Resize boxArray, m_ba, to size, m_ba_size. */ void ResizeBoxArray () { m_ba.resize(m_ba_size); } /** Set Box indices of the ith-box in Box Array, m_ba, to the new Box, ba_box. - * \param[in] int ibox, index of the ith box in BoxArray, m_ba. - * \param[in] amrex::Box box dimensions corresponding to the ith Fab. + * \param[in] ibox index of the ith box in BoxArray, m_ba. + * \param[in] ba_box dimensions corresponding to the ith Fab. */ void SetBox (int ibox, amrex::Box ba_box) { m_ba.set(ibox, ba_box); } /** Size of BoxArray, m_ba*/ diff --git a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H index 142bb098da8..63dadd1271d 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H @@ -42,6 +42,7 @@ public: and store in destination multifab. 
* \param[in] num_buffers number of user-defined snapshots in the back-transformed lab-frame * \param[in] varnames names of the field-components as defined by the user for back-transformed diagnostics. + * \param[in] varnames_fields base names of field-components for the RZ modes * \param[in] crse_ratio the coarsening ratio for fields */ BackTransformFunctor ( const amrex::MultiFab * const mf_src, const int lev, diff --git a/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H index 39c70553aae..4bb1b0656aa 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformParticleFunctor.H @@ -30,6 +30,7 @@ struct SelectParticles * @param[in] tmp_particle_data temporary particle data * @param[in] current_z_boost current z-position of the slice in boosted frame * @param[in] old_z_boost previous z-position of the slice in boosted frame + * @param[in] a_offset index offset for particles to be selected */ SelectParticles( const WarpXParIter& a_pti, TmpParticles& tmp_particle_data, amrex::Real current_z_boost, amrex::Real old_z_boost, @@ -40,7 +41,8 @@ struct SelectParticles * for Lorentz transformation in obtaining the lab-frame data. The particles that * with positions that correspond to the specific z-slice in boosted frame are selected. * - * @param[in] SrcData particle tile data + * @tparam SrcData type of source data + * @param[in] src SrcData particle tile data * @param[in] i particle index * @return 1 if particles is selected for transformation, else 0 */ @@ -85,6 +87,7 @@ struct LorentzTransformParticles * @param[in] t_boost time in boosted frame * @param[in] dt timestep in boosted-frame * @param[in] t_lab time in lab-frame + * @param[in] a_offset index offset for particles to be transformed */ LorentzTransformParticles ( const WarpXParIter& a_pti, TmpParticles& tmp_particle_data, amrex::Real t_boost, amrex::Real dt, @@ -94,8 +97,8 @@ struct LorentzTransformParticles * \brief Functor call. This method computes the Lorentz-transform for particle * attributes to obtain the lab-frame snapshot data. * - * @param[out] DstData particle tile data that stores the transformed particle data - * @param[in] SrcData particle tile data that is selected for transformation + * @param[out] dst DstData particle tile data that stores the transformed particle data + * @param[in] src SrcData particle tile data that is selected for transformation * @param[in] i_src particle index of the source particles * @param[in] i_dst particle index of the target particles (transformed data). */ @@ -206,6 +209,7 @@ public: * \param[in] z_slice_in_domain if the z-slice at current_z_boost is within the bounds of * the boosted-frame and lab-frame domain. The particles are transformed * only if this value is true. + * \param[in] old_z_boost previous z-position of the slice in boosted frame * \param[in] current_z_boost z co-ordinate of the slice selected in boosted-frame. * \param[in] t_lab current time in lab-frame for snapshot, i_buffer. 
* \param[in] snapshot_full if the current snapshot, with index, i_buffer, is diff --git a/Source/Diagnostics/ComputeDiagFunctors/ComputeParticleDiagFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/ComputeParticleDiagFunctor.H index a76cde936a0..e14e777e066 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/ComputeParticleDiagFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/ComputeParticleDiagFunctor.H @@ -30,9 +30,10 @@ public: * back-transformed diagnostics, that are unused for regular diagnostics. * * \param[in] i_buffer index of the snapshot - * \param[in] z_slice_in_domain if the z-slice at current_z_boost is within the bounds of + * \param[in] ZSliceInDomain if the z-slice at current_z_boost is within the bounds of * the boosted-frame and lab-frame domain. The particles are transformed * only if this value is true. + * \param[in] old_z_boost old z co-ordinate of the slice selected in boosted-frame. * \param[in] current_z_boost z co-ordinate of the slice selected in boosted-frame. * \param[in] t_lab current time in lab-frame for snapshot, i_buffer. * \param[in] snapshot_full if the current snapshot, with index, i_buffer, is @@ -50,6 +51,7 @@ public: } /** Compute particle attributes and store the result in pc_dst particle container. * \param[out] pc_dst output particle container where the result is stored. + * \param[out] totalParticlesInBuffer total number of particles in the container * \param[in] i_buffer snapshot index for which the particle buffer is processed */ virtual void operator () (PinnedMemoryParticleContainer& pc_dst, int &totalParticlesInBuffer, int i_buffer = 0) const = 0; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H index ebc62808cde..a76411bf37b 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H @@ -19,6 +19,8 @@ public: * \param[in] lev level of multifab. * \param[in] crse_ratio for interpolating field values from simulation MultiFabs to the output diagnostic MultiFab, mf_dst. + * \param[in] convertRZmodes2cartesian whether to generate the result in Cartesian coordinates + * (summing over modes) * \param[in] ncomp Number of component of mf_src to cell-center in dst multifab. */ DivBFunctor(const std::array arr_mf_src, const int lev, amrex::IntVect crse_ratio, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H index 01b051373c9..cdf08eaf876 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H @@ -44,11 +44,12 @@ public: bool isLastBTDFlush = false, const amrex::Vector& totalParticlesFlushedAlready = amrex::Vector() ) const override; +#ifdef AMREX_USE_ASCENT /** \brief Do in-situ visualization for particle data. * \param[in] particle_diags Each element of this vector handles output of 1 species. 
+ * \param[out] a_bp_mesh blueprint mesh generated from the container * Only compile if AMREX_USE_ASCENT because we need to pass a conduit class */ -#ifdef AMREX_USE_ASCENT void WriteParticles(const amrex::Vector& particle_diags, conduit::Node& a_bp_mesh) const; #endif diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H index e773f8e2f75..744ef2e4b13 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H @@ -48,6 +48,7 @@ public: /** \brief Write particles data to file. * \param[in] filename name of output directory * \param[in] particle_diags Each element of this vector handles output of 1 species. + * \param[in] isBTD whether this is a back-transformed diagnostic */ void WriteParticles(const std::string& filename, const amrex::Vector& particle_diags, diff --git a/Source/Diagnostics/FullDiagnostics.H b/Source/Diagnostics/FullDiagnostics.H index bf0b8b6a5de..8620af75ed4 100644 --- a/Source/Diagnostics/FullDiagnostics.H +++ b/Source/Diagnostics/FullDiagnostics.H @@ -54,7 +54,7 @@ private: */ void InitializeBufferData ( int i_buffer, int lev ) override; /** Initialize functors that store pointers to the RZ fields requested by the user. - * \parm[in] lev level on which the vector of unique_ptrs to field functors is initialized. + * \param[in] lev level on which the vector of unique_ptrs to field functors is initialized. */ void InitializeFieldFunctorsRZopenPMD (int lev) override; /** Append m_all_field_functors[lev] with all modes of all components of E B and j diff --git a/Source/Diagnostics/WarpXOpenPMD.H b/Source/Diagnostics/WarpXOpenPMD.H index 4d22b295388..aaa0acba88b 100644 --- a/Source/Diagnostics/WarpXOpenPMD.H +++ b/Source/Diagnostics/WarpXOpenPMD.H @@ -106,6 +106,8 @@ public: * @param filetype file backend, e.g. 
"bp" or "h5" * @param operator_type openPMD-api backend operator (compressor) for ADIOS2 * @param operator_parameters openPMD-api backend operator parameters for ADIOS2 + * @param engine_type ADIOS engine for output + * @param engine_parameters map of parameters for the engine * @param fieldPMLdirections PML field solver, @see WarpX::getPMLdirections() */ WarpXOpenPMDPlot (openPMD::IterationEncoding ie, @@ -143,7 +145,7 @@ public: * * @param varnames variable names in each multifab * @param mf multifab for each level - * @param geometry for each level + * @param geom for each level * @param output_levels the finest level to output, <= maxLevel * @param iteration the current iteration or reconstructed labframe station number * @param time the current simulation time in the lab frame @@ -262,6 +264,7 @@ private: * @param[in] write_int_comp The int attribute ids, from WarpX * @param[in] int_comp_names The int attribute names, from WarpX * @param[in] np Number of particles + * @param[in] isBTD whether this is a back-transformed diagnostic */ void SetupRealProperties (ParticleContainer const * pc, openPMD::ParticleSpecies& currSpecies, diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index c5fdcc20522..2cee7db218e 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -1103,8 +1103,8 @@ WarpXOpenPMDPlot::SetConstParticleRecordsEDPIC ( /* * Set up parameter for mesh container using the geometry (from level 0) * - * @param [IN] meshes: openPMD-api mesh container - * @param [IN] full_geom: field geometry + * @param [in] meshes: openPMD-api mesh container + * @param [in] full_geom: field geometry * */ void @@ -1177,9 +1177,9 @@ WarpXOpenPMDPlot::SetupFields ( openPMD::Container< openPMD::Mesh >& meshes, /* * Setup component properties for a field mesh - * @param [IN]: mesh a mesh field - * @param [IN]: full_geom geometry for the mesh - * @param [IN]: mesh_comp a component for the mesh + * @param [in]: mesh a mesh field + * @param [in]: full_geom geometry for the mesh + * @param [in]: mesh_comp a component for the mesh */ void WarpXOpenPMDPlot::SetupMeshComp (openPMD::Mesh& mesh, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index afa731734c7..ed6f918b300 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -101,10 +101,10 @@ class FiniteDifferenceSolver * \param[out] Efield vector of electric field MultiFabs updated at a given level * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] Jfield vector of current density MultiFabs at a given level + * \param[in] edge_lengths length of edges along embedded boundaries * \param[in] dt timestep of the simulation * \param[in] macroscopic_properties contains user-defined properties of the medium. 
*/ - void MacroscopicEvolveE ( std::array< std::unique_ptr, 3>& Efield, std::array< std::unique_ptr, 3> const& Bfield, std::array< std::unique_ptr, 3 > const& Jfield, diff --git a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H index c7848d73120..e863484840e 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralFieldData.H +++ b/Source/FieldSolver/SpectralSolver/SpectralFieldData.H @@ -41,9 +41,12 @@ class SpectralFieldIndex * * \param[in] update_with_rho whether rho is used in the field update equations * \param[in] time_averaging whether the time averaging algorithm is used - * \param[in] do_multi_J whether the multi-J algorithm is used (hence two currents + * \param[in] J_in_time the multi-J algorithm used (hence two currents * computed at the beginning and the end of the time interval * instead of one current computed at half time) + * \param[in] rho_in_time the multi-rho algorithm used (hence two densities + * computed at the beginning and the end of the time interval + * instead of one density computed at half time) * \param[in] dive_cleaning whether to use div(E) cleaning to account for errors in * Gauss law (new field F in the update equations) * \param[in] divb_cleaning whether to use div(B) cleaning to account for errors in diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index da4b9687b86..66fe1b816d5 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -143,10 +143,6 @@ class SpectralSolver * declared in the base class SpectralBaseAlgorithm and defined in its * derived classes, from objects of class SpectralSolver through the private * unique pointer \c algorithm. - * - * \param[in] lev The mesh refinement level - * \param[in,out] current Array of unique pointers to \c MultiFab storing - * the three components of the current density */ void VayDeposition () { diff --git a/Source/Laser/LaserProfiles.H b/Source/Laser/LaserProfiles.H index 8555b518716..098a48e8c66 100644 --- a/Source/Laser/LaserProfiles.H +++ b/Source/Laser/LaserProfiles.H @@ -58,7 +58,6 @@ public: * member variables of the laser profile class. 
* * @param[in] ppl should be amrex::ParmParse(laser_name) - * @param[in] ppc should be amrex::ParmParse("my_constants") * @param[in] params common laser profile parameters */ virtual void diff --git a/Source/Parallelization/GuardCellManager.H b/Source/Parallelization/GuardCellManager.H index 248a3dabf3b..35df8551410 100644 --- a/Source/Parallelization/GuardCellManager.H +++ b/Source/Parallelization/GuardCellManager.H @@ -47,6 +47,8 @@ public: * \param do_pml_in_domain whether pml is done in the domain (only used by RZ PSATD) * \param pml_ncell number of cells on the pml layer (only used by RZ PSATD) * \param ref_ratios mesh refinement ratios between mesh-refinement levels + * \param use_filter whether filtering will be done + * \param bilinear_filter_stencil_length the size of the stencil for filtering */ void Init( const amrex::Real dt, diff --git a/Source/Particles/Collision/BinaryCollision/BinaryCollision.H b/Source/Particles/Collision/BinaryCollision/BinaryCollision.H index 0c5ec182ee3..2c1cdc68944 100644 --- a/Source/Particles/Collision/BinaryCollision/BinaryCollision.H +++ b/Source/Particles/Collision/BinaryCollision/BinaryCollision.H @@ -111,8 +111,9 @@ public: * @param mypc Container of species involved * */ - void doCollisions (amrex::Real /*cur_time*/, amrex::Real dt, MultiParticleContainer* mypc) override + void doCollisions (amrex::Real cur_time, amrex::Real dt, MultiParticleContainer* mypc) override { + amrex::ignore_unused(cur_time); auto& species1 = mypc->GetParticleContainerFromName(m_species_names[0]); auto& species2 = mypc->GetParticleContainerFromName(m_species_names[1]); @@ -192,6 +193,7 @@ public: /** Perform all binary collisions within a tile * + * \param[in] dt time step size * \param[in] lev the mesh-refinement level * \param[in] mfi iterator for multifab * \param species_1 first species container diff --git a/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H b/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H index 08af65ca647..e2a4fca2518 100644 --- a/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H +++ b/Source/Particles/Collision/BinaryCollision/NuclearFusion/BoschHaleFusionCrossSection.H @@ -21,6 +21,8 @@ * * @param[in] E_kin_star the kinetic energy of the reactants in their center of mass frame, in SI units. * @param[in] fusion_type indicates which fusion reaction to calculate the cross-section for + * @param[in] m1 mass of the incoming particle + * @param[in] m2 mass of the target particle * @return The total cross section in SI units (square meters). */ AMREX_GPU_HOST_DEVICE AMREX_INLINE diff --git a/Source/Particles/Collision/CollisionHandler.cpp b/Source/Particles/Collision/CollisionHandler.cpp index cc8d8eb4af7..e8f25a78bb5 100644 --- a/Source/Particles/Collision/CollisionHandler.cpp +++ b/Source/Particles/Collision/CollisionHandler.cpp @@ -68,6 +68,7 @@ CollisionHandler::CollisionHandler(MultiParticleContainer const * const mypc) /** Perform all collisions * * @param cur_time Current time + * @param dt time step size * @param mypc MultiParticleContainer calling this method * */ diff --git a/Source/Particles/Deposition/CurrentDeposition.H b/Source/Particles/Deposition/CurrentDeposition.H index d478d838ef2..84d696ce4ec 100644 --- a/Source/Particles/Deposition/CurrentDeposition.H +++ b/Source/Particles/Deposition/CurrentDeposition.H @@ -36,7 +36,6 @@ using namespace amrex::literals; ion_lev is a null pointer. 
* \param jx_fab,jy_fab,jz_fab FArrayBox of current density, either full array or tile. * \param np_to_depose Number of particles for which current is deposited. - * \param dt Time step for particle level * \param relative_time Time at which to deposit J, relative to the time of the * current positions of the particles. When different than 0, * the particle position will be temporarily modified to match diff --git a/Source/Particles/Pusher/PushSelector.H b/Source/Particles/Pusher/PushSelector.H index a56dda2b925..b1e59029958 100644 --- a/Source/Particles/Pusher/PushSelector.H +++ b/Source/Particles/Pusher/PushSelector.H @@ -33,7 +33,7 @@ * \param Bx, By, Bz Magnetic field on particles. * \param ion_lev Ionization level of this particle (0 if ioniziation not on) * \param m Mass of this species. - * \param q Charge of this species. + * \param a_q Charge of this species. * \param pusher_algo 0: Boris, 1: Vay, 2: HigueraCary * \param do_crr Whether to do the classical radiation reaction * \param do_copy Whether to copy the old x and u for the BTD diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index cb5542025e9..4f226fbf891 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -279,7 +279,7 @@ public: */ void AddNParticles (int lev, int n, const amrex::ParticleReal* x, const amrex::ParticleReal* y, const amrex::ParticleReal* z, - const amrex::ParticleReal* vx, const amrex::ParticleReal* vy, const amrex::ParticleReal* vz, + const amrex::ParticleReal* ux, const amrex::ParticleReal* uy, const amrex::ParticleReal* uz, const int nattr_real, const amrex::ParticleReal* attr_real, const int nattr_int, const int* attr_int, int uniqueparticles, amrex::Long id=-1); @@ -293,9 +293,6 @@ public: static void BackwardCompatibility (); /** \brief Apply particle BC. - * - * \param[in] boundary_conditions Type of boundary conditions. 
For now, only absorbing or none - * are supported */ void ApplyBoundaryConditions (); diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 6eae0d0b643..21ab1408cb6 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -144,9 +144,9 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, int n, const amrex::ParticleReal* x, const amrex::ParticleReal* y, const amrex::ParticleReal* z, - const amrex::ParticleReal* vx, - const amrex::ParticleReal* vy, - const amrex::ParticleReal* vz, + const amrex::ParticleReal* ux, + const amrex::ParticleReal* uy, + const amrex::ParticleReal* uz, const int nattr_real, const amrex::ParticleReal* attr_real, const int nattr_int, const int* attr_int, int uniqueparticles, amrex::Long id) @@ -226,9 +226,9 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, if (np > 0) { pinned_tile.push_back_real(PIdx::w , weight.data(), weight.data() + np); - pinned_tile.push_back_real(PIdx::ux, vx + ibegin, vx + iend); - pinned_tile.push_back_real(PIdx::uy, vy + ibegin, vy + iend); - pinned_tile.push_back_real(PIdx::uz, vz + ibegin, vz + iend); + pinned_tile.push_back_real(PIdx::ux, ux + ibegin, ux + iend); + pinned_tile.push_back_real(PIdx::uy, uy + ibegin, uy + iend); + pinned_tile.push_back_real(PIdx::uz, uz + ibegin, uz + iend); if ( (NumRuntimeRealComps()>0) || (NumRuntimeIntComps()>0) ){ DefineAndReturnParticleTile(0, 0, 0); diff --git a/Source/Utils/Parser/IntervalsParser.H b/Source/Utils/Parser/IntervalsParser.H index f590139b30c..329e114bbb0 100644 --- a/Source/Utils/Parser/IntervalsParser.H +++ b/Source/Utils/Parser/IntervalsParser.H @@ -31,6 +31,7 @@ namespace utils::parser * Any of these integers may be omitted in which case it will be equal to their default value * (0 for the starting point, std::numeric_limits::max() for the stopping point and 1 for * the period). For example SliceParser(":1000:") is equivalent to SliceParser("0:1000:1"). + * @param[in] isBTD whether this is a back-transformed diagnostic */ SliceParser (const std::string& instr, bool isBTD=false); diff --git a/Source/Utils/ParticleUtils.H b/Source/Utils/ParticleUtils.H index a997412f691..621f9725b07 100644 --- a/Source/Utils/ParticleUtils.H +++ b/Source/Utils/ParticleUtils.H @@ -60,6 +60,7 @@ namespace ParticleUtils { * * @param[in] u2 square of particle speed (i.e. u dot u where u = gamma*v) * @param[in] m, M mass of projectile and target, respectively + * @param[out] gamma relativistic factor of the particle * @param[out] energy particle energy in eV */ AMREX_GPU_HOST_DEVICE AMREX_INLINE @@ -82,7 +83,7 @@ namespace ParticleUtils { * \brief Perform a Lorentz transformation of the given velocity * to a frame moving with velocity (Vx, Vy, Vz) relative to the present one. * - * @param[in/out] ux,uy,uz components of velocity vector in the current + * @param[in,out] ux,uy,uz components of velocity vector in the current frame - importantly these quantities are gamma * velocity * @param[in] Vx,Vy,Vz velocity of the new frame relative to the current one, NOT gamma*velocity! @@ -150,7 +151,7 @@ namespace ParticleUtils { * random velocity vector with given magnitude. This is used in isotropic * collision events. * - * @param[in/out] ux, uy, uz colliding particle's velocity + * @param[in,out] ux, uy, uz colliding particle's velocity * @param[in] vp velocity magnitude of the colliding particle after collision. 
* @param[in] engine the random-engine */ diff --git a/Source/WarpX.H b/Source/WarpX.H index 2d1124c06e7..5c25eb1807c 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -327,13 +327,13 @@ public: * to the map of MultiFabs (used to ease the access to MultiFabs from the Python * interface * - * \param mf[out] The MultiFab unique pointer to be allocated - * \param ba[in] The BoxArray describing the MultiFab - * \param dm[in] The DistributionMapping describing the MultiFab - * \param ncomp[in] The number of components in the MultiFab - * \param ngrow[in] The number of guard cells in the MultiFab - * \param name[in] The name of the MultiFab to use in the map - * \param initial_value[in] The optional initial value + * \param[out] mf The MultiFab unique pointer to be allocated + * \param[in] ba The BoxArray describing the MultiFab + * \param[in] dm The DistributionMapping describing the MultiFab + * \param[in] ncomp The number of components in the MultiFab + * \param[in] ngrow The number of guard cells in the MultiFab + * \param[in] name The name of the MultiFab to use in the map + * \param[in] initial_value The optional initial value */ static void AllocInitMultiFab ( std::unique_ptr& mf, @@ -350,13 +350,13 @@ public: * to the map of MultiFabs (used to ease the access to MultiFabs from the Python * interface * - * \param mf[out] The iMultiFab unique pointer to be allocated - * \param ba[in] The BoxArray describing the iMultiFab - * \param dm[in] The DistributionMapping describing the iMultiFab - * \param ncomp[in] The number of components in the iMultiFab - * \param ngrow[in] The number of guard cells in the iMultiFab - * \param name[in] The name of the iMultiFab to use in the map - * \param initial_value[in] The optional initial value + * \param[out] mf The iMultiFab unique pointer to be allocated + * \param[in] ba The BoxArray describing the iMultiFab + * \param[in] dm The DistributionMapping describing the iMultiFab + * \param[in] ncomp The number of components in the iMultiFab + * \param[in] ngrow The number of guard cells in the iMultiFab + * \param[in] name The name of the iMultiFab to use in the map + * \param[in] initial_value The optional initial value */ static void AllocInitMultiFab ( std::unique_ptr& mf, @@ -370,11 +370,12 @@ public: /** * \brief * Create an alias of a MultiFab, adding the alias to the MultiFab map - * \param mf[out] The MultiFab to create - * \param mf_to_alias[in] The MultiFab to alias - * \param scomp[in] The starting component to be aliased - * \param ncomp[in] The number of components to alias - * \param name[in] The name of the MultiFab to use in the map + * \param[out] mf The MultiFab to create + * \param[in] mf_to_alias The MultiFab to alias + * \param[in] scomp The starting component to be aliased + * \param[in] ncomp The number of components to alias + * \param[in] name The name of the MultiFab to use in the map + * \param[in] initial_value optional initial value for MultiFab */ static void AliasInitMultiFab ( std::unique_ptr& mf, @@ -394,7 +395,7 @@ public: * \brief * Add the MultiFab to the map of MultiFabs * \param name The name of the MultiFab use to reference the MultiFab - * \parame mf The MultiFab to be added to the map (via a pointer to it) + * \param mf The MultiFab to be added to the map (via a pointer to it) */ static void AddToMultiFabMap(const std::string name, const std::unique_ptr& mf) { multifab_map[name] = mf.get(); @@ -404,7 +405,7 @@ public: * \brief * Add the iMultiFab to the map of MultiFabs * \param name The name of the iMultiFab use 
to reference the iMultiFab - * \parame mf The iMultiFab to be added to the map (via a pointer to it) + * \param mf The iMultiFab to be added to the map (via a pointer to it) */ static void AddToMultiFabMap(const std::string name, const std::unique_ptr& mf) { imultifab_map[name] = mf.get(); @@ -593,7 +594,7 @@ public: /** apply QED correction on electric field for level lev and patch type patch_type * * \param lev mesh refinement level - * \param dt patch_type which MR patch: PatchType::fine or PatchType::coarse + * \param patch_type which MR patch: PatchType::fine or PatchType::coarse * \param dt time step */ void Hybrid_QED_Push (int lev, PatchType patch_type, amrex::Real dt); @@ -896,16 +897,16 @@ public: * on the staggered yee-grid or cell-centered grid, in the interior cells * and guard cells. * - * \param[in] mfx, x-component of the field to be initialized - * \param[in] mfy, y-component of the field to be initialized - * \param[in] mfz, z-component of the field to be initialized - * \param[in] xfield_parser, parser function to initialize x-field - * \param[in] yfield_parser, parser function to initialize y-field - * \param[in] zfield_parser, parser function to initialize z-field - * \param[in] edge_lengths, edge lengths information - * \param[in] face_areas, face areas information - * \param[in] field, flag indicating which field is being initialized ('E' for electric, 'B' for magnetic) - * \param[in] lev, level of the Multifabs that is initialized + * \param[in] mfx x-component of the field to be initialized + * \param[in] mfy y-component of the field to be initialized + * \param[in] mfz z-component of the field to be initialized + * \param[in] xfield_parser parser function to initialize x-field + * \param[in] yfield_parser parser function to initialize y-field + * \param[in] zfield_parser parser function to initialize z-field + * \param[in] edge_lengths edge lengths information + * \param[in] face_areas face areas information + * \param[in] field flag indicating which field is being initialized ('E' for electric, 'B' for magnetic) + * \param[in] lev level of the Multifabs that is initialized */ void InitializeExternalFieldsOnGridUsingParser ( amrex::MultiFab *mfx, amrex::MultiFab *mfy, amrex::MultiFab *mfz, @@ -923,7 +924,7 @@ public: * EBs such as edge lengths, face areas, distance to EB, etc. It also * appropriately communicates EB data to guard cells. * - * \param[in] lev, level of the Multifabs that is initialized + * \param[in] lev level of the Multifabs that is initialized */ void InitializeEBGridData(int lev); @@ -1248,7 +1249,7 @@ private: amrex::Vector, 3 > > m_face_areas; /** EB: for every mesh face flag_info_face contains a: - * * 0 if the face needs to be extended + * * 0 if the face needs to be extended * * 1 if the face is large enough to lend area to other faces * * 2 if the face is actually intruded by other face * It is initialized in WarpX::MarkCells diff --git a/Source/ablastr/constant.H b/Source/ablastr/constant.H index 477f57f9040..afa15e1038b 100644 --- a/Source/ablastr/constant.H +++ b/Source/ablastr/constant.H @@ -57,9 +57,9 @@ namespace ablastr::constant //! reduced Planck Constant = h / tau [J*s] static constexpr auto hbar = 1.054571817e-34_rt; - //! fine-structure constant = mu0/(4*MathConst::pi)*q_e*q_e*c/hbar [dimensionless] + //! fine-structure constant = mu0/(4*pi)*q_e*q_e*c/hbar [dimensionless] static constexpr auto alpha = 0.007297352573748943_rt; - //! classical electron radius = 1./(4*MathConst::pi*ep0) * q_e*q_e/(m_e*c*c) [m] + //! 
classical electron radius = 1./(4*pi*ep0) * q_e*q_e/(m_e*c*c) [m] static constexpr auto r_e = 2.817940326204929e-15_rt; //! xi: nonlinearity parameter of Heisenberg-Euler effective theory = (2.*alpha*alpha*ep0*ep0*hbar*hbar*hbar)/(45.*m_e*m_e*m_e*m_e*c*c*c*c*c) static constexpr double xi = 1.3050122447005176e-52; From a080c13de54b94ad24f606f94075a6ac4176ea06 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 19 Dec 2022 08:55:30 -0800 Subject: [PATCH 0206/1346] Docs: Profiler Sync (Debug) (#3571) Document the `amrex.use_profiler_syncs` option in our debugging section. --- Docs/source/usage/parameters.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 22f01196a4f..b654eb4ae37 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2777,6 +2777,10 @@ When developing, testing and :ref:`debugging WarpX `, the follo When set to ``1``, this option causes simulation to fail *after* its completion if there were unused parameters. It is mainly intended for continuous integration and automated testing to check that all tests and inputs are adapted to API changes. +* ``amrex.use_profiler_syncs`` (``0`` or ``1``; default is ``0`` for false) + Adds a synchronization at the start of communication, so any load balance will be caught there (the timer is called ``SyncBeforeComms``), then the comm operation will run. + This will slow down the run. + * ``warpx.serialize_initial_conditions`` (`0` or `1`) optional (default `0`) Serialize the initial conditions for reproducible testing, e.g, in our continuous integration tests. Mainly whether or not to use OpenMP threading for particle initialization. From fe3e6eb4474be8c00bc3f42fdd22ea0a27329570 Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Mon, 19 Dec 2022 17:59:04 -0800 Subject: [PATCH 0207/1346] managed memory for user defined attribs in addplasma (#3576) --- .../Particles/PhysicalParticleContainer.cpp | 91 ++++++++++++++----- 1 file changed, 66 insertions(+), 25 deletions(-) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 18cf1716295..017be509b27 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1059,23 +1059,43 @@ PhysicalParticleContainer::AddPlasma (int lev, RealBox part_realbox) // user-defined integer and real attributes const int n_user_int_attribs = m_user_int_attribs.size(); const int n_user_real_attribs = m_user_real_attribs.size(); - amrex::Gpu::DeviceVector pa_user_int(n_user_int_attribs); - amrex::Gpu::DeviceVector pa_user_real(n_user_real_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec(n_user_int_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec(n_user_real_attribs); + amrex::Gpu::PinnedVector pa_user_int_pinned(n_user_int_attribs); + amrex::Gpu::PinnedVector pa_user_real_pinned(n_user_real_attribs); + amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec_pinned(n_user_int_attribs); + amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec_pinned(n_user_real_attribs); for (int ia = 0; ia < n_user_int_attribs; ++ia) { - pa_user_int[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; - user_int_attrib_parserexec[ia] = m_user_int_attrib_parser[ia]->compile<7>(); + 
pa_user_int_pinned[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; + user_int_attrib_parserexec_pinned[ia] = m_user_int_attrib_parser[ia]->compile<7>(); } for (int ia = 0; ia < n_user_real_attribs; ++ia) { - pa_user_real[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; - user_real_attrib_parserexec[ia] = m_user_real_attrib_parser[ia]->compile<7>(); + pa_user_real_pinned[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; + user_real_attrib_parserexec_pinned[ia] = m_user_real_attrib_parser[ia]->compile<7>(); } - int** pa_user_int_data = pa_user_int.dataPtr(); - ParticleReal** pa_user_real_data = pa_user_real.dataPtr(); - amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec.dataPtr(); - amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec.dataPtr(); - +#ifdef AMREX_USE_GPU + // To avoid using managed memory, we first define pinned memory vector, initialize on cpu, + // and them memcpy to device from host + amrex::Gpu::DeviceVector d_pa_user_int(n_user_int_attribs); + amrex::Gpu::DeviceVector d_pa_user_real(n_user_real_attribs); + amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_int_attrib_parserexec(n_user_int_attribs); + amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_real_attrib_parserexec(n_user_real_attribs); + amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_int_pinned.begin(), + pa_user_int_pinned.end(), d_pa_user_int.begin()); + amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_real_pinned.begin(), + pa_user_real_pinned.end(), d_pa_user_real.begin()); + amrex::Gpu::copyAsync(Gpu::hostToDevice, user_int_attrib_parserexec_pinned.begin(), + user_int_attrib_parserexec_pinned.end(), d_user_int_attrib_parserexec.begin()); + amrex::Gpu::copyAsync(Gpu::hostToDevice, user_real_attrib_parserexec_pinned.begin(), + user_real_attrib_parserexec_pinned.end(), d_user_real_attrib_parserexec.begin()); + int** pa_user_int_data = d_pa_user_int.dataPtr(); + ParticleReal** pa_user_real_data = d_pa_user_real.dataPtr(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = d_user_int_attrib_parserexec.dataPtr(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = d_user_real_attrib_parserexec.dataPtr(); +#else + int** pa_user_int_data = pa_user_int_pinned.dataPtr(); + ParticleReal** pa_user_real_data = pa_user_real_pinned.dataPtr(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec_pinned.dataPtr(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec_pinned.dataPtr(); +#endif int* pi = nullptr; if (do_field_ionization) { @@ -1591,22 +1611,43 @@ PhysicalParticleContainer::AddPlasmaFlux (amrex::Real dt) // user-defined integer and real attributes const int n_user_int_attribs = m_user_int_attribs.size(); const int n_user_real_attribs = m_user_real_attribs.size(); - amrex::Gpu::DeviceVector pa_user_int(n_user_int_attribs); - amrex::Gpu::DeviceVector pa_user_real(n_user_real_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec(n_user_int_attribs); - amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec(n_user_real_attribs); + amrex::Gpu::PinnedVector pa_user_int_pinned(n_user_int_attribs); + amrex::Gpu::PinnedVector pa_user_real_pinned(n_user_real_attribs); + amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_int_attrib_parserexec_pinned(n_user_int_attribs); + 
amrex::Gpu::PinnedVector< amrex::ParserExecutor<7> > user_real_attrib_parserexec_pinned(n_user_real_attribs); for (int ia = 0; ia < n_user_int_attribs; ++ia) { - pa_user_int[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; - user_int_attrib_parserexec[ia] = m_user_int_attrib_parser[ia]->compile<7>(); + pa_user_int_pinned[ia] = soa.GetIntData(particle_icomps[m_user_int_attribs[ia]]).data() + old_size; + user_int_attrib_parserexec_pinned[ia] = m_user_int_attrib_parser[ia]->compile<7>(); } for (int ia = 0; ia < n_user_real_attribs; ++ia) { - pa_user_real[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; - user_real_attrib_parserexec[ia] = m_user_real_attrib_parser[ia]->compile<7>(); + pa_user_real_pinned[ia] = soa.GetRealData(particle_comps[m_user_real_attribs[ia]]).data() + old_size; + user_real_attrib_parserexec_pinned[ia] = m_user_real_attrib_parser[ia]->compile<7>(); } - int** pa_user_int_data = pa_user_int.dataPtr(); - ParticleReal** pa_user_real_data = pa_user_real.dataPtr(); - amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec.dataPtr(); - amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec.dataPtr(); +#ifdef AMREX_USE_GPU + // To avoid using managed memory, we first define pinned memory vector, initialize on cpu, + // and them memcpy to device from host + amrex::Gpu::DeviceVector d_pa_user_int(n_user_int_attribs); + amrex::Gpu::DeviceVector d_pa_user_real(n_user_real_attribs); + amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_int_attrib_parserexec(n_user_int_attribs); + amrex::Gpu::DeviceVector< amrex::ParserExecutor<7> > d_user_real_attrib_parserexec(n_user_real_attribs); + amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_int_pinned.begin(), + pa_user_int_pinned.end(), d_pa_user_int.begin()); + amrex::Gpu::copyAsync(Gpu::hostToDevice, pa_user_real_pinned.begin(), + pa_user_real_pinned.end(), d_pa_user_real.begin()); + amrex::Gpu::copyAsync(Gpu::hostToDevice, user_int_attrib_parserexec_pinned.begin(), + user_int_attrib_parserexec_pinned.end(), d_user_int_attrib_parserexec.begin()); + amrex::Gpu::copyAsync(Gpu::hostToDevice, user_real_attrib_parserexec_pinned.begin(), + user_real_attrib_parserexec_pinned.end(), d_user_real_attrib_parserexec.begin()); + int** pa_user_int_data = d_pa_user_int.dataPtr(); + ParticleReal** pa_user_real_data = d_pa_user_real.dataPtr(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = d_user_int_attrib_parserexec.dataPtr(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = d_user_real_attrib_parserexec.dataPtr(); +#else + int** pa_user_int_data = pa_user_int_pinned.dataPtr(); + ParticleReal** pa_user_real_data = pa_user_real_pinned.dataPtr(); + amrex::ParserExecutor<7> const* user_int_parserexec_data = user_int_attrib_parserexec_pinned.dataPtr(); + amrex::ParserExecutor<7> const* user_real_parserexec_data = user_real_attrib_parserexec_pinned.dataPtr(); +#endif int* p_ion_level = nullptr; if (do_field_ionization) { From 18c321a6ffb5fa7570aff5ec9126db4598e824f7 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 20 Dec 2022 10:03:57 -0800 Subject: [PATCH 0208/1346] Fix typo in documentation for Haswell (#3583) --- Docs/source/install/hpc/cori.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/hpc/cori.rst b/Docs/source/install/hpc/cori.rst index 15675c674d7..afbed3ff29b 100644 --- a/Docs/source/install/hpc/cori.rst +++ b/Docs/source/install/hpc/cori.rst 
@@ -128,7 +128,7 @@ And install ADIOS2, BLAS++ and LAPACK++: cmake --build src/blaspp-haswell-build --target install --parallel 16 # LAPACK++ (for PSATD+RZ) - git clone https://github.com/icl-utk-edu/blaspp.git src/lapackpp + git clone https://github.com/icl-utk-edu/lapackpp.git src/lapackpp rm -rf src/lapackpp-haswell-build CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S src/lapackpp -B src/lapackpp-haswell-build -Duse_cmake_find_lapack=ON -DBLAS_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DLAPACK_LIBRARIES=${CRAY_LIBSCI_PREFIX_DIR}/lib/libsci_gnu.a -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=$HOME/sw/haswell/lapackpp-master-install cmake --build src/lapackpp-haswell-build --target install --parallel 16 From ab705182ed934f2b35baa6478d08dc191ccf9f86 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Dec 2022 19:07:14 +0100 Subject: [PATCH 0209/1346] [pre-commit.ci] pre-commit autoupdate (#3580) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pycqa/isort: 5.11.1 → v5.11.3](https://github.com/pycqa/isort/compare/5.11.1...v5.11.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ec5824a3aa9..10aa84c4a71 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -75,7 +75,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: 5.11.1 + rev: v5.11.3 hooks: - id: isort name: isort (python) From 89858527a314ddd224dd9f26b69e8be63cc4dea5 Mon Sep 17 00:00:00 2001 From: David Grote Date: Wed, 21 Dec 2022 11:29:00 -0800 Subject: [PATCH 0210/1346] Add accelerator lattice, starting with quadrupoles (#3063) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Initial version of accelerator lattice * Clean up EOL white space * Small clean up for GPU * Fixed up consts * Added hard edge fraction plus other clean ups * More clean up * Restructure to work on GPUs * Now this grabs its own copies of particle info * Updates, including adding dBdx * Small cleanup in Quad * Small fixes for GPU * More cleanup for GPU * More GPU cleanup * Rewrite of the accelerator lattice implementation to better handle GPU * Fix struct forward definition * Another forward definition fix * Bug fix * Added LatticeElementBase * Removed zcenters array * Added CI test case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Clean up in CI analysis.py * Cleanup of coding * Added CI test hard_edged_quadrupoles_moving * Added Lorentz transform between boosted frame and lab frame * Fixes for working in the boosted frame * Added boosted CI test * Change input name, adding the prefix "lattice." 
* Added plasma lens lattice element This will replace the external field plasma lens * Fixed CI analysis script to look for "lattice.quad" * Added checks of lattice element input * Added documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed duplicate call to lattice finder UpdateIndices * Added extensive comments * Reworked the input to use the MAD like description This is the same as the method used in ImpactX * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove old lines from inputs_lattice_3d * Added "lattice" element type * Fixed some Real and ParticleReals * [pre-commit.ci] pre-commit autoupdate (#3246) updates: - [github.com/hadialqattan/pycln: v2.0.1 → v2.0.3](https://github.com/hadialqattan/pycln/compare/v2.0.1...v2.0.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * ABLASTR: Update Poisson Solver API (#3243) Update the Poisson Solver API to be more usable. Needed for ImpactX. * Docs: New OLCF Machine (#3228) * D-T fusion (#3153) * initial work * fixed bugs and added species * update documentation * delete unused file * Add properties for neutron, hydrogen isotopes, helium isotopes * Update code to be more consistent * Correct typo * Parse deuterium-tritium fusion * Start putting in place the files for deuterium-tritium * Update documentation * Prepare structures for deuterium tritium * Fix typo * Fix compilation * Add neutron * Add correct formula for the cross-section * Correct compilation error * Fix nuclear fusion * Reset benchmarks * Prepare creation functor for 2-product fusion * First implementation of momentum initialization * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use utility function for fusion * Minor modification of variable names * Fix GPU compilation * Fix single precision compilation * Update types * Use util function in P-B fusion * Correct compilation errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct errors * Update values of mass and charge * Correct compilation error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct compilation error * Correct compilation error * Correct compilation error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reset benchmark * Use helium particle in proton-boron, to avoid resetting benchmark * Fixed proton-boron test * Revert "Fixed proton-boron test" This reverts commit 73c8d9d0be8417d5cd08a23daeebbc322c984808. 
* Incorporate Neil's recommendations * Reset benchmarks * Correct compilation errors * Add new deuterium tritium automated test * Correct formula of cross-section * Correct cross-section * Improve analysis script * Add test of energy conservation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test of conservation of momentum * Progress in analysis script * Fix error in the initial energy of the deuterium particles * Add check of isotropy * Clean up the test script * Rewrite p_sq formula in a way to avoids machine-precision negative numbers * Add checksum * Clean up code * Apply suggestions from code review * Update PR according to comments * Update benchmark * Address additional comments * Numerical Literals Co-authored-by: Luca Fedeli Co-authored-by: Neïl Zaim <49716072+NeilZaim@users.noreply.github.com> * Docs: gaussian beam `q_tot` is not optional (#3249) * Fix a bug in GPU version of Hankel Transform (#3253) amrex::Array4 is a 4D array that can be accessed with three spatial indices plus an optional component index. We must always provide all three spatial indices even in 2D. * Add Python Callback Call when Checkpointing Signal is Received (#3251) * CI: Add Missing Regression Analysis (NCI corrector) (#3252) * Fixes to allow mixed precision, ParticleReal float, Real double (#3239) * Fixes to allow mixed precision, ParticleReal float, Real double * Fix for the optical depth * A different way of fixing QuantumSynchrotronEvolveOpticalDepth * In the QED code, consistently use ParticleReal * Use ParticleReal type consistently * Fix typo Docs/source/usage/parameters.rst Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> * Fix typo Docs/source/usage/parameters.rst Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> * Fix small error in plasma lens lattice documentation * Small addition to the documentation * Fix the residence correction to allow short elements * Updated CI benchmarks * Added check of lattice to isNoOp * Updated the hard_edged_quadrupoles CI benchmarks It is not clear why there was a change, but the difference is essentially round off in the E field. The important thing is that the particles are still correct. 
* Update Source/AcceleratorLattice/AcceleratorLattice.H Add include statements Co-authored-by: Axel Huebl * Update Source/AcceleratorLattice/LatticeElements/LatticeElementBase.H Add includes Co-authored-by: Axel Huebl * Renamed to README.rst and updated headers * Made d_lattice_element_finder optional type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Include `` * Docs: Developer AccLattice Inclusion Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl Co-authored-by: Remi Lehe Co-authored-by: Luca Fedeli Co-authored-by: Neïl Zaim <49716072+NeilZaim@users.noreply.github.com> Co-authored-by: Weiqun Zhang Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- CMakeLists.txt | 1 + .../source/developers/accelerator_lattice.rst | 1 + Docs/source/developers/developers.rst | 1 + Docs/source/usage/parameters.rst | 54 ++++ Examples/Tests/AcceleratorLattice/analysis.py | 116 +++++++ .../Tests/AcceleratorLattice/inputs_quad_3d | 51 ++++ .../AcceleratorLattice/inputs_quad_boosted_3d | 45 +++ .../AcceleratorLattice/inputs_quad_moving_3d | 49 +++ Examples/Tests/plasma_lens/analysis.py | 29 +- Examples/Tests/plasma_lens/inputs_lattice_3d | 73 +++++ .../hard_edged_lasma_lens.json | 21 ++ .../hard_edged_quadrupoles.json | 22 ++ .../hard_edged_quadrupoles_boosted.json | 22 ++ .../hard_edged_quadrupoles_moving.json | 22 ++ Regression/WarpX-tests.ini | 72 +++++ .../AcceleratorLattice/AcceleratorLattice.H | 78 +++++ .../AcceleratorLattice/AcceleratorLattice.cpp | 93 ++++++ Source/AcceleratorLattice/CMakeLists.txt | 7 + .../AcceleratorLattice/LatticeElementFinder.H | 283 ++++++++++++++++++ .../LatticeElementFinder.cpp | 123 ++++++++ .../LatticeElements/CMakeLists.txt | 7 + .../LatticeElements/Drift.H | 33 ++ .../LatticeElements/Drift.cpp | 22 ++ .../LatticeElements/HardEdgedPlasmaLens.H | 120 ++++++++ .../LatticeElements/HardEdgedPlasmaLens.cpp | 70 +++++ .../LatticeElements/HardEdgedQuadrupole.H | 120 ++++++++ .../LatticeElements/HardEdgedQuadrupole.cpp | 70 +++++ .../LatticeElements/HardEdged_K.H | 50 ++++ .../LatticeElements/LatticeElementBase.H | 59 ++++ .../LatticeElements/LatticeElementBase.cpp | 42 +++ .../LatticeElements/Make.package | 6 + Source/AcceleratorLattice/Make.package | 6 + Source/AcceleratorLattice/README.rst | 43 +++ Source/Evolve/WarpXEvolve.cpp | 8 + Source/Make.WarpX | 1 + Source/Parallelization/WarpXRegrid.cpp | 3 + Source/Particles/Gather/GetExternalFields.H | 15 +- Source/Particles/Gather/GetExternalFields.cpp | 9 + Source/WarpX.H | 7 + Source/WarpX.cpp | 9 +- 40 files changed, 1856 insertions(+), 7 deletions(-) create mode 120000 Docs/source/developers/accelerator_lattice.rst create mode 100755 Examples/Tests/AcceleratorLattice/analysis.py create mode 100644 Examples/Tests/AcceleratorLattice/inputs_quad_3d create mode 100644 Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d create mode 100644 Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d create mode 100644 Examples/Tests/plasma_lens/inputs_lattice_3d create mode 100644 Regression/Checksum/benchmarks_json/hard_edged_lasma_lens.json create mode 100644 Regression/Checksum/benchmarks_json/hard_edged_quadrupoles.json create mode 100644 Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_boosted.json create mode 100644 Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_moving.json 
create mode 100644 Source/AcceleratorLattice/AcceleratorLattice.H create mode 100644 Source/AcceleratorLattice/AcceleratorLattice.cpp create mode 100644 Source/AcceleratorLattice/CMakeLists.txt create mode 100644 Source/AcceleratorLattice/LatticeElementFinder.H create mode 100644 Source/AcceleratorLattice/LatticeElementFinder.cpp create mode 100644 Source/AcceleratorLattice/LatticeElements/CMakeLists.txt create mode 100644 Source/AcceleratorLattice/LatticeElements/Drift.H create mode 100644 Source/AcceleratorLattice/LatticeElements/Drift.cpp create mode 100644 Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.H create mode 100644 Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.cpp create mode 100644 Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.H create mode 100644 Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.cpp create mode 100644 Source/AcceleratorLattice/LatticeElements/HardEdged_K.H create mode 100644 Source/AcceleratorLattice/LatticeElements/LatticeElementBase.H create mode 100644 Source/AcceleratorLattice/LatticeElements/LatticeElementBase.cpp create mode 100644 Source/AcceleratorLattice/LatticeElements/Make.package create mode 100644 Source/AcceleratorLattice/Make.package create mode 100644 Source/AcceleratorLattice/README.rst diff --git a/CMakeLists.txt b/CMakeLists.txt index 834cb221450..d98af7ca83c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -232,6 +232,7 @@ if(WarpX_APP) endif() add_subdirectory(Source/ablastr) +add_subdirectory(Source/AcceleratorLattice) add_subdirectory(Source/BoundaryConditions) add_subdirectory(Source/Diagnostics) add_subdirectory(Source/EmbeddedBoundary) diff --git a/Docs/source/developers/accelerator_lattice.rst b/Docs/source/developers/accelerator_lattice.rst new file mode 120000 index 00000000000..5b4702f09b5 --- /dev/null +++ b/Docs/source/developers/accelerator_lattice.rst @@ -0,0 +1 @@ +../../../Source/AcceleratorLattice/README.rst \ No newline at end of file diff --git a/Docs/source/developers/developers.rst b/Docs/source/developers/developers.rst index b46624bc15e..aa2e6196377 100644 --- a/Docs/source/developers/developers.rst +++ b/Docs/source/developers/developers.rst @@ -11,6 +11,7 @@ Implementation Details dimensionality fields particles + accelerator_lattice initialization diagnostics moving_window diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index b654eb4ae37..d1d9cf1cbac 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -1279,6 +1279,9 @@ Laser initialization External fields --------------- +Grid initialization +^^^^^^^^^^^^^^^^^^^ + * ``warpx.B_ext_grid_init_style`` (string) optional (default is "default") This parameter determines the type of initialization for the external magnetic field. The "default" style initializes the @@ -1335,6 +1338,9 @@ External fields the field solver. In particular, do not use any other boundary condition than periodic. +Applied to Particles +^^^^^^^^^^^^^^^^^^^^ + * ``particles.E_ext_particle_init_style`` & ``particles.B_ext_particle_init_style`` (string) optional (default "none") These parameters determine the type of the external electric and magnetic fields respectively that are applied directly to the particles at every timestep. @@ -1388,6 +1394,54 @@ External fields and :math:`E_z = 0`, and :math:`B_x = \mathrm{strength} \cdot y`, :math:`B_y = -\mathrm{strength} \cdot x`, and :math:`B_z = 0`. 
+Accelerator Lattice +^^^^^^^^^^^^^^^^^^^ + +Several accelerator lattice elements can be defined as described below. +The elements are defined relative to the `z` axis and in the lab frame, starting at `z = 0`. +They are described using a simplified MAD like syntax. +Note that elements of the same type cannot overlap each other. + +* ``lattice.elements`` (``list of strings``) optional (default: no elements) + A list of names (one name per lattice element), in the order that they + appear in the lattice. + +* ``.type`` (``string``) + Indicates the element type for this lattice element. This should be one of: + + * ``drift`` for free drift. This requires this additional parameter: + + * ``.ds`` (``float``, in meters) the segment length + + * ``quad`` for a hard edged quadrupole. + This applies a quadrupole field that is uniform within the `z` extent of the element with a sharp cut off at the ends. + This uses residence corrections, with the field scaled by the amount of time within the element for particles entering + or leaving it, to increase the accuracy. + This requires these additional parameters: + + * ``.ds`` (``float``, in meters) the segment length + + * ``.dEdx`` (``float``, in volts/meter^2) optional (default: 0.) the electric quadrupole field gradient + The field applied to the particles will be `Ex = dEdx*x` and `Ey = -dEdx*y`. + + * ``.dBdx`` (``float``, in Tesla/meter) optional (default: 0.) the magnetic quadrupole field gradient + The field applied to the particles will be `Bx = dBdx*y` and `By = dBdx*x`. + + * ``plasmalens`` for a field modeling a plasma lens + This applies a radially directed plasma lens field that is uniform within the `z` extent of the element with + a sharp cut off at the ends. + This uses residence corrections, with the field scaled by the amount of time within the element for particles entering + or leaving it, to increase the accuracy. + This requires these additional parameters: + + * ``.ds`` (``float``, in meters) the segment length + + * ``.dEdx`` (``float``, in volts/meter^2) optional (default: 0.) the electric field gradient + The field applied to the particles will be `Ex = dEdx*x` and `Ey = dEdx*y`. + + * ``.dBdx`` (``float``, in Tesla/meter) optional (default: 0.) the magnetic field gradient + The field applied to the particles will be `Bx = dBdx*y` and `By = -dBdx*x`. + .. _running-cpp-parameters-collision: Collision initialization diff --git a/Examples/Tests/AcceleratorLattice/analysis.py b/Examples/Tests/AcceleratorLattice/analysis.py new file mode 100755 index 00000000000..d2fd7f6ffe3 --- /dev/null +++ b/Examples/Tests/AcceleratorLattice/analysis.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python3 + +# Copyright 2022 David Grote +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + + +""" +This script tests the quad lattice +The input file sets up a series of quadrupoles and propagates two particles through them. +One particle is in the X plane, the other the Y plane. +The final positions are compared to the analytic solutions. +The motion is slow enough that relativistic effects are ignored. 
+""" + +import os +import sys + +import numpy as np +from scipy.constants import c, e, m_e +import yt + +yt.funcs.mylog.setLevel(0) +sys.path.insert(1, '../../../../warpx/Regression/Checksum/') +import checksumAPI + +filename = sys.argv[1] +ds = yt.load( filename ) +ad = ds.all_data() + +gamma_boost = float(ds.parameters.get('warpx.gamma_boost', 1.)) +uz_boost = np.sqrt(gamma_boost*gamma_boost - 1.)*c + +# Fetch the final particle position +xx_sim = ad['electron', 'particle_position_x'].v[0] +zz_sim = ad['electron', 'particle_position_z'].v[0] +ux_sim = ad['electron', 'particle_momentum_x'].v[0]/m_e + +if gamma_boost > 1.: + # The simulation data is in the boosted frame. + # Transform the z position to the lab frame. + time = ds.current_time.value + zz_sim = gamma_boost*zz_sim + uz_boost*time; + +# Fetch the quadrupole lattice data +quad_starts = [] +quad_lengths = [] +quad_strengths_E = [] +z_location = 0. +def read_lattice(rootname, z_location): + lattice_elements = ds.parameters.get(f'{rootname}.elements').split() + for element in lattice_elements: + element_type = ds.parameters.get(f'{element}.type') + if element_type == 'drift': + length = float(ds.parameters.get(f'{element}.ds')) + z_location += length + elif element_type == 'quad': + length = float(ds.parameters.get(f'{element}.ds')) + quad_starts.append(z_location) + quad_lengths.append(length) + quad_strengths_E.append(float(ds.parameters.get(f'{element}.dEdx'))) + z_location += length + elif element_type == 'lattice': + z_location = read_lattice(element, z_location) + return z_location + +read_lattice('lattice', z_location) + +# Fetch the initial position of the particle +x0 = [float(x) for x in ds.parameters.get('electron.single_particle_pos').split()] +ux0 = [float(x)*c for x in ds.parameters.get('electron.single_particle_vel').split()] + +xx = x0[0] +zz = x0[2] +ux = ux0[0] +uz = ux0[2] + +gamma = np.sqrt(uz**2/c**2 + 1.) 
+vz = uz/gamma + +def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): + """Use analytic solution of a particle with a transverse dependent field""" + kb0 = np.sqrt(e/(m_e*gamma*vz0**2)*abs(lens_strength)) + if lens_strength >= 0.: + x1 = x0*np.cos(kb0*lens_length) + (vx0/vz0)/kb0*np.sin(kb0*lens_length) + vx1 = vz0*(-kb0*x0*np.sin(kb0*lens_length) + (vx0/vz0)*np.cos(kb0*lens_length)) + else: + x1 = x0*np.cosh(kb0*lens_length) + (vx0/vz0)/kb0*np.sinh(kb0*lens_length) + vx1 = vz0*(+kb0*x0*np.sinh(kb0*lens_length) + (vx0/vz0)*np.cosh(kb0*lens_length)) + return x1, vx1 + +# Integrate the particle using the analytic solution +for i in range(len(quad_starts)): + z_lens = quad_starts[i] + vx = ux/gamma + dt = (z_lens - zz)/vz + xx = xx + dt*vx + xx, vx = applylens(xx, vx, vz, gamma, quad_lengths[i], quad_strengths_E[i]) + ux = gamma*vx + zz = z_lens + quad_lengths[i] + +dt = (zz_sim - zz)/vz +vx = ux/gamma +xx = xx + dt*vx + +# Compare the analytic to the simulated final values +print(f'Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.01') +print(f'Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002') + +assert abs(np.abs((xx - xx_sim)/xx)) < 0.01, Exception('error in x particle position') +assert abs(np.abs((ux - ux_sim)/ux)) < 0.002, Exception('error in x particle velocity') + +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, filename) diff --git a/Examples/Tests/AcceleratorLattice/inputs_quad_3d b/Examples/Tests/AcceleratorLattice/inputs_quad_3d new file mode 100644 index 00000000000..0a3b433f66d --- /dev/null +++ b/Examples/Tests/AcceleratorLattice/inputs_quad_3d @@ -0,0 +1,51 @@ +max_step = 50 +amr.n_cell = 8 8 32 +amr.max_level = 0 +geometry.dims = 3 +geometry.prob_lo = -0.2 -0.2 0. +geometry.prob_hi = +0.2 +0.2 1.6 + +warpx.const_dt = 1e-9 +warpx.do_electrostatic = labframe + +# Boundary condition +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec + +# Order of particle shape factors +algo.particle_shape = 1 + +particles.species_names = electron +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = "SingleParticle" +electron.single_particle_pos = 0.05 0.0 0.0 +electron.single_particle_vel = 0.0 0.0 0.1 # gamma*beta +electron.single_particle_weight = 1.0 + +lattice.elements = lattice1 lattice2 + +lattice1.type = lattice +lattice1.elements = drift1 quad1 + +lattice2.type = lattice +lattice2.elements = drift2 quad2 + +drift1.type = drift +drift1.ds = 0.2 + +quad1.type = quad +quad1.ds = 0.2 +quad1.dEdx = 1.e4 + +drift2.type = drift +drift2.ds = 0.6 + +quad2.type = quad +quad2.ds = 0.4 +quad2.dEdx = -1.e4 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 50 +diag1.diag_type = Full diff --git a/Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d b/Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d new file mode 100644 index 00000000000..ac1ea3b9279 --- /dev/null +++ b/Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d @@ -0,0 +1,45 @@ +max_step = 50 +amr.n_cell = 16 16 8 +amr.max_level = 0 +geometry.dims = 3 +geometry.prob_lo = -0.2 -0.2 -0.1 +geometry.prob_hi = +0.2 +0.2 +0.1 + +# Boundary condition +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec + +warpx.gamma_boost = 2. 
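+# The lattice elements below are specified in the lab frame (starting at lab z = 0);
+# the lattice element finder transforms between the boosted and lab frames when
+# looking up the element fields at the particle positions.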
+warpx.boost_direction = z + +# Order of particle shape factors +algo.particle_shape = 1 + +particles.species_names = electron +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = "SingleParticle" +electron.single_particle_pos = 0.05 0.0 0.0 +electron.single_particle_vel = 0.0 0.0 2.0 # gamma*beta +electron.single_particle_weight = 1.0 + +lattice.elements = drift1 quad1 drift2 quad2 + +drift1.type = drift +drift1.ds = 0.2 + +quad1.type = quad +quad1.ds = 0.2 +quad1.dEdx = 1.e4 + +drift2.type = drift +drift2.ds = 0.6 + +quad2.type = quad +quad2.ds = 0.4 +quad2.dEdx = -1.e4 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 50 +diag1.diag_type = Full diff --git a/Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d b/Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d new file mode 100644 index 00000000000..98d3d40ad0b --- /dev/null +++ b/Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d @@ -0,0 +1,49 @@ +max_step = 50 +amr.n_cell = 16 16 8 +amr.max_level = 0 +geometry.dims = 3 +geometry.prob_lo = -0.2 -0.2 -0.1 +geometry.prob_hi = +0.2 +0.2 +0.1 + +warpx.const_dt = 1e-9 +warpx.do_electrostatic = labframe + +# Boundary condition +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec + +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = 0.1 # in units of the speed of light + +# Order of particle shape factors +algo.particle_shape = 1 + +particles.species_names = electron +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = "SingleParticle" +electron.single_particle_pos = 0.05 0.0 0.0 +electron.single_particle_vel = 0.0 0.0 0.1 # gamma*beta +electron.single_particle_weight = 1.0 + +lattice.elements = drift1 quad1 drift2 quad2 + +drift1.type = drift +drift1.ds = 0.2 + +quad1.type = quad +quad1.ds = 0.2 +quad1.dEdx = 1.e4 + +drift2.type = drift +drift2.ds = 0.6 + +quad2.type = quad +quad2.ds = 0.4 +quad2.dEdx = -1.e4 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 50 +diag1.diag_type = Full diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 1d2313ba140..80fdc418127 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -66,11 +66,30 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): # vel_z is not saved in my_constants with the PICMI version vel_z = 0.5*c -plasma_lens_period = float(ds.parameters.get('particles.repeated_plasma_lens_period')) -plasma_lens_starts = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_starts').split()] -plasma_lens_lengths = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_lengths').split()] -plasma_lens_strengths_E = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_E').split()] -plasma_lens_strengths_B = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_B').split()] +if 'particles.repeated_plasma_lens_period' in ds.parameters: + plasma_lens_period = float(ds.parameters.get('particles.repeated_plasma_lens_period')) + plasma_lens_starts = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_starts').split()] + plasma_lens_lengths = [float(x) for x in ds.parameters.get('particles.repeated_plasma_lens_lengths').split()] + plasma_lens_strengths_E = [eval(x) for x in ds.parameters.get('particles.repeated_plasma_lens_strengths_E').split()] + plasma_lens_strengths_B = [eval(x) for x in 
ds.parameters.get('particles.repeated_plasma_lens_strengths_B').split()] +elif 'lattice.elements' in ds.parameters: + lattice_elements = ds.parameters.get('lattice.elements').split() + plasma_lens_zstarts = [] + plasma_lens_lengths = [] + plasma_lens_strengths_E = [] + z_location = 0. + for element in lattice_elements: + element_type = ds.parameters.get(f'{element}.type') + length = float(ds.parameters.get(f'{element}.ds')) + if element_type == 'plasmalens': + plasma_lens_zstarts.append(z_location) + plasma_lens_lengths.append(length) + plasma_lens_strengths_E.append(float(ds.parameters.get(f'{element}.dEdx'))) + z_location += length + + plasma_lens_period = 0.5 + plasma_lens_starts = plasma_lens_zstarts - plasma_lens_period*np.arange(len(plasma_lens_zstarts)) + plasma_lens_strengths_B = np.zeros(len(plasma_lens_zstarts)) x0 = float(ds.parameters.get('electrons.multiple_particles_pos_x').split()[0]) diff --git a/Examples/Tests/plasma_lens/inputs_lattice_3d b/Examples/Tests/plasma_lens/inputs_lattice_3d new file mode 100644 index 00000000000..3473f142044 --- /dev/null +++ b/Examples/Tests/plasma_lens/inputs_lattice_3d @@ -0,0 +1,73 @@ +# Maximum number of time steps +max_step = 84 + +# number of grid points +amr.n_cell = 16 16 16 + +amr.max_level = 0 + +# Geometry +geometry.dims = 3 +geometry.prob_lo = -1.0 -1.0 0.0 # physical domain +geometry.prob_hi = 1.0 1.0 2.0 + +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec +boundary.particle_lo = absorbing absorbing absorbing +boundary.particle_hi = absorbing absorbing absorbing + +# Algorithms +algo.particle_shape = 1 +warpx.cfl = 0.7 + +my_constants.vel_z = 0.5*clight + +# particles +particles.species_names = electrons + +electrons.charge = -q_e +electrons.mass = m_e +electrons.injection_style = "MultipleParticles" +electrons.multiple_particles_pos_x = 0.05 0. +electrons.multiple_particles_pos_y = 0. 0.04 +electrons.multiple_particles_pos_z = 0.05 0.05 +electrons.multiple_particles_vel_x = 0. 0. +electrons.multiple_particles_vel_y = 0. 0. +electrons.multiple_particles_vel_z = vel_z/clight vel_z/clight +electrons.multiple_particles_weight = 1. 1. + +lattice.elements = drift1 plasmalens1 drift2 plasmalens2 drift3 plasmalens3 drift4 plasmalens4 + +drift1.type = drift +drift1.ds = 0.1 + +plasmalens1.type = plasmalens +plasmalens1.ds = 0.1 +plasmalens1.dEdx = 600000. + +drift2.type = drift +drift2.ds = 0.41 + +plasmalens2.type = plasmalens +plasmalens2.ds = 0.11 +plasmalens2.dEdx = 800000. + +drift3.type = drift +drift3.ds = 0.4 + +plasmalens3.type = plasmalens +plasmalens3.ds = 0.12 +plasmalens3.dEdx = 600000. + +drift4.type = drift +drift4.ds = 0.39 + +plasmalens4.type = plasmalens +plasmalens4.ds = 0.13 +plasmalens4.dEdx = 200000. 
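+
+# The eight elements above are placed end to end starting at z = 0, for a total
+# lattice length of 1.76 m, inside the 2 m z extent of the domain.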
+ +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 84 +diag1.diag_type = Full +diag1.electrons.variables = ux uy uz diff --git a/Regression/Checksum/benchmarks_json/hard_edged_lasma_lens.json b/Regression/Checksum/benchmarks_json/hard_edged_lasma_lens.json new file mode 100644 index 00000000000..34d35698705 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/hard_edged_lasma_lens.json @@ -0,0 +1,21 @@ +{ + "electrons": { + "particle_momentum_x": 7.424668333879405e-24, + "particle_momentum_y": 5.93963893779683e-24, + "particle_momentum_z": 2.7309245307375727e-22, + "particle_position_x": 0.0360838943897417, + "particle_position_y": 0.028872102262743393, + "particle_position_z": 3.894799963324205 + }, + "lev=0": { + "Bx": 3.4518920711615626e-14, + "By": 3.4457590162508847e-14, + "Bz": 3.1975595242439324e-16, + "Ex": 4.460637209979829e-06, + "Ey": 4.4877026638440135e-06, + "Ez": 9.259738797694699e-06, + "jx": 4.136556647397788e-10, + "jy": 3.3072821927157533e-10, + "jz": 1.649309087211398e-08 + } +} diff --git a/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles.json b/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles.json new file mode 100644 index 00000000000..979e7ebfe85 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles.json @@ -0,0 +1,22 @@ +{ + "electron": { + "particle_momentum_x": 2.0819392991319207e-25, + "particle_momentum_y": 6.04686989476619e-34, + "particle_momentum_z": 2.7309245307290386e-23, + "particle_position_x": 0.03492328774658799, + "particle_position_y": 2.2742551618036812e-11, + "particle_position_z": 1.4915217664612073, + "particle_weight": 1.0 + }, + "lev=0": { + "Bx": 0.0, + "By": 0.0, + "Bz": 0.0, + "Ex": 9.882421125782868e-06, + "Ey": 1.044026102498192e-05, + "Ez": 1.003739698556440e-05, + "jx": 0.0, + "jy": 0.0, + "jz": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_boosted.json b/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_boosted.json new file mode 100644 index 00000000000..acec34286f7 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_boosted.json @@ -0,0 +1,22 @@ +{ + "electron": { + "particle_momentum_x": 5.955475926588059e-26, + "particle_momentum_y": 1.4612764777454504e-35, + "particle_momentum_z": 3.4687284535374423e-23, + "particle_position_x": 0.049960237123814574, + "particle_position_y": 8.397636119991403e-15, + "particle_position_z": 0.10931687737912647, + "particle_weight": 1.0 + }, + "lev=0": { + "Bx": 3.254531465641299e-14, + "By": 3.2768092409497234e-14, + "Bz": 1.0615286316115558e-16, + "Ex": 2.30845657253269e-05, + "Ey": 2.2656898931877975e-05, + "Ez": 1.997747654112569e-05, + "jx": 1.7819477343635878e-10, + "jy": 4.2163030523377745e-20, + "jz": 1.0378839382497739e-07 + } +} diff --git a/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_moving.json b/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_moving.json new file mode 100644 index 00000000000..f00caa73681 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/hard_edged_quadrupoles_moving.json @@ -0,0 +1,22 @@ +{ + "electron": { + "particle_momentum_x": 2.0819392998019253e-25, + "particle_momentum_y": 2.3316091155621773e-33, + "particle_momentum_z": 2.730924530757351e-23, + "particle_position_x": 0.034923287741880055, + "particle_position_y": 9.300626840674331e-11, + "particle_position_z": 1.491521766472402, + "particle_weight": 1.0 + }, + "lev=0": { + "Bx": 0.0, + "By": 0.0, + "Bz": 0.0, + "Ex": 6.028256519009052e-05, + "Ey": 
6.384796595673982e-05, + "Ez": 7.88045921306518e-05, + "jx": 0.0, + "jy": 0.0, + "jz": 0.0 + } +} diff --git a/Regression/WarpX-tests.ini b/Regression/WarpX-tests.ini index b0b6b4d2eb3..5bc36d71a99 100644 --- a/Regression/WarpX-tests.ini +++ b/Regression/WarpX-tests.ini @@ -3295,6 +3295,78 @@ compareParticles = 1 particleTypes = electrons analysisRoutine = Examples/Tests/plasma_lens/analysis.py +[hard_edged_quadrupoles] +buildDir = . +inputFile = Examples/Tests/AcceleratorLattice/inputs_quad_3d +runtime_params = +dim = 3 +addToCompileString = +cmakeSetupOpts = -DWarpX_DIMS=3 +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electron +analysisRoutine = Examples/Tests/AcceleratorLattice/analysis.py + +[hard_edged_quadrupoles_moving] +buildDir = . +inputFile = Examples/Tests/AcceleratorLattice/inputs_quad_moving_3d +runtime_params = +dim = 3 +addToCompileString = +cmakeSetupOpts = -DWarpX_DIMS=3 +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electron +analysisRoutine = Examples/Tests/AcceleratorLattice/analysis.py + +[hard_edged_quadrupoles_boosted] +buildDir = . +inputFile = Examples/Tests/AcceleratorLattice/inputs_quad_boosted_3d +runtime_params = +dim = 3 +addToCompileString = +cmakeSetupOpts = -DWarpX_DIMS=3 +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electron +analysisRoutine = Examples/Tests/AcceleratorLattice/analysis.py + +[hard_edged_lasma_lens] +buildDir = . +inputFile = Examples/Tests/plasma_lens/inputs_lattice_3d +runtime_params = +dim = 3 +addToCompileString = +cmakeSetupOpts = -DWarpX_DIMS=3 +restartTest = 0 +useMPI = 1 +numprocs = 2 +useOMP = 1 +numthreads = 1 +compileTest = 0 +doVis = 0 +compareParticles = 1 +particleTypes = electrons +analysisRoutine = Examples/Tests/plasma_lens/analysis.py + [background_mcc] buildDir = . inputFile = Examples/Physics_applications/capacitive_discharge/inputs_2d diff --git a/Source/AcceleratorLattice/AcceleratorLattice.H b/Source/AcceleratorLattice/AcceleratorLattice.H new file mode 100644 index 00000000000..b8151a1a049 --- /dev/null +++ b/Source/AcceleratorLattice/AcceleratorLattice.H @@ -0,0 +1,78 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_ACCELERATORLATTICE_H_ +#define WARPX_ACCELERATORLATTICE_ACCELERATORLATTICE_H_ + +#include "LatticeElementFinder.H" +#include "LatticeElements/Drift.H" +#include "LatticeElements/HardEdgedQuadrupole.H" +#include "LatticeElements/HardEdgedPlasmaLens.H" + +#include +#include + +/* \brief AcceleratorLattice contains the lattice elements + */ +class AcceleratorLattice +{ +public: + + AcceleratorLattice (); + + /** + * \brief Read in the MAD like lattice description + * + * @param[in] root_name base name for the input variables + * @param[in] z_location starting z location for the lattice + */ + void + ReadLattice (std::string const & root_name, amrex::ParticleReal & z_location); + + /* Refinement level at which this accelerator lattice is defined */ + int m_level; + + /* Flags where any lattice elements have been specified by the user */ + bool m_lattice_defined = false; + + /** + * \brief Initialize the element finder instance at the given level of refinement + * + * @param[in] lev the level of refinement + * @param[in] ba the box array at the level of refinement + * @param[in] dm the distribution map at the level of refinement + */ + void InitElementFinder (int const lev, amrex::BoxArray const & ba, amrex::DistributionMapping const & dm); + + /** + * \brief Update the element finder, needed when the simulation frame has moved relative to the lab frame + * + * @param[in] lev the level of refinement + */ + void UpdateElementFinder (int const lev); + + /* The lattice element finder handles the lookup that finds the elements at the particle locations. + * It should follow the same grid layout as the main grids. + */ + std::unique_ptr> m_element_finder; + + /** + * \brief Return an instance of a lattice finder associated with the grid and that can be used + * on the device (such as a GPU) + * + * @param[in] a_pti the grid where the finder is needed + * @param[in] a_offset the particle offset since the finded needs information about the particles as well + */ + LatticeElementFinderDevice GetFinderDeviceInstance (WarpXParIter const& a_pti, int const a_offset) const; + + /* All of the available lattice element types */ + Drift h_drift; + HardEdgedQuadrupole h_quad; + HardEdgedPlasmaLens h_plasmalens; + +}; + +#endif // WARPX_ACCELERATORLATTICE_ACCELERATORLATTICE_H_ diff --git a/Source/AcceleratorLattice/AcceleratorLattice.cpp b/Source/AcceleratorLattice/AcceleratorLattice.cpp new file mode 100644 index 00000000000..1c22e6244ad --- /dev/null +++ b/Source/AcceleratorLattice/AcceleratorLattice.cpp @@ -0,0 +1,93 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "AcceleratorLattice.H" +#include "LatticeElements/Drift.H" +#include "LatticeElements/HardEdgedQuadrupole.H" +#include "LatticeElements/HardEdgedPlasmaLens.H" + +#include + +AcceleratorLattice::AcceleratorLattice () +{ + + using namespace amrex::literals; + + /* Get the inputs for and initialize all of the lattice element types */ + amrex::ParticleReal z_location = 0._prt; + ReadLattice("lattice", z_location); + + h_quad.WriteToDevice(); + h_plasmalens.WriteToDevice(); +} + +void +AcceleratorLattice::ReadLattice (std::string const & root_name, amrex::ParticleReal & z_location) +{ + amrex::ParmParse pp_lattice(root_name); + std::vector lattice_elements; + pp_lattice.queryarr("elements", lattice_elements); + + if (!lattice_elements.empty()) { + m_lattice_defined = true; + } + + // Loop through lattice elements + for (std::string const & element_name : lattice_elements) { + // Check the element type + amrex::ParmParse pp_element(element_name); + std::string element_type; + pp_element.get("type", element_type); + + // Initialize the corresponding element according to its type + if (element_type == "drift") { + h_drift.AddElement(pp_element, z_location); + } + else if (element_type == "quad") { + h_quad.AddElement(pp_element, z_location); + } + else if (element_type == "plasmalens") { + h_plasmalens.AddElement(pp_element, z_location); + } + else if (element_type == "lattice") { + ReadLattice(element_name, z_location); + } + else { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(false, + "ERROR: Unknown accelerator lattice element type " + element_type); + } + } +} + +void +AcceleratorLattice::InitElementFinder (int const lev, amrex::BoxArray const & ba, amrex::DistributionMapping const & dm) +{ + if (m_lattice_defined) { + m_element_finder = std::make_unique>(ba, dm); + for (amrex::MFIter mfi(*m_element_finder); mfi.isValid(); ++mfi) + { + (*m_element_finder)[mfi].InitElementFinder(lev, mfi, *this); + } + } +} + +void +AcceleratorLattice::UpdateElementFinder (int const lev) +{ + if (m_lattice_defined) { + for (amrex::MFIter mfi(*m_element_finder); mfi.isValid(); ++mfi) + { + (*m_element_finder)[mfi].UpdateIndices(lev, mfi, *this); + } + } +} + +LatticeElementFinderDevice +AcceleratorLattice::GetFinderDeviceInstance (WarpXParIter const& a_pti, int const a_offset) const +{ + LatticeElementFinder & finder = (*m_element_finder)[a_pti]; + return finder.GetFinderDeviceInstance(a_pti, a_offset, *this); +} diff --git a/Source/AcceleratorLattice/CMakeLists.txt b/Source/AcceleratorLattice/CMakeLists.txt new file mode 100644 index 00000000000..a72723beaba --- /dev/null +++ b/Source/AcceleratorLattice/CMakeLists.txt @@ -0,0 +1,7 @@ +target_sources(WarpX + PRIVATE + AcceleratorLattice.cpp + LatticeElementFinder.cpp +) + +add_subdirectory(LatticeElements) diff --git a/Source/AcceleratorLattice/LatticeElementFinder.H b/Source/AcceleratorLattice/LatticeElementFinder.H new file mode 100644 index 00000000000..e527741bc12 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElementFinder.H @@ -0,0 +1,283 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_LATTICEELEMENTFINDER_H_ +#define WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_LATTICEELEMENTFINDER_H_ + +#include "LatticeElements/HardEdgedQuadrupole.H" +#include "LatticeElements/HardEdgedPlasmaLens.H" +#include "Particles/Pusher/GetAndSetPosition.H" +#include "Particles/WarpXParticleContainer.H" + +#include +#include + +class AcceleratorLattice; +struct LatticeElementFinderDevice; + +// Instances of the LatticeElementFinder class are saved in the AcceleratorLattice class +// as the objects in a LayoutData. +// The LatticeElementFinder handles the lookup needed to find the lattice elements at +// particle locations. + +struct LatticeElementFinder +{ + + /** + * \brief Initialize the element finder at the level and grid + * + * @param[in] lev the refinement level + * @param[in] a_mfi specifies the grid where the finder is defined + * @param[in] accelerator_lattice a reference to the accelerator lattice at the refinement level + */ + void InitElementFinder (int const lev, amrex::MFIter const& a_mfi, + AcceleratorLattice const& accelerator_lattice); + + /** + * \brief Allocate the index lookup tables for each element type + * + * @param[in] accelerator_lattice a reference to the accelerator lattice at the refinement level + */ + void AllocateIndices (AcceleratorLattice const& accelerator_lattice); + + /** + * \brief Update the index lookup tables for each element type, filling in the values + * + * @param[in] lev the refinement level + * @param[in] a_mfi specifies the grid where the finder is defined + * @param[in] accelerator_lattice a reference to the accelerator lattice at the refinement level + */ + void UpdateIndices (int const lev, amrex::MFIter const& a_mfi, + AcceleratorLattice const& accelerator_lattice); + + /* Define the location and size of the index lookup table */ + /* Use the type Real to be consistent with the way the main grid is defined */ + int m_nz; + amrex::Real m_zmin; + amrex::Real m_dz; + + /* Parameters needed for the Lorentz transforms into and out of the boosted frame */ + /* The time for m_time is consistent with the main time variable */ + amrex::ParticleReal m_gamma_boost; + amrex::ParticleReal m_uz_boost; + amrex::Real m_time; + + /** + * \brief Get the device level instance associated with this instance + * + * @param[in] a_pti specifies the grid where the finder is defined + * @param[in] a_offset particle index offset needed to access particle info + * @param[in] accelerator_lattice a reference to the accelerator lattice at the refinement level + */ + LatticeElementFinderDevice GetFinderDeviceInstance (WarpXParIter const& a_pti, int const a_offset, + AcceleratorLattice const& accelerator_lattice); + + /* The index lookup tables for each lattice element type */ + amrex::Gpu::DeviceVector d_quad_indices; + amrex::Gpu::DeviceVector d_plasmalens_indices; + + /** + * \brief Fill in the index lookup tables + * This loops over the grid (in z) and finds the lattice element closest to each grid point + * + * @param[in] zs list of the starts of the lattice elements + * @param[in] ze list of the ends of the lattice elements + * @param[in] indices the index lookup table to be filled in + */ + void setup_lattice_indices (amrex::Gpu::DeviceVector const & zs, + amrex::Gpu::DeviceVector const & ze, + amrex::Gpu::DeviceVector & indices) + { + + using namespace amrex::literals; + + int nelements = static_cast(zs.size()); + amrex::ParticleReal const * zs_arr = zs.data(); + amrex::ParticleReal 
const * ze_arr = ze.data(); + int * indices_arr = indices.data(); + + amrex::Real const zmin = m_zmin; + amrex::Real const dz = m_dz; + + amrex::ParticleReal const gamma_boost = m_gamma_boost; + amrex::ParticleReal const uz_boost = m_uz_boost; + amrex::Real const time = m_time; + + amrex::ParallelFor( m_nz, + [=] AMREX_GPU_DEVICE (int iz) { + + // Get the location of the grid node + amrex::Real z_node = zmin + iz*dz; + + if (gamma_boost > 1._prt) { + // Transform to lab frame + z_node = gamma_boost*z_node + uz_boost*time; + } + + // Find the index to the element that is closest to the grid cell. + // For now, this assumes that there is no overlap among elements of the same type. + for (int ie = 0 ; ie < nelements ; ie++) { + // Find the mid points between element ie and the ones before and after it. + // The first and last element need special handling. + amrex::ParticleReal zcenter_left, zcenter_right; + if (ie == 0) { + zcenter_left = std::numeric_limits::lowest(); + } else { + zcenter_left = 0.5_prt*(ze_arr[ie-1] + zs_arr[ie]); + } + if (ie < nelements - 1) { + zcenter_right = 0.5_prt*(ze_arr[ie] + zs_arr[ie+1]); + } else { + zcenter_right = std::numeric_limits::max(); + } + + if (zcenter_left <= z_node && z_node < zcenter_right) { + indices_arr[iz] = ie; + } + + } + } + ); + } + +}; + +/** + * \brief The lattice element finder class that can be trivially copied to the device. + * This only has simple data and pointers. + */ +struct LatticeElementFinderDevice +{ + + /** + * \brief Initialize the data needed to do the lookups + * + * @param[in] a_pti specifies the grid where the finder is defined + * @param[in] a_offset particle index offset needed to access particle info + * @param[in] accelerator_lattice a reference to the accelerator lattice at the refinement level + * @param[in] The host level instance of the element finder that this is associated with + */ + void + InitLatticeElementFinderDevice (WarpXParIter const& a_pti, int const a_offset, + AcceleratorLattice const& accelerator_lattice, + LatticeElementFinder const & h_finder); + + /* Size and location of the index lookup table */ + amrex::Real m_zmin; + amrex::Real m_dz; + amrex::Real m_dt; + + /* Parameters needed for the Lorentz transforms into and out of the boosted frame */ + amrex::ParticleReal m_gamma_boost; + amrex::ParticleReal m_uz_boost; + amrex::Real m_time; + + GetParticlePosition m_get_position; + const amrex::ParticleReal* AMREX_RESTRICT m_ux = nullptr; + const amrex::ParticleReal* AMREX_RESTRICT m_uy = nullptr; + const amrex::ParticleReal* AMREX_RESTRICT m_uz = nullptr; + + /* Device level instances for each lattice element type */ + HardEdgedQuadrupoleDevice d_quad; + HardEdgedPlasmaLensDevice d_plasmalens; + + /* Device level index lookup tables for each element type */ + int const* d_quad_indices_arr = nullptr; + int const* d_plasmalens_indices_arr = nullptr; + + /** + * \brief Gather the field for the particle from the lattice elements + * + * @param[in] i the particle index + * @param[out] field_Ex, ..., the gathered E and B fields + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + void operator () (const long i, + amrex::ParticleReal& field_Ex, + amrex::ParticleReal& field_Ey, + amrex::ParticleReal& field_Ez, + amrex::ParticleReal& field_Bx, + amrex::ParticleReal& field_By, + amrex::ParticleReal& field_Bz) const noexcept + { + + using namespace amrex::literals; + + amrex::ParticleReal x, y, z; + m_get_position(i, x, y, z); + + // Find location of partice in the indices grid + // (which is in the boosted 
frame) + const int iz = static_cast((z - m_zmin)/m_dz); + + constexpr amrex::ParticleReal inv_c2 = 1._prt/(PhysConst::c*PhysConst::c); + amrex::ParticleReal const gamma = std::sqrt(1._prt + (m_ux[i]*m_ux[i] + m_uy[i]*m_uy[i] + m_uz[i]*m_uz[i])*inv_c2); + amrex::ParticleReal const vzp = m_uz[i]/gamma; + + amrex::ParticleReal zpvdt = z + vzp*m_dt; + + // The position passed to the get_field methods needs to be in the lab frame. + if (m_gamma_boost > 1._prt) { + z = m_gamma_boost*z + m_uz_boost*m_time; + zpvdt = m_gamma_boost*zpvdt + m_uz_boost*(m_time + m_dt); + } + + amrex::ParticleReal Ex_sum = 0._prt; + amrex::ParticleReal Ey_sum = 0._prt; + amrex::ParticleReal Ez_sum = 0._prt; + amrex::ParticleReal Bx_sum = 0._prt; + amrex::ParticleReal By_sum = 0._prt; + amrex::ParticleReal Bz_sum = 0._prt; + + if (d_quad.nelements > 0) { + if (d_quad_indices_arr[iz] > -1) { + int ielement = d_quad_indices_arr[iz]; + amrex::ParticleReal Ex, Ey, Bx, By; + d_quad.get_field(ielement, x, y, z, zpvdt, Ex, Ey, Bx, By); + Ex_sum += Ex; + Ey_sum += Ey; + Bx_sum += Bx; + By_sum += By; + } + } + + if (d_plasmalens.nelements > 0) { + if (d_plasmalens_indices_arr[iz] > -1) { + int ielement = d_plasmalens_indices_arr[iz]; + amrex::ParticleReal Ex, Ey, Bx, By; + d_plasmalens.get_field(ielement, x, y, z, zpvdt, Ex, Ey, Bx, By); + Ex_sum += Ex; + Ey_sum += Ey; + Bx_sum += Bx; + By_sum += By; + } + } + + if (m_gamma_boost > 1._prt) { + // The fields returned from get_field is in the lab frame + // Transform the fields to the boosted frame + const amrex::ParticleReal Ex_boost = m_gamma_boost*Ex_sum - m_uz_boost*By_sum; + const amrex::ParticleReal Ey_boost = m_gamma_boost*Ey_sum + m_uz_boost*Bx_sum; + const amrex::ParticleReal Bx_boost = m_gamma_boost*Bx_sum + m_uz_boost*Ey_sum*inv_c2; + const amrex::ParticleReal By_boost = m_gamma_boost*By_sum - m_uz_boost*Ex_sum*inv_c2; + Ex_sum = Ex_boost; + Ey_sum = Ey_boost; + Bx_sum = Bx_boost; + By_sum = By_boost; + } + + field_Ex += Ex_sum; + field_Ey += Ey_sum; + field_Ez += Ez_sum; + field_Bx += Bx_sum; + field_By += By_sum; + field_Bz += Bz_sum; + + } + +}; + +#endif // WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_LATTICEELEMENTFINDER_H_ diff --git a/Source/AcceleratorLattice/LatticeElementFinder.cpp b/Source/AcceleratorLattice/LatticeElementFinder.cpp new file mode 100644 index 00000000000..14ccb8f7121 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElementFinder.cpp @@ -0,0 +1,123 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#include "WarpX.H" +#include "LatticeElementFinder.H" +#include "LatticeElements/HardEdgedQuadrupole.H" +#include "LatticeElements/HardEdgedPlasmaLens.H" + +#include +#include + +using namespace amrex::literals; + +void +LatticeElementFinder::InitElementFinder (int const lev, amrex::MFIter const& a_mfi, + AcceleratorLattice const& accelerator_lattice) +{ + + // The lattice is assumed to extend in the z-direction + // Get the number of nodes where indices will be setup + amrex::Box box = a_mfi.tilebox(); + m_nz = box.size()[WARPX_ZINDEX]; + + m_dz = WarpX::CellSize(lev)[2]; + + m_gamma_boost = WarpX::gamma_boost; + m_uz_boost = std::sqrt(WarpX::gamma_boost*WarpX::gamma_boost - 1._prt)*PhysConst::c; + + AllocateIndices(accelerator_lattice); + + UpdateIndices(lev, a_mfi, accelerator_lattice); + +} + +void +LatticeElementFinder::AllocateIndices (AcceleratorLattice const& accelerator_lattice) +{ + // Allocate the space for the indices for each element type. 
+ // Note that this uses m_nz since the information is saved per node. + + if (accelerator_lattice.h_quad.nelements > 0) { + d_quad_indices.resize(m_nz); + } + + if (accelerator_lattice.h_plasmalens.nelements > 0) { + d_plasmalens_indices.resize(m_nz); + } +} + +void +LatticeElementFinder::UpdateIndices (int const lev, amrex::MFIter const& a_mfi, + AcceleratorLattice const& accelerator_lattice) +{ + auto& warpx = WarpX::GetInstance(); + + // Update the location of the index grid. + // Note that the current box is used since the box may have been updated since + // the initialization in InitElementFinder. + amrex::Box box = a_mfi.tilebox(); + m_zmin = WarpX::LowerCorner(box, lev, 0._rt)[2]; + m_time = warpx.gett_new(lev); + + if (accelerator_lattice.h_quad.nelements > 0) { + setup_lattice_indices(accelerator_lattice.h_quad.d_zs, + accelerator_lattice.h_quad.d_ze, + d_quad_indices); + } + + if (accelerator_lattice.h_plasmalens.nelements > 0) { + setup_lattice_indices(accelerator_lattice.h_plasmalens.d_zs, + accelerator_lattice.h_plasmalens.d_ze, + d_plasmalens_indices); + } +} + +LatticeElementFinderDevice +LatticeElementFinder::GetFinderDeviceInstance (WarpXParIter const& a_pti, int const a_offset, + AcceleratorLattice const& accelerator_lattice) +{ + LatticeElementFinderDevice result; + result.InitLatticeElementFinderDevice(a_pti, a_offset, accelerator_lattice, *this); + return result; +} + + +void +LatticeElementFinderDevice::InitLatticeElementFinderDevice (WarpXParIter const& a_pti, int const a_offset, + AcceleratorLattice const& accelerator_lattice, + LatticeElementFinder const & h_finder) +{ + + auto& warpx = WarpX::GetInstance(); + + int const lev = a_pti.GetLevel(); + + m_get_position = GetParticlePosition(a_pti, a_offset); + auto& attribs = a_pti.GetAttribs(); + m_ux = attribs[PIdx::ux].dataPtr() + a_offset; + m_uy = attribs[PIdx::uy].dataPtr() + a_offset; + m_uz = attribs[PIdx::uz].dataPtr() + a_offset; + m_dt = warpx.getdt(lev); + + m_gamma_boost = WarpX::gamma_boost; + m_uz_boost = std::sqrt(WarpX::gamma_boost*WarpX::gamma_boost - 1._prt)*PhysConst::c; + + m_zmin = h_finder.m_zmin; + m_dz = h_finder.m_dz; + m_time = h_finder.m_time; + + if (accelerator_lattice.h_quad.nelements > 0) { + d_quad = accelerator_lattice.h_quad.GetDeviceInstance(); + d_quad_indices_arr = h_finder.d_quad_indices.data(); + } + + if (accelerator_lattice.h_plasmalens.nelements > 0) { + d_plasmalens = accelerator_lattice.h_plasmalens.GetDeviceInstance(); + d_plasmalens_indices_arr = h_finder.d_plasmalens_indices.data(); + } + +} diff --git a/Source/AcceleratorLattice/LatticeElements/CMakeLists.txt b/Source/AcceleratorLattice/LatticeElements/CMakeLists.txt new file mode 100644 index 00000000000..f71865187b8 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/CMakeLists.txt @@ -0,0 +1,7 @@ +target_sources(WarpX + PRIVATE + LatticeElementBase.cpp + Drift.cpp + HardEdgedQuadrupole.cpp + HardEdgedPlasmaLens.cpp +) diff --git a/Source/AcceleratorLattice/LatticeElements/Drift.H b/Source/AcceleratorLattice/LatticeElements/Drift.H new file mode 100644 index 00000000000..4342586f8d0 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/Drift.H @@ -0,0 +1,33 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_DRIFT_H_ +#define WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_DRIFT_H_ + +#include "LatticeElementBase.H" + +// Specifies a drift + +struct Drift + : LatticeElementBase +{ + + Drift (); + + ~Drift () = default; + + /** + * \brief Read in an element and add it to the lists + * + * @param[in] pp_element The ParmParse instance to read in the data + * @param[in/out] z_location The current z location in the lattice + */ + void + AddElement (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location); + +}; + +#endif // WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_DRIFT_H_ diff --git a/Source/AcceleratorLattice/LatticeElements/Drift.cpp b/Source/AcceleratorLattice/LatticeElements/Drift.cpp new file mode 100644 index 00000000000..7afb9330e8d --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/Drift.cpp @@ -0,0 +1,22 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#include "Drift.H" + +#include + +#include + +Drift::Drift () + : LatticeElementBase("drift") +{ +} + +void +Drift::AddElement (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location) +{ + AddElementBase(pp_element, z_location); +} diff --git a/Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.H b/Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.H new file mode 100644 index 00000000000..1cbf3341262 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.H @@ -0,0 +1,120 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGEDPLASMALENS_H_ +#define WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGEDPLASMALENS_H_ + +#include "LatticeElementBase.H" +#include "HardEdged_K.H" + +#include +#include +#include +#include + +#include + +// Specifies a field that models the field generated by a plasma lens, +// an axially uniform radius dependent radial force + +struct HardEdgedPlasmaLensDevice; + +struct HardEdgedPlasmaLens + : LatticeElementBase +{ + + HardEdgedPlasmaLens (); + + ~HardEdgedPlasmaLens () = default; + + /** + * \brief Read in an element and add it to the lists + * + * @param[in] pp_element The ParmParse instance to read in the data + * @param[in/out] z_location The current z location in the lattice + */ + void + AddElement (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location); + + /** + * \brief Write the element information to the device + */ + void + WriteToDevice (); + + /* The field gradients */ + /* On the host */ + std::vector h_dEdx; + std::vector h_dBdx; + /* On the device */ + amrex::Gpu::DeviceVector d_dEdx; + amrex::Gpu::DeviceVector d_dBdx; + + /** + * \brief Returns the device level instance with the lattice information + */ + HardEdgedPlasmaLensDevice GetDeviceInstance () const; + + +}; + +// Instance that is trivially copyable to the device. 
+ +struct HardEdgedPlasmaLensDevice +{ + + /** + * \brief Initializes the data and pointer needed to reference the lattice element info + * + * @param[in] h_plasmalens host level instance that this is associated with + */ + void InitHardEdgedPlasmaLensDevice (HardEdgedPlasmaLens const& h_plasmalens); + + int nelements = 0; + + const amrex::ParticleReal* AMREX_RESTRICT d_zs_arr; + const amrex::ParticleReal* AMREX_RESTRICT d_ze_arr; + + const amrex::ParticleReal* AMREX_RESTRICT d_dEdx_arr; + const amrex::ParticleReal* AMREX_RESTRICT d_dBdx_arr; + + /** + * \brief Fetch the field of the specified element at the given location + * + * @param[in] ielement the element number + * @param[in] x, y, z the particle position in the lab frame + * @param[in] zpvdt the estimated next particle z position, z + v*dt + * @param[out] Ex, Ey, Bx, By the fetched field in the lab frame + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + void get_field (const int ielement, + const amrex::ParticleReal x, + const amrex::ParticleReal y, + const amrex::ParticleReal z, + const amrex::ParticleReal zpvdt, + amrex::ParticleReal& Ex, + amrex::ParticleReal& Ey, + amrex::ParticleReal& Bx, + amrex::ParticleReal& By) const + { + + // Calculate the residence correction, the fraction of the time step that the particle + // spends inside the hard edged element. + amrex::ParticleReal const frac = hard_edged_fraction(z, zpvdt, d_zs_arr[ielement], d_ze_arr[ielement]); + + amrex::ParticleReal dEdx = frac*d_dEdx_arr[ielement]; + amrex::ParticleReal dBdx = frac*d_dBdx_arr[ielement]; + + Ex = +x*dEdx; + Ey = +y*dEdx; + Bx = +y*dBdx; + By = -x*dBdx; + + } + +}; + +#endif // WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGEDPLASMALENS_H_ diff --git a/Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.cpp b/Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.cpp new file mode 100644 index 00000000000..6c87389f159 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/HardEdgedPlasmaLens.cpp @@ -0,0 +1,70 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "HardEdgedPlasmaLens.H" +#include "Utils/WarpXUtil.H" +#include "Utils/TextMsg.H" + +#include +#include + +#include + +HardEdgedPlasmaLens::HardEdgedPlasmaLens () + : LatticeElementBase("plasmalens") +{ +} + +void +HardEdgedPlasmaLens::AddElement (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location) +{ + using namespace amrex::literals; + + AddElementBase(pp_element, z_location); + + amrex::ParticleReal dEdx = 0._prt; + amrex::ParticleReal dBdx = 0._prt; + pp_element.query("dEdx", dEdx); + pp_element.query("dBdx", dBdx); + + h_dEdx.push_back(dEdx); + h_dBdx.push_back(dBdx); +} + +void +HardEdgedPlasmaLens::WriteToDevice () +{ + WriteToDeviceBase(); + + d_dEdx.resize(h_dEdx.size()); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_dEdx.begin(), h_dEdx.end(), d_dEdx.begin()); + d_dBdx.resize(h_dBdx.size()); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_dBdx.begin(), h_dBdx.end(), d_dBdx.begin()); +} + +HardEdgedPlasmaLensDevice +HardEdgedPlasmaLens::GetDeviceInstance () const +{ + HardEdgedPlasmaLensDevice result; + result.InitHardEdgedPlasmaLensDevice(*this); + return result; +} + +void +HardEdgedPlasmaLensDevice::InitHardEdgedPlasmaLensDevice (HardEdgedPlasmaLens const& h_plasmalens) +{ + + nelements = h_plasmalens.nelements; + + if (nelements == 0) return; + + d_zs_arr = h_plasmalens.d_zs.data(); + d_ze_arr = h_plasmalens.d_ze.data(); + + d_dEdx_arr = h_plasmalens.d_dEdx.data(); + d_dBdx_arr = h_plasmalens.d_dBdx.data(); + +} diff --git a/Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.H b/Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.H new file mode 100644 index 00000000000..8ca4e29a24a --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.H @@ -0,0 +1,120 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGEDQUADRUPOLE_H_ +#define WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGEDQUADRUPOLE_H_ + +#include "LatticeElementBase.H" +#include "HardEdged_K.H" + +#include +#include +#include +#include + +#include + +// Specifies a field that models the field generated by a quadrupole lens, +// an axially uniform quadrupole field + +struct HardEdgedQuadrupoleDevice; + +struct HardEdgedQuadrupole + : LatticeElementBase +{ + + HardEdgedQuadrupole (); + + ~HardEdgedQuadrupole () = default; + + /** + * \brief Read in an element and add it to the lists + * + * @param[in] pp_element The ParmParse instance to read in the data + * @param[in/out] z_location The current z location in the lattice + */ + void + AddElement (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location); + + /** + * \brief Write the element information to the device + */ + void + WriteToDevice (); + + /* The field gradients */ + /* On the host */ + std::vector h_dEdx; + std::vector h_dBdx; + /* On the device */ + amrex::Gpu::DeviceVector d_dEdx; + amrex::Gpu::DeviceVector d_dBdx; + + /** + * \brief Returns the device level instance with the lattice information + */ + HardEdgedQuadrupoleDevice GetDeviceInstance () const; + + +}; + +// Instance that is trivially copyable to the device. 
+ +struct HardEdgedQuadrupoleDevice +{ + + /** + * \brief Initializes the data and pointer needed to reference the lattice element info + * + * @param[in] h_quad host level instance that this is associated with + */ + void InitHardEdgedQuadrupoleDevice (HardEdgedQuadrupole const& h_quad); + + int nelements = 0; + + const amrex::ParticleReal* AMREX_RESTRICT d_zs_arr; + const amrex::ParticleReal* AMREX_RESTRICT d_ze_arr; + + const amrex::ParticleReal* AMREX_RESTRICT d_dEdx_arr; + const amrex::ParticleReal* AMREX_RESTRICT d_dBdx_arr; + + /** + * \brief Fetch the field of the specified element at the given location + * + * @param[in] ielement the element number + * @param[in] x, y, z the particle position in the lab frame + * @param[in] zpvdt the estimated next particle z position, z + v*dt + * @param[out] Ex, Ey, Bx, By the fetched field in the lab frame + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + void get_field (const int ielement, + const amrex::ParticleReal x, + const amrex::ParticleReal y, + const amrex::ParticleReal z, + const amrex::ParticleReal zpvdt, + amrex::ParticleReal& Ex, + amrex::ParticleReal& Ey, + amrex::ParticleReal& Bx, + amrex::ParticleReal& By) const + { + + // Calculate the residence correction, the fraction of the time step that the particle + // spends inside the hard edged element. + amrex::ParticleReal const frac = hard_edged_fraction(z, zpvdt, d_zs_arr[ielement], d_ze_arr[ielement]); + + amrex::ParticleReal dEdx = frac*d_dEdx_arr[ielement]; + amrex::ParticleReal dBdx = frac*d_dBdx_arr[ielement]; + + Ex = +x*dEdx; + Ey = -y*dEdx; + Bx = +y*dBdx; + By = +x*dBdx; + + } + +}; + +#endif // WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGEDQUADRUPOLE_H_ diff --git a/Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.cpp b/Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.cpp new file mode 100644 index 00000000000..4cff9139521 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/HardEdgedQuadrupole.cpp @@ -0,0 +1,70 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "HardEdgedQuadrupole.H" +#include "Utils/WarpXUtil.H" +#include "Utils/TextMsg.H" + +#include +#include + +#include + +HardEdgedQuadrupole::HardEdgedQuadrupole () + : LatticeElementBase("quad") +{ +} + +void +HardEdgedQuadrupole::AddElement (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location) +{ + using namespace amrex::literals; + + AddElementBase(pp_element, z_location); + + amrex::ParticleReal dEdx = 0._prt; + amrex::ParticleReal dBdx = 0._prt; + pp_element.query("dEdx", dEdx); + pp_element.query("dBdx", dBdx); + + h_dEdx.push_back(dEdx); + h_dBdx.push_back(dBdx); +} + +void +HardEdgedQuadrupole::WriteToDevice () +{ + WriteToDeviceBase(); + + d_dEdx.resize(h_dEdx.size()); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_dEdx.begin(), h_dEdx.end(), d_dEdx.begin()); + d_dBdx.resize(h_dBdx.size()); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_dBdx.begin(), h_dBdx.end(), d_dBdx.begin()); +} + +HardEdgedQuadrupoleDevice +HardEdgedQuadrupole::GetDeviceInstance () const +{ + HardEdgedQuadrupoleDevice result; + result.InitHardEdgedQuadrupoleDevice(*this); + return result; +} + +void +HardEdgedQuadrupoleDevice::InitHardEdgedQuadrupoleDevice (HardEdgedQuadrupole const& h_quad) +{ + + nelements = h_quad.nelements; + + if (nelements == 0) return; + + d_zs_arr = h_quad.d_zs.data(); + d_ze_arr = h_quad.d_ze.data(); + + d_dEdx_arr = h_quad.d_dEdx.data(); + d_dBdx_arr = h_quad.d_dBdx.data(); + +} diff --git a/Source/AcceleratorLattice/LatticeElements/HardEdged_K.H b/Source/AcceleratorLattice/LatticeElements/HardEdged_K.H new file mode 100644 index 00000000000..6311d60676d --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/HardEdged_K.H @@ -0,0 +1,50 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGED_K_H_ +#define WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGED_K_H_ + +#include "Utils/WarpXConst.H" + +#include +#include + +/** + * \brief Calculate the residence correction, the fraction of the time step the particle + * spends inside of the hard edge lattice element + * + * @param[in] z the current location of the particle + * @param[in] zpvdt the estimated future location of the particle, z + v*dt + * @param[in] zs the start of the lattice element + * @param[in] ze the end of the lattice element + * @param[out] the fraction is returned + */ +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +amrex::ParticleReal hard_edged_fraction(const amrex::ParticleReal z, + const amrex::ParticleReal zpvdt, + const amrex::ParticleReal zs, + const amrex::ParticleReal ze) +{ + + using namespace amrex::literals; + + // This allows vz to be positive or negative + amrex::ParticleReal const zl = std::min(z, zpvdt); + amrex::ParticleReal const zr = std::max(z, zpvdt); + + // Calculate the residence correction + // frac will be 1 if the step is completely inside the lens, between 0 and 1 + // when entering or leaving the lens, and otherwise 0. + // This accounts for the case when particles step over the element without landing in it. + // This assumes that vzp != 0. 
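+ // For example, with zs = 1.0 and ze = 1.2, a step from z = 1.1 to zpvdt = 1.3
+ // is clamped to [1.1, 1.2], giving frac = (1.2 - 1.1)/(1.3 - 1.1) = 0.5.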
+ amrex::ParticleReal const zl_bounded = std::min(std::max(zl, zs), ze); + amrex::ParticleReal const zr_bounded = std::min(std::max(zr, zs), ze); + amrex::ParticleReal frac = (zr_bounded - zl_bounded)/(zr - zl); + + return frac; +} + +#endif // WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_HARDEDGED_K_H_ diff --git a/Source/AcceleratorLattice/LatticeElements/LatticeElementBase.H b/Source/AcceleratorLattice/LatticeElements/LatticeElementBase.H new file mode 100644 index 00000000000..ff62ae23621 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/LatticeElementBase.H @@ -0,0 +1,59 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_LATTICEELEMENTBASE_H_ +#define WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_LATTICEELEMENTBASE_H_ + +#include +#include +#include + +#include +#include + +struct LatticeElementBase +{ + + /** + * \brief LatticeElementBase constructor + * This gets the input z extent of the element and check for its correctness + * + * @param[in] element_name the name of the element type + */ + LatticeElementBase (std::string const& element_name); + + ~LatticeElementBase () = default; + + /** + * \brief Read in an element base data and add it to the lists + * + * @param[in] pp_element The ParmParse instance to read in the data + * @param[in/out] z_location The current z location in the lattice + */ + void + AddElementBase(amrex::ParmParse & pp_element, amrex::ParticleReal & z_location); + + /** + * \brief Write the base element information to the device + */ + void + WriteToDeviceBase (); + + std::string m_element_name; + + int nelements = 0; + + // The host level copy of the data. + std::vector h_zs; + std::vector h_ze; + + // The device level copy of the data. + amrex::Gpu::DeviceVector d_zs; + amrex::Gpu::DeviceVector d_ze; + +}; + +#endif // WARPX_ACCELERATORLATTICE_LATTICEELEMENTS_LATTICEELEMENTBASE_H_ diff --git a/Source/AcceleratorLattice/LatticeElements/LatticeElementBase.cpp b/Source/AcceleratorLattice/LatticeElements/LatticeElementBase.cpp new file mode 100644 index 00000000000..07f160f1730 --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/LatticeElementBase.cpp @@ -0,0 +1,42 @@ +/* Copyright 2022 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ +#include "LatticeElementBase.H" +#include "Utils/WarpXUtil.H" +#include "Utils/TextMsg.H" + +#include +#include + +#include + +LatticeElementBase::LatticeElementBase (std::string const& element_name) +{ + m_element_name = element_name; +} + +void +LatticeElementBase::AddElementBase (amrex::ParmParse & pp_element, amrex::ParticleReal & z_location) +{ + // Read in the length of the element and save the start and end, and update z_location + amrex::ParticleReal ds; + pp_element.get("ds", ds); + + h_zs.push_back(z_location); + z_location += ds; + h_ze.push_back(z_location); + + nelements += 1; +} + +void +LatticeElementBase::WriteToDeviceBase () +{ + d_zs.resize(h_zs.size()); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_zs.begin(), h_zs.end(), d_zs.begin()); + d_ze.resize(h_ze.size()); + amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_ze.begin(), h_ze.end(), d_ze.begin()); +} diff --git a/Source/AcceleratorLattice/LatticeElements/Make.package b/Source/AcceleratorLattice/LatticeElements/Make.package new file mode 100644 index 00000000000..2d55e10d68b --- /dev/null +++ b/Source/AcceleratorLattice/LatticeElements/Make.package @@ -0,0 +1,6 @@ +CEXE_sources += LatticeElementBase.cpp +CEXE_sources += Drift.cpp +CEXE_sources += HardEdgedQuadrupole.cpp +CEXE_sources += HardEdgedPlasmaLens.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/AcceleratorLattice/LatticeElements diff --git a/Source/AcceleratorLattice/Make.package b/Source/AcceleratorLattice/Make.package new file mode 100644 index 00000000000..f25f460c2ff --- /dev/null +++ b/Source/AcceleratorLattice/Make.package @@ -0,0 +1,6 @@ +CEXE_sources += AcceleratorLattice.cpp +CEXE_sources += LatticeElementFinder.cpp + +include $(WARPX_HOME)/Source/AcceleratorLattice/LatticeElements/Make.package + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/AcceleratorLattice diff --git a/Source/AcceleratorLattice/README.rst b/Source/AcceleratorLattice/README.rst new file mode 100644 index 00000000000..3fdbed6fa68 --- /dev/null +++ b/Source/AcceleratorLattice/README.rst @@ -0,0 +1,43 @@ +.. _accelerator_lattice: + +Accelerator lattice +=================== + +The files in this directory handle the accelerator lattice. These are fields of various types and configurations. +The lattice is laid out along the z-axis. + +The AcceleratorLattice has the instances of the accelerator element types and handles the input of the data. + +The LatticeElementFinder manages the application of the fields to the particles. It maintains index lookup tables +that allow rapidly determining which elements the particles are in. + +The classes for each element type are in the subdirectory LatticeElements. + +Host and device classes +----------------------- + +The LatticeElementFinder and each of the element types have two classes, one +that lives on the host and one that can be trivially copied to the device. +This dual structure is needed because of the complex data structures +describing both the accelerator elements and the index lookup tables. The +host level classes manage the data structures, reading in and setting up the +data. The host classes copy the data to the device and maintain the pointers +to that data on the device. The device level classes grab pointers to the +appropriate data (on the device) needed when fetching the data for the particles. + +External fields +--------------- + +The lattice fields are applied to the particles from the GetExternalEBField +class. 
If a lattice is defined, the GetExternalEBField class gets the lattice +element finder device level instance associated with the grid being operated +on. The fields are applied from that instance, which calls the "get_field" +method for each lattice element type that is defined for each particle. + +Adding new element types +------------------------ + +A number of places need to be touched when adding a new element types. The +best method is to look for every place where the "quad" element is referenced +and duplicate the code for the new element type. Changes will only be needed +within the AcceleratorLattice directory. diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index ed70cec31a4..bbba9ec5d91 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -257,6 +257,14 @@ WarpX::Evolve (int numsteps) // We might need to move j because we are going to make a plotfile. int num_moved = MoveWindow(step+1, move_j); + // Update the accelerator lattice element finder if the window has moved, + // from either a moving window or a boosted frame + if (num_moved != 0 || gamma_boost > 1) { + for (int lev = 0; lev <= finest_level; ++lev) { + m_accelerator_lattice[lev]->UpdateElementFinder(lev); + } + } + mypc->ContinuousFluxInjection(cur_time, dt[0]); mypc->ApplyBoundaryConditions(); diff --git a/Source/Make.WarpX b/Source/Make.WarpX index 7a97df3236e..3f307d2ecaa 100644 --- a/Source/Make.WarpX +++ b/Source/Make.WarpX @@ -71,6 +71,7 @@ endif -include Make.package include $(WARPX_HOME)/Source/Make.package include $(WARPX_HOME)/Source/ablastr/Make.package +include $(WARPX_HOME)/Source/AcceleratorLattice/Make.package include $(WARPX_HOME)/Source/BoundaryConditions/Make.package include $(WARPX_HOME)/Source/Diagnostics/Make.package include $(WARPX_HOME)/Source/EmbeddedBoundary/Make.package diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index db91f2d7878..3c90a07cb4c 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -331,6 +331,9 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi BuildBufferMasks(); } + // Re-initialize the lattice element finder with the new ba and dm. + m_accelerator_lattice[lev]->InitElementFinder(lev, ba, dm); + if (costs[lev] != nullptr) { costs[lev] = std::make_unique>(ba, dm); diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index ff107dce521..d0436566874 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -6,6 +6,8 @@ #include "Particles/WarpXParticleContainer_fwd.H" #include "Utils/WarpXConst.H" +#include "AcceleratorLattice/LatticeElementFinder.H" + #include #include #include @@ -13,6 +15,9 @@ #include #include +#include + + /** \brief Functor class that assigns external * field values (E and B) to particles. 
 */
@@ -54,8 +59,10 @@ struct GetExternalEBField
     const amrex::ParticleReal* AMREX_RESTRICT m_uy = nullptr;
     const amrex::ParticleReal* AMREX_RESTRICT m_uz = nullptr;
 
+    std::optional<LatticeElementFinderDevice> d_lattice_element_finder;
+
     AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
-    bool isNoOp () const { return (m_Etype == None && m_Btype == None); }
+    bool isNoOp () const { return (m_Etype == None && m_Btype == None && !d_lattice_element_finder.has_value()); }
 
     AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
     void operator () (long i,
@@ -68,6 +75,12 @@
     {
         using namespace amrex::literals;
 
+        if (d_lattice_element_finder) {
+            // Note that the "*" is needed since d_lattice_element_finder is optional
+            (*d_lattice_element_finder)(i, field_Ex, field_Ey, field_Ez,
+                                        field_Bx, field_By, field_Bz);
+        }
+
         if (m_Etype == None && m_Btype == None) return;
 
         amrex::ParticleReal Ex = 0._prt;
diff --git a/Source/Particles/Gather/GetExternalFields.cpp b/Source/Particles/Gather/GetExternalFields.cpp
index 9bf2c798188..10495cfd5d2 100644
--- a/Source/Particles/Gather/GetExternalFields.cpp
+++ b/Source/Particles/Gather/GetExternalFields.cpp
@@ -1,5 +1,7 @@
 #include "Particles/Gather/GetExternalFields.H"
 
+#include "AcceleratorLattice/AcceleratorLattice.H"
+
 #include "Particles/MultiParticleContainer.H"
 #include "Particles/WarpXParticleContainer.H"
 #include "Utils/TextMsg.H"
@@ -16,6 +18,13 @@ GetExternalEBField::GetExternalEBField (const WarpXParIter& a_pti, int a_offset)
     auto& warpx = WarpX::GetInstance();
     auto& mypc = warpx.GetPartContainer();
 
+    int lev = a_pti.GetLevel();
+
+    AcceleratorLattice const & accelerator_lattice = warpx.get_accelerator_lattice(lev);
+    if (accelerator_lattice.m_lattice_defined) {
+        d_lattice_element_finder = accelerator_lattice.GetFinderDeviceInstance(a_pti, a_offset);
+    }
+
     m_gamma_boost = WarpX::gamma_boost;
     m_uz_boost = std::sqrt(WarpX::gamma_boost*WarpX::gamma_boost - 1._prt)*PhysConst::c;
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 5c25eb1807c..ca6f60328ad 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -34,6 +34,7 @@
 #include "FieldSolver/ElectrostaticSolver.H"
 #include "Filter/BilinearFilter.H"
 #include "Parallelization/GuardCellManager.H"
+#include "AcceleratorLattice/AcceleratorLattice.H"
 #include "Utils/Parser/IntervalsParser.H"
 #include "Utils/WarpXAlgorithmSelection.H"
 
@@ -962,6 +963,9 @@ public:
     //! Tagging cells for refinement
     virtual void ErrorEst (int lev, amrex::TagBoxArray& tags, amrex::Real time, int /*ngrow*/) final;
 
+    // Return the accelerator lattice instance defined at the given refinement level
+    const AcceleratorLattice& get_accelerator_lattice (int lev) {return *(m_accelerator_lattice[lev]);}
+
 protected:
 
     /**
@@ -1439,6 +1443,9 @@ private:
 
     //! particle buffer for scraped particles on the boundaries
     std::unique_ptr<ParticleBoundaryBuffer> m_particle_boundary_buffer;
 
+    // Accelerator lattice elements
+    amrex::Vector< std::unique_ptr<AcceleratorLattice> > m_accelerator_lattice;
+
     //
     // Embedded Boundary
     //
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index e5119708941..5f1d76b6548 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -30,6 +30,7 @@
 #include "Filter/NCIGodfreyFilter.H"
 #include "Particles/MultiParticleContainer.H"
 #include "Particles/ParticleBoundaryBuffer.H"
+#include "AcceleratorLattice/AcceleratorLattice.H"
 #include "Utils/TextMsg.H"
 #include "Utils/WarpXAlgorithmSelection.H"
 #include "Utils/WarpXConst.H"
@@ -350,7 +351,6 @@ WarpX::WarpX ()
         m_macroscopic_properties = std::make_unique<MacroscopicProperties>();
     }
-
     // Set default values for particle and cell weights for costs update;
     // Default values listed here for the case AMREX_USE_GPU are determined
     // from single-GPU tests on Summit.
@@ -427,6 +427,9 @@ WarpX::WarpX ()
             use_fdtd_nci_corr == 0,
             "The NCI corrector should only be used with Esirkepov deposition");
     }
+
+    m_accelerator_lattice.resize(nlevs_max);
+
 }
 
 WarpX::~WarpX ()
@@ -1768,6 +1771,10 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d
     AllocLevelMFs(lev, ba, dm, guard_cells.ng_alloc_EB, guard_cells.ng_alloc_J,
                   guard_cells.ng_alloc_Rho, guard_cells.ng_alloc_F, guard_cells.ng_alloc_G, aux_is_nodal);
+
+    m_accelerator_lattice[lev] = std::make_unique<AcceleratorLattice>();
+    m_accelerator_lattice[lev]->InitElementFinder(lev, ba, dm);
+
 }
 
 void

From 04e6dd66bedcde6dea4ace46090c41198caf5917 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 21 Dec 2022 20:30:52 +0100
Subject: [PATCH 0211/1346] Docs: Fix HTML5 for Install Logos (#3588)

With the update to HTML5 in docutils, we need to modernize our CSS
code that does fancy logos in the user install section.
---
 Docs/source/install/users.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Docs/source/install/users.rst b/Docs/source/install/users.rst
index a57c46129df..bcda4ca14af 100644
--- a/Docs/source/install/users.rst
+++ b/Docs/source/install/users.rst
@@ -6,7 +6,7 @@ Users
 .. raw:: html