diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index a1e7f5affda..2bc5d35bb4a 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 028638564f7be0694b9898f8d4088cdbf9a6f9f5 && cd - + cd ../amrex && git checkout --detach 103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index d6030743524..fc75ccb0141 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -10,7 +10,9 @@ jobs: build_win_msvc: name: MSVC C++17 w/o MPI runs-on: windows-latest - if: github.event.pull_request.draft == false + # disabled due to issues in #5230 + if: 0 + #if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1b668d5931e..d2b15b8af95 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.5 + rev: v0.6.8 hooks: # Run the linter - id: ruff diff --git a/.readthedocs.yml b/.readthedocs.yml index 3da9bc77140..95f86fe4ff2 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -9,14 +9,17 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.11" + python: "mambaforge-latest" + # python: "3.11" sphinx: - configuration: Docs/source/conf.py + configuration: Docs/source/conf.py -python: - install: - - requirements: Docs/requirements.txt +conda: + environment: Docs/conda.yml +# python: +# install: +# - requirements: Docs/requirements.txt formats: - htmlzip diff --git a/Docs/Doxyfile b/Docs/Doxyfile index 
5fbb7651b18..f7740bc0328 100644 --- a/Docs/Doxyfile +++ b/Docs/Doxyfile @@ -2245,7 +2245,7 @@ ENABLE_PREPROCESSING = YES # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -MACRO_EXPANSION = NO +MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then # the macro expansion is limited to the macros specified with the PREDEFINED and @@ -2253,7 +2253,7 @@ MACRO_EXPANSION = NO # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_ONLY_PREDEF = NO +EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. @@ -2305,6 +2305,8 @@ PREDEFINED = AMREX_Linux=1 \ WARPX_QED=1 \ WARPX_QED_TABLE_GEN=1 +PREDEFINED += "AMREX_ENUM(CLASS,...)=\"enum class CLASS : int { __VA_ARGS__ };\"" + # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The # macro definition that is found in the sources will be used. Use the PREDEFINED @@ -2312,7 +2314,7 @@ PREDEFINED = AMREX_Linux=1 \ # definition found in the source code. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_AS_DEFINED = +EXPAND_AS_DEFINED = AMREX_ENUM # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will # remove all references to function-like macros that are alone on a line, have diff --git a/Docs/README.md b/Docs/README.md index e6fac921b04..6d3903ab327 100644 --- a/Docs/README.md +++ b/Docs/README.md @@ -9,12 +9,13 @@ More information can be found in Docs/source/developers/documentation.rst. 
Install the Python requirements for compiling the documentation: ``` -python3 -m pip install -r Docs/requirements.txt +cd Docs/ +python3 -m pip install -r requirements.txt ``` ### Compiling the documentation -`cd` into the `Docs/` directory and type +Still in the `Docs/` directory, type ``` make html ``` diff --git a/Docs/conda.yml b/Docs/conda.yml new file mode 100644 index 00000000000..1e23c203b2b --- /dev/null +++ b/Docs/conda.yml @@ -0,0 +1,12 @@ +name: readthedocs + +channels: + - conda-forge + - nodefaults + +dependencies: + - python + - doxygen + - pip + - pip: + - -r requirements.txt diff --git a/Docs/requirements.txt b/Docs/requirements.txt index a8c2af0e474..bc34e69cd65 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -5,7 +5,7 @@ # License: BSD-3-Clause-LBNL # WarpX PICMI bindings w/o C++ component (used for autoclass docs) --e Python +-e ../Python breathe docutils>=0.17.1 diff --git a/Docs/source/developers/documentation.rst b/Docs/source/developers/documentation.rst index a5013299336..5d604bcf9b3 100644 --- a/Docs/source/developers/documentation.rst +++ b/Docs/source/developers/documentation.rst @@ -56,16 +56,15 @@ First, make sure you are in the root directory of WarpX's source and install the .. code-block:: sh - python3 -m pip install -r Docs/requirements.txt + cd Docs/ + python3 -m pip install -r requirements.txt You will also need Doxygen (macOS: ``brew install doxygen``; Ubuntu: ``sudo apt install doxygen``). -Then, to compile the documentation, use +Still in the ``Docs/`` directory, compile the documentation via .. code-block:: sh - cd Docs/ - make html # This will first compile the Doxygen documentation (execute doxygen) # and then build html pages from rst files using sphinx and breathe. 
diff --git a/Docs/source/developers/fields.rst b/Docs/source/developers/fields.rst index d0af160afef..9d980119814 100644 --- a/Docs/source/developers/fields.rst +++ b/Docs/source/developers/fields.rst @@ -37,6 +37,13 @@ The ``MultiFab`` constructor (for, e.g., ``Ex`` on level ``lev``) is called in ` By default, the ``MultiFab`` are set to ``0`` at initialization. They can be assigned a different value in ``WarpX::InitLevelData``. +Field Names +----------- + +The commonly used WarpX field names are defined in: + +.. doxygenenum:: warpx::fields::FieldType + Field solver ------------ diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 5bbc7d0fef4..ee5c82aeea9 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -175,6 +175,8 @@ If you need a new Python package dependency for testing, please add it in `Regre Sometimes two or more tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``. +If the new test is added in a new directory that did not exist before, please add the name of that directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the new test directory is located. + Naming conventions for automated tests -------------------------------------- diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 66570644bdc..09156072cad 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -90,6 +90,11 @@ Scientific works in laser-ion acceleration and laser-matter interaction. Physical Review Research **6**, 033148, 2024. `DOI:10.1103/PhysRevResearch.6.033148 `__ +#. Zaïm N, Sainte-Marie A, Fedeli L, Bartoli P, Huebl A, Leblanc A, Vay J-L, Vincenti H. 
+ **Light-matter interaction near the Schwinger limit using tightly focused doppler-boosted lasers**. + Physical Review Letters **132**, 175002, 2024. + `DOI:10.1103/PhysRevLett.132.175002 `__ + #. Knight B, Gautam C, Stoner C, Egner B, Smith J, Orban C, Manfredi J, Frische K, Dexter M, Chowdhury E, Patnaik A (2023). **Detailed Characterization of a kHz-rate Laser-Driven Fusion at a Thin Liquid Sheet with a Neutron Detection Suite**. High Power Laser Science and Engineering, 1-13, 2023. @@ -110,6 +115,11 @@ Scientific works in laser-ion acceleration and laser-matter interaction. Phys. Rev. Accel. Beams **25**, 093402, 2022. `DOI:10.1103/PhysRevAccelBeams.25.093402 `__ +#. Fedeli L, Sainte-Marie A, Zaïm N, Thévenet M, Vay J-L, Myers A, Quéré F, Vincenti H. + **Probing strong-field QED with Doppler-boosted PetaWatt-class lasers**. + Physical Review Letters **127**, 114801, 2021. + `DOI:10.1103/PhysRevLett.127.114801 `__ + Particle Accelerator & Beam Physics *********************************** diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 72c599ae2bd..71a607eae6a 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -117,7 +117,7 @@ For Nvidia CUDA GPU support, you will need to have `a recent CUDA driver install .. code-block:: bash - conda install -c nvidia -c conda-forge cuda cupy + conda install -c nvidia -c conda-forge cuda cuda-nvtx-dev cupy More info for `CUDA-enabled ML packages `__. 
diff --git a/Docs/source/install/hpc.rst b/Docs/source/install/hpc.rst index af4c0fe3e61..35884050a59 100644 --- a/Docs/source/install/hpc.rst +++ b/Docs/source/install/hpc.rst @@ -43,6 +43,7 @@ This section documents quick-start guides for a selection of supercomputers that hpc/lassen hpc/lawrencium hpc/leonardo + hpc/lonestar6 hpc/lumi hpc/lxplus hpc/ookami diff --git a/Docs/source/install/hpc/lonestar6.rst b/Docs/source/install/hpc/lonestar6.rst new file mode 100644 index 00000000000..f1512e4a508 --- /dev/null +++ b/Docs/source/install/hpc/lonestar6.rst @@ -0,0 +1,139 @@ +.. _building-lonestar6: + +Lonestar6 (TACC) +================ + +The `Lonestar6 cluster `_ is located at `TACC `__. + + +Introduction +------------ + +If you are new to this system, **please see the following resources**: + +* `TACC user guide `__ +* Batch system: `Slurm `__ +* `Jupyter service `__ +* `Filesystem directories `__: + + * ``$HOME``: per-user home directory, backed up (10 GB) + * ``$WORK``: per-user production directory, not backed up, not purged, Lustre (1 TB) + * ``$SCRATCH``: per-user production directory, not backed up, purged every 10 days, Lustre (no limits, 8PByte total) + + +Installation +------------ + +Use the following commands to download the WarpX source code and switch to the correct branch: + +.. code-block:: bash + + git clone https://github.com/ECP-WarpX/WarpX.git $WORK/src/warpx + +We use system software modules, add environment hints and further dependencies via the file ``$HOME/lonestar6_warpx_a100.profile``. +Create it now: + +.. code-block:: bash + + cp $HOME/src/warpx/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example $HOME/lonestar6_warpx_a100.profile + +.. dropdown:: Script Details + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../../Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example + :language: bash + +Edit the 2nd line of this script, which sets the ``export proj=""`` variable. 
+For example, if you are member of the project ``abcde``, then run ``nano $HOME/lonestar6_warpx_a100.profile`` and edit line 2 to read: + +.. code-block:: bash + + export proj="abcde" + +Exit the ``nano`` editor with ``Ctrl`` + ``O`` (save) and then ``Ctrl`` + ``X`` (exit). + +.. important:: + + Now, and as the first step on future logins to Lonestar6, activate these environment settings: + + .. code-block:: bash + + source $HOME/lonestar6_warpx_a100.profile + +Finally, since Lonestar6 does not yet provide software modules for some of our dependencies, install them once: + +.. code-block:: bash + + bash $HOME/src/warpx/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh + source ${SW_DIR}/venvs/warpx-a100/bin/activate + +.. dropdown:: Script Details + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../../Tools/machines/lonestar6-tacc/install_a100_dependencies.sh + :language: bash + + +.. _building-lonestar6-compilation: + +Compilation +----------- + +Use the following :ref:`cmake commands ` to compile the application executable: + +.. code-block:: bash + + cd $HOME/src/warpx + rm -rf build_pm_gpu + + cmake -S . -B build_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_gpu -j 16 + +The WarpX application executables are now in ``$HOME/src/warpx/build_gpu/bin/``. +Additionally, the following commands will install WarpX as a Python module: + +.. code-block:: bash + + cd $HOME/src/warpx + rm -rf build_pm_gpu_py + + cmake -S . -B build_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_gpu_py -j 16 --target pip_install + +Now, you can :ref:`submit Lonestar6 compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). +Or, you can use the WarpX executables to submit Lonestar6 jobs (:ref:`example inputs `). 
+For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$WORK`` or ``$SCRATCH``. + + +.. _running-cpp-lonestar6: + +Running +------- + +.. _running-cpp-lonestar6-A100-GPUs: + +A100 GPUs (40 GB) +^^^^^^^^^^^^^^^^^ + +`84 GPU nodes, each with 2 A100 GPUs (40 GB) `__. + +The batch script below can be used to run a WarpX simulation on multiple nodes (change ``-N`` accordingly) on the supercomputer lonestar6 at tacc. +Replace descriptions between chevrons ``<>`` by relevant values, for instance ```` could be ``plasma_mirror_inputs``. +Note that we run one MPI rank per GPU. + + +.. literalinclude:: ../../../../Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch + :language: bash + :caption: You can copy this file from ``Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch``. + +To run a simulation, copy the lines above to a file ``lonestar6.sbatch`` and run + +.. code-block:: bash + + sbatch lonestar6_a100.sbatch + +to submit the job. diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index dc5a985e99f..9612b64476d 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -153,7 +153,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu - cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_gpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_pm_gpu/bin/``. @@ -164,7 +164,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu_py - cmake -S . 
-B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_gpu_py -j 16 --target pip_install .. tab-item:: CPU Nodes @@ -174,7 +174,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_cpu - cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_cpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_pm_cpu/bin/``. @@ -184,7 +184,7 @@ Use the following :ref:`cmake commands ` to compile the applicat rm -rf build_pm_cpu_py - cmake -S . -B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_cpu_py -j 16 --target pip_install Now, you can :ref:`submit Perlmutter compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 86ab7594c5f..b9d82d5014a 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2129,14 +2129,24 @@ Time step The ratio between the actual timestep that is used in the simulation and the Courant-Friedrichs-Lewy (CFL) limit. (e.g. for `warpx.cfl=1`, the timestep will be exactly equal to the CFL limit.) - This parameter will only be used with the electromagnetic solver. 
+ For some speed v and grid spacing dx, this limits the timestep to `warpx.cfl * dx / v`. + When used with the electromagnetic solver, `v` is the speed of light. + For the electrostatic solver, `v` is the maximum speed among all particles in the domain. * ``warpx.const_dt`` (`float`) Allows direct specification of the time step size, in units of seconds. - When the electrostatic solver is being used, this must be supplied. + When the electrostatic solver is being used, this must be supplied if not using adaptive timestepping. This can be used with the electromagnetic solver, overriding ``warpx.cfl``, but it is up to the user to ensure that the CFL condition is met. +* ``warpx.dt_update_interval`` (`string`) optional (default `-1`) + How many iterations pass between timestep adaptations when using the electrostatic solver. + Must be greater than `0` to use adaptive timestepping, or else ``warpx.const_dt`` must be specified. + +* ``warpx.max_dt`` (`float`) optional + The maximum timestep permitted for the electrostatic solver, when using adaptive timestepping. + If supplied, also sets the initial timestep for these simulations, before the first timestep update. + Filtering ^^^^^^^^^ @@ -3448,39 +3458,42 @@ Reduced Diagnostics For 1D-Z, :math:`x`-related and :math:`y`-related quantities are not outputted. RZ geometry is not supported yet. -* ``DifferentialLuminosity`` - This type computes the differential luminosity between two species, defined as: + * ``DifferentialLuminosity`` + This type computes the differential luminosity between two species, defined as: - .. math:: + .. 
math:: + + \frac{d\mathcal{L}}{d\mathcal{E}^*}(\mathcal{E}^*, t) = \int_0^t dt'\int d\boldsymbol{x}\,d\boldsymbol{p}_1 d\boldsymbol{p}_2\; + \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\ f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(\mathcal{E}^* - \mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2)) - \frac{d\mathcal{L}}{d\mathcal{E}^*}(\mathcal{E}^*, t) = \int_0^t dt'\int d\boldsymbol{x}\,d\boldsymbol{p}_1 d\boldsymbol{p}_2\; - \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\ f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(\mathcal{E}^* - \mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2)) + where :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2(m_1 m_2 c^4 + \gamma_1 \gamma_2 - \boldsymbol{p}_1\cdot\boldsymbol{p}_2 c^2)}` is the energy in the center-of-mass frame, + and :math:`f_i` is the distribution function of species :math:`i`. Note that, if :math:`\sigma^*(\mathcal{E}^*)` + is the center-of-mass cross-section of a given collision process, then + :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)` + gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`). - where :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2(m_1 m_2 c^4 - \gamma_1 \gamma_2 - \boldsymbol{p}_1\cdot\boldsymbol{p}_2 c^2)}` is the energy in the center-of-mass frame, - and :math:`f_i` is the distribution function of species :math:`i`. 
Note that, if :math:`\sigma^*(\mathcal{E}^*)` - is the center-of-mass cross-section of a given collision process, then - :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)` - gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`). + The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations + involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}` + can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams. - The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations - involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}` - can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams. + In practice, the above expression of the differential luminosity is evaluated over discrete bins in energy :math:`\mathcal{E}^*`, + and by summing over macroparticles. - In practice, the above expression of the differential luminosity is evaluated over discrete bins in energy :math:`\mathcal{E}^*`, - and by summing over macroparticles. + * ``.species`` (`list of two strings`) + The names of the two species for which the differential luminosity is computed. - * ``.species`` (`list of two strings`) - The names of the two species for which the differential luminosity is computed. + * ``.bin_number`` (`int` > 0) + The number of bins in energy :math:`\mathcal{E}^*` - * ``.bin_number`` (`int` > 0) - The number of bins in energy :math:`\mathcal{E}^*` + * ``.bin_max`` (`float`, in eV) + The minimum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. 
- * ``.bin_max`` (`float`, in eV) - The minimum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. + * ``.bin_min`` (`float`, in eV) + The maximum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. - * ``.bin_min`` (`float`, in eV) - The maximum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed. + * ``Timestep`` + This type outputs the simulation's physical timestep (in seconds) at each mesh refinement level. * ``.intervals`` (`string`) Using the `Intervals Parser`_ syntax, this string defines the timesteps at which reduced diff --git a/Docs/source/usage/workflows/python_extend.rst b/Docs/source/usage/workflows/python_extend.rst index 47610e0d7ba..275a4dd134d 100644 --- a/Docs/source/usage/workflows/python_extend.rst +++ b/Docs/source/usage/workflows/python_extend.rst @@ -134,9 +134,12 @@ This example accesses the :math:`E_x(x,y,z)` field at level 0 after every time s warpx = sim.extension.warpx # data access - E_x_mf = warpx.multifab(f"Efield_fp[x][level=0]") + # vector field E, component x, on the fine patch of MR level 0 + E_x_mf = warpx.multifab("Efield_fp", dir=0, level=0) + # scalar field rho, on the fine patch of MR level 0 + rho_mf = warpx.multifab("rho_fp", level=0) - # compute + # compute on E_x_mf # iterate over mesh-refinement levels for lev in range(warpx.finest_level + 1): # grow (aka guard/ghost/halo) regions diff --git a/Examples/Physics_applications/beam_beam_collision/README.rst b/Examples/Physics_applications/beam_beam_collision/README.rst index a7a06521218..28fdc1ee70e 100644 --- a/Examples/Physics_applications/beam_beam_collision/README.rst +++ b/Examples/Physics_applications/beam_beam_collision/README.rst @@ -11,7 +11,8 @@ We turn on the Quantum Synchrotron QED module for photon emission (also known as the Breit-Wheeler QED module for the generation of electron-positron pairs (also known as coherent pair generation in the collider community). 
To solve for the electromagnetic field we use the nodal version of the electrostatic relativistic solver. -This solver computes the average velocity of each species, and solves the corresponding relativistic Poisson equation (see the WarpX documentation for `warpx.do_electrostatic = relativistic` for more detail). This solver accurately reproduced the subtle cancellation that occur for some component of the ``E + v x B`` terms which are crucial in simulations of relativistic particles. +This solver computes the average velocity of each species, and solves the corresponding relativistic Poisson equation (see the WarpX documentation for `warpx.do_electrostatic = relativistic` for more detail). +This solver accurately reproduces the subtle cancellation that occur for some component of ``E + v x B``, which are crucial in simulations of relativistic particles. This example is based on the following paper :cite:t:`ex-Yakimenko2019`. @@ -26,7 +27,7 @@ For `MPI-parallel `__ runs, prefix these lines with ` .. literalinclude:: inputs_test_3d_beam_beam_collision :language: ini - :caption: You can copy this file from ``Examples/Physics_applications/beam-beam_collision/inputs_test_3d_beam_beam_collision``. + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision``. Visualize @@ -34,12 +35,15 @@ Visualize The figure below shows the number of photons emitted per beam particle (left) and the number of secondary pairs generated per beam particle (right). -We compare different results: +We compare different results for the reduced diagnostics with the literature: * (red) simplified WarpX simulation as the example stored in the directory ``/Examples/Physics_applications/beam-beam_collision``; * (blue) large-scale WarpX simulation (high resolution and ad hoc generated tables ; * (black) literature results from :cite:t:`ex-Yakimenko2019`. 
-The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. For the large-scale simulation we have used the following options: +The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``. +Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables. +To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``. +For the large-scale simulation we have used the following options: .. code-block:: ini @@ -63,8 +67,45 @@ The small-scale simulation has been performed with a resolution of ``nx = 64, ny qed_bw.tab_pair_frac_how_many=512 qed_bw.save_table_in=my_bw_table.txt + .. figure:: https://gist.github.com/user-attachments/assets/2dd43782-d039-4faa-9d27-e3cf8fb17352 :alt: Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. :width: 100% Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`. + + +Below are two visualization scripts that provide examples to graph the field and reduced diagnostics. +They are available in the ``Examples/Physics_applications/beam_beam_collision/`` folder and can be run as simply as ``python3 plot_fields.py`` and ``python3 plot_reduced.py``. + +.. tab-set:: + + .. tab-item:: Field Diagnostics + + This script visualizes the evolution of the fields (:math:`|E|, |B|, \rho`) during the collision between the two ultra-relativistic lepton beams. 
+ The magnitude of E and B and the charge densities of the primary beams and of the secondary pairs are sliced along either one of the two transverse coordinates (:math:`x` and :math:`y`). + + .. literalinclude:: plot_fields.py + :language: python3 + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/plot_fields.py``. + + .. figure:: https://gist.github.com/user-attachments/assets/04c9c0ec-b580-446f-a11a-437c1b244a41 + :alt: Slice across :math:`x` of different fields (:math:`|E|, |B|, \rho`) at timestep 45, in the middle of the collision. + :width: 100% + + Slice across :math:`x` of different fields (:math:`|E|, |B|, \rho`) at timestep 45, in the middle of the collision. + + + .. tab-item:: Reduced Diagnostics + + A similar script to the one below was used to produce the image showing the benchmark against :cite:t:`ex-Yakimenko2019`. + + .. literalinclude:: plot_reduced.py + :language: python3 + :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/plot_reduced.py``. + + .. figure:: https://gist.github.com/user-attachments/assets/c280490a-f1f2-4329-ad3c-46817d245dc1 + :alt: Photon and pair production rates in time throughout the collision. + :width: 100% + + Photon and pair production rates in time throughout the collision. diff --git a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision index e856a078003..d0cf3cd7ebf 100644 --- a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision +++ b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision @@ -211,7 +211,7 @@ warpx.do_qed_schwinger = 0.
# FULL diagnostics.diags_names = diag1 -diag1.intervals = 0 +diag1.intervals = 15 diag1.diag_type = Full diag1.write_species = 1 diag1.fields_to_plot = Ex Ey Ez Bx By Bz rho_beam1 rho_beam2 rho_ele1 rho_pos1 rho_ele2 rho_pos2 diff --git a/Examples/Physics_applications/beam_beam_collision/plot_fields.py b/Examples/Physics_applications/beam_beam_collision/plot_fields.py new file mode 100644 index 00000000000..a7ddb2d13e9 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/plot_fields.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 + +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable +from openpmd_viewer import OpenPMDTimeSeries + +plt.rcParams.update({"font.size": 16}) + +series = OpenPMDTimeSeries("./diags/diag1") +steps = series.iterations + + +for slice_axis in ["x", "y"]: # slice the fields along x and y + for n in steps: # loop through the available timesteps + fig, ax = plt.subplots( + ncols=2, nrows=2, figsize=(10, 6), dpi=300, sharex=True, sharey=True + ) + + # get E field + Ex, info = series.get_field( + field="E", coord="x", iteration=n, plot=False, slice_across=slice_axis + ) + Ey, info = series.get_field( + field="E", coord="y", iteration=n, plot=False, slice_across=slice_axis + ) + Ez, info = series.get_field( + field="E", coord="z", iteration=n, plot=False, slice_across=slice_axis + ) + # get B field + Bx, info = series.get_field( + field="B", coord="x", iteration=n, plot=False, slice_across=slice_axis + ) + By, info = series.get_field( + field="B", coord="y", iteration=n, plot=False, slice_across=slice_axis + ) + Bz, info = series.get_field( + field="B", coord="z", iteration=n, plot=False, slice_across=slice_axis + ) + # get charge densities + rho_beam1, info = series.get_field( + field="rho_beam1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_beam2, info = series.get_field( + field="rho_beam2", iteration=n, plot=False, slice_across=slice_axis + ) + 
rho_ele1, info = series.get_field( + field="rho_ele1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_pos1, info = series.get_field( + field="rho_pos1", iteration=n, plot=False, slice_across=slice_axis + ) + rho_ele2, info = series.get_field( + field="rho_ele2", iteration=n, plot=False, slice_across=slice_axis + ) + rho_pos2, info = series.get_field( + field="rho_pos2", iteration=n, plot=False, slice_across=slice_axis + ) + + xmin = info.z.min() + xmax = info.z.max() + xlabel = "z [m]" + + if slice_axis == "x": + ymin = info.y.min() + ymax = info.y.max() + ylabel = "y [m]" + elif slice_axis == "y": + ymin = info.x.min() + ymax = info.x.max() + ylabel = "x [m]" + + # plot E magnitude + Emag = np.sqrt(Ex**2 + Ey**2 + Ez**2) + im = ax[0, 0].imshow( + np.transpose(Emag), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=0, + vmax=np.max(np.abs(Emag)), + ) + ax[0, 0].set_title("E [V/m]") + divider = make_axes_locatable(ax[0, 0]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot B magnitude + Bmag = np.sqrt(Bx**2 + By**2 + Bz**2) + im = ax[1, 0].imshow( + np.transpose(Bmag), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=0, + vmax=np.max(np.abs(Bmag)), + ) + ax[1, 0].set_title("B [T]") + divider = make_axes_locatable(ax[1, 0]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot beam densities + rho_beams = rho_beam1 + rho_beam2 + im = ax[0, 1].imshow( + np.transpose(rho_beams), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=-np.max(np.abs(rho_beams)), + vmax=np.max(np.abs(rho_beams)), + ) + ax[0, 1].set_title(r"$\rho$ beams [C/m$^3$]") + divider = make_axes_locatable(ax[0, 1]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + # plot secondary densities + rho2 = rho_ele1 + rho_pos1 + rho_ele2 + rho_pos2 + im = ax[1, 
1].imshow( + np.transpose(rho2), + cmap="seismic", + extent=[xmin, xmax, ymin, ymax], + vmin=-np.max(np.abs(rho2)), + vmax=np.max(np.abs(rho2)), + ) + ax[1, 1].set_title(r"$\rho$ secondaries [C/m$^3$]") + divider = make_axes_locatable(ax[1, 1]) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(im, cax=cax, orientation="vertical") + + for a in ax[-1, :].reshape(-1): + a.set_xlabel(xlabel) + for a in ax[:, 0].reshape(-1): + a.set_ylabel(ylabel) + + fig.suptitle(f"Iteration = {n}, time [s] = {series.current_t}", fontsize=20) + plt.tight_layout() + + image_file_name = "FIELDS_" + slice_axis + f"_{n:03d}.png" + plt.savefig(image_file_name, dpi=100, bbox_inches="tight") + plt.close() diff --git a/Examples/Physics_applications/beam_beam_collision/plot_reduced.py b/Examples/Physics_applications/beam_beam_collision/plot_reduced.py new file mode 100644 index 00000000000..3f59f975519 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/plot_reduced.py @@ -0,0 +1,48 @@ +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from scipy.constants import c, nano, physical_constants + +r_e = physical_constants["classical electron radius"][0] +my_dpi = 300 +sigmaz = 10 * nano + +fig, ax = plt.subplots( + ncols=2, nrows=1, figsize=(2000.0 / my_dpi, 1000.0 / my_dpi), dpi=my_dpi +) + +rdir = "./diags/reducedfiles/" + +df_cr = pd.read_csv(f"{rdir}" + "ColliderRelevant_beam1_beam2.txt", sep=" ", header=0) +df_pn = pd.read_csv(f"{rdir}" + "ParticleNumber.txt", sep=" ", header=0) + + +times = df_cr[[col for col in df_cr.columns if "]time" in col]].to_numpy() +steps = df_cr[[col for col in df_cr.columns if "]step" in col]].to_numpy() + +x = df_cr[[col for col in df_cr.columns if "]dL_dt" in col]].to_numpy() +coll_index = np.argmax(x) +coll_time = times[coll_index] + +# number of photons per beam particle +np1 = df_pn[[col for col in df_pn.columns if "]pho1_weight" in col]].to_numpy() +np2 = df_pn[[col for col in df_pn.columns if 
"]pho2_weight" in col]].to_numpy() +Ne = df_pn[[col for col in df_pn.columns if "]beam1_weight" in col]].to_numpy()[0] +Np = df_pn[[col for col in df_pn.columns if "]beam2_weight" in col]].to_numpy()[0] + +ax[0].plot((times - coll_time) / (sigmaz / c), (np1 + np2) / (Ne + Np), lw=2) +ax[0].set_title(r"photon number/beam particle") + +# number of NLBW particles per beam particle +e1 = df_pn[[col for col in df_pn.columns if "]ele1_weight" in col]].to_numpy() +e2 = df_pn[[col for col in df_pn.columns if "]ele2_weight" in col]].to_numpy() + +ax[1].plot((times - coll_time) / (sigmaz / c), (e1 + e2) / (Ne + Np), lw=2) +ax[1].set_title(r"NLBW particles/beam particle") + +for a in ax.reshape(-1): + a.set_xlabel(r"time [$\sigma_z/c$]") +image_file_name = "reduced.png" +plt.tight_layout() +plt.savefig(image_file_name, dpi=300, bbox_inches="tight") +plt.close("all") diff --git a/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py index e3bc888f600..9ce8bb8433c 100644 --- a/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py +++ b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py @@ -121,13 +121,13 @@ def compute_virtual_charge_on_spacecraft(): # Compute integral of rho over volume of the domain # (i.e. total charge of the plasma particles) rho_integral = ( - (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() * dr * dz + (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() + * 2 + * np.pi + * dr + * dz ) - # Due to an oddity in WarpX (which will probably be solved later) - # we need to multiply `rho` by `-epsilon_0` to get the correct charge - rho_integral *= 2 * np.pi * -scc.epsilon_0 # does this oddity still exist? 
- # Compute charge of the spacecraft, based on Gauss theorem q_spacecraft = -rho_integral - scc.epsilon_0 * grad_phi_integral print("Virtual charge on the spacecraft: %e" % q_spacecraft) diff --git a/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted index 668ec73d2dd..c056ff1fc66 100644 --- a/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted +++ b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted @@ -2,8 +2,8 @@ max_step = 50 amr.n_cell = 16 16 8 amr.max_level = 0 geometry.dims = 3 -geometry.prob_lo = -0.2 -0.2 -0.1 -geometry.prob_hi = +0.2 +0.2 +0.1 +geometry.prob_lo = -0.2 -0.2 -0.1866 +geometry.prob_hi = +0.2 +0.2 +0.1866 # Boundary condition boundary.field_lo = pec pec pec diff --git a/Examples/Tests/electrostatic_sphere/CMakeLists.txt b/Examples/Tests/electrostatic_sphere/CMakeLists.txt index 41a151b7884..3d17c4462f8 100644 --- a/Examples/Tests/electrostatic_sphere/CMakeLists.txt +++ b/Examples/Tests/electrostatic_sphere/CMakeLists.txt @@ -41,6 +41,16 @@ add_warpx_test( OFF # dependency ) +add_warpx_test( + test_3d_electrostatic_sphere_adaptive # name + 3 # dims + 2 # nprocs + inputs_test_3d_electrostatic_sphere_adaptive # inputs + analysis_electrostatic_sphere.py # analysis + diags/diag1000054 # output + OFF # dependency +) + add_warpx_test( test_rz_electrostatic_sphere # name RZ # dims diff --git a/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive new file mode 100644 index 00000000000..f64f6de08ee --- /dev/null +++ b/Examples/Tests/electrostatic_sphere/inputs_test_3d_electrostatic_sphere_adaptive @@ -0,0 +1,47 @@ +stop_time = 60e-6 +warpx.cfl = 0.2 +warpx.dt_update_interval = 10 +warpx.max_dt = 1.5e-6 +amr.n_cell = 64 64 64 +amr.max_level = 0 +amr.blocking_factor = 8 
+amr.max_grid_size = 128 +geometry.dims = 3 +geometry.prob_lo = -0.5 -0.5 -0.5 +geometry.prob_hi = 0.5 0.5 0.5 +boundary.field_lo = pec pec pec +boundary.field_hi = pec pec pec +warpx.do_electrostatic = relativistic + +particles.species_names = electron + +algo.field_gathering = momentum-conserving + +# Order of particle shape factors +algo.particle_shape = 1 + +my_constants.n0 = 1.49e6 +my_constants.R0 = 0.1 + +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = "NUniformPerCell" +electron.num_particles_per_cell_each_dim = 2 2 2 +electron.profile = parse_density_function +electron.density_function(x,y,z) = "(x*x + y*y + z*z < R0*R0)*n0" +electron.momentum_distribution_type = at_rest + +diagnostics.diags_names = diag1 diag2 + +diag1.intervals = 30 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez rho + +diag2.intervals = 30 +diag2.diag_type = Full +diag2.fields_to_plot = none +diag2.format = openpmd + +warpx.reduced_diags_names = timestep +timestep.intervals = 1 +timestep.type = Timestep diff --git a/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py index 80ce483f2c7..7148cde2d3e 100755 --- a/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py +++ b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py @@ -94,7 +94,7 @@ face_areas_y = fields.FaceAreasyWrapper() face_areas_z = fields.FaceAreaszWrapper() -print("======== Testing the wrappers of m_edge_lengths =========") +print("======== Testing the wrappers of edge_lengths =========") ly_slice_x = edge_lengths_y[nx // 2, :, :] lz_slice_x = edge_lengths_z[nx // 2, :, :] @@ -159,7 +159,7 @@ print("Perimeter of the middle z-slice:", perimeter_slice_z) assert np.isclose(perimeter_slice_z, perimeter_slice_z_true, rtol=1e-05, atol=1e-08) -print("======== Testing the wrappers of m_face_areas =========") 
+print("======== Testing the wrappers of face_areas =========") Sx_slice = np.sum(face_areas_x[nx // 2, :, :]) dx = (xmax - xmin) / nx diff --git a/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py index d3c35daf261..ff450b92bc7 100755 --- a/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py +++ b/Examples/Tests/magnetostatic_eb/inputs_test_3d_magnetostatic_eb_picmi.py @@ -225,7 +225,7 @@ def Er_an(r): er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max() / np.abs(Er_an(r_sub)).max() -plt.ylabel("$E_r$ (V/m)") +plt.ylabel(r"$E_r$ (V/m)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(er_err * 100.0)) plt.tight_layout() @@ -298,7 +298,7 @@ def Bt_an(r): bt_err = np.abs(Bt_mean[r_idx] - Bt_an(r_sub)).max() / np.abs(Bt_an(r_sub)).max() -plt.ylabel("$B_{\Theta}$ (T)") +plt.ylabel(r"$B_{\Theta}$ (T)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(bt_err * 100.0)) plt.tight_layout() diff --git a/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py b/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py index d0f1787a5a2..ff7767181f4 100755 --- a/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py +++ b/Examples/Tests/magnetostatic_eb/inputs_test_rz_magnetostatic_eb_picmi.py @@ -195,7 +195,7 @@ def Er_an(r): er_err = np.abs(Er_mean[r_idx] - Er_an(r_sub)).max() / np.abs(Er_an(r_sub)).max() -plt.ylabel("$E_r$ (V/m)") +plt.ylabel(r"$E_r$ (V/m)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(er_err * 100.0)) plt.tight_layout() @@ -246,7 +246,7 @@ def Bth_an(r): bth_err = np.abs(Bth_mean[r_idx] - Bth_an(r_sub)).max() / np.abs(Bth_an(r_sub)).max() -plt.ylabel("$B_{\Theta}$ (T)") +plt.ylabel(r"$B_{\Theta}$ (T)") plt.xlabel("r (m)") plt.title("Max % Error: {} %".format(bth_err * 100.0)) plt.tight_layout() diff --git 
a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py index 4f13c76e208..f074c81cbb3 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py @@ -303,7 +303,7 @@ def check_fields(self): rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 - Jy = fields.JyFPAmpereWrapper(include_ghosts=False)[...] / self.J0 + Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 Bz = fields.BzFPWrapper(include_ghosts=False)[...] / self.B0 diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt index c5ec4583da1..d6141f0b4ab 100644 --- a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -12,3 +12,15 @@ if(WarpX_FFT) OFF # dependency ) endif() + +if(WarpX_HEFFTE) + add_warpx_test( + test_3d_open_bc_poisson_solver_heffte # name + 3 # dims + 2 # nprocs + inputs_test_3d_open_bc_poisson_solver_heffte # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte new file mode 100644 index 00000000000..4f0a50df037 --- /dev/null +++ b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte @@ -0,0 +1 @@ +FILE = inputs_test_3d_open_bc_poisson_solver diff --git 
a/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted index fa18ac439c4..b00779bae65 100644 --- a/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted +++ b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted @@ -8,8 +8,8 @@ amr.max_level = 0 # Geometry geometry.dims = 3 -geometry.prob_lo = -1.0 -1.0 -1.0 # physical domain -geometry.prob_hi = 1.0 1.0 2.0 +geometry.prob_lo = -1.0 -1.0 -1.866 # physical domain +geometry.prob_hi = 1.0 1.0 3.732 boundary.field_lo = pec pec pec boundary.field_hi = pec pec pec diff --git a/GNUmakefile b/GNUmakefile index 86bdab2709f..fe10983b780 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -38,6 +38,7 @@ USE_OPENPMD = FALSE WarpxBinDir = Bin USE_FFT = FALSE +USE_HEFFTE = FALSE USE_RZ = FALSE USE_EB = FALSE diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 9b3aaa27636..9ef7019cda9 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -137,7 +137,7 @@ def write_inputs(self, filename="inputs", **kw): for arg in argv: # This prints the name of the input group (prefix) as a header # before each group to make the input file more human readable - prefix_new = re.split(" |\.", arg)[0] + prefix_new = re.split(r" |\.", arg)[0] if prefix_new != prefix_old: if prefix_old != "": ff.write("\n") diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index cbdd8d4517a..5d3b892b543 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -33,7 +33,7 @@ ExFPPMLWrapper, EyFPPMLWrapper, EzFPPMLWrapper BxFPPMLWrapper, ByFPPMLWrapper, BzFPPMLWrapper JxFPPMLWrapper, JyFPPMLWrapper, JzFPPMLWrapper -JxFPAmpereWrapper, JyFPAmpereWrapper, JzFPAmpereWrapper +JxFPPlasmaWrapper, JyFPPlasmaWrapper, JzFPPlasmaWrapper FFPPMLWrapper, GFPPMLWrapper ExCPPMLWrapper, EyCPPMLWrapper, EzCPPMLWrapper @@ -77,6 +77,9 @@ class _MultiFABWrapper(object): everytime it is called if this argument is given instead of directly providing 
the Multifab. + idir: int, optional + For MultiFab that is an element of a vector, the direction number, 0, 1, or 2. + level: int The refinement level @@ -86,9 +89,10 @@ class _MultiFABWrapper(object): ghost cells. """ - def __init__(self, mf=None, mf_name=None, level=0, include_ghosts=False): + def __init__(self, mf=None, mf_name=None, idir=None, level=0, include_ghosts=False): self._mf = mf self.mf_name = mf_name + self.idir = idir self.level = level self.include_ghosts = include_ghosts @@ -116,8 +120,11 @@ def mf(self): else: # Always fetch this anew in case the C++ MultiFab is recreated warpx = libwarpx.libwarpx_so.get_instance() - # All MultiFab names have the level suffix - return warpx.multifab(f"{self.mf_name}[level={self.level}]") + if self.idir is not None: + direction = libwarpx.libwarpx_so.Direction(self.idir) + return warpx.multifab(self.mf_name, direction, self.level) + else: + return warpx.multifab(self.mf_name, self.level) @property def shape(self): @@ -573,145 +580,145 @@ def norm0(self, *args): def ExWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_aux[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_aux", idir=0, level=level, include_ghosts=include_ghosts ) def EyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_aux[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_aux", idir=1, level=level, include_ghosts=include_ghosts ) def EzWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_aux[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_aux", idir=2, level=level, include_ghosts=include_ghosts ) def BxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_aux[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_aux", idir=0, level=level, include_ghosts=include_ghosts ) def ByWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_aux[y]", 
level=level, include_ghosts=include_ghosts + mf_name="Bfield_aux", idir=1, level=level, include_ghosts=include_ghosts ) def BzWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_aux[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_aux", idir=2, level=level, include_ghosts=include_ghosts ) def JxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=0, level=level, include_ghosts=include_ghosts ) def JyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=1, level=level, include_ghosts=include_ghosts ) def JzWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=2, level=level, include_ghosts=include_ghosts ) def ExFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp", idir=0, level=level, include_ghosts=include_ghosts ) def EyFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp", idir=1, level=level, include_ghosts=include_ghosts ) def EzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp", idir=2, level=level, include_ghosts=include_ghosts ) def BxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp", idir=0, level=level, include_ghosts=include_ghosts ) def ByFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp[y]", level=level, 
include_ghosts=include_ghosts + mf_name="Bfield_fp", idir=1, level=level, include_ghosts=include_ghosts ) def BzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp", idir=2, level=level, include_ghosts=include_ghosts ) def ExFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp_external[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp_external", idir=0, level=level, include_ghosts=include_ghosts ) def EyFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp_external[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp_external", idir=1, level=level, include_ghosts=include_ghosts ) def EzFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_fp_external[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) def BxFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp_external[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp_external", idir=0, level=level, include_ghosts=include_ghosts ) def ByFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp_external[y]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp_external", idir=1, level=level, include_ghosts=include_ghosts ) def BzFPExternalWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_fp_external[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_fp_external", idir=2, level=level, include_ghosts=include_ghosts ) def JxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=0, level=level, 
include_ghosts=include_ghosts ) def JyFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=1, level=level, include_ghosts=include_ghosts ) def JzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="current_fp", idir=2, level=level, include_ghosts=include_ghosts ) @@ -737,7 +744,8 @@ def GFPWrapper(level=0, include_ghosts=False): def AxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="vector_potential_fp_nodal[x]", + mf_name="vector_potential_fp_nodal", + idir=0, level=level, include_ghosts=include_ghosts, ) @@ -745,7 +753,8 @@ def AxFPWrapper(level=0, include_ghosts=False): def AyFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="vector_potential_fp_nodal[y]", + mf_name="vector_potential_fp_nodal", + idir=1, level=level, include_ghosts=include_ghosts, ) @@ -753,7 +762,8 @@ def AyFPWrapper(level=0, include_ghosts=False): def AzFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="vector_potential_fp_nodal[z]", + mf_name="vector_potential_fp_nodal", + idir=2, level=level, include_ghosts=include_ghosts, ) @@ -761,55 +771,55 @@ def AzFPWrapper(level=0, include_ghosts=False): def ExCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="Efield_cp", idir=0, level=level, include_ghosts=include_ghosts ) def EyCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="Efield_cp", idir=1, level=level, include_ghosts=include_ghosts ) def EzCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Efield_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="Efield_cp", idir=2, level=level, 
include_ghosts=include_ghosts ) def BxCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_cp", idir=0, level=level, include_ghosts=include_ghosts ) def ByCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_cp", idir=1, level=level, include_ghosts=include_ghosts ) def BzCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="Bfield_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="Bfield_cp", idir=2, level=level, include_ghosts=include_ghosts ) def JxCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="current_cp", idir=0, level=level, include_ghosts=include_ghosts ) def JyCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="current_cp", idir=1, level=level, include_ghosts=include_ghosts ) def JzCPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="current_cp", idir=2, level=level, include_ghosts=include_ghosts ) @@ -829,109 +839,118 @@ def GCPWrapper(level=0, include_ghosts=False): def EdgeLengthsxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_edge_lengths[x]", level=level, include_ghosts=include_ghosts + mf_name="edge_lengths", idir=0, level=level, include_ghosts=include_ghosts ) def EdgeLengthsyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_edge_lengths[y]", level=level, include_ghosts=include_ghosts + mf_name="edge_lengths", idir=1, level=level, include_ghosts=include_ghosts ) def EdgeLengthszWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_edge_lengths[z]", 
level=level, include_ghosts=include_ghosts + mf_name="edge_lengths", idir=2, level=level, include_ghosts=include_ghosts ) def FaceAreasxWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_face_areas[x]", level=level, include_ghosts=include_ghosts + mf_name="face_areas", idir=0, level=level, include_ghosts=include_ghosts ) def FaceAreasyWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_face_areas[y]", level=level, include_ghosts=include_ghosts + mf_name="face_areas", idir=1, level=level, include_ghosts=include_ghosts ) def FaceAreaszWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="m_face_areas[z]", level=level, include_ghosts=include_ghosts + mf_name="face_areas", idir=2, level=level, include_ghosts=include_ghosts ) -def JxFPAmpereWrapper(level=0, include_ghosts=False): +def JxFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp_ampere[x]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_current_fp_plasma", + idir=0, + level=level, + include_ghosts=include_ghosts, ) -def JyFPAmpereWrapper(level=0, include_ghosts=False): +def JyFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp_ampere[y]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_current_fp_plasma", + idir=1, + level=level, + include_ghosts=include_ghosts, ) -def JzFPAmpereWrapper(level=0, include_ghosts=False): +def JzFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="current_fp_ampere[z]", level=level, include_ghosts=include_ghosts + mf_name="hybrid_current_fp_plasma", + idir=2, + level=level, + include_ghosts=include_ghosts, ) def ExFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_fp", idir=0, level=level, include_ghosts=include_ghosts ) def EyFPPMLWrapper(level=0, 
include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_fp", idir=1, level=level, include_ghosts=include_ghosts ) def EzFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_fp", idir=2, level=level, include_ghosts=include_ghosts ) def BxFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_fp", idir=0, level=level, include_ghosts=include_ghosts ) def ByFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_fp", idir=1, level=level, include_ghosts=include_ghosts ) def BzFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_fp", idir=2, level=level, include_ghosts=include_ghosts ) def JxFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_fp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_fp", idir=0, level=level, include_ghosts=include_ghosts ) def JyFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_fp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_fp", idir=1, level=level, include_ghosts=include_ghosts ) def JzFPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_fp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_fp", idir=2, level=level, include_ghosts=include_ghosts ) @@ -949,55 +968,55 @@ def GFPPMLWrapper(level=0, include_ghosts=False): def ExCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_cp", idir=0, level=level, 
include_ghosts=include_ghosts ) def EyCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_cp", idir=1, level=level, include_ghosts=include_ghosts ) def EzCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_E_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_E_cp", idir=2, level=level, include_ghosts=include_ghosts ) def BxCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_cp", idir=0, level=level, include_ghosts=include_ghosts ) def ByCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_cp", idir=1, level=level, include_ghosts=include_ghosts ) def BzCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_B_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_B_cp", idir=2, level=level, include_ghosts=include_ghosts ) def JxCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_cp[x]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_cp", idir=0, level=level, include_ghosts=include_ghosts ) def JyCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_cp[y]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_cp", idir=1, level=level, include_ghosts=include_ghosts ) def JzCPPMLWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="pml_j_cp[z]", level=level, include_ghosts=include_ghosts + mf_name="pml_j_cp", idir=2, level=level, include_ghosts=include_ghosts ) diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index 8af012f5e7b..bc6b2d74106 100644 --- a/Python/pywarpx/particle_containers.py +++ 
b/Python/pywarpx/particle_containers.py @@ -733,7 +733,7 @@ def deposit_charge_density(self, level, clear_rho=True, sync_rho=True): sync_rho : bool If True, perform MPI exchange and properly set boundary cells for rho_fp. """ - rho_fp = libwarpx.warpx.multifab(f"rho_fp[level={level}]") + rho_fp = libwarpx.warpx.multifab("rho_fp", level) if rho_fp is None: raise RuntimeError("Multifab `rho_fp` is not allocated.") diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 0d51a8723b4..478b4d5802e 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1522,8 +1522,7 @@ def solver_initialize_inputs(self): # --- Same method names are used, though mapped to lower case. pywarpx.algo.maxwell_solver = self.method - if self.cfl is not None: - pywarpx.warpx.cfl = self.cfl + pywarpx.warpx.cfl = self.cfl if self.source_smoother is not None: self.source_smoother.smoother_initialize_inputs(self) @@ -1880,6 +1879,16 @@ class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): warpx_self_fields_verbosity: integer, default=2 Level of verbosity for the lab frame solver + + warpx_dt_update_interval: string, optional (default = -1) + How frequently the timestep is updated. Adaptive timestepping is disabled when this is <= 0. + + warpx_cfl: float, optional + Fraction of the CFL condition for particle velocity vs grid size, used to set the timestep when `dt_update_interval > 0`. + + warpx_max_dt: float, optional + The maximum allowable timestep when `dt_update_interval > 0`. 
+ """ def init(self, kw): @@ -1887,6 +1896,9 @@ def init(self, kw): self.absolute_tolerance = kw.pop("warpx_absolute_tolerance", None) self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None) self.magnetostatic = kw.pop("warpx_magnetostatic", False) + self.cfl = kw.pop("warpx_cfl", None) + self.dt_update_interval = kw.pop("dt_update_interval", None) + self.max_dt = kw.pop("warpx_max_dt", None) def solver_initialize_inputs(self): # Open BC means FieldBoundaryType::Open for electrostatic sims, rather than perfectly-matched layer @@ -1894,6 +1906,11 @@ def solver_initialize_inputs(self): self.grid.grid_initialize_inputs() + # set adaptive timestepping parameters + pywarpx.warpx.cfl = self.cfl + pywarpx.warpx.dt_update_interval = self.dt_update_interval + pywarpx.warpx.max_dt = self.max_dt + if self.relativistic: pywarpx.warpx.do_electrostatic = "relativistic" else: @@ -3890,6 +3907,7 @@ def __init__( "ParticleNumber", "LoadBalanceCosts", "LoadBalanceEfficiency", + "Timestep", ] # The species diagnostics require a species to be provided self._species_reduced_diagnostics = [ diff --git a/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json new file mode 100644 index 00000000000..561fbf86669 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_electrostatic_sphere_adaptive.json @@ -0,0 +1,17 @@ +{ + "lev=0": { + "Ex": 5.177444767224255, + "Ey": 5.177444767224254, + "Ez": 5.177444767224256, + "rho": 2.6092568008333797e-10 + }, + "electron": { + "particle_momentum_x": 1.3215019655285216e-23, + "particle_momentum_y": 1.3215019655285214e-23, + "particle_momentum_z": 1.3215019655285217e-23, + "particle_position_x": 912.2310003741203, + "particle_position_y": 912.2310003741203, + "particle_position_z": 912.2310003741202, + "particle_weight": 6212.501525878906 + } +} diff --git 
a/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json index acec34286f7..0a601b7b437 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json @@ -1,22 +1,22 @@ { + "lev=0": { + "Bx": 3.254604354043409e-14, + "By": 3.2768679907552955e-14, + "Bz": 1.0615351421410278e-16, + "Ex": 2.3084916770539354e-05, + "Ey": 2.2657235922655432e-05, + "Ez": 1.9978004351148e-05, + "jx": 1.781971994166362e-10, + "jy": 4.2163624424546344e-20, + "jz": 1.0378980680353126e-07 + }, "electron": { - "particle_momentum_x": 5.955475926588059e-26, - "particle_momentum_y": 1.4612764777454504e-35, - "particle_momentum_z": 3.4687284535374423e-23, - "particle_position_x": 0.049960237123814574, - "particle_position_y": 8.397636119991403e-15, - "particle_position_z": 0.10931687737912647, + "particle_momentum_x": 5.955475927655105e-26, + "particle_momentum_y": 1.4613271542201658e-35, + "particle_momentum_z": 3.468728453537439e-23, + "particle_position_x": 0.04996023704063194, + "particle_position_y": 8.398113230295983e-15, + "particle_position_z": 0.10931682580470406, "particle_weight": 1.0 - }, - "lev=0": { - "Bx": 3.254531465641299e-14, - "By": 3.2768092409497234e-14, - "Bz": 1.0615286316115558e-16, - "Ex": 2.30845657253269e-05, - "Ey": 2.2656898931877975e-05, - "Ez": 1.997747654112569e-05, - "jx": 1.7819477343635878e-10, - "jy": 4.2163030523377745e-20, - "jz": 1.0378839382497739e-07 } } diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json index e4ff1fc68a8..af9ab3a0bdd 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json @@ -1,20 +1,20 @@ { "lev=0": { - "Bx": 
100915933.44993827, - "By": 157610622.1855512, - "Bz": 9.717358898362187e-14, - "Ex": 4.7250652706211096e+16, - "Ey": 3.0253948990559976e+16, - "Ez": 3276573.9514776524, + "Bx": 100915933.446046, + "By": 157610622.18548763, + "Bz": 2.76973993530483e-13, + "Ex": 4.725065270619211e+16, + "Ey": 3.0253948989388292e+16, + "Ez": 3276573.9514776673, "rho": 10994013582437.193 }, "electron": { - "particle_momentum_x": 5.701277606050295e-19, - "particle_momentum_y": 3.6504516641520437e-19, + "particle_momentum_x": 5.701277606055763e-19, + "particle_momentum_y": 3.6504516636842883e-19, "particle_momentum_z": 1.145432768297242e-10, "particle_position_x": 17.314086912497864, - "particle_position_y": 0.2583691267187796, + "particle_position_y": 0.25836912671877965, "particle_position_z": 10066.329600000008, "particle_weight": 19969036501.910976 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json index 6d5eabb492e..e1fa54618ee 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json @@ -1,21 +1,21 @@ { "lev=0": { - "Bx": 1.3073041371012706e-14, - "By": 1.3033038210840872e-14, - "Bz": 5.595105968291083e-17, - "Ex": 2.801134785671445e-06, - "Ey": 2.8088613469887243e-06, - "Ez": 3.343430731047825e-06, - "jx": 2.5155716299904363e-11, - "jy": 2.013718424043256e-11, - "jz": 6.00631499206418e-09 + "Bx": 1.307357220398482e-14, + "By": 1.3033571630685163e-14, + "Bz": 5.594998319468307e-17, + "Ex": 2.8010832905044288e-06, + "Ey": 2.8088096742407935e-06, + "Ez": 3.3433681277560495e-06, + "jx": 2.5151718871714067e-11, + "jy": 2.013398608921663e-11, + "jz": 6.0063967622563335e-09 }, "electrons": { - "particle_momentum_x": 7.437088723328491e-24, - "particle_momentum_y": 5.9495056615288754e-24, - "particle_momentum_z": 5.117548636687908e-22, - 
"particle_position_x": 0.036489969262013186, - "particle_position_y": 0.029201200231260247, - "particle_position_z": 6.9681085285694095 + "particle_momentum_x": 7.43708887164806e-24, + "particle_momentum_y": 5.949505779760011e-24, + "particle_momentum_z": 5.117548636790359e-22, + "particle_position_x": 0.03648994812700447, + "particle_position_y": 0.029201183320618985, + "particle_position_z": 6.968107021318396 } -} +} \ No newline at end of file diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 203c109f026..9e7dbc0034c 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -17,6 +17,8 @@ # include "FieldSolver/SpectralSolver/SpectralSolver.H" #endif +#include + #include #include #include @@ -155,23 +157,6 @@ public: void ComputePMLFactors (amrex::Real dt); - std::array GetE_fp (); - std::array GetB_fp (); - std::array Getj_fp (); - std::array GetE_cp (); - std::array GetB_cp (); - std::array Getj_cp (); - std::array Get_edge_lengths (); - std::array Get_face_areas (); - - // Used when WarpX::do_pml_dive_cleaning = true - amrex::MultiFab* GetF_fp (); - amrex::MultiFab* GetF_cp (); - - // Used when WarpX::do_pml_divb_cleaning = true - amrex::MultiFab* GetG_fp (); - amrex::MultiFab* GetG_cp (); - [[nodiscard]] const MultiSigmaBox& GetMultiSigmaBox_fp () const { return *sigba_fp; @@ -183,35 +168,33 @@ public: } #ifdef WARPX_USE_FFT - void PushPSATD (int lev); + void PushPSATD (ablastr::fields::MultiFabRegister& fields, int lev); #endif - void CopyJtoPMLs (const std::array& j_fp, - const std::array& j_cp); + void CopyJtoPMLs (ablastr::fields::MultiFabRegister& fields, int lev); - void Exchange (const std::array& mf_pml, - const std::array& mf, + void Exchange (ablastr::fields::VectorField mf_pml, + ablastr::fields::VectorField mf, + const PatchType& patch_type, + int do_pml_in_domain); + void Exchange (amrex::MultiFab* mf_pml, + amrex::MultiFab* mf, const PatchType& patch_type, int do_pml_in_domain); - 
void CopyJtoPMLs (PatchType patch_type, - const std::array& jp); - - void ExchangeF (amrex::MultiFab* F_fp, amrex::MultiFab* F_cp, int do_pml_in_domain); - void ExchangeF (PatchType patch_type, amrex::MultiFab* Fp, int do_pml_in_domain); - - void ExchangeG (amrex::MultiFab* G_fp, amrex::MultiFab* G_cp, int do_pml_in_domain); - void ExchangeG (PatchType patch_type, amrex::MultiFab* Gp, int do_pml_in_domain); + void CopyJtoPMLs ( + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int lev + ); - void FillBoundaryE (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryB (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryF (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryG (PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundary (ablastr::fields::VectorField mf_pml, PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundary (amrex::MultiFab & mf_pml, PatchType patch_type, std::optional nodal_sync=std::nullopt); [[nodiscard]] bool ok () const { return m_ok; } - void CheckPoint (const std::string& dir) const; - void Restart (const std::string& dir); + void CheckPoint (ablastr::fields::MultiFabRegister& fields, const std::string& dir) const; + void Restart (ablastr::fields::MultiFabRegister& fields, const std::string& dir); static void Exchange (amrex::MultiFab& pml, amrex::MultiFab& reg, const amrex::Geometry& geom, int do_pml_in_domain); @@ -227,24 +210,6 @@ private: const amrex::Geometry* m_geom; const amrex::Geometry* m_cgeom; - std::array,3> pml_E_fp; - std::array,3> pml_B_fp; - std::array,3> pml_j_fp; - - std::array,3> pml_edge_lengths; - - std::array,3> pml_E_cp; - std::array,3> pml_B_cp; - std::array,3> pml_j_cp; - - // Used when WarpX::do_pml_dive_cleaning = true - std::unique_ptr pml_F_fp; - std::unique_ptr pml_F_cp; - - // Used when WarpX::do_pml_divb_cleaning = true - std::unique_ptr pml_G_fp; - 
std::unique_ptr pml_G_cp; - std::unique_ptr sigba_fp; std::unique_ptr sigba_cp; @@ -293,13 +258,15 @@ private: }; #ifdef WARPX_USE_FFT -void PushPMLPSATDSinglePatch( int lev, +void PushPMLPSATDSinglePatch ( + int lev, SpectralSolver& solver, - std::array,3>& pml_E, - std::array,3>& pml_B, - std::unique_ptr& pml_F, - std::unique_ptr& pml_G, - const amrex::IntVect& fill_guards); + ablastr::fields::VectorField& pml_E, + ablastr::fields::VectorField& pml_B, + ablastr::fields::ScalarField pml_F, + ablastr::fields::ScalarField pml_G, + const amrex::IntVect& fill_guards +); #endif #endif diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index a66dcb5c0bb..f45ca222e69 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -10,7 +10,7 @@ #include "BoundaryConditions/PML.H" #include "BoundaryConditions/PMLComponent.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #ifdef WARPX_USE_FFT # include "FieldSolver/SpectralSolver/SpectralFieldData.H" #endif @@ -57,7 +57,7 @@ #endif using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -571,6 +571,8 @@ PML::PML (const int lev, const BoxArray& grid_ba, WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, "PML: eb_enabled is true but was not compiled in."); #endif + using ablastr::fields::Direction; + // When `do_pml_in_domain` is true, the PML overlap with the last `ncell` of the physical domain or fine patch(es) // (instead of extending `ncell` outside of the physical domain or fine patch(es)) // In order to implement this, we define a new reduced Box Array ensuring that it does not @@ -698,33 +700,36 @@ PML::PML (const int lev, const BoxArray& grid_ba, const int ncompe = (m_dive_cleaning) ? 3 : 2; const int ncompb = (m_divb_cleaning) ? 
3 : 2; - const amrex::BoxArray ba_Ex = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Efield_fp, 0,0).ixType().toIntVect()); - const amrex::BoxArray ba_Ey = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Efield_fp, 0,1).ixType().toIntVect()); - const amrex::BoxArray ba_Ez = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Efield_fp, 0,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_E_fp[0], ba_Ex, dm, ncompe, nge, lev, "pml_E_fp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_fp[1], ba_Ey, dm, ncompe, nge, lev, "pml_E_fp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_fp[2], ba_Ez, dm, ncompe, nge, lev, "pml_E_fp[z]", 0.0_rt); - - const amrex::BoxArray ba_Bx = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Bfield_fp, 0,0).ixType().toIntVect()); - const amrex::BoxArray ba_By = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Bfield_fp, 0,1).ixType().toIntVect()); - const amrex::BoxArray ba_Bz = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::Bfield_fp, 0,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_B_fp[0], ba_Bx, dm, ncompb, ngb, lev, "pml_B_fp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_fp[1], ba_By, dm, ncompb, ngb, lev, "pml_B_fp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_fp[2], ba_Bz, dm, ncompb, ngb, lev, "pml_B_fp[z]", 0.0_rt); - - const amrex::BoxArray ba_jx = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::current_fp, 0,0).ixType().toIntVect()); - const amrex::BoxArray ba_jy = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::current_fp, 0,1).ixType().toIntVect()); - const amrex::BoxArray ba_jz = amrex::convert(ba, WarpX::GetInstance().getField(FieldType::current_fp, 0,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_j_fp[0], ba_jx, dm, 1, ngb, lev, "pml_j_fp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_fp[1], ba_jy, dm, 1, ngb, lev, "pml_j_fp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_fp[2], ba_jz, dm, 1, ngb, lev, 
"pml_j_fp[z]", 0.0_rt); + auto& warpx = WarpX::GetInstance(); + using ablastr::fields::Direction; + + const amrex::BoxArray ba_Ex = amrex::convert(ba, warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_Ey = amrex::convert(ba, warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_Ez = amrex::convert(ba, warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, 0)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{0}, lev, ba_Ex, dm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{1}, lev, ba_Ey, dm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{2}, lev, ba_Ez, dm, ncompe, nge, 0.0_rt, false, false); + + const amrex::BoxArray ba_Bx = amrex::convert(ba, warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_By = amrex::convert(ba, warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_Bz = amrex::convert(ba, warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, 0)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{0}, lev, ba_Bx, dm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{1}, lev, ba_By, dm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{2}, lev, ba_Bz, dm, ncompb, ngb, 0.0_rt, false, false); + + const amrex::BoxArray ba_jx = amrex::convert(ba, WarpX::GetInstance().m_fields.get(FieldType::current_fp, Direction{0}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_jy = amrex::convert(ba, WarpX::GetInstance().m_fields.get(FieldType::current_fp, Direction{1}, 0)->ixType().toIntVect()); + const amrex::BoxArray ba_jz = amrex::convert(ba, WarpX::GetInstance().m_fields.get(FieldType::current_fp, 
Direction{2}, 0)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_j_fp, Direction{0}, lev, ba_jx, dm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_fp, Direction{1}, lev, ba_jy, dm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_fp, Direction{2}, lev, ba_jz, dm, 1, ngb, 0.0_rt, false, false); #ifdef AMREX_USE_EB if (eb_enabled) { const amrex::IntVect max_guard_EB_vect = amrex::IntVect(max_guard_EB); - WarpX::AllocInitMultiFab(pml_edge_lengths[0], ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_edge_lengths[1], ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_edge_lengths[2], ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, lev, "pml_edge_lengths[z]", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_edge_lengths, Direction{0}, lev, ba_Ex, dm, WarpX::ncomps, max_guard_EB_vect, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_edge_lengths, Direction{1}, lev, ba_Ey, dm, WarpX::ncomps, max_guard_EB_vect, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_edge_lengths, Direction{2}, lev, ba_Ez, dm, WarpX::ncomps, max_guard_EB_vect, 0.0_rt, false, false); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || @@ -732,8 +737,9 @@ PML::PML (const int lev, const BoxArray& grid_ba, auto const eb_fact = fieldEBFactory(); - WarpX::ComputeEdgeLengths(pml_edge_lengths, eb_fact); - WarpX::ScaleEdges(pml_edge_lengths, WarpX::CellSize(lev)); + ablastr::fields::VectorField t_pml_edge_lengths = warpx.m_fields.get_alldirs(FieldType::pml_edge_lengths, lev); + WarpX::ComputeEdgeLengths(t_pml_edge_lengths, eb_fact); + WarpX::ScaleEdges(t_pml_edge_lengths, WarpX::CellSize(lev)); } } @@ -743,7 +749,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, if 
(m_dive_cleaning) { const amrex::BoxArray ba_F_nodal = amrex::convert(ba, amrex::IntVect::TheNodeVector()); - WarpX::AllocInitMultiFab(pml_F_fp, ba_F_nodal, dm, 3, ngf, lev, "pml_F_fp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_F_fp, lev, ba_F_nodal, dm, 3, ngf, 0.0_rt, false, false); } if (m_divb_cleaning) @@ -753,7 +759,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, (grid_type == GridType::Collocated) ? amrex::IntVect::TheNodeVector() : amrex::IntVect::TheCellVector(); const amrex::BoxArray ba_G_nodal = amrex::convert(ba, G_nodal_flag); - WarpX::AllocInitMultiFab(pml_G_fp, ba_G_nodal, dm, 3, ngf, lev, "pml_G_fp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_G_fp, lev, ba_G_nodal, dm, 3, ngf, 0.0_rt, false, false); } Box single_domain_box = is_single_box_domain ? domain0 : Box(); @@ -835,24 +841,24 @@ PML::PML (const int lev, const BoxArray& grid_ba, cdm.define(cba); } - const amrex::BoxArray cba_Ex = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Efield_cp, 1,0).ixType().toIntVect()); - const amrex::BoxArray cba_Ey = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Efield_cp, 1,1).ixType().toIntVect()); - const amrex::BoxArray cba_Ez = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Efield_cp, 1,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_E_cp[0], cba_Ex, cdm, ncompe, nge, lev, "pml_E_cp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_cp[1], cba_Ey, cdm, ncompe, nge, lev, "pml_E_cp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_cp[2], cba_Ez, cdm, ncompe, nge, lev, "pml_E_cp[z]", 0.0_rt); + const amrex::BoxArray cba_Ex = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Efield_cp, Direction{0}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_Ey = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Efield_cp, Direction{1}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_Ez = amrex::convert(cba, 
WarpX::GetInstance().m_fields.get(FieldType::Efield_cp, Direction{2}, 1)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_E_cp, Direction{0}, lev, cba_Ex, cdm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_cp, Direction{1}, lev, cba_Ey, cdm, ncompe, nge, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_E_cp, Direction{2}, lev, cba_Ez, cdm, ncompe, nge, 0.0_rt, false, false); - const amrex::BoxArray cba_Bx = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Bfield_cp, 1,0).ixType().toIntVect()); - const amrex::BoxArray cba_By = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Bfield_cp, 1,1).ixType().toIntVect()); - const amrex::BoxArray cba_Bz = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::Bfield_cp, 1,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_B_cp[0], cba_Bx, cdm, ncompb, ngb, lev, "pml_B_cp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_cp[1], cba_By, cdm, ncompb, ngb, lev, "pml_B_cp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_cp[2], cba_Bz, cdm, ncompb, ngb, lev, "pml_B_cp[z]", 0.0_rt); + const amrex::BoxArray cba_Bx = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Bfield_cp, Direction{0}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_By = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Bfield_cp, Direction{1}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_Bz = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::Bfield_cp, Direction{2}, 1)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_B_cp, Direction{0}, lev, cba_Bx, cdm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_cp, Direction{1}, lev, cba_By, cdm, ncompb, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_B_cp, Direction{2}, lev, cba_Bz, cdm, ncompb, ngb, 0.0_rt, false, false); if (m_dive_cleaning) { const amrex::BoxArray cba_F_nodal = 
amrex::convert(cba, amrex::IntVect::TheNodeVector()); - WarpX::AllocInitMultiFab(pml_F_cp, cba_F_nodal, cdm, 3, ngf, lev, "pml_F_cp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_F_cp, lev, cba_F_nodal, cdm, 3, ngf, 0.0_rt, false, false); } if (m_divb_cleaning) @@ -862,15 +868,15 @@ PML::PML (const int lev, const BoxArray& grid_ba, (grid_type == GridType::Collocated) ? amrex::IntVect::TheNodeVector() : amrex::IntVect::TheCellVector(); const amrex::BoxArray cba_G_nodal = amrex::convert(cba, G_nodal_flag); - WarpX::AllocInitMultiFab( pml_G_cp, cba_G_nodal, cdm, 3, ngf, lev, "pml_G_cp", 0.0_rt); + warpx.m_fields.alloc_init(FieldType::pml_G_cp, lev, cba_G_nodal, cdm, 3, ngf, 0.0_rt, false, false); } - const amrex::BoxArray cba_jx = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::current_cp, 1,0).ixType().toIntVect()); - const amrex::BoxArray cba_jy = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::current_cp, 1,1).ixType().toIntVect()); - const amrex::BoxArray cba_jz = amrex::convert(cba, WarpX::GetInstance().getField(FieldType::current_cp, 1,2).ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_j_cp[0], cba_jx, cdm, 1, ngb, lev, "pml_j_cp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_cp[1], cba_jy, cdm, 1, ngb, lev, "pml_j_cp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_j_cp[2], cba_jz, cdm, 1, ngb, lev, "pml_j_cp[z]", 0.0_rt); + const amrex::BoxArray cba_jx = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::current_cp, Direction{0}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_jy = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::current_cp, Direction{1}, 1)->ixType().toIntVect()); + const amrex::BoxArray cba_jz = amrex::convert(cba, WarpX::GetInstance().m_fields.get(FieldType::current_cp, Direction{2}, 1)->ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_j_cp, Direction{0}, lev, cba_jx, cdm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_cp, 
Direction{1}, lev, cba_jy, cdm, 1, ngb, 0.0_rt, false, false); + warpx.m_fields.alloc_init(FieldType::pml_j_cp, Direction{2}, lev, cba_jz, cdm, 1, ngb, 0.0_rt, false, false); single_domain_box = is_single_box_domain ? cdomain : Box(); sigba_cp = std::make_unique(cba, cdm, grid_cba_reduced, cgeom->CellSize(), @@ -1045,96 +1051,32 @@ PML::ComputePMLFactors (amrex::Real dt) } } -std::array -PML::GetE_fp () -{ - return {pml_E_fp[0].get(), pml_E_fp[1].get(), pml_E_fp[2].get()}; -} - -std::array -PML::GetB_fp () -{ - return {pml_B_fp[0].get(), pml_B_fp[1].get(), pml_B_fp[2].get()}; -} - -std::array -PML::Getj_fp () -{ - return {pml_j_fp[0].get(), pml_j_fp[1].get(), pml_j_fp[2].get()}; -} - -std::array -PML::GetE_cp () -{ - return {pml_E_cp[0].get(), pml_E_cp[1].get(), pml_E_cp[2].get()}; -} - -std::array -PML::GetB_cp () -{ - return {pml_B_cp[0].get(), pml_B_cp[1].get(), pml_B_cp[2].get()}; -} - -std::array -PML::Getj_cp () -{ - return {pml_j_cp[0].get(), pml_j_cp[1].get(), pml_j_cp[2].get()}; -} - -std::array -PML::Get_edge_lengths() -{ - return {pml_edge_lengths[0].get(), pml_edge_lengths[1].get(), pml_edge_lengths[2].get()}; -} - - -MultiFab* -PML::GetF_fp () -{ - return pml_F_fp.get(); -} - -MultiFab* -PML::GetF_cp () -{ - return pml_F_cp.get(); -} - -MultiFab* -PML::GetG_fp () +void +PML::CopyJtoPMLs ( + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int lev +) { - return pml_G_fp.get(); -} + using ablastr::fields::Direction; -MultiFab* -PML::GetG_cp () -{ - return pml_G_cp.get(); -} + bool const has_j_fp = fields.has_vector(FieldType::current_fp, lev); + bool const has_pml_j_fp = fields.has_vector(FieldType::pml_j_fp, lev); + bool const has_j_cp = fields.has_vector(FieldType::current_cp, lev); + bool const has_pml_j_cp = fields.has_vector(FieldType::pml_j_cp, lev); -void PML::Exchange (const std::array& mf_pml, - const std::array& mf, - const PatchType& patch_type, - const int do_pml_in_domain) -{ - const amrex::Geometry& geom = (patch_type == 
PatchType::fine) ? *m_geom : *m_cgeom; - if (mf_pml[0] && mf[0]) { Exchange(*mf_pml[0], *mf[0], geom, do_pml_in_domain); } - if (mf_pml[1] && mf[1]) { Exchange(*mf_pml[1], *mf[1], geom, do_pml_in_domain); } - if (mf_pml[2] && mf[2]) { Exchange(*mf_pml[2], *mf[2], geom, do_pml_in_domain); } -} - -void -PML::CopyJtoPMLs (PatchType patch_type, - const std::array& jp) -{ - if (patch_type == PatchType::fine && pml_j_fp[0] && jp[0]) + if (patch_type == PatchType::fine && has_pml_j_fp && has_j_fp) { + ablastr::fields::VectorField pml_j_fp = fields.get_alldirs(FieldType::pml_j_fp, lev); + ablastr::fields::VectorField jp = fields.get_alldirs(FieldType::current_fp, lev); CopyToPML(*pml_j_fp[0], *jp[0], *m_geom); CopyToPML(*pml_j_fp[1], *jp[1], *m_geom); CopyToPML(*pml_j_fp[2], *jp[2], *m_geom); } - else if (patch_type == PatchType::coarse && pml_j_cp[0] && jp[0]) + else if (patch_type == PatchType::coarse && has_j_cp && has_pml_j_cp) { + ablastr::fields::VectorField pml_j_cp = fields.get_alldirs(FieldType::pml_j_cp, lev); + ablastr::fields::VectorField jp = fields.get_alldirs(FieldType::current_cp, lev); CopyToPML(*pml_j_cp[0], *jp[0], *m_cgeom); CopyToPML(*pml_j_cp[1], *jp[1], *m_cgeom); CopyToPML(*pml_j_cp[2], *jp[2], *m_cgeom); @@ -1142,46 +1084,33 @@ PML::CopyJtoPMLs (PatchType patch_type, } void -PML::CopyJtoPMLs (const std::array& j_fp, - const std::array& j_cp) +PML::CopyJtoPMLs ( + ablastr::fields::MultiFabRegister& fields, + int lev +) { - CopyJtoPMLs(PatchType::fine, j_fp); - CopyJtoPMLs(PatchType::coarse, j_cp); + CopyJtoPMLs(fields, PatchType::fine, lev); + CopyJtoPMLs(fields, PatchType::coarse, lev); } -void -PML::ExchangeF (amrex::MultiFab* F_fp, amrex::MultiFab* F_cp, int do_pml_in_domain) -{ - ExchangeF(PatchType::fine, F_fp, do_pml_in_domain); - ExchangeF(PatchType::coarse, F_cp, do_pml_in_domain); -} - -void -PML::ExchangeF (PatchType patch_type, amrex::MultiFab* Fp, int do_pml_in_domain) -{ - if (patch_type == PatchType::fine && pml_F_fp && Fp) { - 
Exchange(*pml_F_fp, *Fp, *m_geom, do_pml_in_domain); - } else if (patch_type == PatchType::coarse && pml_F_cp && Fp) { - Exchange(*pml_F_cp, *Fp, *m_cgeom, do_pml_in_domain); - } -} - -void PML::ExchangeG (amrex::MultiFab* G_fp, amrex::MultiFab* G_cp, int do_pml_in_domain) +void PML::Exchange (ablastr::fields::VectorField mf_pml, + ablastr::fields::VectorField mf, + const PatchType& patch_type, + const int do_pml_in_domain) { - ExchangeG(PatchType::fine, G_fp, do_pml_in_domain); - ExchangeG(PatchType::coarse, G_cp, do_pml_in_domain); + const amrex::Geometry& geom = (patch_type == PatchType::fine) ? *m_geom : *m_cgeom; + if (mf_pml[0] && mf[0]) { Exchange(*mf_pml[0], *mf[0], geom, do_pml_in_domain); } + if (mf_pml[1] && mf[1]) { Exchange(*mf_pml[1], *mf[1], geom, do_pml_in_domain); } + if (mf_pml[2] && mf[2]) { Exchange(*mf_pml[2], *mf[2], geom, do_pml_in_domain); } } -void PML::ExchangeG (PatchType patch_type, amrex::MultiFab* Gp, int do_pml_in_domain) +void PML::Exchange (amrex::MultiFab* mf_pml, + amrex::MultiFab* mf, + const PatchType& patch_type, + const int do_pml_in_domain) { - if (patch_type == PatchType::fine && pml_G_fp && Gp) - { - Exchange(*pml_G_fp, *Gp, *m_geom, do_pml_in_domain); - } - else if (patch_type == PatchType::coarse && pml_G_cp && Gp) - { - Exchange(*pml_G_cp, *Gp, *m_cgeom, do_pml_in_domain); - } + const amrex::Geometry& geom = (patch_type == PatchType::fine) ? 
*m_geom : *m_cgeom; + if (mf_pml && mf) { Exchange(*mf_pml, *mf, geom, do_pml_in_domain); } } void @@ -1275,74 +1204,40 @@ PML::CopyToPML (MultiFab& pml, MultiFab& reg, const Geometry& geom) } void -PML::FillBoundaryE (PatchType patch_type, std::optional nodal_sync) +PML::FillBoundary (ablastr::fields::VectorField mf_pml, PatchType patch_type, std::optional nodal_sync) { - if (patch_type == PatchType::fine && pml_E_fp[0] && pml_E_fp[0]->nGrowVect().max() > 0) - { - const auto& period = m_geom->periodicity(); - const Vector mf{pml_E_fp[0].get(),pml_E_fp[1].get(),pml_E_fp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_E_cp[0] && pml_E_cp[0]->nGrowVect().max() > 0) - { - const auto& period = m_cgeom->periodicity(); - const Vector mf{pml_E_cp[0].get(),pml_E_cp[1].get(),pml_E_cp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } -} + const auto& period = + (patch_type == PatchType::fine) ? 
+ m_geom->periodicity() : + m_cgeom->periodicity(); -void -PML::FillBoundaryB (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_B_fp[0]) - { - const auto& period = m_geom->periodicity(); - const Vector mf{pml_B_fp[0].get(),pml_B_fp[1].get(),pml_B_fp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_B_cp[0]) - { - const auto& period = m_cgeom->periodicity(); - const Vector mf{pml_B_cp[0].get(),pml_B_cp[1].get(),pml_B_cp[2].get()}; - ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); - } + const Vector mf{mf_pml[0], mf_pml[1], mf_pml[2]}; + ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); } void -PML::FillBoundaryF (PatchType patch_type, std::optional nodal_sync) +PML::FillBoundary (amrex::MultiFab & mf_pml, PatchType patch_type, std::optional nodal_sync) { - if (patch_type == PatchType::fine && pml_F_fp && pml_F_fp->nGrowVect().max() > 0) - { - const auto& period = m_geom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_F_fp, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_F_cp && pml_F_cp->nGrowVect().max() > 0) - { - const auto& period = m_cgeom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_F_cp, WarpX::do_single_precision_comms, period, nodal_sync); - } -} + const auto& period = + (patch_type == PatchType::fine) ? 
+ m_geom->periodicity() : + m_cgeom->periodicity(); -void -PML::FillBoundaryG (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_G_fp && pml_G_fp->nGrowVect().max() > 0) - { - const auto& period = m_geom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_G_fp, WarpX::do_single_precision_comms, period, nodal_sync); - } - else if (patch_type == PatchType::coarse && pml_G_cp && pml_G_cp->nGrowVect().max() > 0) - { - const auto& period = m_cgeom->periodicity(); - ablastr::utils::communication::FillBoundary(*pml_G_cp, WarpX::do_single_precision_comms, period, nodal_sync); - } + ablastr::utils::communication::FillBoundary(mf_pml, WarpX::do_single_precision_comms, period, nodal_sync); } void -PML::CheckPoint (const std::string& dir) const +PML::CheckPoint ( + ablastr::fields::MultiFabRegister& fields, + const std::string& dir +) const { - if (pml_E_fp[0]) + using ablastr::fields::Direction; + + if (fields.has_vector(FieldType::pml_E_fp, 0)) { + ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); + ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); VisMF::AsyncWrite(*pml_E_fp[0], dir+"_Ex_fp"); VisMF::AsyncWrite(*pml_E_fp[1], dir+"_Ey_fp"); VisMF::AsyncWrite(*pml_E_fp[2], dir+"_Ez_fp"); @@ -1351,8 +1246,10 @@ PML::CheckPoint (const std::string& dir) const VisMF::AsyncWrite(*pml_B_fp[2], dir+"_Bz_fp"); } - if (pml_E_cp[0]) + if (fields.has_vector(FieldType::pml_E_cp, 0)) { + ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); + ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); VisMF::AsyncWrite(*pml_E_cp[0], dir+"_Ex_cp"); VisMF::AsyncWrite(*pml_E_cp[1], dir+"_Ey_cp"); VisMF::AsyncWrite(*pml_E_cp[2], dir+"_Ez_cp"); @@ -1363,10 +1260,17 @@ PML::CheckPoint (const std::string& dir) const } void -PML::Restart (const std::string& dir) +PML::Restart ( + ablastr::fields::MultiFabRegister& 
fields, + const std::string& dir +) { - if (pml_E_fp[0]) + using ablastr::fields::Direction; + + if (fields.has_vector(FieldType::pml_E_fp, 0)) { + ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); + ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); VisMF::Read(*pml_E_fp[0], dir+"_Ex_fp"); VisMF::Read(*pml_E_fp[1], dir+"_Ey_fp"); VisMF::Read(*pml_E_fp[2], dir+"_Ez_fp"); @@ -1375,8 +1279,10 @@ PML::Restart (const std::string& dir) VisMF::Read(*pml_B_fp[2], dir+"_Bz_fp"); } - if (pml_E_cp[0]) + if (fields.has_vector(FieldType::pml_E_cp, 0)) { + ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); + ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); VisMF::Read(*pml_E_cp[0], dir+"_Ex_cp"); VisMF::Read(*pml_E_cp[1], dir+"_Ey_cp"); VisMF::Read(*pml_E_cp[2], dir+"_Ez_cp"); @@ -1388,11 +1294,20 @@ PML::Restart (const std::string& dir) #ifdef WARPX_USE_FFT void -PML::PushPSATD (const int lev) { +PML::PushPSATD (ablastr::fields::MultiFabRegister& fields, const int lev) +{ + ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, lev); + ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, lev); + ablastr::fields::ScalarField pml_F_fp = fields.get(FieldType::pml_F_fp, lev); + ablastr::fields::ScalarField pml_G_fp = fields.get(FieldType::pml_G_fp, lev); // Update the fields on the fine and coarse patch PushPMLPSATDSinglePatch(lev, *spectral_solver_fp, pml_E_fp, pml_B_fp, pml_F_fp, pml_G_fp, m_fill_guards_fields); if (spectral_solver_cp) { + ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, lev); + ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, lev); + ablastr::fields::ScalarField pml_F_cp = fields.get(FieldType::pml_F_cp, lev); + ablastr::fields::ScalarField pml_G_cp = fields.get(FieldType::pml_G_cp, lev); PushPMLPSATDSinglePatch(lev, 
*spectral_solver_cp, pml_E_cp, pml_B_cp, pml_F_cp, pml_G_cp, m_fill_guards_fields); } } @@ -1401,10 +1316,10 @@ void PushPMLPSATDSinglePatch ( const int lev, SpectralSolver& solver, - std::array,3>& pml_E, - std::array,3>& pml_B, - std::unique_ptr& pml_F, - std::unique_ptr& pml_G, + ablastr::fields::VectorField& pml_E, + ablastr::fields::VectorField& pml_B, + ablastr::fields::ScalarField pml_F, + ablastr::fields::ScalarField pml_G, const amrex::IntVect& fill_guards) { const SpectralFieldIndex& Idx = solver.m_spectral_index; diff --git a/Source/BoundaryConditions/PML_RZ.H b/Source/BoundaryConditions/PML_RZ.H index c908681d8e5..20c7d360fc7 100644 --- a/Source/BoundaryConditions/PML_RZ.H +++ b/Source/BoundaryConditions/PML_RZ.H @@ -16,6 +16,8 @@ # include "FieldSolver/SpectralSolver/SpectralSolverRZ.H" #endif +#include + #include #include #include @@ -30,27 +32,24 @@ class PML_RZ { public: - PML_RZ (int lev, const amrex::BoxArray& grid_ba, const amrex::DistributionMapping& grid_dm, - const amrex::Geometry* geom, int ncell, int do_pml_in_domain); + PML_RZ (int lev, amrex::BoxArray const& grid_ba, amrex::DistributionMapping const& grid_dm, + amrex::Geometry const* geom, int ncell, int do_pml_in_domain); void ApplyDamping(amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, amrex::MultiFab* Bt_fp, amrex::MultiFab* Bz_fp, - amrex::Real dt); - - std::array GetE_fp (); - std::array GetB_fp (); + amrex::Real dt, ablastr::fields::MultiFabRegister& fields); #ifdef WARPX_USE_FFT void PushPSATD (int lev); #endif - void FillBoundaryE (); - void FillBoundaryB (); - void FillBoundaryE (PatchType patch_type, std::optional nodal_sync=std::nullopt); - void FillBoundaryB (PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundaryE (ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, std::optional nodal_sync=std::nullopt); + void FillBoundaryB (ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, std::optional nodal_sync=std::nullopt); - 
void CheckPoint (const std::string& dir) const; - void Restart (const std::string& dir); + void CheckPoint (ablastr::fields::MultiFabRegister& fields, std::string const& dir) const; + void Restart (ablastr::fields::MultiFabRegister& fields, std::string const& dir); private: @@ -58,15 +57,13 @@ private: const int m_do_pml_in_domain; const amrex::Geometry* m_geom; - // Only contains Er and Et, and Br and Bt - std::array,2> pml_E_fp; - std::array,2> pml_B_fp; + // The MultiFabs pml_E_fp and pml_B_fp are setup using the registry. + // They hold Er, Et, and Br, Bt. #ifdef WARPX_USE_FFT - void PushPMLPSATDSinglePatchRZ ( int lev, + void PushPMLPSATDSinglePatchRZ (int lev, SpectralSolverRZ& solver, - std::array,2>& pml_E, - std::array,2>& pml_B); + ablastr::fields::MultiFabRegister& fields); #endif }; diff --git a/Source/BoundaryConditions/PML_RZ.cpp b/Source/BoundaryConditions/PML_RZ.cpp index 78f3cf24987..8fd6a1869ae 100644 --- a/Source/BoundaryConditions/PML_RZ.cpp +++ b/Source/BoundaryConditions/PML_RZ.cpp @@ -8,7 +8,7 @@ #include "PML_RZ.H" #include "BoundaryConditions/PML_RZ.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #ifdef WARPX_USE_FFT # include "FieldSolver/SpectralSolver/SpectralFieldDataRZ.H" #endif @@ -33,43 +33,55 @@ #include #include -using namespace amrex; -using namespace warpx::fields; +using namespace amrex::literals; +using warpx::fields::FieldType; +using ablastr::fields::Direction; -PML_RZ::PML_RZ (const int lev, const amrex::BoxArray& grid_ba, const amrex::DistributionMapping& grid_dm, - const amrex::Geometry* geom, const int ncell, const int do_pml_in_domain) +PML_RZ::PML_RZ (int lev, amrex::BoxArray const& grid_ba, amrex::DistributionMapping const& grid_dm, + amrex::Geometry const* geom, int ncell, int do_pml_in_domain) : m_ncell(ncell), m_do_pml_in_domain(do_pml_in_domain), m_geom(geom) { - - const amrex::MultiFab & Er_fp = WarpX::GetInstance().getField(FieldType::Efield_fp, lev,0); - const amrex::MultiFab & Et_fp = 
WarpX::GetInstance().getField(FieldType::Efield_fp, lev,1); - const amrex::BoxArray ba_Er = amrex::convert(grid_ba, Er_fp.ixType().toIntVect()); - const amrex::BoxArray ba_Et = amrex::convert(grid_ba, Et_fp.ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_E_fp[0], ba_Er, grid_dm, Er_fp.nComp(), Er_fp.nGrowVect(), lev, "pml_E_fp[0]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_E_fp[1], ba_Et, grid_dm, Et_fp.nComp(), Et_fp.nGrowVect(), lev, "pml_E_fp[1]", 0.0_rt); - - const amrex::MultiFab & Br_fp = WarpX::GetInstance().getField(FieldType::Bfield_fp, lev,0); - const amrex::MultiFab & Bt_fp = WarpX::GetInstance().getField(FieldType::Bfield_fp, lev,1); - const amrex::BoxArray ba_Br = amrex::convert(grid_ba, Br_fp.ixType().toIntVect()); - const amrex::BoxArray ba_Bt = amrex::convert(grid_ba, Bt_fp.ixType().toIntVect()); - WarpX::AllocInitMultiFab(pml_B_fp[0], ba_Br, grid_dm, Br_fp.nComp(), Br_fp.nGrowVect(), lev, "pml_B_fp[0]", 0.0_rt); - WarpX::AllocInitMultiFab(pml_B_fp[1], ba_Bt, grid_dm, Bt_fp.nComp(), Bt_fp.nGrowVect(), lev, "pml_B_fp[1]", 0.0_rt); + auto & warpx = WarpX::GetInstance(); + + bool const remake = false; + bool const redistribute_on_remake = false; + + amrex::MultiFab const& Er_fp = *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev); + amrex::MultiFab const& Et_fp = *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev); + amrex::BoxArray const ba_Er = amrex::convert(grid_ba, Er_fp.ixType().toIntVect()); + amrex::BoxArray const ba_Et = amrex::convert(grid_ba, Et_fp.ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{0}, lev, ba_Er, grid_dm, Er_fp.nComp(), Er_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); + warpx.m_fields.alloc_init(FieldType::pml_E_fp, Direction{1}, lev, ba_Et, grid_dm, Et_fp.nComp(), Et_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); + + amrex::MultiFab const& Br_fp = *warpx.m_fields.get(FieldType::Bfield_fp,Direction{0},lev); + amrex::MultiFab const& Bt_fp = 
*warpx.m_fields.get(FieldType::Bfield_fp,Direction{1},lev); + amrex::BoxArray const ba_Br = amrex::convert(grid_ba, Br_fp.ixType().toIntVect()); + amrex::BoxArray const ba_Bt = amrex::convert(grid_ba, Bt_fp.ixType().toIntVect()); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{0}, lev, ba_Br, grid_dm, Br_fp.nComp(), Br_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); + warpx.m_fields.alloc_init(FieldType::pml_B_fp, Direction{1}, lev, ba_Bt, grid_dm, Bt_fp.nComp(), Bt_fp.nGrowVect(), 0.0_rt, + remake, redistribute_on_remake); } void PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, amrex::MultiFab* Bt_fp, amrex::MultiFab* Bz_fp, - amrex::Real dt) + amrex::Real dt, ablastr::fields::MultiFabRegister& fields) { - const amrex::Real dr = m_geom->CellSize(0); - const amrex::Real cdt_over_dr = PhysConst::c*dt/dr; + amrex::Real const dr = m_geom->CellSize(0); + amrex::Real const cdt_over_dr = PhysConst::c*dt/dr; + + amrex::MultiFab* pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0); + amrex::MultiFab* pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0); #ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif for ( amrex::MFIter mfi(*Et_fp, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi ) { @@ -78,8 +90,8 @@ PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, amrex::Array4 const& Bt_arr = Bt_fp->array(mfi); amrex::Array4 const& Bz_arr = Bz_fp->array(mfi); - amrex::Array4 const& pml_Et_arr = pml_E_fp[1]->array(mfi); - amrex::Array4 const& pml_Bt_arr = pml_B_fp[1]->array(mfi); + amrex::Array4 const& pml_Et_arr = pml_Et->array(mfi); + amrex::Array4 const& pml_Bt_arr = pml_Bt->array(mfi); // Get the tileboxes from Efield and Bfield so that they include the guard cells // They are all the same, cell centered @@ -87,19 +99,19 @@ PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, // Box for the whole 
simulation domain amrex::Box const& domain = m_geom->Domain(); - const int nr_domain = domain.bigEnd(0); + int const nr_domain = domain.bigEnd(0); // Set tilebox to only include the upper radial cells - const int nr_damp = m_ncell; - const int nr_damp_min = (m_do_pml_in_domain)?(nr_domain - nr_damp):(nr_domain); + int const nr_damp = m_ncell; + int const nr_damp_min = (m_do_pml_in_domain)?(nr_domain - nr_damp):(nr_domain); tilebox.setSmall(0, nr_damp_min + 1); amrex::ParallelFor( tilebox, Et_fp->nComp(), [=] AMREX_GPU_DEVICE (int i, int j, int k, int icomp) { - const auto rr = static_cast(i - nr_damp_min); - const amrex::Real wr = rr/nr_damp; - const amrex::Real damp_factor = std::exp( -4._rt * cdt_over_dr * wr*wr ); + auto const rr = static_cast(i - nr_damp_min); + amrex::Real const wr = rr/nr_damp; + amrex::Real const damp_factor = std::exp( -4._rt * cdt_over_dr * wr*wr ); // Substract the theta PML fields from the regular theta fields Et_arr(i,j,k,icomp) -= pml_Et_arr(i,j,k,icomp); @@ -117,105 +129,88 @@ PML_RZ::ApplyDamping (amrex::MultiFab* Et_fp, amrex::MultiFab* Ez_fp, } } -std::array -PML_RZ::GetE_fp () -{ - return {pml_E_fp[0].get(), pml_E_fp[1].get()}; -} - -std::array -PML_RZ::GetB_fp () -{ - return {pml_B_fp[0].get(), pml_B_fp[1].get()}; -} - void -PML_RZ::FillBoundaryE () +PML_RZ::FillBoundaryE (ablastr::fields::MultiFabRegister& fields, PatchType patch_type, std::optional nodal_sync) { - FillBoundaryE(PatchType::fine); -} + amrex::MultiFab * pml_Er = fields.get(FieldType::pml_E_fp, Direction{0}, 0); + amrex::MultiFab * pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0); -void -PML_RZ::FillBoundaryE (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_E_fp[0] && pml_E_fp[0]->nGrowVect().max() > 0) + if (patch_type == PatchType::fine && pml_Er->nGrowVect().max() > 0) { - const amrex::Periodicity& period = m_geom->periodicity(); - const Vector mf{pml_E_fp[0].get(),pml_E_fp[1].get()}; + amrex::Periodicity 
const& period = m_geom->periodicity(); + const amrex::Vector mf = {pml_Er, pml_Et}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); } } void -PML_RZ::FillBoundaryB () +PML_RZ::FillBoundaryB (ablastr::fields::MultiFabRegister& fields, PatchType patch_type, std::optional nodal_sync) { - FillBoundaryB(PatchType::fine); -} - -void -PML_RZ::FillBoundaryB (PatchType patch_type, std::optional nodal_sync) -{ - if (patch_type == PatchType::fine && pml_B_fp[0]) + if (patch_type == PatchType::fine) { - const amrex::Periodicity& period = m_geom->periodicity(); - const Vector mf{pml_B_fp[0].get(),pml_B_fp[1].get()}; + amrex::MultiFab * pml_Br = fields.get(FieldType::pml_B_fp, Direction{0}, 0); + amrex::MultiFab * pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0); + + amrex::Periodicity const& period = m_geom->periodicity(); + const amrex::Vector mf = {pml_Br, pml_Bt}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period, nodal_sync); } } void -PML_RZ::CheckPoint (const std::string& dir) const +PML_RZ::CheckPoint (ablastr::fields::MultiFabRegister& fields, std::string const& dir) const { - if (pml_E_fp[0]) - { - VisMF::AsyncWrite(*pml_E_fp[0], dir+"_Er_fp"); - VisMF::AsyncWrite(*pml_E_fp[1], dir+"_Et_fp"); - VisMF::AsyncWrite(*pml_B_fp[0], dir+"_Br_fp"); - VisMF::AsyncWrite(*pml_B_fp[1], dir+"_Bt_fp"); + if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) { + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_E_fp, Direction{0}, 0), dir+"_Er_fp"); + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_E_fp, Direction{1}, 0), dir+"_Et_fp"); + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_B_fp, Direction{0}, 0), dir+"_Br_fp"); + amrex::VisMF::AsyncWrite(*fields.get(FieldType::pml_B_fp, Direction{1}, 0), dir+"_Bt_fp"); } } void -PML_RZ::Restart (const std::string& dir) +PML_RZ::Restart (ablastr::fields::MultiFabRegister& fields, std::string const& dir) { - if (pml_E_fp[0]) - { 
- VisMF::Read(*pml_E_fp[0], dir+"_Er_fp"); - VisMF::Read(*pml_E_fp[1], dir+"_Et_fp"); - VisMF::Read(*pml_B_fp[0], dir+"_Br_fp"); - VisMF::Read(*pml_B_fp[1], dir+"_Bt_fp"); + if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) { + amrex::VisMF::Read(*fields.get(FieldType::pml_E_fp, Direction{0}, 0), dir+"_Er_fp"); + amrex::VisMF::Read(*fields.get(FieldType::pml_E_fp, Direction{1}, 0), dir+"_Et_fp"); + amrex::VisMF::Read(*fields.get(FieldType::pml_B_fp, Direction{0}, 0), dir+"_Br_fp"); + amrex::VisMF::Read(*fields.get(FieldType::pml_B_fp, Direction{1}, 0), dir+"_Bt_fp"); } } #ifdef WARPX_USE_FFT void -PML_RZ::PushPSATD (const int lev) +PML_RZ::PushPSATD (int lev) { // Update the fields on the fine and coarse patch WarpX& warpx = WarpX::GetInstance(); SpectralSolverRZ& solver = warpx.get_spectral_solver_fp(lev); - PushPMLPSATDSinglePatchRZ(lev, solver, pml_E_fp, pml_B_fp); + PushPMLPSATDSinglePatchRZ(lev, solver, warpx.m_fields); } void PML_RZ::PushPMLPSATDSinglePatchRZ ( - const int lev, + int lev, SpectralSolverRZ& solver, - std::array,2>& pml_E, - std::array,2>& pml_B) + ablastr::fields::MultiFabRegister& fields) { - const SpectralFieldIndex& Idx = solver.m_spectral_index; + SpectralFieldIndex const& Idx = solver.m_spectral_index; + amrex::MultiFab * pml_Er = fields.get(FieldType::pml_E_fp, Direction{0}, 0); + amrex::MultiFab * pml_Et = fields.get(FieldType::pml_E_fp, Direction{1}, 0); + amrex::MultiFab * pml_Br = fields.get(FieldType::pml_B_fp, Direction{0}, 0); + amrex::MultiFab * pml_Bt = fields.get(FieldType::pml_B_fp, Direction{1}, 0); // Perform forward Fourier transforms - solver.ForwardTransform(lev, *pml_E[0], Idx.Er_pml, *pml_E[1], Idx.Et_pml); - solver.ForwardTransform(lev, *pml_B[0], Idx.Br_pml, *pml_B[1], Idx.Bt_pml); + solver.ForwardTransform(lev, *pml_Er, Idx.Er_pml, *pml_Et, Idx.Et_pml); + solver.ForwardTransform(lev, *pml_Br, Idx.Br_pml, *pml_Bt, Idx.Bt_pml); // Advance fields in spectral space - const bool doing_pml = true; + bool const 
doing_pml = true; solver.pushSpectralFields(doing_pml); // Perform backward Fourier transforms - solver.BackwardTransform(lev, *pml_E[0], Idx.Er_pml, *pml_E[1], Idx.Et_pml); - solver.BackwardTransform(lev, *pml_B[0], Idx.Br_pml, *pml_B[1], Idx.Bt_pml); + solver.BackwardTransform(lev, *pml_Er, Idx.Er_pml, *pml_Et, Idx.Et_pml); + solver.BackwardTransform(lev, *pml_Br, Idx.Br_pml, *pml_Bt, Idx.Bt_pml); } #endif diff --git a/Source/BoundaryConditions/WarpXEvolvePML.cpp b/Source/BoundaryConditions/WarpXEvolvePML.cpp index c6a89d80c07..cfde83dcf5b 100644 --- a/Source/BoundaryConditions/WarpXEvolvePML.cpp +++ b/Source/BoundaryConditions/WarpXEvolvePML.cpp @@ -12,10 +12,13 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "PML_current.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX_PML_kernels.H" +#include + #ifdef AMREX_USE_SENSEI_INSITU # include #endif @@ -63,9 +66,13 @@ WarpX::DampPML (const int lev, PatchType patch_type) WARPX_PROFILE("WarpX::DampPML()"); #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev]) { - pml_rz[lev]->ApplyDamping(Efield_fp[lev][1].get(), Efield_fp[lev][2].get(), - Bfield_fp[lev][1].get(), Bfield_fp[lev][2].get(), - dt[lev]); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + pml_rz[lev]->ApplyDamping( m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev), + m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), + m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), + dt[lev], m_fields); } #endif if (pml[lev]) { @@ -81,12 +88,11 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) if (pml[lev]->ok()) { - const auto& pml_E = (patch_type == PatchType::fine) ? pml[lev]->GetE_fp() : pml[lev]->GetE_cp(); - const auto& pml_B = (patch_type == PatchType::fine) ? pml[lev]->GetB_fp() : pml[lev]->GetB_cp(); - const auto& pml_F = (patch_type == PatchType::fine) ? 
pml[lev]->GetF_fp() : pml[lev]->GetF_cp(); - const auto& pml_G = (patch_type == PatchType::fine) ? pml[lev]->GetG_fp() : pml[lev]->GetG_cp(); - const auto& sigba = (patch_type == PatchType::fine) ? pml[lev]->GetMultiSigmaBox_fp() - : pml[lev]->GetMultiSigmaBox_cp(); + using warpx::fields::FieldType; + + const auto& pml_E = (patch_type == PatchType::fine) ? m_fields.get_alldirs(FieldType::pml_E_fp, lev) : m_fields.get_alldirs(FieldType::pml_E_cp, lev); + const auto& pml_B = (patch_type == PatchType::fine) ? m_fields.get_alldirs(FieldType::pml_B_fp, lev) : m_fields.get_alldirs(FieldType::pml_B_cp, lev); + const auto& sigba = (patch_type == PatchType::fine) ? pml[lev]->GetMultiSigmaBox_fp() : pml[lev]->GetMultiSigmaBox_cp(); const amrex::IntVect Ex_stag = pml_E[0]->ixType().toIntVect(); const amrex::IntVect Ey_stag = pml_E[1]->ixType().toIntVect(); @@ -97,12 +103,16 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) const amrex::IntVect Bz_stag = pml_B[2]->ixType().toIntVect(); amrex::IntVect F_stag; - if (pml_F) { + if (m_fields.has(FieldType::pml_F_fp, lev)) { + amrex::MultiFab* pml_F = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_F_fp, lev) : m_fields.get(FieldType::pml_F_cp, lev); F_stag = pml_F->ixType().toIntVect(); } amrex::IntVect G_stag; - if (pml_G) { + if (m_fields.has(FieldType::pml_G_fp, lev)) { + amrex::MultiFab* pml_G = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_G_fp, lev) : m_fields.get(FieldType::pml_G_cp, lev); G_stag = pml_G->ixType().toIntVect(); } @@ -193,7 +203,9 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) // For warpx_damp_pml_F(), mfi.nodaltilebox is used in the ParallelFor loop and here we // use mfi.tilebox. However, it does not matter because in damp_pml, where nodaltilebox // is used, only a simple multiplication is performed. - if (pml_F) { + if (m_fields.has(FieldType::pml_F_fp, lev)) { + amrex::MultiFab* pml_F = (patch_type == PatchType::fine) ? 
+ m_fields.get(FieldType::pml_F_fp, lev) : m_fields.get(FieldType::pml_F_cp, lev); const Box& tnd = mfi.nodaltilebox(); auto const& pml_F_fab = pml_F->array(mfi); amrex::ParallelFor(tnd, [=] AMREX_GPU_DEVICE (int i, int j, int k) @@ -204,7 +216,10 @@ WarpX::DampPML_Cartesian (const int lev, PatchType patch_type) } // Damp G when WarpX::do_divb_cleaning = true - if (pml_G) { + if (m_fields.has(FieldType::pml_G_fp, lev)) { + amrex::MultiFab* pml_G = (patch_type == PatchType::fine) ? + m_fields.get(FieldType::pml_G_fp, lev) : m_fields.get(FieldType::pml_G_cp, lev); + const Box& tb = mfi.tilebox(G_stag); auto const& pml_G_fab = pml_G->array(mfi); amrex::ParallelFor(tb, [=] AMREX_GPU_DEVICE (int i, int j, int k) @@ -243,8 +258,9 @@ WarpX::DampJPML (int lev, PatchType patch_type) if (pml[lev]->ok()) { + using warpx::fields::FieldType; - const auto& pml_j = (patch_type == PatchType::fine) ? pml[lev]->Getj_fp() : pml[lev]->Getj_cp(); + const auto& pml_j = (patch_type == PatchType::fine) ? m_fields.get_alldirs(FieldType::pml_j_fp, lev) : m_fields.get_alldirs(FieldType::pml_j_cp, lev); const auto& sigba = (patch_type == PatchType::fine) ? 
pml[lev]->GetMultiSigmaBox_fp() : pml[lev]->GetMultiSigmaBox_cp(); @@ -273,7 +289,7 @@ WarpX::DampJPML (int lev, PatchType patch_type) // Skip the field update if this gridpoint is inside the embedded boundary amrex::Array4 eb_lxfab, eb_lyfab, eb_lzfab; if (EB::enabled()) { - const auto &pml_edge_lenghts = pml[lev]->Get_edge_lengths(); + const auto &pml_edge_lenghts = m_fields.get_alldirs(FieldType::pml_edge_lengths, lev); eb_lxfab = pml_edge_lenghts[0]->array(mfi); eb_lyfab = pml_edge_lenghts[1]->array(mfi); @@ -338,15 +354,12 @@ WarpX::DampJPML (int lev, PatchType patch_type) void WarpX::CopyJPML () { + using ablastr::fields::Direction; + for (int lev = 0; lev <= finest_level; ++lev) { if (pml[lev] && pml[lev]->ok()){ - pml[lev]->CopyJtoPMLs({ current_fp[lev][0].get(), - current_fp[lev][1].get(), - current_fp[lev][2].get() }, - { current_cp[lev][0].get(), - current_cp[lev][1].get(), - current_cp[lev][2].get() }); + pml[lev]->CopyJtoPMLs(m_fields, lev); } } } diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index 6d2525bc724..dc41e95f40f 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -18,7 +18,7 @@ using namespace amrex; using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -50,12 +50,14 @@ namespace void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) { + using ablastr::fields::Direction; + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { PEC::ApplyPECtoEfield( - {getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2)}, + {m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev)}, field_boundary_lo, 
field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); @@ -63,7 +65,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) // apply pec on split E-fields in PML region const bool split_pml_field = true; PEC::ApplyPECtoEfield( - pml[lev]->GetE_fp(), + m_fields.get_alldirs(FieldType::pml_E_fp, lev), field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio, @@ -71,9 +73,9 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) } } else { PEC::ApplyPECtoEfield( - {getFieldPointer(FieldType::Efield_cp, lev, 0), - getFieldPointer(FieldType::Efield_cp, lev, 1), - getFieldPointer(FieldType::Efield_cp, lev, 2)}, + {m_fields.get(FieldType::Efield_cp,Direction{0},lev), + m_fields.get(FieldType::Efield_cp,Direction{1},lev), + m_fields.get(FieldType::Efield_cp,Direction{2},lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); @@ -81,7 +83,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) // apply pec on split E-fields in PML region const bool split_pml_field = true; PEC::ApplyPECtoEfield( - pml[lev]->GetE_cp(), + m_fields.get_alldirs(FieldType::pml_E_cp, lev), field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio, @@ -92,33 +94,35 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) #ifdef WARPX_DIM_RZ if (patch_type == PatchType::fine) { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Efield_fp, lev, 0), - getFieldPointer(FieldType::Efield_fp, lev, 1), - getFieldPointer(FieldType::Efield_fp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev), lev); } else { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Efield_cp, lev, 0), - getFieldPointer(FieldType::Efield_cp, lev, 1), 
- getFieldPointer(FieldType::Efield_cp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + m_fields.get(FieldType::Efield_cp, Direction{2}, lev), lev); } #endif } void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_dt_type) { + using ablastr::fields::Direction; + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { PEC::ApplyPECtoBfield( { - getFieldPointer(FieldType::Bfield_fp, lev, 0), - getFieldPointer(FieldType::Bfield_fp, lev, 1), - getFieldPointer(FieldType::Bfield_fp, lev, 2) }, + m_fields.get(FieldType::Bfield_fp,Direction{0},lev), + m_fields.get(FieldType::Bfield_fp,Direction{1},lev), + m_fields.get(FieldType::Bfield_fp,Direction{2},lev) }, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); } else { PEC::ApplyPECtoBfield( { - getFieldPointer(FieldType::Bfield_cp, lev, 0), - getFieldPointer(FieldType::Bfield_cp, lev, 1), - getFieldPointer(FieldType::Bfield_cp, lev, 2)}, + m_fields.get(FieldType::Bfield_cp,Direction{0},lev), + m_fields.get(FieldType::Bfield_cp,Direction{1},lev), + m_fields.get(FieldType::Bfield_cp,Direction{2},lev) }, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); @@ -131,6 +135,8 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d if (lev == 0) { if (a_dt_type == DtType::FirstHalf) { if(::isAnyBoundary(field_boundary_lo, field_boundary_hi)){ + auto Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + auto Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, max_level); m_fdtd_solver_fp[0]->ApplySilverMuellerBoundary( Efield_fp[lev], Bfield_fp[lev], Geom(lev).Domain(), dt[lev], @@ -141,13 +147,13 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d #ifdef WARPX_DIM_RZ 
if (patch_type == PatchType::fine) { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Bfield_fp, lev, 0), - getFieldPointer(FieldType::Bfield_fp, lev, 1), - getFieldPointer(FieldType::Bfield_fp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Bfield_fp,Direction{0},lev), + m_fields.get(FieldType::Bfield_fp,Direction{1},lev), + m_fields.get(FieldType::Bfield_fp,Direction{2},lev), lev); } else { - ApplyFieldBoundaryOnAxis(getFieldPointer(FieldType::Bfield_cp, lev, 0), - getFieldPointer(FieldType::Bfield_cp, lev, 1), - getFieldPointer(FieldType::Bfield_cp, lev, 2), lev); + ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Bfield_cp,Direction{0},lev), + m_fields.get(FieldType::Bfield_cp,Direction{1},lev), + m_fields.get(FieldType::Bfield_cp,Direction{2},lev), lev); } #endif } @@ -268,8 +274,9 @@ void WarpX::ApplyElectronPressureBoundary (const int lev, PatchType patch_type) { if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { + ablastr::fields::ScalarField electron_pressure_fp = m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); PEC::ApplyPECtoElectronPressure( - m_hybrid_pic_model->get_pointer_electron_pressure_fp(lev), + electron_pressure_fp, field_boundary_lo, field_boundary_hi, Geom(lev), lev, patch_type, ref_ratio); } else { diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index d5dd67226b7..d11db98276b 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -241,7 +241,7 @@ private: * will be used by all snapshots to obtain lab-frame data at the respective * z slice location. */ - amrex::Vector > m_cell_centered_data; + std::string const m_cell_centered_data_name; /** Vector of pointers to compute cell-centered data, per level, per component * using the coarsening-ratio provided by the user. 
*/ @@ -346,7 +346,7 @@ private: * \param[in] i_buffer snapshot index */ void SetSnapshotFullStatus (int i_buffer); - /** Vector of field-data stored in the cell-centered multifab, m_cell_centered_data. + /** Vector of field-data stored in the cell-centered MultiFab. * All the fields are stored regardless of the specific fields to plot selected * by the user. */ diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 6fdb605f8dc..631de298861 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -14,13 +14,14 @@ #include "Diagnostics/Diagnostics.H" #include "Diagnostics/FlushFormats/FlushFormat.H" #include "ComputeDiagFunctors/BackTransformParticleFunctor.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/Algorithms/IsIn.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include #include @@ -47,7 +48,7 @@ #include using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -55,7 +56,8 @@ namespace } BTDiagnostics::BTDiagnostics (int i, const std::string& name) - : Diagnostics{i, name} + : Diagnostics{i, name}, + m_cell_centered_data_name("BTD_cell_centered_data_" + name) { ReadParameters(); } @@ -82,7 +84,6 @@ void BTDiagnostics::DerivedInitData () m_old_z_boost.resize(m_num_buffers); m_buffer_counter.resize(m_num_buffers); m_snapshot_ncells_lab.resize(m_num_buffers); - m_cell_centered_data.resize(nmax_lev); m_cell_center_functors.resize(nmax_lev); m_max_buffer_multifabs.resize(m_num_buffers); m_buffer_flush_counter.resize(m_num_buffers); @@ -518,13 +519,18 @@ BTDiagnostics::DefineCellCenteredMultiFab(int lev) #else const int ncomps = static_cast(m_cellcenter_varnames.size()); #endif - WarpX::AllocInitMultiFab(m_cell_centered_data[lev], ba, dmap, ncomps, amrex::IntVect(ngrow), lev, "cellcentered_BTD", 0._rt); + bool const remake 
= false; + bool const redistribute_on_remake = false; + warpx.m_fields.alloc_init(m_cell_centered_data_name, lev, ba, dmap, ncomps, amrex::IntVect(ngrow), 0.0_rt, + remake, redistribute_on_remake); } void BTDiagnostics::InitializeFieldFunctors (int lev) { + using ablastr::fields::Direction; + // Initialize fields functors only if do_back_transformed_fields is selected if (!m_do_back_transformed_fields) { return; } @@ -537,12 +543,14 @@ BTDiagnostics::InitializeFieldFunctors (int lev) #else auto & warpx = WarpX::GetInstance(); + auto & fields = warpx.m_fields; + // Clear any pre-existing vector to release stored data // This ensures that when domain is load-balanced, the functors point // to the correct field-data pointers m_all_field_functors[lev].clear(); // For back-transformed data, all the components are cell-centered and stored - // in a single multifab, m_cell_centered_data. + // in a single multifab. // Therefore, size of functors at all levels is 1. const int num_BT_functors = 1; m_all_field_functors[lev].resize(num_BT_functors); @@ -551,11 +559,11 @@ BTDiagnostics::InitializeFieldFunctors (int lev) // Create an object of class BackTransformFunctor for (int i = 0; i < num_BT_functors; ++i) { - // coarsening ratio is not provided since the source MultiFab, m_cell_centered_data + // coarsening ratio is not provided since the source MultiFab // is coarsened based on the user-defined m_crse_ratio const int nvars = static_cast(m_varnames.size()); m_all_field_functors[lev][i] = std::make_unique( - m_cell_centered_data[lev].get(), lev, + fields.get(m_cell_centered_data_name, lev), lev, nvars, m_num_buffers, m_varnames, m_varnames_fields); } @@ -567,23 +575,23 @@ BTDiagnostics::InitializeFieldFunctors (int lev) m_cell_center_functors.at(lev).size()); for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, 
m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Ey" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Ez" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Bx" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "By" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "Bz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jx" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 0), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp,Direction{0}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jy" ){ - m_cell_center_functors[lev][comp] = 
std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 1), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp,Direction{1}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "jz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 2), lev, m_crse_ratio); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp,Direction{2}, lev), lev, m_crse_ratio); } else if ( m_cellcenter_varnames[comp] == "rho" ){ m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio); } @@ -598,7 +606,9 @@ BTDiagnostics::UpdateVarnamesForRZopenPMD () { #ifdef WARPX_DIM_RZ auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0,0)->nComp(); + auto & fields = warpx.m_fields; + using ablastr::fields::Direction; + const int ncomp_multimodefab = fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); const int ncomp = ncomp_multimodefab; @@ -656,22 +666,25 @@ void BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) { #ifdef WARPX_DIM_RZ + using ablastr::fields::Direction; + auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0,0)->nComp(); + auto & fields = warpx.m_fields; + const int ncomp_multimodefab = fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); const int ncomp = ncomp_multimodefab; // Clear any pre-existing vector to release stored data // This ensures that when domain is load-balanced, the functors point // to the correct field-data pointers m_all_field_functors[lev].clear(); // For back-transformed data, all the components are cell-centered and stored - // in a single multifab, m_cell_centered_data. + // in a single MultiFab. 
// Therefore, size of functors at all levels is 1 const int num_BT_functors = 1; m_all_field_functors[lev].resize(num_BT_functors); for (int i = 0; i < num_BT_functors; ++i) { const int nvars = static_cast(m_varnames.size()); m_all_field_functors[lev][i] = std::make_unique( - m_cell_centered_data[lev].get(), lev, + fields.get(m_cell_centered_data_name, lev), lev, nvars, m_num_buffers, m_varnames, m_varnames_fields); } @@ -683,23 +696,23 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) const auto m_cell_center_functors_at_lev_size = static_cast(m_cell_center_functors.at(lev).size()); for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Et" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Ez" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Br" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Bt" ){ - m_cell_center_functors[lev][comp] = 
std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "Bz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jr" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 0), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jt" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 1), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "jz" ){ - m_cell_center_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::current_fp, lev, 2), lev, m_crse_ratio, false, ncomp); + m_cell_center_functors[lev][comp] = std::make_unique(fields.get(FieldType::current_fp, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); } else if ( m_cellcenter_varnames_fields[comp] == "rho" ){ m_cell_center_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, false, -1, false, ncomp); } @@ -789,6 +802,8 @@ BTDiagnostics::PrepareFieldDataForOutput () if (!m_do_back_transformed_fields) { return; } auto & warpx = WarpX::GetInstance(); + auto & fields = warpx.m_fields; + // In this function, we will get 
cell-centered data for every level, lev, // using the cell-center functors and their respective operators() // Call m_cell_center_functors->operator @@ -798,21 +813,23 @@ BTDiagnostics::PrepareFieldDataForOutput () for (int icomp = 0; icompoperator()(*m_cell_centered_data[lev], icomp_dst); + // stores it in cell-centered MultiFab. + m_cell_center_functors[lev][icomp]->operator()(*fields.get(m_cell_centered_data_name, lev), icomp_dst); icomp_dst += m_cell_center_functors[lev][icomp]->nComp(); } // Check that the proper number of user-requested components are cell-centered AMREX_ALWAYS_ASSERT( icomp_dst == m_cellcenter_varnames.size() ); // fill boundary call is required to average_down (flatten) data to // the coarsest level. - ablastr::utils::communication::FillBoundary(*m_cell_centered_data[lev], WarpX::do_single_precision_comms, + ablastr::utils::communication::FillBoundary(*fields.get(m_cell_centered_data_name, lev), + WarpX::do_single_precision_comms, warpx.Geom(lev).periodicity()); } // Flattening out MF over levels for (int lev = warpx.finestLevel(); lev > 0; --lev) { - ablastr::coarsen::sample::Coarsen(*m_cell_centered_data[lev - 1], *m_cell_centered_data[lev], 0, 0, + ablastr::coarsen::sample::Coarsen(*fields.get(m_cell_centered_data_name, lev - 1), + *fields.get(m_cell_centered_data_name, lev), 0, 0, static_cast(m_cellcenter_varnames.size()), 0, WarpX::RefRatio(lev-1) ); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H index 3d04a56742b..1d36b434ae2 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H @@ -3,6 +3,8 @@ #include "ComputeDiagFunctor.H" +#include + #include #include @@ -22,8 +24,13 @@ public: * (summing over modes) * \param[in] ncomp Number of component of mf_src to cell-center in dst multifab. 
*/ - DivBFunctor(std::array arr_mf_src, int lev, amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian=true, int ncomp=1); + DivBFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + int lev, + amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian=true, + int ncomp=1 + ); /** \brief Compute DivB directly into mf_dst. * @@ -34,7 +41,7 @@ public: void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer*/) const override; private: /** Vector of pointer to source multifab Bx, By, Bz */ - std::array m_arr_mf_src; + ablastr::fields::VectorField m_arr_mf_src; int const m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp index b5782e76ae6..224b74ba372 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.cpp @@ -7,8 +7,13 @@ #include #include -DivBFunctor::DivBFunctor(const std::array arr_mf_src, const int lev, const amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian, const int ncomp) +DivBFunctor::DivBFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + const int lev, + const amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian, + const int ncomp +) : ComputeDiagFunctor(ncomp, crse_ratio), m_arr_mf_src(arr_mf_src), m_lev(lev), m_convertRZmodes2cartesian(convertRZmodes2cartesian) {} diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H index 312ccaa5cd6..e7691187f3a 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H @@ -3,6 +3,8 @@ #include "ComputeDiagFunctor.H" +#include + #include #include @@ -21,8 +23,13 @@ public: * \param[in] convertRZmodes2cartesian if true, all RZ 
modes are averaged into one component * \param[in] ncomp Number of component of mf_src to cell-center in dst multifab. */ - DivEFunctor(std::array arr_mf_src, int lev, amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian=true, int ncomp=1); + DivEFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + int lev, + amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian=true, + int ncomp=1 + ); /** \brief Compute DivE directly into mf_dst. * @@ -33,7 +40,7 @@ public: void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: /** Vector of pointer to source multifab Bx, By, Bz */ - std::array m_arr_mf_src; + ablastr::fields::VectorField m_arr_mf_src; int const m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp index 62801cd431a..e2c4d98c708 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.cpp @@ -13,9 +13,13 @@ #include #include -DivEFunctor::DivEFunctor(const std::array arr_mf_src, const int lev, - const amrex::IntVect crse_ratio, - bool convertRZmodes2cartesian, const int ncomp) +DivEFunctor::DivEFunctor ( + ablastr::fields::VectorField const & arr_mf_src, + const int lev, + const amrex::IntVect crse_ratio, + bool convertRZmodes2cartesian, + const int ncomp +) : ComputeDiagFunctor(ncomp, crse_ratio), m_arr_mf_src(arr_mf_src), m_lev(lev), m_convertRZmodes2cartesian(convertRZmodes2cartesian) { diff --git a/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp index ebaec47b2f1..df25bf7ff03 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/JFunctor.cpp @@ -6,16 +6,18 @@ #include "JFunctor.H" 
-#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "WarpX.H" +#include + #include #include #include #include -using namespace warpx::fields; +using warpx::fields::FieldType; JFunctor::JFunctor (const int dir, int lev, amrex::IntVect crse_ratio, @@ -29,30 +31,19 @@ JFunctor::JFunctor (const int dir, int lev, void JFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buffer*/) const { + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); /** pointer to source multifab (can be multi-component) */ - amrex::MultiFab* m_mf_src = warpx.getFieldPointer(FieldType::current_fp, m_lev, m_dir); + amrex::MultiFab* m_mf_src = warpx.m_fields.get(FieldType::current_fp,Direction{m_dir},m_lev); // Deposit current if no solver or the electrostatic solver is being used if (m_deposit_current) { // allocate temporary multifab to deposit current density into - amrex::Vector, 3 > > current_fp_temp; - current_fp_temp.resize(1); - - const auto& current_fp_x = warpx.getField(FieldType::current_fp, m_lev,0); - current_fp_temp[0][0] = std::make_unique( - current_fp_x, amrex::make_alias, 0, current_fp_x.nComp() - ); - - const auto& current_fp_y = warpx.getField(FieldType::current_fp, m_lev,1); - current_fp_temp[0][1] = std::make_unique( - current_fp_y, amrex::make_alias, 0, current_fp_y.nComp() - ); - const auto& current_fp_z = warpx.getField(FieldType::current_fp, m_lev,2); - current_fp_temp[0][2] = std::make_unique( - current_fp_z, amrex::make_alias, 0, current_fp_z.nComp() - ); + ablastr::fields::MultiLevelVectorField current_fp_temp { + warpx.m_fields.get_alldirs(FieldType::current_fp, m_lev) + }; auto& mypc = warpx.GetPartContainer(); mypc.DepositCurrent(current_fp_temp, warpx.getdt(m_lev), 0.0); diff --git a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp index aac5869da65..e06f90b5f0c 100644 --- 
a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp @@ -1,12 +1,15 @@ -/* This file is part of Warpx. +/* Copyright 2023-2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Avigdor Veksler (TAE Technologies) * - * Authors: Avigdor Veksler * License: BSD-3-Clause-LBNL -*/ + */ #include "JdispFunctor.H" #include "WarpX.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Particles/MultiParticleContainer.H" @@ -16,7 +19,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; JdispFunctor::JdispFunctor (int dir, int lev, amrex::IntVect crse_ratio, bool convertRZmodes2cartesian, int ncomp) @@ -27,18 +30,20 @@ JdispFunctor::JdispFunctor (int dir, int lev, void JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buffer*/) const { + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); auto* hybrid_pic_model = warpx.get_pointer_HybridPICModel(); /** pointer to total simulation current (J) multifab */ - amrex::MultiFab* mf_j = warpx.getFieldPointer(FieldType::current_fp, m_lev, m_dir); + amrex::MultiFab* mf_j = warpx.m_fields.get(FieldType::current_fp, Direction{m_dir}, m_lev); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(hybrid_pic_model, "Displacement current diagnostic is only implemented for the HybridPICModel."); AMREX_ASSUME(hybrid_pic_model != nullptr); /** pointer to current calculated from Ampere's Law (Jamp) multifab */ - amrex::MultiFab* mf_curlB = hybrid_pic_model->get_pointer_current_fp_ampere(m_lev, m_dir);; + amrex::MultiFab* mf_curlB = warpx.m_fields.get(FieldType::hybrid_current_fp_plasma, Direction{m_dir}, m_lev); //if (!hybrid_pic_model) { // To finish this implementation, we need to implement a method to @@ -61,51 +66,6 @@ JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int 
/*i_buff -1, *mf_j, 0, 0, 1, Jdisp.nGrowVect() ); - if (hybrid_pic_model) { - // Subtract the interpolated j_external value from j_displacement. - /** pointer to external currents (Jext) multifab */ - amrex::MultiFab* mf_j_external = hybrid_pic_model->get_pointer_current_fp_external(m_lev, m_dir); - - // Index type required for interpolating Jext from their respective - // staggering (nodal) to the Jx_displacement, Jy_displacement, Jz_displacement - // locations. The staggering of J_displacement is the same as the - // staggering for J, so we use J_stag as the interpolation map. - // For interp to work below, the indices of the undefined dimensions - // must match. We set them as (1,1,1). - amrex::GpuArray Jext_IndexType = {1, 1, 1}; - amrex::GpuArray J_IndexType = {1, 1, 1}; - amrex::IntVect Jext_stag = mf_j_external->ixType().toIntVect(); - amrex::IntVect J_stag = mf_j->ixType().toIntVect(); - - // Index types for the dimensions simulated are overwritten. - for ( int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - Jext_IndexType[idim] = Jext_stag[idim]; - J_IndexType[idim] = J_stag[idim]; - } - - // Parameters for `interp` that maps from Jext to J. - // The "coarsening is just 1 i.e. no coarsening" - amrex::GpuArray const& coarsen = {1, 1, 1}; - - // Loop through the grids, and over the tiles within each grid to - // subtract the interpolated Jext from J_displacement. 
-#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(Jdisp, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - - Array4 const& Jdisp_arr = Jdisp.array(mfi); - Array4 const& Jext = mf_j_external->const_array(mfi); - - // Loop over cells and update the Jdisp MultiFab - amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Interpolate Jext to the staggering of J - auto const jext_interp = ablastr::coarsen::sample::Interp(Jext, Jext_IndexType, J_IndexType, coarsen, i, j, k, 0); - Jdisp_arr(i, j, k, 0) -= jext_interp; - }); - } - } - InterpolateMFForDiag(mf_dst, Jdisp, dcomp, warpx.DistributionMap(m_lev), m_convertRZmodes2cartesian); } diff --git a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp index 32e11903778..e7f572dd681 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.cpp @@ -47,7 +47,7 @@ RhoFunctor::operator() ( amrex::MultiFab& mf_dst, const int dcomp, const int /*i rho = mypc.GetChargeDensity(m_lev, true); if (warpx.DoFluidSpecies()) { auto& myfl = warpx.GetFluidContainer(); - myfl.DepositCharge(m_lev, *rho); + myfl.DepositCharge(warpx.m_fields, *rho, m_lev); } } // Dump rho per species diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index dc28aeda095..fd079479285 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -229,8 +229,9 @@ Diagnostics::BaseReadParameters () if (WarpX::boost_direction[ dim_map[WarpX::moving_window_dir] ] == 1) { // Convert user-defined lo and hi for diagnostics to account for boosted-frame // simulations with moving window - const amrex::Real convert_factor = 1._rt/(WarpX::gamma_boost * (1._rt - WarpX::beta_boost) ); - // Assuming that the window travels with speed c + const amrex::Real beta_window = WarpX::moving_window_v / PhysConst::c; + const 
amrex::Real convert_factor = 1._rt/( + WarpX::gamma_boost * (1._rt - WarpX::beta_boost * beta_window) ); m_lo[WarpX::moving_window_dir] *= convert_factor; m_hi[WarpX::moving_window_dir] *= convert_factor; } diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index 1a3318ae0d8..4d721dd6abe 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -5,12 +5,14 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "Diagnostics/ParticleDiag/ParticleDiag.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/WarpXParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include + #include #include #include @@ -20,7 +22,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -43,6 +45,8 @@ FlushFormatCheckpoint::WriteToFile ( const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/) const { + using ablastr::fields::Direction; + WARPX_PROFILE("FlushFormatCheckpoint::WriteToFile()"); auto & warpx = WarpX::GetInstance(); @@ -64,85 +68,85 @@ FlushFormatCheckpoint::WriteToFile ( for (int lev = 0; lev < nlev; ++lev) { - VisMF::Write(warpx.getField(FieldType::Efield_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_fp")); - 
VisMF::Write(warpx.getField(FieldType::Bfield_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_fp")); if (WarpX::fft_do_time_averaging) { - VisMF::Write(warpx.getField(FieldType::Efield_avg_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_avg_fp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_fp, 
Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_avg_fp")); } if (warpx.getis_synchronized()) { // Need to save j if synchronized because after restart we need j to evolve E by dt/2. - VisMF::Write(warpx.getField(FieldType::current_fp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jx_fp")); - VisMF::Write(warpx.getField(FieldType::current_fp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jy_fp")); - VisMF::Write(warpx.getField(FieldType::current_fp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_fp")); } if (lev > 0) { - VisMF::Write(warpx.getField(FieldType::Efield_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, 
default_level_prefix, "By_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_cp")); if (WarpX::fft_do_time_averaging) { - VisMF::Write(warpx.getField(FieldType::Efield_avg_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ex_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ey_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Efield_avg_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Efield_avg_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Ez_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bx_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "By_avg_cp")); - VisMF::Write(warpx.getField(FieldType::Bfield_avg_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::Bfield_avg_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "Bz_avg_cp")); } if (warpx.getis_synchronized()) { // Need to save j if synchronized because after restart we need j to evolve E by dt/2. 
- VisMF::Write(warpx.getField(FieldType::current_cp, lev, 0), + VisMF::Write(*warpx.m_fields.get(FieldType::current_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jx_cp")); - VisMF::Write(warpx.getField(FieldType::current_cp, lev, 1), + VisMF::Write(*warpx.m_fields.get(FieldType::current_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jy_cp")); - VisMF::Write(warpx.getField(FieldType::current_cp, lev, 2), + VisMF::Write(*warpx.m_fields.get(FieldType::current_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "jz_cp")); } } @@ -150,11 +154,13 @@ FlushFormatCheckpoint::WriteToFile ( if (warpx.DoPML()) { if (warpx.GetPML(lev)) { warpx.GetPML(lev)->CheckPoint( + warpx.m_fields, amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "pml")); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (warpx.GetPML_RZ(lev)) { warpx.GetPML_RZ(lev)->CheckPoint( + warpx.m_fields, amrex::MultiFabFileFullPrefix(lev, checkpointname, default_level_prefix, "pml_rz")); } #endif diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index f6c73d9fa7e..0f05496e4c0 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -1,6 +1,6 @@ #include "FlushFormatPlotfile.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "Particles/Filter/FilterFunctors.H" @@ -13,6 +13,8 @@ #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include + #include #include #include @@ -48,7 +50,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -554,6 +556,8 @@ FlushFormatPlotfile::WriteAllRawFields( const bool 
plot_raw_fields, const int nlevels, const std::string& plotfilename, const bool plot_raw_fields_guards) const { + using ablastr::fields::Direction; + if (!plot_raw_fields) { return; } auto & warpx = WarpX::GetInstance(); for (int lev = 0; lev < nlevels; ++lev) @@ -564,84 +568,103 @@ FlushFormatPlotfile::WriteAllRawFields( // Auxiliary patch - WriteRawMF( warpx.getField(FieldType::Efield_aux, lev, 0), dm, raw_pltname, default_level_prefix, "Ex_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_aux, lev, 1), dm, raw_pltname, default_level_prefix, "Ey_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_aux, lev, 2), dm, raw_pltname, default_level_prefix, "Ez_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_aux, lev, 0), dm, raw_pltname, default_level_prefix, "Bx_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_aux, lev, 1), dm, raw_pltname, default_level_prefix, "By_aux", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_aux, lev, 2), dm, raw_pltname, default_level_prefix, "Bz_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), dm, raw_pltname, default_level_prefix, "Ex_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), dm, raw_pltname, default_level_prefix, "Ey_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), dm, raw_pltname, default_level_prefix, "Ez_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), dm, raw_pltname, default_level_prefix, "Bx_aux", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), dm, raw_pltname, default_level_prefix, "By_aux", lev, plot_raw_fields_guards); + WriteRawMF( 
*warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), dm, raw_pltname, default_level_prefix, "Bz_aux", lev, plot_raw_fields_guards); // fine patch - WriteRawMF( warpx.getField(FieldType::Efield_fp, lev, 0), dm, raw_pltname, default_level_prefix, "Ex_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_fp, lev, 1), dm, raw_pltname, default_level_prefix, "Ey_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Efield_fp, lev, 2), dm, raw_pltname, default_level_prefix, "Ez_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::current_fp, lev, 0), dm, raw_pltname, default_level_prefix, "jx_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::current_fp, lev, 1), dm, raw_pltname, default_level_prefix, "jy_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::current_fp, lev, 2), dm, raw_pltname, default_level_prefix, "jz_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_fp, lev, 0), dm, raw_pltname, default_level_prefix, "Bx_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_fp, lev, 1), dm, raw_pltname, default_level_prefix, "By_fp", lev, plot_raw_fields_guards); - WriteRawMF( warpx.getField(FieldType::Bfield_fp, lev, 2), dm, raw_pltname, default_level_prefix, "Bz_fp", lev, plot_raw_fields_guards); - if (warpx.isFieldInitialized(FieldType::F_fp, lev)) + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), dm, raw_pltname, + default_level_prefix, "Ex_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), dm, raw_pltname, + default_level_prefix, "Ey_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev), dm, raw_pltname, + default_level_prefix, "Ez_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::current_fp,Direction{0}, 
lev), dm, raw_pltname, + default_level_prefix, "jx_fp", lev,plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::current_fp,Direction{1}, lev), dm, raw_pltname, + default_level_prefix, "jy_fp", lev,plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::current_fp,Direction{2}, lev), dm, raw_pltname, + default_level_prefix, "jz_fp", lev,plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), dm, raw_pltname, + default_level_prefix, "Bx_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), dm, raw_pltname, + default_level_prefix, "By_fp", lev, plot_raw_fields_guards ); + WriteRawMF( *warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), dm, raw_pltname, + default_level_prefix, "Bz_fp", lev, plot_raw_fields_guards ); + if (warpx.m_fields.has(FieldType::F_fp, lev)) { - WriteRawMF(warpx.getField(FieldType::F_fp, lev), dm, raw_pltname, default_level_prefix, "F_fp", lev, plot_raw_fields_guards); + WriteRawMF( *warpx.m_fields.get(FieldType::F_fp, lev), dm, raw_pltname, + default_level_prefix, "F_fp", lev, plot_raw_fields_guards ); } - if (warpx.isFieldInitialized(FieldType::rho_fp, lev)) + if (warpx.m_fields.has(FieldType::rho_fp, lev)) { // rho_fp will have either ncomps or 2*ncomps (2 being the old and new). When 2, return the new so // there is time synchronization. 
- const int nstart = warpx.getField(FieldType::rho_fp, lev).nComp() - WarpX::ncomps; - const MultiFab rho_new(warpx.getField(FieldType::rho_fp, lev), amrex::make_alias, nstart, WarpX::ncomps); + const int nstart = warpx.m_fields.get(FieldType::rho_fp, lev)->nComp() - WarpX::ncomps; + const MultiFab rho_new(*warpx.m_fields.get(FieldType::rho_fp, lev), amrex::make_alias, nstart, WarpX::ncomps); WriteRawMF(rho_new, dm, raw_pltname, default_level_prefix, "rho_fp", lev, plot_raw_fields_guards); } - if (warpx.isFieldInitialized(FieldType::phi_fp, lev)) { - WriteRawMF(warpx.getField(FieldType::phi_fp, lev), dm, raw_pltname, default_level_prefix, "phi_fp", lev, plot_raw_fields_guards); + if (warpx.m_fields.has(FieldType::phi_fp, lev)) { + WriteRawMF( *warpx.m_fields.get(FieldType::phi_fp, lev), dm, raw_pltname, + default_level_prefix, "phi_fp", lev, plot_raw_fields_guards ); } // Averaged fields on fine patch if (WarpX::fft_do_time_averaging) { - WriteRawMF(warpx.getField(FieldType::Efield_avg_fp, lev, 0) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{0}, lev) , dm, raw_pltname, default_level_prefix, "Ex_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Efield_avg_fp, lev, 1) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{1}, lev) , dm, raw_pltname, default_level_prefix, "Ey_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Efield_avg_fp, lev, 2) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Efield_avg_fp, Direction{2}, lev) , dm, raw_pltname, default_level_prefix, "Ez_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Bfield_avg_fp, lev, 0) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, lev) , dm, raw_pltname, default_level_prefix, "Bx_avg_fp", lev, 
plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Bfield_avg_fp, lev, 1) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, lev) , dm, raw_pltname, default_level_prefix, "By_avg_fp", lev, plot_raw_fields_guards); - WriteRawMF(warpx.getField(FieldType::Bfield_avg_fp, lev, 2) , dm, raw_pltname, default_level_prefix, + WriteRawMF(*warpx.m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, lev) , dm, raw_pltname, default_level_prefix, "Bz_avg_fp", lev, plot_raw_fields_guards); } // Coarse path if (lev > 0) { WriteCoarseVector( "E", - warpx.getFieldPointer(FieldType::Efield_cp, lev, 0), warpx.getFieldPointer(FieldType::Efield_cp, lev, 1), warpx.getFieldPointer(FieldType::Efield_cp, lev, 2), - warpx.getFieldPointer(FieldType::Efield_fp, lev, 0), warpx.getFieldPointer(FieldType::Efield_fp, lev, 1), warpx.getFieldPointer(FieldType::Efield_fp, lev, 2), + warpx.m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_cp, Direction{2}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards); WriteCoarseVector( "B", - warpx.getFieldPointer(FieldType::Bfield_cp, lev, 0), warpx.getFieldPointer(FieldType::Bfield_cp, lev, 1), warpx.getFieldPointer(FieldType::Bfield_cp, lev, 2), - warpx.getFieldPointer(FieldType::Bfield_fp, lev, 0), warpx.getFieldPointer(FieldType::Bfield_fp, lev, 1), warpx.getFieldPointer(FieldType::Bfield_fp, lev, 2), + warpx.m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), + warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), + 
warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards); WriteCoarseVector( "j", - warpx.getFieldPointer(FieldType::current_cp, lev, 0), warpx.getFieldPointer(FieldType::current_cp, lev, 1), warpx.getFieldPointer(FieldType::current_cp, lev, 2), - warpx.getFieldPointer(FieldType::current_fp, lev, 0), warpx.getFieldPointer(FieldType::current_fp, lev, 1), warpx.getFieldPointer(FieldType::current_fp, lev, 2), + warpx.m_fields.get(FieldType::current_cp, Direction{0}, lev), warpx.m_fields.get(FieldType::current_cp, Direction{1}, lev), warpx.m_fields.get(FieldType::current_cp, Direction{2}, lev), + warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards); - if (warpx.isFieldInitialized(FieldType::F_fp, lev) && warpx.isFieldInitialized(FieldType::F_cp, lev)) + if (warpx.m_fields.has(FieldType::F_fp, lev) && warpx.m_fields.has(FieldType::F_cp, lev)) { - WriteCoarseScalar("F", warpx.getFieldPointer(FieldType::F_cp, lev), warpx.getFieldPointer(FieldType::F_fp, lev), + WriteCoarseScalar("F", warpx.m_fields.get(FieldType::F_cp, lev), warpx.m_fields.get(FieldType::F_fp, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards, 0); } - if (warpx.isFieldInitialized(FieldType::rho_fp, lev) && warpx.isFieldInitialized(FieldType::rho_cp, lev)) + if (warpx.m_fields.has(FieldType::rho_fp, lev) && warpx.m_fields.has(FieldType::rho_cp, lev)) { // Use the component 1 of `rho_cp`, i.e. 
rho_new for time synchronization - WriteCoarseScalar("rho", warpx.getFieldPointer(FieldType::rho_cp, lev), warpx.getFieldPointer(FieldType::rho_fp, lev), + WriteCoarseScalar("rho", warpx.m_fields.get(FieldType::rho_cp, lev), warpx.m_fields.get(FieldType::rho_fp, lev), dm, raw_pltname, default_level_prefix, lev, plot_raw_fields_guards, 1); } } diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index bcf613f49b0..e5eefc82de5 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -12,7 +12,7 @@ #include "ComputeDiagFunctors/RhoFunctor.H" #include "Diagnostics/Diagnostics.H" #include "Diagnostics/ParticleDiag/ParticleDiag.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FlushFormats/FlushFormat.H" #include "Particles/MultiParticleContainer.H" #include "Utils/Algorithms/IsIn.H" @@ -20,6 +20,8 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" +#include + #include #include #include @@ -43,7 +45,7 @@ #include using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; FullDiagnostics::FullDiagnostics (int i, const std::string& name): Diagnostics{i, name}, @@ -172,17 +174,18 @@ void FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) { #ifdef WARPX_DIM_RZ + using ablastr::fields::Direction; auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0, 0)->nComp(); + const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); // Make sure all multifabs have the same number of components for (int dim=0; dim<3; dim++){ AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Efield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Efield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Bfield_aux, lev, dim)->nComp() == 
ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::current_fp, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::current_fp, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); } // Species index to loop over species that dump rho per species @@ -217,37 +220,37 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) const auto m_varname_fields_size = static_cast(m_varnames_fields.size()); for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Er"), ncomp); } } else if ( m_varnames_fields[comp] == "Et" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Et"), ncomp); } } else if ( m_varnames_fields[comp] == "Ez" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Ez"), ncomp); } } else if ( m_varnames_fields[comp] == "Br" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp); if 
(update_varnames) { AddRZModesToOutputNames(std::string("Br"), ncomp); } } else if ( m_varnames_fields[comp] == "Bt" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Bt"), ncomp); } } else if ( m_varnames_fields[comp] == "Bz" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("Bz"), ncomp); @@ -314,19 +317,19 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) } i_T_species++; } else if ( m_varnames_fields[comp] == "F" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::F_fp, lev), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::F_fp, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("F"), ncomp); } } else if ( m_varnames_fields[comp] == "G" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::G_fp, lev), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique( warpx.m_fields.get(FieldType::G_fp, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("G"), ncomp); } } else if ( m_varnames_fields[comp] == "phi" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::phi_fp, lev), lev, m_crse_ratio, + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::phi_fp, lev), lev, m_crse_ratio, false, ncomp); 
if (update_varnames) { AddRZModesToOutputNames(std::string("phi"), ncomp); @@ -343,14 +346,14 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) } } else if ( m_varnames_fields[comp] == "divB" ){ m_all_field_functors[lev][comp] = std::make_unique( - warpx.getFieldPointerArray(FieldType::Bfield_aux, lev), + warpx.m_fields.get_alldirs(FieldType::Bfield_aux, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("divB"), ncomp); } } else if ( m_varnames_fields[comp] == "divE" ){ m_all_field_functors[lev][comp] = std::make_unique( - warpx.getFieldPointerArray(FieldType::Efield_aux, lev), + warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio, false, ncomp); if (update_varnames) { AddRZModesToOutputNames(std::string("divE"), ncomp); @@ -393,19 +396,20 @@ void FullDiagnostics::AddRZModesToDiags (int lev) { #ifdef WARPX_DIM_RZ + using ablastr::fields::Direction; if (!m_dump_rz_modes) { return; } auto & warpx = WarpX::GetInstance(); - const int ncomp_multimodefab = warpx.getFieldPointer(FieldType::Efield_aux, 0, 0)->nComp(); + const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp(); // Make sure all multifabs have the same number of components for (int dim=0; dim<3; dim++){ AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Efield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Efield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::Bfield_aux, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); AMREX_ALWAYS_ASSERT( - warpx.getFieldPointer(FieldType::current_fp, lev, dim)->nComp() == ncomp_multimodefab ); + warpx.m_fields.get(FieldType::current_fp, Direction{dim}, lev)->nComp() == ncomp_multimodefab ); } // Check if divE is requested @@ -440,19 +444,19 @@ 
FullDiagnostics::AddRZModesToDiags (int lev) for (int dim=0; dim<3; dim++){ // 3 components, r theta z m_all_field_functors[lev].push_back(std::make_unique( - warpx.getFieldPointer(FieldType::Efield_aux, lev, dim), lev, + warpx.m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), lev, m_crse_ratio, false, ncomp_multimodefab)); AddRZModesToOutputNames(std::string("E") + coord[dim], - warpx.getFieldPointer(FieldType::Efield_aux, 0, 0)->nComp()); + warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp()); } // B for (int dim=0; dim<3; dim++){ // 3 components, r theta z m_all_field_functors[lev].push_back(std::make_unique( - warpx.getFieldPointer(FieldType::Bfield_aux, lev, dim), lev, + warpx.m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), lev, m_crse_ratio, false, ncomp_multimodefab)); AddRZModesToOutputNames(std::string("B") + coord[dim], - warpx.getFieldPointer(FieldType::Bfield_aux, 0, 0)->nComp()); + warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, 0)->nComp()); } // j for (int dim=0; dim<3; dim++){ @@ -461,12 +465,12 @@ FullDiagnostics::AddRZModesToDiags (int lev) dim, lev, m_crse_ratio, false, deposit_current, ncomp_multimodefab)); deposit_current = false; AddRZModesToOutputNames(std::string("J") + coord[dim], - warpx.getFieldPointer(FieldType::current_fp, 0, 0)->nComp()); + warpx.m_fields.get(FieldType::current_fp,Direction{0},0)->nComp()); } // divE if (divE_requested) { m_all_field_functors[lev].push_back(std::make_unique( - warpx.getFieldPointerArray(FieldType::Efield_aux, lev), + warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio, false, ncomp_multimodefab)); AddRZModesToOutputNames(std::string("divE"), ncomp_multimodefab); } @@ -658,20 +662,22 @@ FullDiagnostics::InitializeFieldFunctors (int lev) // diagnostic output bool deposit_current = !m_solver_deposits_current; + using ablastr::fields::Direction; + m_all_field_functors[lev].resize(ntot); // Fill vector of functors for all components except 
individual cylindrical modes. for (int comp=0; comp(warpx.getFieldPointer(FieldType::Efield_aux, lev, 2), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Bz" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 2), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "jz" ){ m_all_field_functors[lev][comp] = std::make_unique(2, lev, m_crse_ratio, true, deposit_current); deposit_current = false; } else if ( m_varnames[comp] == "jz_displacement" ) { m_all_field_functors[lev][comp] = std::make_unique(2, lev, m_crse_ratio, true); } else if ( m_varnames[comp] == "Az" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 2), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{2}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "rho" ){ // Initialize rho functor to dump total rho m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, true); @@ -684,31 +690,31 @@ FullDiagnostics::InitializeFieldFunctors (int lev) m_all_field_functors[lev][comp] = std::make_unique(lev, m_crse_ratio, m_T_per_species_index[i_T_species]); i_T_species++; } else if ( m_varnames[comp] == "F" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::F_fp, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::F_fp, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "G" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::G_fp, lev), lev, m_crse_ratio); + 
m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::G_fp, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "phi" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::phi_fp, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::phi_fp, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "part_per_cell" ){ m_all_field_functors[lev][comp] = std::make_unique(nullptr, lev, m_crse_ratio); } else if ( m_varnames[comp] == "part_per_grid" ){ m_all_field_functors[lev][comp] = std::make_unique(nullptr, lev, m_crse_ratio); } else if ( m_varnames[comp] == "divB" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointerArray(FieldType::Bfield_aux, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get_alldirs(FieldType::Bfield_aux, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "divE" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointerArray(FieldType::Efield_aux, lev), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio); } else { #ifdef WARPX_DIM_RZ if ( m_varnames[comp] == "Er" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Et" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Br" ){ - m_all_field_functors[lev][comp] = 
std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Bt" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "jr" ){ m_all_field_functors[lev][comp] = std::make_unique(0, lev, m_crse_ratio, true, deposit_current); deposit_current = false; @@ -720,22 +726,22 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if (m_varnames[comp] == "jt_displacement" ){ m_all_field_functors[lev][comp] = std::make_unique(1, lev, m_crse_ratio, true); } else if ( m_varnames[comp] == "Ar" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "At" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{1}, lev), lev, m_crse_ratio); } else { WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for RZ geometry"); } #else // Valid transverse fields in Cartesian coordinates if ( m_varnames[comp] == "Ex" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, 
m_crse_ratio); } else if ( m_varnames[comp] == "Ey" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Efield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Bx" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "By" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::Bfield_aux, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "jx" ){ m_all_field_functors[lev][comp] = std::make_unique(0, lev, m_crse_ratio, true, deposit_current); deposit_current = false; @@ -747,9 +753,9 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if ( m_varnames[comp] == "jy_displacement" ){ m_all_field_functors[lev][comp] = std::make_unique(1, lev, m_crse_ratio); } else if ( m_varnames[comp] == "Ax" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 0), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{0}, lev), lev, m_crse_ratio); } else if ( m_varnames[comp] == "Ay" ){ - m_all_field_functors[lev][comp] = std::make_unique(warpx.getFieldPointer(FieldType::vector_potential_fp, lev, 1), lev, m_crse_ratio); + m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{1}, lev), lev, m_crse_ratio); } else { std::cout << "Error on component " << m_varnames[comp] << 
std::endl; WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for this geometry"); diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index bf67b51bbeb..e94039ec079 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -7,7 +7,7 @@ * License: BSD-3-Clause-LBNL */ -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/ParticleIO.H" #include "Particles/MultiParticleContainer.H" #include "Particles/PhysicalParticleContainer.H" @@ -43,7 +43,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; void LaserParticleContainer::ReadHeader (std::istream& is) @@ -268,7 +268,7 @@ storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, const amrex::Geometry& geom = warpx.Geom(lev); auto plo = geom.ProbLoArray(); auto dxi = geom.InvCellSizeArray(); - amrex::MultiFab const& phi = warpx.getField( FieldType::phi_fp, lev, 0 ); + amrex::MultiFab const& phi = *warpx.m_fields.get(FieldType::phi_fp, lev); for (PinnedParIter pti(tmp, lev); pti.isValid(); ++pti) { diff --git a/Source/Diagnostics/ReducedDiags/CMakeLists.txt b/Source/Diagnostics/ReducedDiags/CMakeLists.txt index 4f0b05f6180..bbf1b6b65b0 100644 --- a/Source/Diagnostics/ReducedDiags/CMakeLists.txt +++ b/Source/Diagnostics/ReducedDiags/CMakeLists.txt @@ -3,26 +3,27 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(lib_${SD} PRIVATE BeamRelevant.cpp + ChargeOnEB.cpp ColliderRelevant.cpp DifferentialLuminosity.cpp FieldEnergy.cpp + FieldMaximum.cpp + FieldMomentum.cpp FieldProbe.cpp FieldProbeParticleContainer.cpp - FieldMomentum.cpp + FieldReduction.cpp + FieldProbe.cpp LoadBalanceCosts.cpp LoadBalanceEfficiency.cpp MultiReducedDiags.cpp ParticleEnergy.cpp - ParticleMomentum.cpp + ParticleExtrema.cpp ParticleHistogram.cpp ParticleHistogram2D.cpp + ParticleMomentum.cpp + ParticleNumber.cpp ReducedDiags.cpp - FieldMaximum.cpp - ParticleExtrema.cpp RhoMaximum.cpp - 
ParticleNumber.cpp - FieldReduction.cpp - FieldProbe.cpp - ChargeOnEB.cpp + Timestep.cpp ) endforeach() diff --git a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp index 975fed6d74c..050b18d3b9d 100644 --- a/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp +++ b/Source/Diagnostics/ReducedDiags/ChargeOnEB.cpp @@ -9,7 +9,7 @@ #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "Utils/Parser/ParserUtils.H" @@ -29,7 +29,6 @@ #include using namespace amrex; -using namespace warpx::fields; // constructor @@ -97,6 +96,8 @@ void ChargeOnEB::ComputeDiags (const int step) throw std::runtime_error("ChargeOnEB::ComputeDiags only works when EBs are enabled at runtime"); } #if ((defined WARPX_DIM_3D) && (defined AMREX_USE_EB)) + using ablastr::fields::Direction; + // get a reference to WarpX instance auto & warpx = WarpX::GetInstance(); @@ -104,9 +105,10 @@ void ChargeOnEB::ComputeDiags (const int step) int const lev = 0; // get MultiFab data at lev - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_fp, lev,0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_fp, lev,1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_fp, lev,2); + using warpx::fields::FieldType; + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev); // get EB structures amrex::EBFArrayBoxFactory const& eb_box_factory = warpx.fieldEBFactory(lev); diff --git a/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp b/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp index fb683e25319..dfd64fe5af9 100644 --- 
a/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp +++ b/Source/Diagnostics/ReducedDiags/ColliderRelevant.cpp @@ -8,7 +8,7 @@ #include "ColliderRelevant.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #if (defined WARPX_QED) # include "Particles/ElementaryProcess/QEDInternals/QedChiFunctions.H" #endif @@ -59,7 +59,7 @@ #include using namespace amrex; -using namespace warpx::fields; + ColliderRelevant::ColliderRelevant (const std::string& rd_name) : ReducedDiags{rd_name} @@ -429,6 +429,8 @@ void ColliderRelevant::ComputeDiags (int step) amrex::Real chimax_f = 0.0_rt; amrex::Real chiave_f = 0.0_rt; + using ablastr::fields::Direction; + if (myspc.DoQED()) { // define variables in preparation for field gatheeduce_data.value()ring @@ -441,13 +443,14 @@ void ColliderRelevant::ComputeDiags (int step) const int lev = 0; // define variables in preparation for field gathering + using warpx::fields::FieldType; const amrex::XDim3 dinv = WarpX::InvCellSize(std::max(lev, 0)); - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const amrex::MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const amrex::MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const amrex::MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & 
Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // declare reduce_op ReduceOps reduce_op; diff --git a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp index fbfdaf11017..1a984368b4e 100644 --- a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp @@ -7,12 +7,14 @@ #include "FieldEnergy.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include + #include #include #include @@ -29,7 +31,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor FieldEnergy::FieldEnergy (const std::string& rd_name) @@ -87,16 +89,18 @@ void FieldEnergy::ComputeDiags (int step) // get number of level const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { // get MultiFab data at lev - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const MultiFab & Bz = 
*warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // get cell volume const std::array &dx = WarpX::CellSize(lev); diff --git a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp index 4fbbd1ec82c..8c7eb6b4dec 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMaximum.cpp @@ -7,10 +7,11 @@ #include "FieldMaximum.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "WarpX.H" +#include #include #include @@ -39,7 +40,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor FieldMaximum::FieldMaximum (const std::string& rd_name) @@ -112,16 +113,18 @@ void FieldMaximum::ComputeDiags (int step) // get number of level const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { // get MultiFab data at lev - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev,2); - const MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); constexpr int noutputs = 8; // max of 
Ex,Ey,Ez,|E|,Bx,By,Bz and |B| constexpr int index_Ex = 0; diff --git a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp index 72bece9265b..764e9874c39 100644 --- a/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldMomentum.cpp @@ -7,11 +7,12 @@ #include "FieldMomentum.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include @@ -38,7 +39,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; FieldMomentum::FieldMomentum (const std::string& rd_name) : ReducedDiags{rd_name} @@ -104,16 +105,18 @@ void FieldMomentum::ComputeDiags (int step) // Get number of refinement levels const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // Loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { // Get MultiFab data at given refinement level - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev, 0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev, 1); - const amrex::MultiFab & Ez = warpx.getField(FieldType::Efield_aux, lev, 2); - const amrex::MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev, 0); - const amrex::MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev, 1); - const amrex::MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev, 2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = 
*warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // Cell-centered index type const amrex::GpuArray cc{0,0,0}; diff --git a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp index f498cea7566..923ae727d08 100644 --- a/Source/Diagnostics/ReducedDiags/FieldProbe.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldProbe.cpp @@ -7,7 +7,7 @@ #include "FieldProbe.H" #include "FieldProbeParticleContainer.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/Gather/FieldGather.H" #include "Particles/Pusher/GetAndSetPosition.H" #include "Particles/Pusher/UpdatePosition.H" @@ -17,6 +17,7 @@ #include "Utils/WarpXConst.H" #include "WarpX.H" +#include #include #include @@ -45,7 +46,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor @@ -381,6 +382,8 @@ void FieldProbe::ComputeDiags (int step) // get number of mesh-refinement levels const auto nLevel = warpx.finestLevel() + 1; + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev < nLevel; ++lev) { @@ -398,12 +401,12 @@ void FieldProbe::ComputeDiags (int step) } // get MultiFab data at lev - const amrex::MultiFab &Ex = warpx.getField(FieldType::Efield_aux, lev, 0); - const amrex::MultiFab &Ey = warpx.getField(FieldType::Efield_aux, lev, 1); - const amrex::MultiFab &Ez = warpx.getField(FieldType::Efield_aux, lev, 2); - const amrex::MultiFab &Bx = warpx.getField(FieldType::Bfield_aux, lev, 0); - const amrex::MultiFab &By = warpx.getField(FieldType::Bfield_aux, lev, 1); - const amrex::MultiFab &Bz = warpx.getField(FieldType::Bfield_aux, lev, 2); + const amrex::MultiFab &Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab &Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab &Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab &Bx = 
*warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab &By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab &Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); /* * Prepare interpolation of field components to probe_position diff --git a/Source/Diagnostics/ReducedDiags/FieldReduction.H b/Source/Diagnostics/ReducedDiags/FieldReduction.H index 9574caa3d5d..d2c6dc6f6da 100644 --- a/Source/Diagnostics/ReducedDiags/FieldReduction.H +++ b/Source/Diagnostics/ReducedDiags/FieldReduction.H @@ -9,7 +9,7 @@ #define WARPX_DIAGNOSTICS_REDUCEDDIAGS_FIELDREDUCTION_H_ #include "ReducedDiags.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "WarpX.H" #include @@ -87,7 +87,9 @@ public: template void ComputeFieldReduction() { + using ablastr::fields::Direction; using namespace amrex::literals; + using warpx::fields::FieldType; // get a reference to WarpX instance auto & warpx = WarpX::GetInstance(); @@ -99,15 +101,15 @@ public: const auto dx = geom.CellSizeArray(); // get MultiFab data - const amrex::MultiFab & Ex = warpx.getField(warpx::fields::FieldType::Efield_aux, lev,0); - const amrex::MultiFab & Ey = warpx.getField(warpx::fields::FieldType::Efield_aux, lev,1); - const amrex::MultiFab & Ez = warpx.getField(warpx::fields::FieldType::Efield_aux, lev,2); - const amrex::MultiFab & Bx = warpx.getField(warpx::fields::FieldType::Bfield_aux, lev,0); - const amrex::MultiFab & By = warpx.getField(warpx::fields::FieldType::Bfield_aux, lev,1); - const amrex::MultiFab & Bz = warpx.getField(warpx::fields::FieldType::Bfield_aux, lev,2); - const amrex::MultiFab & jx = warpx.getField(warpx::fields::FieldType::current_fp, lev,0); - const amrex::MultiFab & jy = warpx.getField(warpx::fields::FieldType::current_fp, lev,1); - const amrex::MultiFab & jz = warpx.getField(warpx::fields::FieldType::current_fp, lev,2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, 
Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); + const amrex::MultiFab & jx = *warpx.m_fields.get(FieldType::current_fp, Direction{0},lev); + const amrex::MultiFab & jy = *warpx.m_fields.get(FieldType::current_fp, Direction{1},lev); + const amrex::MultiFab & jz = *warpx.m_fields.get(FieldType::current_fp, Direction{2},lev); // General preparation of interpolation and reduction operations diff --git a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp index a11a8d35853..c496300c54e 100644 --- a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp +++ b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.cpp @@ -7,12 +7,14 @@ #include "LoadBalanceCosts.H" #include "Diagnostics/ReducedDiags/ReducedDiags.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "WarpX.H" +#include + #include #include #include @@ -36,7 +38,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -123,11 +125,13 @@ void LoadBalanceCosts::ComputeDiags (int step) // shift index for m_data int shift_m_data = 0; + using ablastr::fields::Direction; + // save data for (int lev = 0; lev < nLevels; ++lev) { const amrex::DistributionMapping& dm = warpx.DistributionMap(lev); - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); 
for (MFIter mfi(Ex, false); mfi.isValid(); ++mfi) { const Box& tbx = mfi.tilebox(); diff --git a/Source/Diagnostics/ReducedDiags/Make.package b/Source/Diagnostics/ReducedDiags/Make.package index e840931f8d3..2611831a3dd 100644 --- a/Source/Diagnostics/ReducedDiags/Make.package +++ b/Source/Diagnostics/ReducedDiags/Make.package @@ -1,24 +1,24 @@ CEXE_sources += MultiReducedDiags.cpp CEXE_sources += ReducedDiags.cpp -CEXE_sources += ParticleEnergy.cpp -CEXE_sources += ParticleMomentum.cpp -CEXE_sources += FieldEnergy.cpp -CEXE_sources += FieldProbe.cpp -CEXE_sources += FieldProbeParticleContainer.cpp -CEXE_sources += FieldMomentum.cpp CEXE_sources += BeamRelevant.cpp +CEXE_sources += ChargeOnEB.cpp CEXE_sources += ColliderRelevant.cpp CEXE_sources += DifferentialLuminosity.cpp +CEXE_sources += FieldEnergy.cpp +CEXE_sources += FieldMaximum.cpp +CEXE_sources += FieldMomentum.cpp +CEXE_sources += FieldProbe.cpp +CEXE_sources += FieldProbeParticleContainer.cpp +CEXE_sources += FieldReduction.cpp CEXE_sources += LoadBalanceCosts.cpp CEXE_sources += LoadBalanceEfficiency.cpp +CEXE_sources += ParticleEnergy.cpp +CEXE_sources += ParticleExtrema.cpp CEXE_sources += ParticleHistogram.cpp CEXE_sources += ParticleHistogram2D.cpp -CEXE_sources += FieldMaximum.cpp -CEXE_sources += FieldProbe.cpp -CEXE_sources += ParticleExtrema.cpp -CEXE_sources += RhoMaximum.cpp +CEXE_sources += ParticleMomentum.cpp CEXE_sources += ParticleNumber.cpp -CEXE_sources += FieldReduction.cpp -CEXE_sources += ChargeOnEB.cpp +CEXE_sources += RhoMaximum.cpp +CEXE_sources += Timestep.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/Diagnostics/ReducedDiags diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp index 25ea87d9f54..5035eac58a8 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp @@ -12,8 +12,8 @@ #include "DifferentialLuminosity.H" #include "FieldEnergy.H" 
#include "FieldMaximum.H" -#include "FieldProbe.H" #include "FieldMomentum.H" +#include "FieldProbe.H" #include "FieldReduction.H" #include "LoadBalanceCosts.H" #include "LoadBalanceEfficiency.H" @@ -24,6 +24,7 @@ #include "ParticleMomentum.H" #include "ParticleNumber.H" #include "RhoMaximum.H" +#include "Timestep.H" #include "Utils/TextMsg.H" #include "Utils/WarpXProfilerWrapper.H" @@ -52,24 +53,25 @@ MultiReducedDiags::MultiReducedDiags () using CS = const std::string& ; const auto reduced_diags_dictionary = std::map(CS)>>{ + {"BeamRelevant", [](CS s){return std::make_unique(s);}}, + {"ChargeOnEB", [](CS s){return std::make_unique(s);}}, + {"ColliderRelevant", [](CS s){return std::make_unique(s);}}, + {"DifferentialLuminosity",[](CS s){return std::make_unique(s);}}, {"ParticleEnergy", [](CS s){return std::make_unique(s);}}, + {"ParticleExtrema", [](CS s){return std::make_unique(s);}}, + {"ParticleHistogram", [](CS s){return std::make_unique(s);}}, + {"ParticleHistogram2D", [](CS s){return std::make_unique(s);}}, {"ParticleMomentum", [](CS s){return std::make_unique(s);}}, + {"ParticleNumber", [](CS s){return std::make_unique(s);}}, {"FieldEnergy", [](CS s){return std::make_unique(s);}}, - {"FieldMomentum", [](CS s){return std::make_unique(s);}}, {"FieldMaximum", [](CS s){return std::make_unique(s);}}, + {"FieldMomentum", [](CS s){return std::make_unique(s);}}, {"FieldProbe", [](CS s){return std::make_unique(s);}}, {"FieldReduction", [](CS s){return std::make_unique(s);}}, - {"RhoMaximum", [](CS s){return std::make_unique(s);}}, - {"BeamRelevant", [](CS s){return std::make_unique(s);}}, - {"ColliderRelevant", [](CS s){return std::make_unique(s);}}, - {"DifferentialLuminosity",[](CS s){return std::make_unique(s);}}, {"LoadBalanceCosts", [](CS s){return std::make_unique(s);}}, {"LoadBalanceEfficiency", [](CS s){return std::make_unique(s);}}, - {"ParticleHistogram", [](CS s){return std::make_unique(s);}}, - {"ParticleHistogram2D", [](CS s){return 
std::make_unique(s);}}, - {"ParticleNumber", [](CS s){return std::make_unique(s);}}, - {"ParticleExtrema", [](CS s){return std::make_unique(s);}}, - {"ChargeOnEB", [](CS s){return std::make_unique(s);}} + {"RhoMaximum", [](CS s){return std::make_unique(s);}}, + {"Timestep", [](CS s){return std::make_unique(s);}} }; // loop over all reduced diags and fill m_multi_rd with requested reduced diags std::transform(m_rd_names.begin(), m_rd_names.end(), std::back_inserter(m_multi_rd), diff --git a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp index 842dd3c9efd..c82b060b67c 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleExtrema.cpp @@ -11,7 +11,7 @@ #if (defined WARPX_QED) # include "Particles/ElementaryProcess/QEDInternals/QedChiFunctions.H" #endif -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/Gather/FieldGather.H" #include "Particles/Gather/GetExternalFields.H" #include "Particles/MultiParticleContainer.H" @@ -21,6 +21,8 @@ #include "Utils/WarpXConst.H" #include "WarpX.H" +#include + #include #include #include @@ -52,7 +54,7 @@ #include using namespace amrex::literals; -using namespace warpx::fields; +using warpx::fields::FieldType; // constructor ParticleExtrema::ParticleExtrema (const std::string& rd_name) @@ -260,18 +262,20 @@ void ParticleExtrema::ComputeDiags (int step) const bool galerkin_interpolation = WarpX::galerkin_interpolation; const amrex::IntVect ngEB = warpx.getngEB(); + using ablastr::fields::Direction; + // loop over refinement levels for (int lev = 0; lev <= level_number; ++lev) { // define variables in preparation for field gathering const amrex::XDim3 dinv = WarpX::InvCellSize(std::max(lev, 0)); - const amrex::MultiFab & Ex = warpx.getField(FieldType::Efield_aux, lev,0); - const amrex::MultiFab & Ey = warpx.getField(FieldType::Efield_aux, lev,1); - const amrex::MultiFab & Ez = 
warpx.getField(FieldType::Efield_aux, lev,2); - const amrex::MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, lev,0); - const amrex::MultiFab & By = warpx.getField(FieldType::Bfield_aux, lev,1); - const amrex::MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, lev,2); + const amrex::MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + const amrex::MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + const amrex::MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + const amrex::MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + const amrex::MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + const amrex::MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // declare reduce_op amrex::ReduceOps reduce_op; diff --git a/Source/Diagnostics/ReducedDiags/Timestep.H b/Source/Diagnostics/ReducedDiags/Timestep.H new file mode 100644 index 00000000000..bcf4fe6452f --- /dev/null +++ b/Source/Diagnostics/ReducedDiags/Timestep.H @@ -0,0 +1,35 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Thomas Marks + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_DIAGNOSTICS_REDUCEDDIAGS_TIMESTEP_H_ +#define WARPX_DIAGNOSTICS_REDUCEDDIAGS_TIMESTEP_H_ + +#include "ReducedDiags.H" +#include + +/** + * This class contains a function for retrieving the current simulation timestep as a diagnostic. + * Useful mainly for simulations using adaptive timestepping. + */ +class Timestep: public ReducedDiags { +public: + /** + * constructor + * @param[in] rd_name reduced diags name + */ + Timestep (const std::string& rd_name); + + /** + * This function gets the current physical timestep of the simulation at all refinement levels. 
+ * @param[in] step current time step + */ + void ComputeDiags (int step) final; +}; + +#endif //WARPX_DIAGNOSTICS_REDUCEDDIAGS_TIMESTEP_H_ diff --git a/Source/Diagnostics/ReducedDiags/Timestep.cpp b/Source/Diagnostics/ReducedDiags/Timestep.cpp new file mode 100644 index 00000000000..3474121db91 --- /dev/null +++ b/Source/Diagnostics/ReducedDiags/Timestep.cpp @@ -0,0 +1,72 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Thomas Marks + * + * License: BSD-3-Clause-LBNL + */ + +#include "Timestep.H" + +#include "WarpX.H" + +#include +#include +#include // TODO: remove this +#include + +#include + +using namespace amrex::literals; + +// constructor +Timestep::Timestep (const std::string& rd_name) +:ReducedDiags{rd_name} +{ + const auto& warpx = WarpX::GetInstance(); + const auto max_level = warpx.maxLevel(); + + // data size should be equal to the number of refinement levels + m_data.resize(max_level + 1, 0.0_rt); + + if (amrex::ParallelDescriptor::IOProcessor() && m_write_header) { + // open file + std::ofstream ofs{m_path + m_rd_name + "." 
+ m_extension, std::ofstream::out}; + + // write header row + int c = 0; + ofs << "#"; + ofs << "[" << c++ << "]step()"; + ofs << m_sep; + ofs << "[" << c++ << "]time(s)"; + ofs << m_sep; + + for (int lev = 0; lev <= max_level; lev++) { + ofs << "[" << c++ << "]timestep[" << lev << "](s)"; + if (lev < max_level) { + ofs << m_sep; + } + } + + // close file + ofs << std::endl; + ofs.close(); + } +} +// end constructor + +// function to get current simulation timestep at all refinement levels +void Timestep::ComputeDiags (int step) { + // Check if diagnostic should be done + if (!m_intervals.contains(step+1)) { return; } + + const auto& warpx = WarpX::GetInstance(); + const auto max_level = warpx.maxLevel(); + const auto dt = warpx.getdt(); + + for (int lev = 0; lev <= max_level; lev++) { + m_data[lev] = dt[lev]; + } +} +// end Timestep::ComputeDiags diff --git a/Source/Diagnostics/SliceDiagnostic.cpp b/Source/Diagnostics/SliceDiagnostic.cpp index 97af967f2be..bcb6070abdf 100644 --- a/Source/Diagnostics/SliceDiagnostic.cpp +++ b/Source/Diagnostics/SliceDiagnostic.cpp @@ -7,10 +7,11 @@ */ #include "SliceDiagnostic.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "WarpX.H" +#include #include #include @@ -41,7 +42,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; /* \brief * The functions creates the slice for diagnostics based on the user-input. 
@@ -175,6 +176,10 @@ CreateSlice( const MultiFab& mf, const Vector &dom_geom, const MultiFab& mfSrc = *smf; MultiFab& mfDst = *cs_mf; + auto & warpx = WarpX::GetInstance(); + + using ablastr::fields::Direction; + MFIter mfi_dst(mfDst); for (MFIter mfi(mfSrc); mfi.isValid(); ++mfi) { @@ -196,27 +201,27 @@ CreateSlice( const MultiFab& mf, const Vector &dom_geom, amrex::amrex_avgdown_nodes(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Efield_aux, 0,0).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 0); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Efield_aux, 0,1).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 1); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Efield_aux, 0,2).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 2); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Bfield_aux, 0,0).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 0); } - if( SliceType == WarpX::GetInstance().getField(FieldType::Bfield_aux, 0,1).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 1); } - if( 
SliceType == WarpX::GetInstance().getField(FieldType::Bfield_aux, 0,2).ixType().toIntVect() ) { + if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, 0)->ixType().toIntVect() ) { amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, ncomp, slice_cr_ratio, 2); } diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index a3b902386f6..43415daf151 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -12,6 +12,7 @@ # include "BoundaryConditions/PML_RZ.H" #endif #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "FieldIO.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" @@ -88,6 +89,9 @@ WarpX::GetRestartDMap (const std::string& chkfile, const amrex::BoxArray& ba, in void WarpX::InitFromCheckpoint () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + WARPX_PROFILE("WarpX::InitFromCheckpoint()"); amrex::Print()<< Utils::TextMsg::Info( @@ -279,101 +283,101 @@ WarpX::InitFromCheckpoint () for (int lev = 0; lev < nlevs; ++lev) { for (int i = 0; i < 3; ++i) { - current_fp[lev][i]->setVal(0.0); - Efield_fp[lev][i]->setVal(0.0); - Bfield_fp[lev][i]->setVal(0.0); + m_fields.get(FieldType::current_fp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Efield_fp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Bfield_fp, Direction{i}, lev)->setVal(0.0); } if (lev > 0) { for (int i = 0; i < 3; ++i) { - Efield_aux[lev][i]->setVal(0.0); - Bfield_aux[lev][i]->setVal(0.0); + m_fields.get(FieldType::Efield_aux, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Bfield_aux, Direction{i}, lev)->setVal(0.0); - current_cp[lev][i]->setVal(0.0); - Efield_cp[lev][i]->setVal(0.0); - Bfield_cp[lev][i]->setVal(0.0); + m_fields.get(FieldType::current_cp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Efield_cp, Direction{i}, lev)->setVal(0.0); + m_fields.get(FieldType::Bfield_cp, Direction{i}, 
lev)->setVal(0.0); } } - VisMF::Read(*Efield_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_fp")); - VisMF::Read(*Efield_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Efield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_fp")); - VisMF::Read(*Efield_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Efield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_fp")); - VisMF::Read(*Bfield_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_fp")); - VisMF::Read(*Bfield_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_fp")); - VisMF::Read(*Bfield_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Bfield_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_fp")); if (WarpX::fft_do_time_averaging) { - VisMF::Read(*Efield_avg_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_avg_fp")); - VisMF::Read(*Efield_avg_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_avg_fp")); - VisMF::Read(*Efield_avg_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_avg_fp")); - VisMF::Read(*Bfield_avg_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_avg_fp")); - VisMF::Read(*Bfield_avg_fp[lev][1], + 
VisMF::Read(*m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_avg_fp")); - VisMF::Read(*Bfield_avg_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_avg_fp")); } if (is_synchronized) { - VisMF::Read(*current_fp[lev][0], + VisMF::Read(*m_fields.get(FieldType::current_fp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jx_fp")); - VisMF::Read(*current_fp[lev][1], + VisMF::Read(*m_fields.get(FieldType::current_fp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jy_fp")); - VisMF::Read(*current_fp[lev][2], + VisMF::Read(*m_fields.get(FieldType::current_fp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_fp")); } if (lev > 0) { - VisMF::Read(*Efield_cp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_cp")); - VisMF::Read(*Efield_cp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Efield_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_cp")); - VisMF::Read(*Efield_cp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Efield_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_cp")); - VisMF::Read(*Bfield_cp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_cp")); - VisMF::Read(*Bfield_cp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_cp")); - VisMF::Read(*Bfield_cp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, 
level_prefix, "Bz_cp")); if (WarpX::fft_do_time_averaging) { - VisMF::Read(*Efield_avg_cp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ex_avg_cp")); - VisMF::Read(*Efield_avg_cp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ey_avg_cp")); - VisMF::Read(*Efield_avg_cp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Efield_avg_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Ez_avg_cp")); - VisMF::Read(*Bfield_avg_cp[lev][0], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bx_avg_cp")); - VisMF::Read(*Bfield_avg_cp[lev][1], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "By_avg_cp")); - VisMF::Read(*Bfield_avg_cp[lev][2], + VisMF::Read(*m_fields.get(FieldType::Bfield_avg_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "Bz_avg_cp")); } if (is_synchronized) { - VisMF::Read(*current_cp[lev][0], + VisMF::Read(*m_fields.get(FieldType::current_cp, Direction{0}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jx_cp")); - VisMF::Read(*current_cp[lev][1], + VisMF::Read(*m_fields.get(FieldType::current_cp, Direction{1}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jy_cp")); - VisMF::Read(*current_cp[lev][2], + VisMF::Read(*m_fields.get(FieldType::current_cp, Direction{2}, lev), amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "jz_cp")); } } @@ -384,11 +388,11 @@ WarpX::InitFromCheckpoint () { for (int lev = 0; lev < nlevs; ++lev) { if (pml[lev]) { - pml[lev]->Restart(amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, 
"pml")); + pml[lev]->Restart(m_fields, amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml")); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev]) { - pml_rz[lev]->Restart(amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml_rz")); + pml_rz[lev]->Restart(m_fields, amrex::MultiFabFileFullPrefix(lev, restart_chkfile, level_prefix, "pml_rz")); } #endif } diff --git a/Source/EmbeddedBoundary/ParticleScraper.H b/Source/EmbeddedBoundary/ParticleScraper.H index c5d9cc68c60..860541542be 100644 --- a/Source/EmbeddedBoundary/ParticleScraper.H +++ b/Source/EmbeddedBoundary/ParticleScraper.H @@ -65,7 +65,7 @@ */ template ::value, int> foo = 0> void -scrapeParticlesAtEB (PC& pc, const amrex::Vector& distance_to_eb, int lev, F&& f) +scrapeParticlesAtEB (PC& pc, ablastr::fields::MultiLevelScalarField const& distance_to_eb, int lev, F&& f) { scrapeParticlesAtEB(pc, distance_to_eb, lev, lev, std::forward(f)); } @@ -108,7 +108,7 @@ scrapeParticlesAtEB (PC& pc, const amrex::Vector& distan */ template ::value, int> foo = 0> void -scrapeParticlesAtEB (PC& pc, const amrex::Vector& distance_to_eb, F&& f) +scrapeParticlesAtEB (PC& pc, ablastr::fields::MultiLevelScalarField const& distance_to_eb, F&& f) { scrapeParticlesAtEB(pc, distance_to_eb, 0, pc.finestLevel(), std::forward(f)); } @@ -153,7 +153,7 @@ scrapeParticlesAtEB (PC& pc, const amrex::Vector& distan */ template ::value, int> foo = 0> void -scrapeParticlesAtEB (PC& pc, const amrex::Vector& distance_to_eb, +scrapeParticlesAtEB (PC& pc, ablastr::fields::MultiLevelScalarField const& distance_to_eb, int lev_min, int lev_max, F&& f) { BL_PROFILE("scrapeParticlesAtEB"); diff --git a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp index 717aa26b021..61009fb46e0 100644 --- a/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp +++ b/Source/EmbeddedBoundary/WarpXFaceExtensions.cpp @@ -7,15 +7,20 @@ #include "WarpXFaceInfoBox.H" #include 
"EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "WarpX.H" #include +#include #include #include #include +using namespace ablastr::fields; +using warpx::fields::FieldType; + /** * \brief Get the value of arr in the neighbor (i_n, j_n) on the plane with normal 'dim'. * @@ -283,7 +288,7 @@ WarpX::ComputeFaceExtensions () void WarpX::InitBorrowing() { int idim = 0; - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { amrex::Box const &box = mfi.validbox(); auto &borrowing_x = (*m_borrowing[maxLevel()][idim])[mfi]; borrowing_x.inds_pointer.resize(box); @@ -299,7 +304,7 @@ WarpX::InitBorrowing() { } idim = 1; - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { amrex::Box const &box = mfi.validbox(); auto &borrowing_y = (*m_borrowing[maxLevel()][idim])[mfi]; borrowing_y.inds_pointer.resize(box); @@ -312,7 +317,7 @@ WarpX::InitBorrowing() { } idim = 2; - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { amrex::Box const &box = mfi.validbox(); auto &borrowing_z = (*m_borrowing[maxLevel()][idim])[mfi]; borrowing_z.inds_pointer.resize(box); @@ -453,11 +458,10 @@ WarpX::ComputeOneWayExtensions () WARPX_ABORT_WITH_MESSAGE( "ComputeOneWayExtensions: Only implemented in 2D3V and 3D3V"); #endif - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { amrex::Box const &box = mfi.validbox(); - - auto const &S = m_face_areas[maxLevel()][idim]->array(mfi); + auto const &S = 
m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel())->array(mfi); auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi]; @@ -469,11 +473,11 @@ WarpX::ComputeOneWayExtensions () amrex::Real* borrowing_area = borrowing.area.data(); int& vecs_size = borrowing.vecs_size; - auto const &S_mod = m_area_mod[maxLevel()][idim]->array(mfi); + auto const &S_mod = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); - const auto &lx = m_edge_lengths[maxLevel()][0]->array(mfi); - const auto &ly = m_edge_lengths[maxLevel()][1]->array(mfi); - const auto &lz = m_edge_lengths[maxLevel()][2]->array(mfi); + const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); + const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); + const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); vecs_size = amrex::Scan::PrefixSum(ncells, [=] AMREX_GPU_DEVICE (int icell) { @@ -581,11 +585,11 @@ WarpX::ComputeEightWaysExtensions () WARPX_ABORT_WITH_MESSAGE( "ComputeEightWaysExtensions: Only implemented in 2D3V and 3D3V"); #endif - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { amrex::Box const &box = mfi.validbox(); - auto const &S = m_face_areas[maxLevel()][idim]->array(mfi); + auto const &S = m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel())->array(mfi); auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi]; @@ -597,10 +601,11 @@ WarpX::ComputeEightWaysExtensions () amrex::Real* borrowing_area = 
borrowing.area.data(); int& vecs_size = borrowing.vecs_size; - auto const &S_mod = m_area_mod[maxLevel()][idim]->array(mfi); - const auto &lx = m_edge_lengths[maxLevel()][0]->array(mfi); - const auto &ly = m_edge_lengths[maxLevel()][1]->array(mfi); - const auto &lz = m_edge_lengths[maxLevel()][2]->array(mfi); + auto const &S_mod = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); + + const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); + const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); + const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); vecs_size += amrex::Scan::PrefixSum(ncells, [=] AMREX_GPU_DEVICE (int icell){ @@ -732,15 +737,15 @@ WarpX::ApplyBCKCorrection (const int idim) const amrex::Real dy = cell_size[1]; const amrex::Real dz = cell_size[2]; - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel()), amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { const amrex::Box &box = mfi.tilebox(); const amrex::Array4 &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); const amrex::Array4 &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); - const amrex::Array4 &S = m_face_areas[maxLevel()][idim]->array(mfi); - const amrex::Array4 &lx = m_face_areas[maxLevel()][0]->array(mfi); - const amrex::Array4 &ly = m_face_areas[maxLevel()][1]->array(mfi); - const amrex::Array4 &lz = m_face_areas[maxLevel()][2]->array(mfi); + const amrex::Array4 &S = m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel())->array(mfi); + const amrex::Array4 &lx = m_fields.get(FieldType::face_areas, Direction{0}, maxLevel())->array(mfi);; + const amrex::Array4 &ly = m_fields.get(FieldType::face_areas, Direction{1}, maxLevel())->array(mfi);; + const amrex::Array4 &lz = 
m_fields.get(FieldType::face_areas, Direction{2}, maxLevel())->array(mfi);; amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE(int i, int j, int k) { if (flag_ext_face(i, j, k)) { @@ -760,7 +765,7 @@ void WarpX::ShrinkBorrowing () { for(int idim = 0; idim < AMREX_SPACEDIM; idim++) { - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { auto &borrowing = (*m_borrowing[maxLevel()][idim])[mfi]; borrowing.inds.resize(borrowing.vecs_size); borrowing.neigh_faces.resize(borrowing.vecs_size); diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index f63e4eb45d3..edbc97a8efe 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -9,6 +9,7 @@ #include "EmbeddedBoundary/Enabled.H" #ifdef AMREX_USE_EB +# include "Fields.H" # include "Utils/Parser/ParserUtils.H" # include "Utils/TextMsg.H" @@ -41,6 +42,8 @@ # include # include +using namespace ablastr::fields; + #endif #ifdef AMREX_USE_EB @@ -122,7 +125,7 @@ WarpX::InitEB () #ifdef AMREX_USE_EB void -WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& edge_lengths, +WarpX::ComputeEdgeLengths (ablastr::fields::VectorField& edge_lengths, const amrex::EBFArrayBoxFactory& eb_fact) { BL_PROFILE("ComputeEdgeLengths"); @@ -184,7 +187,7 @@ WarpX::ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& ed void -WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face_areas, +WarpX::ComputeFaceAreas (VectorField& face_areas, const amrex::EBFArrayBoxFactory& eb_fact) { BL_PROFILE("ComputeFaceAreas"); @@ -238,7 +241,7 @@ WarpX::ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face void -WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengths, +WarpX::ScaleEdges (ablastr::fields::VectorField& edge_lengths, const std::array& cell_size) { BL_PROFILE("ScaleEdges"); @@ -262,8 +265,8 @@ 
WarpX::ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengt } void -WarpX::ScaleAreas (std::array< std::unique_ptr, 3 >& face_areas, - const std::array& cell_size) { +WarpX::ScaleAreas (ablastr::fields::VectorField& face_areas, + const std::array& cell_size) { BL_PROFILE("ScaleAreas"); #if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) @@ -290,7 +293,11 @@ WarpX::ScaleAreas (std::array< std::unique_ptr, 3 >& face_areas void -WarpX::MarkCells () { +WarpX::MarkCells () +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + #ifndef WARPX_DIM_RZ auto const &cell_size = CellSize(maxLevel()); @@ -306,18 +313,20 @@ WarpX::MarkCells () { continue; } #endif - for (amrex::MFIter mfi(*Bfield_fp[maxLevel()][idim]); mfi.isValid(); ++mfi) { - //amrex::Box const &box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect()); - const amrex::Box& box = mfi.tilebox(m_face_areas[maxLevel()][idim]->ixType().toIntVect(), - m_face_areas[maxLevel()][idim]->nGrowVect() ); + for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { + auto* face_areas_idim_max_lev = + m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel()); + + const amrex::Box& box = mfi.tilebox(face_areas_idim_max_lev->ixType().toIntVect(), + face_areas_idim_max_lev->nGrowVect() ); - auto const &S = m_face_areas[maxLevel()][idim]->array(mfi); + auto const &S = face_areas_idim_max_lev->array(mfi); auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); - const auto &lx = m_edge_lengths[maxLevel()][0]->array(mfi); - const auto &ly = m_edge_lengths[maxLevel()][1]->array(mfi); - const auto &lz = m_edge_lengths[maxLevel()][2]->array(mfi); - auto const &mod_areas_dim = m_area_mod[maxLevel()][idim]->array(mfi); + const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); + 
const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); + const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); + auto const &mod_areas_dim = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); const amrex::Real dx = cell_size[0]; const amrex::Real dy = cell_size[1]; @@ -381,11 +390,12 @@ WarpX::ComputeDistanceToEB () } #ifdef AMREX_USE_EB BL_PROFILE("ComputeDistanceToEB"); + using warpx::fields::FieldType; const amrex::EB2::IndexSpace& eb_is = amrex::EB2::IndexSpace::top(); for (int lev=0; lev<=maxLevel(); lev++) { const amrex::EB2::Level& eb_level = eb_is.getLevel(Geom(lev)); auto const eb_fact = fieldEBFactory(lev); - amrex::FillSignedDistance(*m_distance_to_eb[lev], eb_level, eb_fact, 1); + amrex::FillSignedDistance(*m_fields.get(FieldType::distance_to_eb, lev), eb_level, eb_fact, 1); } #endif } diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp index c1a87166920..b82cb6aff26 100644 --- a/Source/Evolve/WarpXComputeDt.cpp +++ b/Source/Evolve/WarpXComputeDt.cpp @@ -13,6 +13,7 @@ #else # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif +#include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" @@ -27,29 +28,29 @@ #include #include +/** + * Compute the minimum of array x, where x has dimension AMREX_SPACEDIM + */ +AMREX_FORCE_INLINE amrex::Real +minDim (const amrex::Real* x) +{ + return std::min({AMREX_D_DECL(x[0], x[1], x[2])}); +} + /** * Determine the timestep of the simulation. 
*/ void WarpX::ComputeDt () { // Handle cases where the timestep is not limited by the speed of light - if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None || - electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { - - std::stringstream errorMsg; - if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { - errorMsg << "warpx.const_dt must be specified with the electrostatic solver."; - } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { - errorMsg << "warpx.const_dt must be specified with the hybrid-PIC solver."; - } else { - errorMsg << "warpx.const_dt must be specified when not using a field solver."; - } - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_const_dt.has_value(), errorMsg.str()); - - for (int lev=0; lev<=max_level; lev++) { - dt[lev] = m_const_dt.value(); - } - return; + // and no constant timestep is provided + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_const_dt.has_value(), "warpx.const_dt must be specified with the hybrid-PIC solver."); + } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + m_const_dt.has_value() || dt_update_interval.isActivated(), + "warpx.const_dt must be specified with the electrostatic solver, or warpx.dt_update_interval must be > 0." 
+ ); } // Determine the appropriate timestep as limited by the speed of light @@ -58,16 +59,17 @@ WarpX::ComputeDt () if (m_const_dt.has_value()) { deltat = m_const_dt.value(); + } else if (electrostatic_solver_id != ElectrostaticSolverAlgo::None) { + // Set dt for electrostatic algorithm + if (m_max_dt.has_value()) { + deltat = m_max_dt.value(); + } else { + deltat = cfl * minDim(dx) / PhysConst::c; + } } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { // Computation of dt for spectral algorithm // (determined by the minimum cell size in all directions) -#if defined(WARPX_DIM_1D_Z) - deltat = cfl * dx[0] / PhysConst::c; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - deltat = cfl * std::min(dx[0], dx[1]) / PhysConst::c; -#else - deltat = cfl * std::min(dx[0], std::min(dx[1], dx[2])) / PhysConst::c; -#endif + deltat = cfl * minDim(dx) / PhysConst::c; } else { // Computation of dt for FDTD algorithm #ifdef WARPX_DIM_RZ @@ -99,6 +101,40 @@ WarpX::ComputeDt () } } +/** + * Determine the simulation timestep from the maximum speed of all particles + * Sets timestep so that a particle can only cross cfl*dx cells per timestep. + */ +void +WarpX::UpdateDtFromParticleSpeeds () +{ + const amrex::Real* dx = geom[max_level].CellSize(); + const amrex::Real dx_min = minDim(dx); + + const amrex::ParticleReal max_v = mypc->maxParticleVelocity(); + amrex::Real deltat_new = 0.; + + // Protections from overly-large timesteps + if (max_v == 0) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_max_dt.has_value(), "Particles at rest and no constant or maximum timestep specified. 
Aborting."); + deltat_new = m_max_dt.value(); + } else { + deltat_new = cfl * dx_min / max_v; + } + + // Restrict to be less than user-specified maximum timestep, if present + if (m_max_dt.has_value()) { + deltat_new = std::min(deltat_new, m_max_dt.value()); + } + + // Update dt + dt[max_level] = deltat_new; + + for (int lev = max_level-1; lev >= 0; --lev) { + dt[lev] = dt[lev+1] * refRatio(lev)[0]; + } +} + void WarpX::PrintDtDxDyDz () { diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index c668eac2e26..a685afd28e7 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -15,6 +15,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #include "Evolve/WarpXDtType.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #ifdef WARPX_USE_FFT # ifdef WARPX_DIM_RZ @@ -60,12 +61,43 @@ using namespace amrex; using ablastr::utils::SignalHandling; +void +WarpX::Synchronize () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + FillBoundaryE(guard_cells.ng_FieldGather); + FillBoundaryB(guard_cells.ng_FieldGather); + if (fft_do_time_averaging) + { + FillBoundaryE_avg(guard_cells.ng_FieldGather); + FillBoundaryB_avg(guard_cells.ng_FieldGather); + } + UpdateAuxilaryData(); + FillBoundaryAux(guard_cells.ng_UpdateAux); + for (int lev = 0; lev <= finest_level; ++lev) { + mypc->PushP( + lev, + 0.5_rt*dt[lev], + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); + } + is_synchronized = true; +} + void WarpX::Evolve (int numsteps) { WARPX_PROFILE_REGION("WarpX::Evolve()"); WARPX_PROFILE("WarpX::Evolve()"); + using 
ablastr::fields::Direction; + Real cur_time = t_new[0]; // Note that the default argument is numsteps = -1 @@ -95,6 +127,18 @@ WarpX::Evolve (int numsteps) CheckLoadBalance(step); + // Update timestep for electrostatic solver if a constant dt is not provided + // This first synchronizes the position and velocity before setting the new timestep + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None && + !m_const_dt.has_value() && dt_update_interval.contains(step+1)) { + if (verbose) { + amrex::Print() << Utils::TextMsg::Info("updating timestep"); + } + Synchronize(); + UpdateDtFromParticleSpeeds(); + } + + // If position and velocity are synchronized, push velocity backward one half step if (evolve_scheme == EvolveScheme::Explicit) { ExplicitFillBoundaryEBUpdateAux(); @@ -175,25 +219,9 @@ WarpX::Evolve (int numsteps) // TODO: move out if (evolve_scheme == EvolveScheme::Explicit) { + // At the end of last step, push p by 0.5*dt to synchronize if (cur_time + dt[0] >= stop_time - 1.e-3*dt[0] || step == numsteps_max-1) { - // At the end of last step, push p by 0.5*dt to synchronize - FillBoundaryE(guard_cells.ng_FieldGather); - FillBoundaryB(guard_cells.ng_FieldGather); - if (fft_do_time_averaging) - { - FillBoundaryE_avg(guard_cells.ng_FieldGather); - FillBoundaryB_avg(guard_cells.ng_FieldGather); - } - UpdateAuxilaryData(); - FillBoundaryAux(guard_cells.ng_UpdateAux); - for (int lev = 0; lev <= finest_level; ++lev) { - mypc->PushP(lev, 0.5_rt*dt[lev], - *Efield_aux[lev][0],*Efield_aux[lev][1], - *Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1], - *Bfield_aux[lev][2]); - } - is_synchronized = true; + Synchronize(); } } @@ -445,7 +473,10 @@ void WarpX::checkEarlyUnusedParams () void WarpX::ExplicitFillBoundaryEBUpdateAux () { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(evolve_scheme == EvolveScheme::Explicit, - "Cannot call WarpX::ExplicitFillBoundaryEBUpdateAux wihtout Explicit evolve scheme set!"); + "Cannot call WarpX::ExplicitFillBoundaryEBUpdateAux 
without Explicit evolve scheme set!"); + + using ablastr::fields::Direction; + using warpx::fields::FieldType; // At the beginning, we have B^{n} and E^{n}. // Particles have p^{n} and x^{n}. @@ -461,9 +492,16 @@ void WarpX::ExplicitFillBoundaryEBUpdateAux () // on first step, push p by -0.5*dt for (int lev = 0; lev <= finest_level; ++lev) { - mypc->PushP(lev, -0.5_rt*dt[lev], - *Efield_aux[lev][0],*Efield_aux[lev][1],*Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1],*Bfield_aux[lev][2]); + mypc->PushP( + lev, + -0.5_rt*dt[lev], + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } is_synchronized = false; @@ -527,9 +565,10 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num // interact the particles with EB walls (if present) if (EB::enabled()) { - mypc->ScrapeParticlesAtEB(amrex::GetVecOfConstPtrs(m_distance_to_eb)); + using warpx::fields::FieldType; + mypc->ScrapeParticlesAtEB(m_fields.get_mr_levels(FieldType::distance_to_eb, finest_level)); m_particle_boundary_buffer->gatherParticlesFromEmbeddedBoundaries( - *mypc, amrex::GetVecOfConstPtrs(m_distance_to_eb)); + *mypc, m_fields.get_mr_levels(FieldType::distance_to_eb, finest_level)); mypc->deleteInvalidParticles(); } @@ -543,23 +582,22 @@ void WarpX::HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num void WarpX::SyncCurrentAndRho () { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { if (fft_periodic_single_box) { // With periodic single box, synchronize J and rho here, // even with current correction or Vay deposition - if (current_deposition_algo == 
CurrentDepositionAlgo::Vay) - { - // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR - SyncCurrent(current_fp_vay, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); - } - else - { - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); - } + std::string const current_fp_string = (current_deposition_algo == CurrentDepositionAlgo::Vay) + ? "current_fp_vay" : "current_fp"; + // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR + + SyncCurrent(current_fp_string); + SyncRho(); + } else // no periodic single box { @@ -569,42 +607,46 @@ void WarpX::SyncCurrentAndRho () if (!current_correction && current_deposition_algo != CurrentDepositionAlgo::Vay) { - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncCurrent("current_fp"); + SyncRho(); } if (current_deposition_algo == CurrentDepositionAlgo::Vay) { // TODO This works only without mesh refinement const int lev = 0; - if (use_filter) { ApplyFilterJ(current_fp_vay, lev); } + if (use_filter) { + ApplyFilterJ(m_fields.get_mr_levels_alldirs(FieldType::current_fp_vay, finest_level), lev); + } } } } else // FDTD { - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncCurrent("current_fp"); + SyncRho(); } // Reflect charge and current density over PEC boundaries, if needed. 
for (int lev = 0; lev <= finest_level; ++lev) { - if (rho_fp[lev]) { - ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); + if (m_fields.has(FieldType::rho_fp, lev)) { + ApplyRhofieldBoundary(lev, m_fields.get(FieldType::rho_fp,lev), PatchType::fine); } - ApplyJfieldBoundary( - lev, current_fp[lev][0].get(), current_fp[lev][1].get(), - current_fp[lev][2].get(), PatchType::fine - ); + ApplyJfieldBoundary(lev, + m_fields.get(FieldType::current_fp, Direction{0}, lev), + m_fields.get(FieldType::current_fp, Direction{1}, lev), + m_fields.get(FieldType::current_fp, Direction{2}, lev), + PatchType::fine); if (lev > 0) { - if (rho_cp[lev]) { - ApplyRhofieldBoundary(lev, rho_cp[lev].get(), PatchType::coarse); + if (m_fields.has(FieldType::rho_cp, lev)) { + ApplyRhofieldBoundary(lev, m_fields.get(FieldType::rho_cp,lev), PatchType::coarse); } - ApplyJfieldBoundary( - lev, current_cp[lev][0].get(), current_cp[lev][1].get(), - current_cp[lev][2].get(), PatchType::coarse - ); + ApplyJfieldBoundary(lev, + m_fields.get(FieldType::current_cp, Direction{0}, lev), + m_fields.get(FieldType::current_cp, Direction{1}, lev), + m_fields.get(FieldType::current_cp, Direction{2}, lev), + PatchType::coarse); } } } @@ -619,6 +661,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) "multi-J algorithm not implemented for FDTD" ); + using warpx::fields::FieldType; + const int rho_mid = spectral_solver_fp[0]->m_spectral_index.rho_mid; const int rho_new = spectral_solver_fp[0]->m_spectral_index.rho_new; @@ -630,7 +674,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Initialize multi-J loop: // 1) Prepare E,B,F,G fields in spectral space - PSATDForwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDForwardTransformEB(); if (WarpX::do_dive_cleaning) { PSATDForwardTransformF(); } if (WarpX::do_divb_cleaning) { PSATDForwardTransformG(); } @@ -639,31 +683,36 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // 3) Deposit rho (in rho_new, since it will be 
moved during the loop) // (after checking that pointer to rho_fp on MR level 0 is not null) - if (rho_fp[0] && rho_in_time == RhoInTime::Linear) + if (m_fields.has(FieldType::rho_fp, 0) && rho_in_time == RhoInTime::Linear) { + ablastr::fields::MultiLevelScalarField const rho_fp = m_fields.get_mr_levels(FieldType::rho_fp, finest_level); + + std::string const rho_fp_string = "rho_fp"; + std::string const rho_cp_string = "rho_cp"; + // Deposit rho at relative time -dt // (dt[0] denotes the time step on mesh refinement level 0) mypc->DepositCharge(rho_fp, -dt[0]); // Filter, exchange boundary, and interpolate across levels - SyncRho(rho_fp, rho_cp, charge_buf); + SyncRho(); // Forward FFT of rho - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_new); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_new); } // 4) Deposit J at relative time -dt with time step dt // (dt[0] denotes the time step on mesh refinement level 0) if (J_in_time == JInTime::Linear) { - auto& current = (do_current_centering) ? current_fp_nodal : current_fp; - mypc->DepositCurrent(current, dt[0], -dt[0]); + std::string const current_string = (do_current_centering) ? "current_fp_nodal" : "current_fp"; + mypc->DepositCurrent( m_fields.get_mr_levels_alldirs(current_string, finest_level), dt[0], -dt[0]); // Synchronize J: filter, exchange boundary, and interpolate across levels. // With current centering, the nodal current is deposited in 'current', // namely 'current_fp_nodal': SyncCurrent stores the result of its centering // into 'current_fp' and then performs both filtering, if used, and exchange // of guard cells. 
- SyncCurrent(current_fp, current_cp, current_buf); + SyncCurrent("current_fp"); // Forward FFT of J - PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformJ("current_fp", "current_cp"); } // Number of depositions for multi-J scheme @@ -688,31 +737,36 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // Deposit new J at relative time t_deposit_current with time step dt // (dt[0] denotes the time step on mesh refinement level 0) - auto& current = (do_current_centering) ? current_fp_nodal : current_fp; - mypc->DepositCurrent(current, dt[0], t_deposit_current); + std::string const current_string = (do_current_centering) ? "current_fp_nodal" : "current_fp"; + mypc->DepositCurrent( m_fields.get_mr_levels_alldirs(current_string, finest_level), dt[0], t_deposit_current); // Synchronize J: filter, exchange boundary, and interpolate across levels. // With current centering, the nodal current is deposited in 'current', // namely 'current_fp_nodal': SyncCurrent stores the result of its centering // into 'current_fp' and then performs both filtering, if used, and exchange // of guard cells. 
- SyncCurrent(current_fp, current_cp, current_buf); + SyncCurrent("current_fp"); // Forward FFT of J - PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformJ("current_fp", "current_cp"); // Deposit new rho // (after checking that pointer to rho_fp on MR level 0 is not null) - if (rho_fp[0]) + if (m_fields.has(FieldType::rho_fp, 0)) { + ablastr::fields::MultiLevelScalarField const rho_fp = m_fields.get_mr_levels(FieldType::rho_fp, finest_level); + + std::string const rho_fp_string = "rho_fp"; + std::string const rho_cp_string = "rho_cp"; + // Move rho from new to old if rho is linear in time if (rho_in_time == RhoInTime::Linear) { PSATDMoveRhoNewToRhoOld(); } // Deposit rho at relative time t_deposit_charge mypc->DepositCharge(rho_fp, t_deposit_charge); // Filter, exchange boundary, and interpolate across levels - SyncRho(rho_fp, rho_cp, charge_buf); + SyncRho(); // Forward FFT of rho const int rho_idx = (rho_in_time == RhoInTime::Linear) ? rho_new : rho_mid; - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_idx); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_idx); } if (WarpX::current_correction) @@ -728,7 +782,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) // (the relative time reached here coincides with an integer full time step) if (i_deposit == n_deposit-1) { - PSATDBackwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDBackwardTransformEB(); if (WarpX::do_dive_cleaning) { PSATDBackwardTransformF(); } if (WarpX::do_divb_cleaning) { PSATDBackwardTransformG(); } } @@ -739,7 +793,12 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) { // We summed the integral of the field over 2*dt PSATDScaleAverageFields(1._rt / (2._rt*dt[0])); - PSATDBackwardTransformEBavg(Efield_avg_fp, Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp); + PSATDBackwardTransformEBavg( + m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level), + 
m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level) + ); } // Evolve fields in PML @@ -747,7 +806,7 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) { if (do_pml && pml[lev]->ok()) { - pml[lev]->PushPSATD(lev); + pml[lev]->PushPSATD(m_fields, lev); } ApplyEfieldBoundary(lev, PatchType::fine); if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse); } @@ -807,13 +866,25 @@ WarpX::OneStep_sub1 (Real cur_time) const int fine_lev = 1; const int coarse_lev = 0; + using warpx::fields::FieldType; + // i) Push particles and fields on the fine patch (first fine step) PushParticlesandDeposit(fine_lev, cur_time, DtType::FirstHalf); - RestrictCurrentFromFineToCoarsePatch(current_fp, current_cp, fine_lev); - RestrictRhoFromFineToCoarsePatch(rho_fp, rho_cp, fine_lev); - if (use_filter) { ApplyFilterJ(current_fp, fine_lev); } - SumBoundaryJ(current_fp, fine_lev, Geom(fine_lev).periodicity()); - ApplyFilterandSumBoundaryRho(rho_fp, rho_cp, fine_lev, PatchType::fine, 0, 2*ncomps); + RestrictCurrentFromFineToCoarsePatch( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); + RestrictRhoFromFineToCoarsePatch(fine_lev); + if (use_filter) { + ApplyFilterJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); + } + SumBoundaryJ( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + fine_lev, Geom(fine_lev).periodicity()); + + ApplyFilterandSumBoundaryRho( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + fine_lev, PatchType::fine, 0, 2*ncomps); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -841,8 +912,15 @@ WarpX::OneStep_sub1 (Real cur_time) // by only half a coarse 
step (first half) PushParticlesandDeposit(coarse_lev, cur_time, DtType::Full); StoreCurrent(coarse_lev); - AddCurrentFromFineLevelandSumBoundary(current_fp, current_cp, current_buf, coarse_lev); - AddRhoFromFineLevelandSumBoundary(rho_fp, rho_cp, charge_buf, coarse_lev, 0, ncomps); + AddCurrentFromFineLevelandSumBoundary( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level), coarse_lev); + AddRhoFromFineLevelandSumBoundary( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + m_fields.get_mr_levels(FieldType::rho_buf, finest_level), + coarse_lev, 0, ncomps); EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); @@ -870,11 +948,18 @@ WarpX::OneStep_sub1 (Real cur_time) // iv) Push particles and fields on the fine patch (second fine step) PushParticlesandDeposit(fine_lev, cur_time + dt[fine_lev], DtType::SecondHalf); - RestrictCurrentFromFineToCoarsePatch(current_fp, current_cp, fine_lev); - RestrictRhoFromFineToCoarsePatch(rho_fp, rho_cp, fine_lev); - if (use_filter) { ApplyFilterJ(current_fp, fine_lev); } - SumBoundaryJ(current_fp, fine_lev, Geom(fine_lev).periodicity()); - ApplyFilterandSumBoundaryRho(rho_fp, rho_cp, fine_lev, PatchType::fine, 0, ncomps); + RestrictCurrentFromFineToCoarsePatch( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); + RestrictRhoFromFineToCoarsePatch(fine_lev); + if (use_filter) { + ApplyFilterJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); + } + SumBoundaryJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev, Geom(fine_lev).periodicity()); + 
ApplyFilterandSumBoundaryRho( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + fine_lev, PatchType::fine, 0, ncomps); EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -901,8 +986,16 @@ WarpX::OneStep_sub1 (Real cur_time) // v) Push the fields on the coarse patch and mother grid // by only half a coarse step (second half) RestoreCurrent(coarse_lev); - AddCurrentFromFineLevelandSumBoundary(current_fp, current_cp, current_buf, coarse_lev); - AddRhoFromFineLevelandSumBoundary(rho_fp, rho_cp, charge_buf, coarse_lev, ncomps, ncomps); + AddCurrentFromFineLevelandSumBoundary( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level), + coarse_lev); + AddRhoFromFineLevelandSumBoundary( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level), + m_fields.get_mr_levels(FieldType::rho_buf, finest_level), + coarse_lev, ncomps, ncomps); EvolveE(fine_lev, PatchType::coarse, dt[fine_lev]); FillBoundaryE(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolver, @@ -963,9 +1056,18 @@ WarpX::doFieldIonization () void WarpX::doFieldIonization (int lev) { - mypc->doFieldIonization(lev, - *Efield_aux[lev][0],*Efield_aux[lev][1],*Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1],*Bfield_aux[lev][2]); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + mypc->doFieldIonization( + lev, + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, 
lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } #ifdef WARPX_QED @@ -980,9 +1082,18 @@ WarpX::doQEDEvents () void WarpX::doQEDEvents (int lev) { - mypc->doQedEvents(lev, - *Efield_aux[lev][0],*Efield_aux[lev][1],*Efield_aux[lev][2], - *Bfield_aux[lev][0],*Bfield_aux[lev][1],*Bfield_aux[lev][2]); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + mypc->doQedEvents( + lev, + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } #endif @@ -1000,50 +1111,53 @@ void WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, bool skip_current, PushType push_type) { - amrex::MultiFab* current_x = nullptr; - amrex::MultiFab* current_y = nullptr; - amrex::MultiFab* current_z = nullptr; + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + std::string current_fp_string; if (WarpX::do_current_centering) { - current_x = current_fp_nodal[lev][0].get(); - current_y = current_fp_nodal[lev][1].get(); - current_z = current_fp_nodal[lev][2].get(); + current_fp_string = "current_fp_nodal"; } else if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { - // Note that Vay deposition is supported only for PSATD and the code currently aborts otherwise - current_x = current_fp_vay[lev][0].get(); - current_y = current_fp_vay[lev][1].get(); - current_z = current_fp_vay[lev][2].get(); + current_fp_string = "current_fp_vay"; } else { - current_x = current_fp[lev][0].get(); - current_y = current_fp[lev][1].get(); - current_z = current_fp[lev][2].get(); + current_fp_string = "current_fp"; } - mypc->Evolve(lev, - *Efield_aux[lev][0], *Efield_aux[lev][1], *Efield_aux[lev][2], - *Bfield_aux[lev][0], 
*Bfield_aux[lev][1], *Bfield_aux[lev][2], - *current_x, *current_y, *current_z, - current_buf[lev][0].get(), current_buf[lev][1].get(), current_buf[lev][2].get(), - rho_fp[lev].get(), charge_buf[lev].get(), - Efield_cax[lev][0].get(), Efield_cax[lev][1].get(), Efield_cax[lev][2].get(), - Bfield_cax[lev][0].get(), Bfield_cax[lev][1].get(), Bfield_cax[lev][2].get(), - cur_time, dt[lev], a_dt_type, skip_current, push_type); + mypc->Evolve( + m_fields, + lev, + current_fp_string, + cur_time, + dt[lev], + a_dt_type, + skip_current, + push_type + ); if (! skip_current) { #ifdef WARPX_DIM_RZ // This is called after all particles have deposited their current and charge. - ApplyInverseVolumeScalingToCurrentDensity(current_fp[lev][0].get(), current_fp[lev][1].get(), current_fp[lev][2].get(), lev); - if (current_buf[lev][0].get()) { - ApplyInverseVolumeScalingToCurrentDensity(current_buf[lev][0].get(), current_buf[lev][1].get(), current_buf[lev][2].get(), lev-1); + ApplyInverseVolumeScalingToCurrentDensity( + m_fields.get(FieldType::current_fp, Direction{0}, lev), + m_fields.get(FieldType::current_fp, Direction{1}, lev), + m_fields.get(FieldType::current_fp, Direction{2}, lev), + lev); + if (m_fields.has_vector(FieldType::current_buf, lev)) { + ApplyInverseVolumeScalingToCurrentDensity( + m_fields.get(FieldType::current_buf, Direction{0}, lev), + m_fields.get(FieldType::current_buf, Direction{1}, lev), + m_fields.get(FieldType::current_buf, Direction{2}, lev), + lev-1); } - if (rho_fp[lev]) { - ApplyInverseVolumeScalingToChargeDensity(rho_fp[lev].get(), lev); - if (charge_buf[lev]) { - ApplyInverseVolumeScalingToChargeDensity(charge_buf[lev].get(), lev-1); + if (m_fields.has(FieldType::rho_fp, lev)) { + ApplyInverseVolumeScalingToChargeDensity(m_fields.get(FieldType::rho_fp, lev), lev); + if (m_fields.has(FieldType::rho_buf, lev)) { + ApplyInverseVolumeScalingToChargeDensity(m_fields.get(FieldType::rho_buf, lev), lev-1); } } // #else @@ -1055,10 +1169,12 @@ 
WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, // Might this be related to issue #1943? #endif if (do_fluid_species) { - myfl->Evolve(lev, - *Efield_aux[lev][0], *Efield_aux[lev][1], *Efield_aux[lev][2], - *Bfield_aux[lev][0], *Bfield_aux[lev][1], *Bfield_aux[lev][2], - rho_fp[lev].get(), *current_x, *current_y, *current_z, cur_time, skip_current); + myfl->Evolve(m_fields, + lev, + current_fp_string, + cur_time, + skip_current + ); } } } @@ -1071,6 +1187,8 @@ WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, void WarpX::applyMirrors (Real time) { + using ablastr::fields::Direction; + // something to do? if (num_mirrors == 0) { return; @@ -1097,47 +1215,31 @@ WarpX::applyMirrors (Real time) const amrex::Real dz = WarpX::CellSize(lev)[2]; const amrex::Real z_max = std::max(z_max_tmp, z_min+mirror_z_npoints[i_mirror]*dz); - // Get fine patch field MultiFabs - amrex::MultiFab& Ex = *Efield_fp[lev][0].get(); - amrex::MultiFab& Ey = *Efield_fp[lev][1].get(); - amrex::MultiFab& Ez = *Efield_fp[lev][2].get(); - amrex::MultiFab& Bx = *Bfield_fp[lev][0].get(); - amrex::MultiFab& By = *Bfield_fp[lev][1].get(); - amrex::MultiFab& Bz = *Bfield_fp[lev][2].get(); - - // Set each field to zero between z_min and z_max - NullifyMF(Ex, lev, z_min, z_max); - NullifyMF(Ey, lev, z_min, z_max); - NullifyMF(Ez, lev, z_min, z_max); - NullifyMF(Bx, lev, z_min, z_max); - NullifyMF(By, lev, z_min, z_max); - NullifyMF(Bz, lev, z_min, z_max); + // Set each field on the fine patch to zero between z_min and z_max + NullifyMF(m_fields, "Efield_fp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_fp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_fp", Direction{2}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_fp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_fp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_fp", Direction{2}, lev, z_min, z_max); // If 
div(E)/div(B) cleaning are used, set F/G field to zero - if (F_fp[lev]) { NullifyMF(*F_fp[lev], lev, z_min, z_max); } - if (G_fp[lev]) { NullifyMF(*G_fp[lev], lev, z_min, z_max); } + NullifyMF(m_fields, "F_fp", lev, z_min, z_max); + NullifyMF(m_fields, "G_fp", lev, z_min, z_max); if (lev>0) { - // Get coarse patch field MultiFabs - amrex::MultiFab& cEx = *Efield_cp[lev][0].get(); - amrex::MultiFab& cEy = *Efield_cp[lev][1].get(); - amrex::MultiFab& cEz = *Efield_cp[lev][2].get(); - amrex::MultiFab& cBx = *Bfield_cp[lev][0].get(); - amrex::MultiFab& cBy = *Bfield_cp[lev][1].get(); - amrex::MultiFab& cBz = *Bfield_cp[lev][2].get(); - - // Set each field to zero between z_min and z_max - NullifyMF(cEx, lev, z_min, z_max); - NullifyMF(cEy, lev, z_min, z_max); - NullifyMF(cEz, lev, z_min, z_max); - NullifyMF(cBx, lev, z_min, z_max); - NullifyMF(cBy, lev, z_min, z_max); - NullifyMF(cBz, lev, z_min, z_max); + // Set each field on the coarse patch to zero between z_min and z_max + NullifyMF(m_fields, "Efield_cp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_cp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Efield_cp", Direction{2}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_cp", Direction{0}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_cp", Direction{1}, lev, z_min, z_max); + NullifyMF(m_fields, "Bfield_cp", Direction{2}, lev, z_min, z_max); // If div(E)/div(B) cleaning are used, set F/G field to zero - if (F_cp[lev]) { NullifyMF(*F_cp[lev], lev, z_min, z_max); } - if (G_cp[lev]) { NullifyMF(*G_cp[lev], lev, z_min, z_max); } + NullifyMF(m_fields, "F_cp", lev, z_min, z_max); + NullifyMF(m_fields, "G_cp", lev, z_min, z_max); } } } diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H index 8d23088799f..e58af394a7a 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H +++ 
b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H @@ -50,15 +50,10 @@ public: * This function must be defined in the derived classes. */ virtual void ComputeSpaceChargeField ( - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_fp, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_cp, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& charge_buf, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& phi_fp, - [[maybe_unused]] MultiParticleContainer& mpc, - [[maybe_unused]] MultiFluidContainer* mfl, - [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - [[maybe_unused]] amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp - ) = 0; + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + int max_level) = 0; /** * \brief Set Dirichlet boundary conditions for the electrostatic solver. @@ -69,7 +64,7 @@ public: * \param[in] idim The dimension for which the Dirichlet boundary condition is set */ void setPhiBC ( - amrex::Vector>& phi, + ablastr::fields::MultiLevelScalarField const& phi, amrex::Real t ) const; @@ -91,8 +86,8 @@ public: * \param[in] verbosity The verbosity setting for the MLMG solver */ void computePhi ( - const amrex::Vector >& rho, - amrex::Vector >& phi, + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, std::array beta, amrex::Real required_precision, amrex::Real absolute_tolerance, @@ -116,8 +111,8 @@ public: * \param[in] beta Represents the velocity of the source of `phi` */ void computeE ( - amrex::Vector, 3> >& E, - const amrex::Vector >& phi, + ablastr::fields::MultiLevelVectorField const& E, + ablastr::fields::MultiLevelScalarField const& phi, std::array beta ) const; @@ -136,8 +131,8 @@ public: *\param[in] beta Represents the velocity of the source of `phi` */ void computeB ( - amrex::Vector, 3> >& B, - const amrex::Vector >& phi, + ablastr::fields::MultiLevelVectorField const& B, + 
ablastr::fields::MultiLevelScalarField const& phi, std::array beta ) const; diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp index 895615a5b21..0b1dca675be 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp @@ -8,10 +8,14 @@ */ #include "ElectrostaticSolver.H" -#include #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" + +#include + using namespace amrex; +using warpx::fields::FieldType; ElectrostaticSolver::ElectrostaticSolver (int nlevs_max) : num_levels{nlevs_max} { @@ -39,7 +43,7 @@ void ElectrostaticSolver::ReadParameters () { void ElectrostaticSolver::setPhiBC ( - amrex::Vector>& phi, + ablastr::fields::MultiLevelScalarField const& phi, amrex::Real t ) const { @@ -110,19 +114,23 @@ ElectrostaticSolver::setPhiBC ( void -ElectrostaticSolver::computePhi (const amrex::Vector >& rho, - amrex::Vector >& phi, - std::array const beta, - Real const required_precision, - Real absolute_tolerance, - int const max_iters, - int const verbosity) const { +ElectrostaticSolver::computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + std::array const beta, + Real const required_precision, + Real absolute_tolerance, + int const max_iters, + int const verbosity) const +{ + using ablastr::fields::Direction; + // create a vector to our fields, sorted by level amrex::Vector sorted_rho; amrex::Vector sorted_phi; for (int lev = 0; lev < num_levels; ++lev) { - sorted_rho.emplace_back(rho[lev].get()); - sorted_phi.emplace_back(phi[lev].get()); + sorted_rho.emplace_back(rho[lev]); + sorted_phi.emplace_back(phi[lev]); } std::optional post_phi_calculation; @@ -149,18 +157,18 @@ ElectrostaticSolver::computePhi (const amrex::Vector{ - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + 
warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) amrex::Array{ - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0), - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) } #elif defined(WARPX_DIM_3D) amrex::Array{ - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 0), - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 1), - warpx.getFieldPointer(warpx::fields::FieldType::Efield_fp, lev, 2) + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) } #endif ); @@ -193,12 +201,12 @@ ElectrostaticSolver::computePhi (const amrex::Vector, 3> >& E, - const amrex::Vector >& phi, - std::array const beta ) const +ElectrostaticSolver::computeE ( + ablastr::fields::MultiLevelVectorField const& E, + ablastr::fields::MultiLevelScalarField const& phi, + std::array beta ) const { auto & warpx = WarpX::GetInstance(); for (int lev = 0; lev < num_levels; lev++) { @@ -369,9 +378,10 @@ ElectrostaticSolver::computeE (amrex::Vector, 3> >& B, - const amrex::Vector >& phi, - std::array const beta ) const +void ElectrostaticSolver::computeB ( + ablastr::fields::MultiLevelVectorField const& B, + ablastr::fields::MultiLevelScalarField const& phi, + std::array beta) const { // return early if beta is 0 since there will be no B-field if ((beta[0] == 0._rt) && (beta[1] == 0._rt) && (beta[2] == 0._rt)) { return; } diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H index 7dc41f0a056..5606ebef2f1 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H @@ 
-22,19 +22,14 @@ public: void InitData () override; void ComputeSpaceChargeField ( - amrex::Vector< std::unique_ptr >& rho_fp, - amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, + ablastr::fields::MultiFabRegister& fields, MultiParticleContainer& mpc, MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp - ) override; + int max_level) override; void computePhiTriDiagonal ( - const amrex::Vector >& rho, - amrex::Vector >& phi + const ablastr::fields::MultiLevelScalarField& rho, + const ablastr::fields::MultiLevelScalarField& phi ); }; diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp index d14abd1848a..e973ae66975 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp @@ -9,6 +9,7 @@ #include "LabFrameExplicitES.H" #include "Fluids/MultiFluidContainer_fwd.H" #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "Particles/MultiParticleContainer_fwd.H" #include "Python/callbacks.H" #include "WarpX.H" @@ -21,35 +22,35 @@ void LabFrameExplicitES::InitData() { } void LabFrameExplicitES::ComputeSpaceChargeField ( - amrex::Vector< std::unique_ptr >& rho_fp, - amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, + ablastr::fields::MultiFabRegister& fields, MultiParticleContainer& mpc, MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& /*Bfield_fp*/ -) { + int max_level) +{ + using ablastr::fields::MultiLevelScalarField; + using ablastr::fields::MultiLevelVectorField; + using warpx::fields::FieldType; + + const MultiLevelScalarField 
rho_fp = fields.get_mr_levels(FieldType::rho_fp, max_level); + const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level); + const MultiLevelScalarField phi_fp = fields.get_mr_levels(FieldType::phi_fp, max_level); + const MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + mpc.DepositCharge(rho_fp, 0.0_rt); if (mfl) { const int lev = 0; - mfl->DepositCharge(lev, *rho_fp[lev]); + mfl->DepositCharge(fields, *rho_fp[lev], lev); } + // Apply filter, perform MPI exchange, interpolate across levels + const Vector > rho_buf(num_levels); auto & warpx = WarpX::GetInstance(); - for (int lev = 0; lev < num_levels; lev++) { - if (lev > 0) { - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } - } - } - warpx.SyncRho(rho_fp, rho_cp, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels + warpx.SyncRho( rho_fp, rho_cp, amrex::GetVecOfPtrs(rho_buf) ); #ifndef WARPX_DIM_RZ for (int lev = 0; lev < num_levels; lev++) { // Reflect density over PEC boundaries, if needed. 
- warpx.ApplyRhofieldBoundary(lev, rho_fp[lev].get(), PatchType::fine); + warpx.ApplyRhofieldBoundary(lev, rho_fp[lev], PatchType::fine); } #endif // beta is zero in lab frame @@ -94,8 +95,8 @@ void LabFrameExplicitES::ComputeSpaceChargeField ( \param[out] phi The potential to be computed by this function */ void LabFrameExplicitES::computePhiTriDiagonal ( - const amrex::Vector >& rho, - amrex::Vector >& phi) + const ablastr::fields::MultiLevelScalarField& rho, + const ablastr::fields::MultiLevelScalarField& phi) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(num_levels == 1, "The tridiagonal solver cannot be used with mesh refinement"); diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H index 70382d7ced5..cf831a7ab10 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H +++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H @@ -44,15 +44,10 @@ public: * \param[in,out] Bfield Field contribution from phi computed from each species' charge density is added */ void ComputeSpaceChargeField ( - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_fp, - [[maybe_unused]] amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, - MultiParticleContainer& mpc, - [[maybe_unused]] MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp - ) override; + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + int max_level) override; /** * Compute the charge density of the species paricle container, pc, @@ -65,10 +60,9 @@ public: * \param[in] Bfield Bfield updated to include potential computed for selected species charge density as source */ void AddSpaceChargeField ( - amrex::Vector >& charge_buf, WarpXParticleContainer& pc, - amrex::Vector, 
3>>& Efield, - amrex::Vector, 3>>& Bfield + ablastr::fields::MultiLevelVectorField& Efield_fp, + ablastr::fields::MultiLevelVectorField& Bfield_fp ); /** Compute the potential `phi` by solving the Poisson equation with the @@ -77,7 +71,7 @@ public: * \param[in] Efield Efield updated to include potential gradient from boundary condition */ void AddBoundaryField ( - amrex::Vector, 3>>& Efield + ablastr::fields::MultiLevelVectorField& Efield ); }; diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp index 1660efd48c2..69647da1702 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp @@ -6,12 +6,13 @@ * * License: BSD-3-Clause-LBNL */ -#include "WarpX.H" - #include "RelativisticExplicitES.H" +#include "Fields.H" #include "Particles/MultiParticleContainer.H" #include "Particles/WarpXParticleContainer.H" +#include "WarpX.H" + using namespace amrex; @@ -30,26 +31,27 @@ void RelativisticExplicitES::InitData () { } void RelativisticExplicitES::ComputeSpaceChargeField ( - amrex::Vector< std::unique_ptr >& rho_fp, - amrex::Vector< std::unique_ptr >& rho_cp, - amrex::Vector< std::unique_ptr >& charge_buf, - amrex::Vector< std::unique_ptr >& phi_fp, + ablastr::fields::MultiFabRegister& fields, MultiParticleContainer& mpc, - MultiFluidContainer* mfl, - amrex::Vector< std::array< std::unique_ptr, 3> >& Efield_fp, - amrex::Vector< std::array< std::unique_ptr, 3> >& Bfield_fp -) { + [[maybe_unused]] MultiFluidContainer* mfl, + int max_level) +{ WARPX_PROFILE("RelativisticExplicitES::ComputeSpaceChargeField"); - amrex::ignore_unused(rho_fp, rho_cp, phi_fp, mfl); + + using ablastr::fields::MultiLevelVectorField; + using warpx::fields::FieldType; const bool always_run_solve = (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::Relativistic); + MultiLevelVectorField Efield_fp = 
fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); + MultiLevelVectorField Bfield_fp = fields.get_mr_levels_alldirs(FieldType::Bfield_fp, max_level); + // Loop over the species and add their space-charge contribution to E and B. // Note that the fields calculated here does not include the E field // due to simulation boundary potentials for (auto const& species : mpc) { if (always_run_solve || (species->initialize_self_fields)) { - AddSpaceChargeField(charge_buf, *species, Efield_fp, Bfield_fp); + AddSpaceChargeField(*species, Efield_fp, Bfield_fp); } } @@ -61,10 +63,9 @@ void RelativisticExplicitES::ComputeSpaceChargeField ( } void RelativisticExplicitES::AddSpaceChargeField ( - amrex::Vector >& charge_buf, WarpXParticleContainer& pc, - amrex::Vector, 3>>& Efield_fp, - amrex::Vector, 3>>& Bfield_fp) + ablastr::fields::MultiLevelVectorField& Efield_fp, + ablastr::fields::MultiLevelVectorField& Bfield_fp) { WARPX_PROFILE("RelativisticExplicitES::AddSpaceChargeField"); @@ -78,9 +79,9 @@ void RelativisticExplicitES::AddSpaceChargeField ( auto & warpx = WarpX::GetInstance(); // Allocate fields for charge and potential - Vector > rho(num_levels); - Vector > rho_coarse(num_levels); // Used in order to interpolate between levels - Vector > phi(num_levels); + Vector> rho(num_levels); + Vector> rho_coarse(num_levels); // Used in order to interpolate between levels + Vector> phi(num_levels); // Use number of guard cells used for local deposition of rho const amrex::IntVect ng = warpx.get_ng_depos_rho(); for (int lev = 0; lev < num_levels; lev++) { @@ -96,9 +97,6 @@ void RelativisticExplicitES::AddSpaceChargeField ( cba.coarsen(warpx.refRatio(lev-1)); rho_coarse[lev] = std::make_unique(cba, warpx.DistributionMap(lev), 1, ng); rho_coarse[lev]->setVal(0.); - if (charge_buf[lev]) { - charge_buf[lev]->setVal(0.); - } } } // Deposit particle charge density (source of Poisson solver) @@ -108,10 +106,17 @@ void RelativisticExplicitES::AddSpaceChargeField ( bool const 
apply_boundary_and_scale_volume = true; bool const interpolate_across_levels = false; if ( !pc.do_not_deposit) { - pc.DepositCharge(rho, local, reset, apply_boundary_and_scale_volume, - interpolate_across_levels); + pc.DepositCharge(amrex::GetVecOfPtrs(rho), + local, reset, apply_boundary_and_scale_volume, + interpolate_across_levels); } - warpx.SyncRho(rho, rho_coarse, charge_buf); // Apply filter, perform MPI exchange, interpolate across levels + + // Apply filter, perform MPI exchange, interpolate across levels + const Vector> rho_buf(num_levels); + warpx.SyncRho( + amrex::GetVecOfPtrs(rho), + amrex::GetVecOfPtrs(rho_coarse), + amrex::GetVecOfPtrs(rho_buf)); // Get the particle beta vector bool const local_average = false; // Average across all MPI ranks @@ -122,25 +127,26 @@ void RelativisticExplicitES::AddSpaceChargeField ( } // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, pc.self_fields_required_precision, + computePhi( amrex::GetVecOfPtrs(rho), amrex::GetVecOfPtrs(phi), + beta, pc.self_fields_required_precision, pc.self_fields_absolute_tolerance, pc.self_fields_max_iters, pc.self_fields_verbosity ); // Compute the corresponding electric and magnetic field, from the potential phi - computeE( Efield_fp, phi, beta ); - computeB( Bfield_fp, phi, beta ); + computeE( Efield_fp, amrex::GetVecOfPtrs(phi), beta ); + computeB( Bfield_fp, amrex::GetVecOfPtrs(phi), beta ); } -void RelativisticExplicitES::AddBoundaryField (amrex::Vector, 3>>& Efield_fp) +void RelativisticExplicitES::AddBoundaryField (ablastr::fields::MultiLevelVectorField& Efield_fp) { WARPX_PROFILE("RelativisticExplicitES::AddBoundaryField"); auto & warpx = WarpX::GetInstance(); // Allocate fields for charge and potential - amrex::Vector > rho(num_levels); - amrex::Vector > phi(num_levels); + Vector> rho(num_levels); + Vector> phi(num_levels); // Use number of guard cells used for local deposition of rho const amrex::IntVect ng = warpx.get_ng_depos_rho(); for 
(int lev = 0; lev < num_levels; lev++) { @@ -153,16 +159,17 @@ void RelativisticExplicitES::AddBoundaryField (amrex::Vector beta = {0._rt}; // Compute the potential phi, by solving the Poisson equation - computePhi( rho, phi, beta, self_fields_required_precision, + computePhi( amrex::GetVecOfPtrs(rho), amrex::GetVecOfPtrs(phi), + beta, self_fields_required_precision, self_fields_absolute_tolerance, self_fields_max_iters, self_fields_verbosity ); // Compute the corresponding electric field, from the potential phi. - computeE( Efield_fp, phi, beta ); + computeE( Efield_fp, amrex::GetVecOfPtrs(phi), beta ); } diff --git a/Source/FieldSolver/Fields.H b/Source/FieldSolver/Fields.H deleted file mode 100644 index 9e4ce5a71a7..00000000000 --- a/Source/FieldSolver/Fields.H +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright 2024 Luca Fedeli - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#ifndef WARPX_FIELDS_H_ -#define WARPX_FIELDS_H_ - -#include -#include - -namespace warpx::fields -{ - enum struct FieldType : int - { - None, - Efield_aux, - Bfield_aux, - Efield_fp, - Bfield_fp, - Efield_fp_external, - Bfield_fp_external, - current_fp, - current_fp_nodal, - rho_fp, - F_fp, - G_fp, - phi_fp, - vector_potential_fp, - Efield_cp, - Bfield_cp, - current_cp, - rho_cp, - F_cp, - G_cp, - edge_lengths, - face_areas, - Efield_avg_fp, - Bfield_avg_fp, - Efield_avg_cp, - Bfield_avg_cp - }; - - constexpr FieldType ArrayFieldTypes[] = { - FieldType::Efield_aux, FieldType::Bfield_aux, FieldType::Efield_fp, FieldType::Bfield_fp, - FieldType::current_fp, FieldType::current_fp_nodal, FieldType::vector_potential_fp, - FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, - FieldType::Efield_avg_fp, FieldType::Bfield_avg_fp, FieldType::Efield_avg_cp, FieldType::Bfield_avg_cp}; - - inline bool - isFieldArray (const FieldType field_type) - { - return std::any_of( std::begin(ArrayFieldTypes), std::end(ArrayFieldTypes), - [field_type](const FieldType& f) { 
return f == field_type; }); - } - -} - -#endif //WARPX_FIELDS_H_ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp index e6f010e6f44..e72260fcf4f 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ApplySilverMuellerBoundary.cpp @@ -35,13 +35,15 @@ using namespace amrex; * \brief Update the B field at the boundary, using the Silver-Mueller condition */ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 >& Bfield, + ablastr::fields::VectorField& Efield, + ablastr::fields::VectorField& Bfield, amrex::Box domain_box, amrex::Real const dt, amrex::Array field_boundary_lo, amrex::Array field_boundary_hi) { + using ablastr::fields::Direction; + // Ensure that we are using the Yee solver WARPX_ALWAYS_ASSERT_WITH_MESSAGE( m_fdtd_algo == ElectromagneticSolverAlgo::Yee, @@ -79,14 +81,14 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( // tiling is usually set by TilingIfNotGPU() // but here, we set it to false because of potential race condition, // since we grow the tiles by one guard cell after creating them. 
- for ( MFIter mfi(*Efield[0], false); mfi.isValid(); ++mfi ) { + for ( MFIter mfi(*Efield[Direction{0}], false); mfi.isValid(); ++mfi ) { // Extract field data for this grid/tile - Array4 const& Er = Efield[0]->array(mfi); - Array4 const& Et = Efield[1]->array(mfi); - Array4 const& Ez = Efield[2]->array(mfi); - Array4 const& Br = Bfield[0]->array(mfi); - Array4 const& Bt = Bfield[1]->array(mfi); - Array4 const& Bz = Bfield[2]->array(mfi); + Array4 const& Er = Efield[Direction{0}]->array(mfi); + Array4 const& Et = Efield[Direction{1}]->array(mfi); + Array4 const& Ez = Efield[Direction{2}]->array(mfi); + Array4 const& Br = Bfield[Direction{0}]->array(mfi); + Array4 const& Bt = Bfield[Direction{1}]->array(mfi); + Array4 const& Bz = Bfield[Direction{2}]->array(mfi); // Extract tileboxes for which to loop Box tbr = mfi.tilebox(Bfield[0]->ixType().toIntVect()); @@ -203,18 +205,18 @@ void FiniteDifferenceSolver::ApplySilverMuellerBoundary ( // tiling is usually set by TilingIfNotGPU() // but here, we set it to false because of potential race condition, // since we grow the tiles by one guard cell after creating them. 
- for ( MFIter mfi(*Efield[0], false); mfi.isValid(); ++mfi ) { + for ( MFIter mfi(*Efield[Direction{0}], false); mfi.isValid(); ++mfi ) { // Extract field data for this grid/tile - Array4 const& Ex = Efield[0]->array(mfi); - Array4 const& Ey = Efield[1]->array(mfi); + Array4 const& Ex = Efield[Direction{0}]->array(mfi); + Array4 const& Ey = Efield[Direction{1}]->array(mfi); #ifndef WARPX_DIM_1D_Z - Array4 const& Ez = Efield[2]->array(mfi); + Array4 const& Ez = Efield[Direction{2}]->array(mfi); #endif - Array4 const& Bx = Bfield[0]->array(mfi); - Array4 const& By = Bfield[1]->array(mfi); + Array4 const& Bx = Bfield[Direction{0}]->array(mfi); + Array4 const& By = Bfield[Direction{1}]->array(mfi); #ifndef WARPX_DIM_1D_Z - Array4 const& Bz = Bfield[2]->array(mfi); + Array4 const& Bz = Bfield[Direction{2}]->array(mfi); #endif // Extract the tileboxes for which to loop diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp index 0702b264874..3f757603845 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeDivE.cpp @@ -16,6 +16,8 @@ # include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif +#include + #include #include #include @@ -40,9 +42,10 @@ using namespace amrex; * \brief Update the F field, over one timestep */ void FiniteDifferenceSolver::ComputeDivE ( - const std::array,3>& Efield, - amrex::MultiFab& divEfield ) { - + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divEfield +) +{ // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ @@ -77,7 +80,7 @@ void FiniteDifferenceSolver::ComputeDivE ( template void FiniteDifferenceSolver::ComputeDivECartesian ( - const std::array,3>& Efield, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divEfield ) { // Loop through the grids, and over 
the tiles within each grid @@ -123,9 +126,10 @@ void FiniteDifferenceSolver::ComputeDivECartesian ( template void FiniteDifferenceSolver::ComputeDivECylindrical ( - const std::array,3>& Efield, - amrex::MultiFab& divEfield ) { - + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divEfield +) +{ // Loop through the grids, and over the tiles within each grid #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index 4a71afda671..c6a1e206200 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -7,6 +7,7 @@ #include "FiniteDifferenceSolver.H" #include "EmbeddedBoundary/WarpXFaceInfoBox.H" +#include "Fields.H" #ifndef WARPX_DIM_RZ # include "FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H" # include "FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H" @@ -48,17 +49,21 @@ using namespace amrex; * \brief Update the B field, over one timestep */ void FiniteDifferenceSolver::EvolveB ( - [[maybe_unused]] std::array< std::unique_ptr, 3 >& Bfield, - [[maybe_unused]] std::array< std::unique_ptr, 3 > const& Efield, - [[maybe_unused]] std::unique_ptr const& Gfield, - [[maybe_unused]] std::array< std::unique_ptr, 3 > const& face_areas, - [[maybe_unused]] std::array< std::unique_ptr, 3 > const& area_mod, - [[maybe_unused]] std::array< std::unique_ptr, 3 >& ECTRhofield, - [[maybe_unused]] std::array< std::unique_ptr, 3 >& Venl, + ablastr::fields::MultiFabRegister& fields, + int lev, + PatchType patch_type, [[maybe_unused]] std::array< std::unique_ptr, 3 >& flag_info_cell, [[maybe_unused]] std::array< std::unique_ptr >, 3 >& borrowing, - [[maybe_unused]] int lev, - [[maybe_unused]] amrex::Real const dt ) { + [[maybe_unused]] amrex::Real const dt ) +{ + + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + const 
ablastr::fields::VectorField Bfield = patch_type == PatchType::fine ? + fields.get_alldirs(FieldType::Bfield_fp, lev) : fields.get_alldirs(FieldType::Bfield_cp, lev); + const ablastr::fields::VectorField Efield = patch_type == PatchType::fine ? + fields.get_alldirs(FieldType::Efield_fp, lev) : fields.get_alldirs(FieldType::Efield_cp, lev); // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) @@ -68,6 +73,28 @@ void FiniteDifferenceSolver::EvolveB ( EvolveBCylindrical ( Bfield, Efield, lev, dt ); #else + amrex::MultiFab const * Gfield = nullptr; + if (fields.has(FieldType::G_fp, lev)) { + Gfield = patch_type == PatchType::fine ? + fields.get(FieldType::G_fp, lev) : fields.get(FieldType::G_cp, lev); + } + ablastr::fields::VectorField face_areas; + if (fields.has_vector(FieldType::face_areas, lev)) { + face_areas = fields.get_alldirs(FieldType::face_areas, lev); + } + ablastr::fields::VectorField area_mod; + if (fields.has_vector(FieldType::area_mod, lev)) { + area_mod = fields.get_alldirs(FieldType::area_mod, lev); + } + ablastr::fields::VectorField ECTRhofield; + if (fields.has_vector(FieldType::ECTRhofield, lev)) { + ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); + } + ablastr::fields::VectorField Venl; + if (fields.has_vector(FieldType::Venl, lev)) { + Venl = fields.get_alldirs(FieldType::Venl, lev); + } + if (m_grid_type == GridType::Collocated) { EvolveBCartesian ( Bfield, Efield, Gfield, lev, dt ); @@ -94,9 +121,9 @@ void FiniteDifferenceSolver::EvolveB ( template void FiniteDifferenceSolver::EvolveBCartesian ( - std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& Gfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Efield, + amrex::MultiFab const * Gfield, int lev, amrex::Real const dt ) { amrex::LayoutData* cost = WarpX::getCosts(lev); @@ -162,7 +189,7 @@ void 
FiniteDifferenceSolver::EvolveBCartesian ( if (Gfield) { // Extract field data for this grid/tile - const Array4 G = Gfield->array(mfi); + Array4 const G = Gfield->array(mfi); // Loop over cells and update G amrex::ParallelFor(tbx, tby, tbz, @@ -193,11 +220,11 @@ void FiniteDifferenceSolver::EvolveBCartesian ( void FiniteDifferenceSolver::EvolveBCartesianECT ( - std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 > const& area_mod, - std::array< std::unique_ptr, 3 >& ECTRhofield, - std::array< std::unique_ptr, 3 >& Venl, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& area_mod, + ablastr::fields::VectorField const& ECTRhofield, + ablastr::fields::VectorField const& Venl, std::array< std::unique_ptr, 3 >& flag_info_cell, std::array< std::unique_ptr >, 3 >& borrowing, const int lev, amrex::Real const dt ) { @@ -359,8 +386,8 @@ void FiniteDifferenceSolver::EvolveBCartesianECT ( template void FiniteDifferenceSolver::EvolveBCylindrical ( - std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Efield, int lev, amrex::Real const dt ) { amrex::LayoutData* cost = WarpX::getCosts(lev); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp index 0ad2c8d6802..e3289d52cfe 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveBPML.cpp @@ -7,6 +7,7 @@ #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include "BoundaryConditions/PMLComponent.H" +#include "Fields.H" #ifndef WARPX_DIM_RZ # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H" @@ -41,18 +42,27 @@ using namespace amrex; * \brief 
Update the B field, over one timestep */ void FiniteDifferenceSolver::EvolveBPML ( - std::array< amrex::MultiFab*, 3 > Bfield, - std::array< amrex::MultiFab*, 3 > const Efield, + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int level, amrex::Real const dt, - const bool dive_cleaning) { + const bool dive_cleaning +) +{ + using warpx::fields::FieldType; // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ - amrex::ignore_unused(Bfield, Efield, dt, dive_cleaning); + amrex::ignore_unused(fields, patch_type, level, dt, dive_cleaning); WARPX_ABORT_WITH_MESSAGE( "PML are not implemented in cylindrical geometry."); #else + const ablastr::fields::VectorField Bfield = (patch_type == PatchType::fine) ? + fields.get_alldirs(FieldType::pml_B_fp, level) : fields.get_alldirs(FieldType::pml_B_cp, level); + const ablastr::fields::VectorField Efield = (patch_type == PatchType::fine) ? + fields.get_alldirs(FieldType::pml_E_fp, level) : fields.get_alldirs(FieldType::pml_E_cp, level); + if (m_grid_type == ablastr::utils::enums::GridType::Collocated) { EvolveBPMLCartesian (Bfield, Efield, dt, dive_cleaning); @@ -78,7 +88,7 @@ void FiniteDifferenceSolver::EvolveBPML ( template void FiniteDifferenceSolver::EvolveBPMLCartesian ( std::array< amrex::MultiFab*, 3 > Bfield, - std::array< amrex::MultiFab*, 3 > const Efield, + ablastr::fields::VectorField const Efield, amrex::Real const dt, const bool dive_cleaning) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index 566a81da021..03a9866fb98 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -6,6 +6,7 @@ */ #include "FiniteDifferenceSolver.H" +#include "Fields.H" #ifndef WARPX_DIM_RZ # include 
"FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H" # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CartesianCKCAlgorithm.H" @@ -19,6 +20,8 @@ #include "Utils/WarpXConst.H" #include "WarpX.H" +#include + #include #include #include @@ -42,22 +45,48 @@ #include using namespace amrex; +using namespace ablastr::fields; /** * \brief Update the E field, over one timestep */ void FiniteDifferenceSolver::EvolveE ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 >& ECTRhofield, - std::unique_ptr const& Ffield, - int lev, amrex::Real const dt ) { + ablastr::fields::MultiFabRegister & fields, + int lev, + PatchType patch_type, + ablastr::fields::VectorField const& Efield, + amrex::Real const dt +) +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + const ablastr::fields::VectorField Bfield = patch_type == PatchType::fine ? + fields.get_alldirs(FieldType::Bfield_fp, lev) : fields.get_alldirs(FieldType::Bfield_cp, lev); + const ablastr::fields::VectorField Jfield = patch_type == PatchType::fine ? + fields.get_alldirs(FieldType::current_fp, lev) : fields.get_alldirs(FieldType::current_cp, lev); + + amrex::MultiFab* Ffield = nullptr; + if (fields.has(FieldType::F_fp, lev)) { + Ffield = patch_type == PatchType::fine ? 
+ fields.get(FieldType::F_fp, lev) : fields.get(FieldType::F_cp, lev); + } - if (m_fdtd_algo != ElectromagneticSolverAlgo::ECT) { - amrex::ignore_unused(face_areas, ECTRhofield); + ablastr::fields::VectorField edge_lengths; + if (fields.has_vector(FieldType::edge_lengths, lev)) { + edge_lengths = fields.get_alldirs(FieldType::edge_lengths, lev); + } + ablastr::fields::VectorField face_areas; + if (fields.has_vector(FieldType::face_areas, lev)) { + face_areas = fields.get_alldirs(FieldType::face_areas, lev); + } + ablastr::fields::VectorField area_mod; + if (fields.has_vector(FieldType::area_mod, lev)) { + area_mod = fields.get_alldirs(FieldType::area_mod, lev); + } + ablastr::fields::VectorField ECTRhofield; + if (fields.has_vector(FieldType::ECTRhofield, lev)) { + ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); } // Select algorithm (The choice of algorithm is a runtime option, @@ -90,11 +119,11 @@ void FiniteDifferenceSolver::EvolveE ( template void FiniteDifferenceSolver::EvolveECartesian ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::unique_ptr const& Ffield, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + VectorField const& edge_lengths, + amrex::MultiFab const* Ffield, int lev, amrex::Real const dt ) { #ifndef AMREX_USE_EB @@ -191,7 +220,7 @@ void FiniteDifferenceSolver::EvolveECartesian ( if (Ffield) { // Extract field data for this grid/tile - const Array4 F = Ffield->array(mfi); + const Array4 F = Ffield->array(mfi); // Loop over the cells and update the fields amrex::ParallelFor(tex, tey, tez, @@ -224,11 +253,11 @@ void FiniteDifferenceSolver::EvolveECartesian ( template void FiniteDifferenceSolver::EvolveECylindrical ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< 
std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::unique_ptr const& Ffield, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, + amrex::MultiFab const* Ffield, int lev, amrex::Real const dt ) { #ifndef AMREX_USE_EB @@ -391,7 +420,7 @@ void FiniteDifferenceSolver::EvolveECylindrical ( if (Ffield) { // Extract field data for this grid/tile - const Array4 F = Ffield->array(mfi); + const Array4 F = Ffield->array(mfi); // Loop over the cells and update the fields amrex::ParallelFor(ter, tet, tez, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp index 8abdab71300..0740a190bec 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveECTRho.cpp @@ -42,15 +42,16 @@ #include using namespace amrex; +using namespace ablastr::fields; /** * \brief Update the B field, over one timestep */ void FiniteDifferenceSolver::EvolveECTRho ( - std::array< std::unique_ptr, 3 > const& Efield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 >& ECTRhofield, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& ECTRhofield, const int lev) { #if !defined(WARPX_DIM_RZ) and defined(AMREX_USE_EB) @@ -67,10 +68,10 @@ void FiniteDifferenceSolver::EvolveECTRho ( // If we implement ECT in 1D we will need to take care of this #ifndef differently #ifndef WARPX_DIM_RZ void FiniteDifferenceSolver::EvolveRhoCartesianECT ( - std::array< std::unique_ptr, 3 > const& Efield, - std::array< std::unique_ptr, 
3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 >& ECTRhofield, const int lev ) { + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& ECTRhofield, const int lev ) { #ifdef AMREX_USE_EB #if !(defined(WARPX_DIM_3D) || defined(WARPX_DIM_XZ)) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp index a1ba6e44a8c..7a1a05d560d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp @@ -17,6 +17,7 @@ # include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" #endif #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXConst.H" @@ -45,21 +46,38 @@ using namespace amrex; * \brief Update the E field, over one timestep */ void FiniteDifferenceSolver::EvolveEPML ( - std::array< amrex::MultiFab*, 3 > Efield, - std::array< amrex::MultiFab*, 3 > const Bfield, - std::array< amrex::MultiFab*, 3 > const Jfield, - std::array< amrex::MultiFab*, 3 > const edge_lengths, - amrex::MultiFab* const Ffield, + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int level, MultiSigmaBox const& sigba, amrex::Real const dt, bool pml_has_particles ) { // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ - amrex::ignore_unused(Efield, Bfield, Jfield, Ffield, sigba, dt, pml_has_particles, edge_lengths); + amrex::ignore_unused(fields, patch_type, level, sigba, dt, pml_has_particles); WARPX_ABORT_WITH_MESSAGE( "PML are not implemented in cylindrical geometry."); #else + using ablastr::fields::Direction; + using 
warpx::fields::FieldType; + + const ablastr::fields::VectorField Efield = (patch_type == PatchType::fine) ? + fields.get_alldirs(FieldType::pml_E_fp, level) : fields.get_alldirs(FieldType::pml_E_cp, level); + const ablastr::fields::VectorField Bfield = (patch_type == PatchType::fine) ? + fields.get_alldirs(FieldType::pml_B_fp, level) : fields.get_alldirs(FieldType::pml_B_cp, level); + const ablastr::fields::VectorField Jfield = (patch_type == PatchType::fine) ? + fields.get_alldirs(FieldType::pml_j_fp, level) : fields.get_alldirs(FieldType::pml_j_cp, level); + ablastr::fields::VectorField edge_lengths; + if (fields.has_vector(FieldType::pml_edge_lengths, level)) { + edge_lengths = fields.get_alldirs(FieldType::pml_edge_lengths, level); + } + amrex::MultiFab * Ffield = nullptr; + if (fields.has(FieldType::pml_F_fp, level)) { + Ffield = (patch_type == PatchType::fine) ? + fields.get(FieldType::pml_F_fp, level) : fields.get(FieldType::pml_F_cp, level); + } + if (m_grid_type == GridType::Collocated) { EvolveEPMLCartesian ( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp index 8ce578bb52a..c7f836e47ec 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveF.cpp @@ -44,9 +44,9 @@ using namespace amrex; * \brief Update the F field, over one timestep */ void FiniteDifferenceSolver::EvolveF ( - std::unique_ptr& Ffield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& rhofield, + amrex::MultiFab* Ffield, + ablastr::fields::VectorField const& Efield, + amrex::MultiFab* const rhofield, int const rhocomp, amrex::Real const dt ) { @@ -82,9 +82,9 @@ void FiniteDifferenceSolver::EvolveF ( template void FiniteDifferenceSolver::EvolveFCartesian ( - std::unique_ptr& Ffield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& rhofield, + amrex::MultiFab* Ffield, + ablastr::fields::VectorField const 
Efield, + amrex::MultiFab* const rhofield, int const rhocomp, amrex::Real const dt ) { @@ -135,9 +135,9 @@ void FiniteDifferenceSolver::EvolveFCartesian ( template void FiniteDifferenceSolver::EvolveFCylindrical ( - std::unique_ptr& Ffield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& rhofield, + amrex::MultiFab* Ffield, + ablastr::fields::VectorField const & Efield, + amrex::MultiFab* const rhofield, int const rhocomp, amrex::Real const dt ) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp index 4ef056c937a..f14a42f451b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveFPML.cpp @@ -41,7 +41,7 @@ using namespace amrex; */ void FiniteDifferenceSolver::EvolveFPML ( amrex::MultiFab* Ffield, - std::array< amrex::MultiFab*, 3 > const Efield, + ablastr::fields::VectorField const Efield, amrex::Real const dt ) { // Select algorithm (The choice of algorithm is a runtime option, @@ -75,7 +75,7 @@ void FiniteDifferenceSolver::EvolveFPML ( template void FiniteDifferenceSolver::EvolveFPMLCartesian ( amrex::MultiFab* Ffield, - std::array< amrex::MultiFab*, 3 > const Efield, + ablastr::fields::VectorField const Efield, amrex::Real const dt ) { // Loop through the grids, and over the tiles within each grid diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp index b6bc8fdca7f..759644201bc 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveG.cpp @@ -38,8 +38,8 @@ using namespace amrex; void FiniteDifferenceSolver::EvolveG ( - std::unique_ptr& Gfield, - std::array,3> const& Bfield, + amrex::MultiFab* Gfield, + ablastr::fields::VectorField const& Bfield, amrex::Real const dt) { #ifdef WARPX_DIM_RZ @@ -70,8 +70,8 @@ void FiniteDifferenceSolver::EvolveG ( template 
void FiniteDifferenceSolver::EvolveGCartesian ( - std::unique_ptr& Gfield, - std::array,3> const& Bfield, + amrex::MultiFab* Gfield, + ablastr::fields::VectorField const& Bfield, amrex::Real const dt) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 0a9f21e6863..45c06584fda 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -18,6 +18,7 @@ #include "MacroscopicProperties/MacroscopicProperties_fwd.H" #include +#include #include #include @@ -51,52 +52,47 @@ class FiniteDifferenceSolver std::array cell_size, ablastr::utils::enums::GridType grid_type ); - void EvolveB ( std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& Gfield, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 > const& area_mod, - std::array< std::unique_ptr, 3 >& ECTRhofield, - std::array< std::unique_ptr, 3 >& Venl, + void EvolveB ( ablastr::fields::MultiFabRegister& fields, + int lev, + PatchType patch_type, std::array< std::unique_ptr, 3 >& flag_info_cell, std::array< std::unique_ptr >, 3 >& borrowing, - int lev, amrex::Real dt ); - - void EvolveE ( std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 >& ECTRhofield, - std::unique_ptr const& Ffield, - int lev, amrex::Real dt ); - - void EvolveF ( std::unique_ptr& Ffield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& rhofield, + amrex::Real dt ); + + void EvolveE ( ablastr::fields::MultiFabRegister & fields, + int lev, + PatchType patch_type, + ablastr::fields::VectorField const& 
Efield, + amrex::Real dt ); + + void EvolveF ( amrex::MultiFab* Ffield, + ablastr::fields::VectorField const& Efield, + amrex::MultiFab* rhofield, int rhocomp, amrex::Real dt ); - void EvolveG (std::unique_ptr& Gfield, - std::array,3> const& Bfield, + void EvolveG (amrex::MultiFab* Gfield, + ablastr::fields::VectorField const& Bfield, amrex::Real dt); - void EvolveECTRho ( std::array< std::unique_ptr, 3 > const& Efield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 >& ECTRhofield, + void EvolveECTRho ( ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& ECTRhofield, int lev ); - void ApplySilverMuellerBoundary( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 >& Bfield, + void ApplySilverMuellerBoundary ( + ablastr::fields::VectorField & Efield, + ablastr::fields::VectorField & Bfield, amrex::Box domain_box, amrex::Real dt, amrex::Array field_boundary_lo, amrex::Array field_boundary_hi); - void ComputeDivE ( const std::array,3>& Efield, - amrex::MultiFab& divE ); + void ComputeDivE ( + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE + ); /** * \brief Macroscopic E-update for non-vacuum medium using the user-selected @@ -110,29 +106,34 @@ class FiniteDifferenceSolver * \param[in] dt timestep of the simulation * \param[in] macroscopic_properties contains user-defined properties of the medium. 
*/ - void MacroscopicEvolveE ( std::array< std::unique_ptr, 3>& Efield, - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + void MacroscopicEvolveE ( + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, amrex::Real dt, std::unique_ptr const& macroscopic_properties); - void EvolveBPML ( std::array< amrex::MultiFab*, 3 > Bfield, - std::array< amrex::MultiFab*, 3 > Efield, - amrex::Real dt, - bool dive_cleaning); + void EvolveBPML ( + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int level, + amrex::Real dt, + bool dive_cleaning + ); - void EvolveEPML ( std::array< amrex::MultiFab*, 3 > Efield, - std::array< amrex::MultiFab*, 3 > Bfield, - std::array< amrex::MultiFab*, 3 > Jfield, - std::array< amrex::MultiFab*, 3 > edge_lengths, - amrex::MultiFab* Ffield, - MultiSigmaBox const& sigba, - amrex::Real dt, bool pml_has_particles ); + void EvolveEPML ( + ablastr::fields::MultiFabRegister& fields, + PatchType patch_type, + int level, + MultiSigmaBox const& sigba, + amrex::Real dt, + bool pml_has_particles + ); void EvolveFPML ( amrex::MultiFab* Ffield, - std::array< amrex::MultiFab*, 3 > Efield, - amrex::Real dt ); + ablastr::fields::VectorField Efield, + amrex::Real dt ); /** * \brief E-update in the hybrid PIC algorithm as described in @@ -140,9 +141,8 @@ class FiniteDifferenceSolver * https://link.springer.com/chapter/10.1007/3-540-36530-3_8 * * \param[out] Efield vector of electric field MultiFabs updated at a given level - * \param[in] Jfield vector of total current MultiFabs at a given level + * \param[in] Jfield vector of total plasma current MultiFabs at a given level * \param[in] Jifield vector of ion current density MultiFabs at a given level - * \param[in] Jextfield vector of external current density 
MultiFabs at a given level * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] rhofield scalar ion charge density Multifab at a given level * \param[in] Pefield scalar electron pressure MultiFab at a given level @@ -151,16 +151,15 @@ class FiniteDifferenceSolver * \param[in] hybrid_model instance of the hybrid-PIC model * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation */ - void HybridPICSolveE ( std::array< std::unique_ptr, 3>& Efield, - std::array< std::unique_ptr, 3>& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - int lev, HybridPICModel const* hybrid_model, - bool solve_for_Faraday ); + void HybridPICSolveE ( ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField & Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, + int lev, HybridPICModel const* hybrid_model, + bool solve_for_Faraday ); /** * \brief Calculation of total current using Ampere's law (without @@ -172,9 +171,9 @@ class FiniteDifferenceSolver * \param[in] lev level number for the calculation */ void CalculateCurrentAmpere ( - std::array< std::unique_ptr, 3>& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ); private: @@ -206,98 +205,98 @@ class FiniteDifferenceSolver #ifdef WARPX_DIM_RZ template< typename T_Algo > void EvolveBCylindrical ( - std::array< std::unique_ptr, 3 >& 
Bfield, - std::array< std::unique_ptr, 3 > const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Efield, int lev, amrex::Real dt ); template< typename T_Algo > void EvolveECylindrical ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::unique_ptr const& Ffield, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, + amrex::MultiFab const* Ffield, int lev, amrex::Real dt ); template< typename T_Algo > void EvolveFCylindrical ( - std::unique_ptr& Ffield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& rhofield, + amrex::MultiFab* Ffield, + ablastr::fields::VectorField const & Efield, + amrex::MultiFab* rhofield, int rhocomp, amrex::Real dt ); template< typename T_Algo > void ComputeDivECylindrical ( - const std::array,3>& Efield, - amrex::MultiFab& divE ); + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE + ); template void HybridPICSolveECylindrical ( - std::array< std::unique_ptr, 3>& Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); 
template void CalculateCurrentAmpereCylindrical ( - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ); #else template< typename T_Algo > void EvolveBCartesian ( - std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& Gfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Efield, + amrex::MultiFab const * Gfield, int lev, amrex::Real dt ); template< typename T_Algo > void EvolveECartesian ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::unique_ptr const& Ffield, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, + amrex::MultiFab const* Ffield, int lev, amrex::Real dt ); template< typename T_Algo > void EvolveFCartesian ( - std::unique_ptr& Ffield, - std::array< std::unique_ptr, 3 > const& Efield, - std::unique_ptr const& rhofield, + amrex::MultiFab* Ffield, + ablastr::fields::VectorField Efield, + amrex::MultiFab* rhofield, int rhocomp, amrex::Real dt ); template< typename T_Algo > void EvolveGCartesian ( - std::unique_ptr& Gfield, - std::array,3> const& Bfield, + amrex::MultiFab* Gfield, + ablastr::fields::VectorField const& Bfield, amrex::Real dt); void EvolveRhoCartesianECT ( - std::array< std::unique_ptr, 3 > const& Efield, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 >& ECTRhofield, int lev); + ablastr::fields::VectorField const& 
Efield, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& ECTRhofield, int lev); void EvolveBCartesianECT ( - std::array< std::unique_ptr, 3 >& Bfield, - std::array< std::unique_ptr, 3 > const& face_areas, - std::array< std::unique_ptr, 3 > const& area_mod, - std::array< std::unique_ptr, 3 >& ECTRhofield, - std::array< std::unique_ptr, 3 >& Venl, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& area_mod, + ablastr::fields::VectorField const& ECTRhofield, + ablastr::fields::VectorField const& Venl, std::array< std::unique_ptr, 3 >& flag_info_cell, std::array< std::unique_ptr >, 3 >& borrowing, int lev, amrex::Real dt @@ -305,28 +304,28 @@ class FiniteDifferenceSolver template< typename T_Algo > void ComputeDivECartesian ( - const std::array,3>& Efield, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE ); template< typename T_Algo, typename T_MacroAlgo > void MacroscopicEvolveECartesian ( - std::array< std::unique_ptr< amrex::MultiFab>, 3>& Efield, - std::array< std::unique_ptr< amrex::MultiFab>, 3> const& Bfield, - std::array< std::unique_ptr< amrex::MultiFab>, 3> const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, amrex::Real dt, std::unique_ptr const& macroscopic_properties); template< typename T_Algo > void EvolveBPMLCartesian ( std::array< amrex::MultiFab*, 3 > Bfield, - std::array< amrex::MultiFab*, 3 > Efield, + ablastr::fields::VectorField Efield, amrex::Real dt, bool dive_cleaning); template< typename T_Algo > void EvolveEPMLCartesian ( - std::array< amrex::MultiFab*, 3 > Efield, + ablastr::fields::VectorField Efield, std::array< amrex::MultiFab*, 3 > Bfield, 
std::array< amrex::MultiFab*, 3 > Jfield, std::array< amrex::MultiFab*, 3 > edge_lengths, @@ -336,27 +335,26 @@ class FiniteDifferenceSolver template< typename T_Algo > void EvolveFPMLCartesian ( amrex::MultiFab* Ffield, - std::array< amrex::MultiFab*, 3 > Efield, - amrex::Real dt ); + ablastr::fields::VectorField Efield, + amrex::Real dt ); template void HybridPICSolveECartesian ( - std::array< std::unique_ptr, 3>& Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); template void CalculateCurrentAmpereCartesian ( - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ); #endif diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 3a49d5fad4b..ec4a53b2edd 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -19,6 +19,8 @@ #include "Utils/WarpXConst.H" #include "Utils/WarpXProfilerWrapper.H" +#include + #include #include @@ -31,21 +33,18 @@ class 
HybridPICModel { public: - HybridPICModel (int nlevs_max); // constructor + HybridPICModel (); /** Read user-defined model parameters. Called in constructor. */ void ReadParameters (); /** Allocate hybrid-PIC specific multifabs. Called in constructor. */ - void AllocateMFs (int nlevs_max); - void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, + void AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, + int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag, const amrex::IntVect& jz_nodal_flag, const amrex::IntVect& rho_nodal_flag); - /** Helper function to clear values from hybrid-PIC specific multifabs. */ - void ClearLevel (int lev); - void InitData (); /** @@ -55,29 +54,29 @@ public: * of time and therefore this should be re-evaluated at every step. */ void GetCurrentExternal ( - amrex::Vector, 3>> const& edge_lengths + ablastr::fields::MultiLevelVectorField const& edge_lengths ); void GetCurrentExternal ( - std::array< std::unique_ptr, 3> const& edge_lengths, + ablastr::fields::VectorField const& edge_lengths, int lev ); /** * \brief - * Function to calculate the total current based on Ampere's law while - * neglecting displacement current (J = curl x B). Used in the Ohm's law - * solver (kinetic-fluid hybrid model). + * Function to calculate the total plasma current based on Ampere's law while + * neglecting displacement current (J = curl x B). Any external current is + * subtracted as well. Used in the Ohm's law solver (kinetic-fluid hybrid model). * * \param[in] Bfield Magnetic field from which the current is calculated. 
* \param[in] edge_lengths Length of cell edges taking embedded boundaries into account */ - void CalculateCurrentAmpere ( - amrex::Vector, 3>> const& Bfield, - amrex::Vector, 3>> const& edge_lengths + void CalculatePlasmaCurrent ( + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& edge_lengths ); - void CalculateCurrentAmpere ( - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3> const& edge_lengths, + void CalculatePlasmaCurrent ( + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ); @@ -86,53 +85,53 @@ public: * Function to update the E-field using Ohm's law (hybrid-PIC model). */ void HybridPICSolveE ( - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector, 3>> const& Bfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, - bool solve_for_Faraday); + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, + bool solve_for_Faraday) const; void HybridPICSolveE ( - std::array< std::unique_ptr, 3>& Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::array< std::unique_ptr, 3> const& edge_lengths, - int lev, bool solve_for_Faraday); + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + ablastr::fields::VectorField const& edge_lengths, + int lev, bool solve_for_Faraday) const; void HybridPICSolveE ( - std::array< std::unique_ptr, 3>& Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - 
std::unique_ptr const& rhofield, - std::array< std::unique_ptr, 3> const& edge_lengths, - int lev, PatchType patch_type, bool solve_for_Faraday); + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + ablastr::fields::VectorField const& edge_lengths, + int lev, PatchType patch_type, bool solve_for_Faraday) const; void BfieldEvolveRK ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType a_dt_type, amrex::IntVect ng, std::optional nodal_sync); void BfieldEvolveRK ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, int lev, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); void FieldPush ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + 
ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); @@ -141,8 +140,8 @@ public: * Function to calculate the electron pressure using the simulation charge * density. Used in the Ohm's law solver (kinetic-fluid hybrid model). */ - void CalculateElectronPressure (); - void CalculateElectronPressure (int lev); + void CalculateElectronPressure () const; + void CalculateElectronPressure (int lev) const; /** * \brief Fill the electron pressure multifab given the kinetic particle @@ -153,8 +152,8 @@ public: * \param[in] rho_field scalar ion charge density Multifab at a given level */ void FillElectronPressureMF ( - std::unique_ptr const& Pe_field, - amrex::MultiFab* const& rho_field ) const; + amrex::MultiFab& Pe_field, + amrex::MultiFab const& rho_field ) const; // Declare variables to hold hybrid-PIC model parameters /** Number of substeps to take when evolving B */ @@ -187,32 +186,6 @@ public: std::array< amrex::ParserExecutor<4>, 3> m_J_external; bool m_external_field_has_time_dependence = false; - // Declare multifabs specifically needed for the hybrid-PIC model - amrex::Vector< std::unique_ptr > rho_fp_temp; - amrex::Vector, 3 > > current_fp_temp; - amrex::Vector, 3 > > current_fp_ampere; - amrex::Vector, 3 > > current_fp_external; - amrex::Vector< std::unique_ptr > electron_pressure_fp; - - // Helper functions to retrieve hybrid-PIC multifabs - [[nodiscard]] amrex::MultiFab* - get_pointer_current_fp_ampere (int lev, int direction) const - { - return current_fp_ampere[lev][direction].get(); - } - - [[nodiscard]] amrex::MultiFab* - get_pointer_current_fp_external (int lev, int direction) const - { - return current_fp_external[lev][direction].get(); - } - - [[nodiscard]] amrex::MultiFab* - get_pointer_electron_pressure_fp (int lev) const - { - return electron_pressure_fp[lev].get(); - } - /** Gpu Vector with index type of the Jx multifab */ amrex::GpuArray Jx_IndexType; /** Gpu Vector 
with index type of the Jy multifab */ diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 70efc04e259..d7d6a43b4d5 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -10,16 +10,15 @@ #include "HybridPICModel.H" #include "EmbeddedBoundary/Enabled.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "WarpX.H" using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; -HybridPICModel::HybridPICModel ( int nlevs_max ) +HybridPICModel::HybridPICModel () { ReadParameters(); - AllocateMFs(nlevs_max); } void HybridPICModel::ReadParameters () @@ -56,59 +55,63 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("Jz_external_grid_function(x,y,z,t)", m_Jz_ext_grid_function); } -void HybridPICModel::AllocateMFs (int nlevs_max) -{ - electron_pressure_fp.resize(nlevs_max); - rho_fp_temp.resize(nlevs_max); - current_fp_temp.resize(nlevs_max); - current_fp_ampere.resize(nlevs_max); - current_fp_external.resize(nlevs_max); -} - -void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm, +void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, + int lev, const BoxArray& ba, const DistributionMapping& dm, const int ncomps, const IntVect& ngJ, const IntVect& ngRho, const IntVect& jx_nodal_flag, const IntVect& jy_nodal_flag, const IntVect& jz_nodal_flag, const IntVect& rho_nodal_flag) { - // The "electron_pressure_fp" multifab stores the electron pressure calculated + using ablastr::fields::Direction; + + // The "hybrid_electron_pressure_fp" multifab stores the electron pressure calculated // from the specified equation of state. 
- // The "rho_fp_temp" multifab is used to store the ion charge density + fields.alloc_init(FieldType::hybrid_electron_pressure_fp, + lev, amrex::convert(ba, rho_nodal_flag), + dm, ncomps, ngRho, 0.0_rt); + + // The "hybrid_rho_fp_temp" multifab is used to store the ion charge density // interpolated or extrapolated to appropriate timesteps. - // The "current_fp_temp" multifab is used to store the ion current density + fields.alloc_init(FieldType::hybrid_rho_fp_temp, + lev, amrex::convert(ba, rho_nodal_flag), + dm, ncomps, ngRho, 0.0_rt); + + // The "hybrid_current_fp_temp" multifab is used to store the ion current density // interpolated or extrapolated to appropriate timesteps. - // The "current_fp_ampere" multifab stores the total current calculated as - // the curl of B. - WarpX::AllocInitMultiFab(electron_pressure_fp[lev], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "electron_pressure_fp", 0.0_rt); - - WarpX::AllocInitMultiFab(rho_fp_temp[lev], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "rho_fp_temp", 0.0_rt); - - WarpX::AllocInitMultiFab(current_fp_temp[lev][0], amrex::convert(ba, jx_nodal_flag), - dm, ncomps, ngJ, lev, "current_fp_temp[x]", 0.0_rt); - WarpX::AllocInitMultiFab(current_fp_temp[lev][1], amrex::convert(ba, jy_nodal_flag), - dm, ncomps, ngJ, lev, "current_fp_temp[y]", 0.0_rt); - WarpX::AllocInitMultiFab(current_fp_temp[lev][2], amrex::convert(ba, jz_nodal_flag), - dm, ncomps, ngJ, lev, "current_fp_temp[z]", 0.0_rt); - - WarpX::AllocInitMultiFab(current_fp_ampere[lev][0], amrex::convert(ba, jx_nodal_flag), - dm, ncomps, ngJ, lev, "current_fp_ampere[x]", 0.0_rt); - WarpX::AllocInitMultiFab(current_fp_ampere[lev][1], amrex::convert(ba, jy_nodal_flag), - dm, ncomps, ngJ, lev, "current_fp_ampere[y]", 0.0_rt); - WarpX::AllocInitMultiFab(current_fp_ampere[lev][2], amrex::convert(ba, jz_nodal_flag), - dm, ncomps, ngJ, lev, "current_fp_ampere[z]", 0.0_rt); - - // the external current density multifab is made nodal to 
avoid needing to interpolate - // to a nodal grid as has to be done for the ion and total current density multifabs - // this also allows the external current multifab to not have any ghost cells - WarpX::AllocInitMultiFab(current_fp_external[lev][0], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[x]", 0.0_rt); - WarpX::AllocInitMultiFab(current_fp_external[lev][1], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[y]", 0.0_rt); - WarpX::AllocInitMultiFab(current_fp_external[lev][2], amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), lev, "current_fp_external[z]", 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{0}, + lev, amrex::convert(ba, jx_nodal_flag), + dm, ncomps, ngJ, 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{1}, + lev, amrex::convert(ba, jy_nodal_flag), + dm, ncomps, ngJ, 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{2}, + lev, amrex::convert(ba, jz_nodal_flag), + dm, ncomps, ngJ, 0.0_rt); + + // The "hybrid_current_fp_plasma" multifab stores the total plasma current calculated + // as the curl of B minus any external current. 
+ fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{0}, + lev, amrex::convert(ba, jx_nodal_flag), + dm, ncomps, ngJ, 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{1}, + lev, amrex::convert(ba, jy_nodal_flag), + dm, ncomps, ngJ, 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{2}, + lev, amrex::convert(ba, jz_nodal_flag), + dm, ncomps, ngJ, 0.0_rt); + + // the external current density multifab matches the current staggering and + // one ghost cell is used since we interpolate the current to a nodal grid + fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{0}, + lev, amrex::convert(ba, jx_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{1}, + lev, amrex::convert(ba, jy_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); + fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{2}, + lev, amrex::convert(ba, jz_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -117,17 +120,6 @@ void HybridPICModel::AllocateLevelMFs (int lev, const BoxArray& ba, const Distri #endif } -void HybridPICModel::ClearLevel (int lev) -{ - electron_pressure_fp[lev].reset(); - rho_fp_temp[lev].reset(); - for (int i = 0; i < 3; ++i) { - current_fp_temp[lev][i].reset(); - current_fp_ampere[lev][i].reset(); - current_fp_external[lev][i].reset(); - } -} - void HybridPICModel::InitData () { m_resistivity_parser = std::make_unique( @@ -153,17 +145,18 @@ void HybridPICModel::InitData () } auto & warpx = WarpX::GetInstance(); + using ablastr::fields::Direction; // Get the grid staggering of the fields involved in calculating E - amrex::IntVect Jx_stag = warpx.getField(FieldType::current_fp, 0,0).ixType().toIntVect(); - amrex::IntVect Jy_stag = warpx.getField(FieldType::current_fp, 0,1).ixType().toIntVect(); - amrex::IntVect Jz_stag = warpx.getField(FieldType::current_fp, 
0,2).ixType().toIntVect(); - amrex::IntVect Bx_stag = warpx.getField(FieldType::Bfield_fp, 0,0).ixType().toIntVect(); - amrex::IntVect By_stag = warpx.getField(FieldType::Bfield_fp, 0,1).ixType().toIntVect(); - amrex::IntVect Bz_stag = warpx.getField(FieldType::Bfield_fp, 0,2).ixType().toIntVect(); - amrex::IntVect Ex_stag = warpx.getField(FieldType::Efield_fp, 0,0).ixType().toIntVect(); - amrex::IntVect Ey_stag = warpx.getField(FieldType::Efield_fp, 0,1).ixType().toIntVect(); - amrex::IntVect Ez_stag = warpx.getField(FieldType::Efield_fp, 0,2).ixType().toIntVect(); + amrex::IntVect Jx_stag = warpx.m_fields.get(FieldType::current_fp, Direction{0}, 0)->ixType().toIntVect(); + amrex::IntVect Jy_stag = warpx.m_fields.get(FieldType::current_fp, Direction{1}, 0)->ixType().toIntVect(); + amrex::IntVect Jz_stag = warpx.m_fields.get(FieldType::current_fp, Direction{2}, 0)->ixType().toIntVect(); + amrex::IntVect Bx_stag = warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, 0)->ixType().toIntVect(); + amrex::IntVect By_stag = warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, 0)->ixType().toIntVect(); + amrex::IntVect Bz_stag = warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, 0)->ixType().toIntVect(); + amrex::IntVect Ex_stag = warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, 0)->ixType().toIntVect(); + amrex::IntVect Ey_stag = warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, 0)->ixType().toIntVect(); + amrex::IntVect Ez_stag = warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, 0)->ixType().toIntVect(); // Check that the grid types are appropriate const bool appropriate_grids = ( @@ -231,9 +224,10 @@ void HybridPICModel::InitData () auto edge_lengths = std::array, 3>(); #ifdef AMREX_USE_EB if (EB::enabled()) { - auto const & edge_lengths_x = warpx.getField(FieldType::edge_lengths, lev, 0); - auto const & edge_lengths_y = warpx.getField(FieldType::edge_lengths, lev, 1); - auto const & edge_lengths_z = warpx.getField(FieldType::edge_lengths, lev, 
2); + using ablastr::fields::Direction; + auto const & edge_lengths_x = *warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev); + auto const & edge_lengths_y = *warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev); + auto const & edge_lengths_z = *warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev); edge_lengths = std::array< std::unique_ptr, 3 >{ std::make_unique( @@ -245,12 +239,12 @@ void HybridPICModel::InitData () }; } #endif - GetCurrentExternal(edge_lengths, lev); + GetCurrentExternal(ablastr::fields::a2m(edge_lengths), lev); } } void HybridPICModel::GetCurrentExternal ( - amrex::Vector, 3>> const& edge_lengths) + ablastr::fields::MultiLevelVectorField const& edge_lengths) { if (!m_external_field_has_time_dependence) { return; } @@ -263,7 +257,7 @@ void HybridPICModel::GetCurrentExternal ( void HybridPICModel::GetCurrentExternal ( - std::array< std::unique_ptr, 3> const& edge_lengths, + ablastr::fields::VectorField const& edge_lengths, int lev) { // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser @@ -275,9 +269,10 @@ void HybridPICModel::GetCurrentExternal ( auto dx_lev = warpx.Geom(lev).CellSizeArray(); const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - auto& mfx = current_fp_external[lev][0]; - auto& mfy = current_fp_external[lev][1]; - auto& mfz = current_fp_external[lev][2]; + using ablastr::fields::Direction; + amrex::MultiFab * mfx = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{0}, lev); + amrex::MultiFab * mfy = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{1}, lev); + amrex::MultiFab * mfz = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{2}, lev); const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -358,7 +353,7 @@ void HybridPICModel::GetCurrentExternal ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the 
y-component of the field. - mfyfab(i,j,k) = Jy_external(x,y,z,t); + mfyfab(i,j,k) = Jy_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary @@ -390,61 +385,71 @@ void HybridPICModel::GetCurrentExternal ( } } -void HybridPICModel::CalculateCurrentAmpere ( - amrex::Vector, 3>> const& Bfield, - amrex::Vector, 3>> const& edge_lengths) +void HybridPICModel::CalculatePlasmaCurrent ( + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& edge_lengths) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - CalculateCurrentAmpere(Bfield[lev], edge_lengths[lev], lev); + CalculatePlasmaCurrent(Bfield[lev], edge_lengths[lev], lev); } } -void HybridPICModel::CalculateCurrentAmpere ( - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3> const& edge_lengths, +void HybridPICModel::CalculatePlasmaCurrent ( + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, const int lev) { - WARPX_PROFILE("WarpX::CalculateCurrentAmpere()"); + WARPX_PROFILE("HybridPICModel::CalculatePlasmaCurrent()"); auto& warpx = WarpX::GetInstance(); + ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere( - current_fp_ampere[lev], Bfield, edge_lengths, lev + current_fp_plasma, Bfield, edge_lengths, lev ); // we shouldn't apply the boundary condition to J since J = J_i - J_e but // the boundary correction was already applied to J_i and the B-field // boundary ensures that J itself complies with the boundary conditions, right? 
// ApplyJfieldBoundary(lev, Jfield[0].get(), Jfield[1].get(), Jfield[2].get()); - for (int i=0; i<3; i++) { current_fp_ampere[lev][i]->FillBoundary(warpx.Geom(lev).periodicity()); } + for (int i=0; i<3; i++) { current_fp_plasma[i]->FillBoundary(warpx.Geom(lev).periodicity()); } + + // Subtract external current from "Ampere" current calculated above. Note + // we need to include 1 ghost cell since later we will interpolate the + // plasma current to a nodal grid. + ablastr::fields::VectorField current_fp_external = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_external, lev); + for (int i=0; i<3; i++) { + current_fp_plasma[i]->minus(*current_fp_external[i], 0, 1, 1); + } + } void HybridPICModel::HybridPICSolveE ( - amrex::Vector, 3>> & Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector, 3>> const& Bfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, - const bool solve_for_Faraday) + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, + const bool solve_for_Faraday) const { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { HybridPICSolveE( - Efield[lev], Jfield[lev], Bfield[lev], rhofield[lev], + Efield[lev], Jfield[lev], Bfield[lev], *rhofield[lev], edge_lengths[lev], lev, solve_for_Faraday ); } } void HybridPICModel::HybridPICSolveE ( - std::array< std::unique_ptr, 3> & Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::array< std::unique_ptr, 3> const& edge_lengths, - const int lev, const bool solve_for_Faraday) + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Bfield, + 
amrex::MultiFab const& rhofield, + ablastr::fields::VectorField const& edge_lengths, + const int lev, const bool solve_for_Faraday) const { WARPX_PROFILE("WarpX::HybridPICSolveE()"); @@ -460,27 +465,28 @@ void HybridPICModel::HybridPICSolveE ( } void HybridPICModel::HybridPICSolveE ( - std::array< std::unique_ptr, 3> & Efield, - std::array< std::unique_ptr, 3> const& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::unique_ptr const& rhofield, - std::array< std::unique_ptr, 3> const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + ablastr::fields::VectorField const& edge_lengths, const int lev, PatchType patch_type, - const bool solve_for_Faraday) + const bool solve_for_Faraday) const { auto& warpx = WarpX::GetInstance(); + ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); + const ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); + // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( - Efield, current_fp_ampere[lev], Jfield, current_fp_external[lev], - Bfield, rhofield, - electron_pressure_fp[lev], - edge_lengths, lev, this, solve_for_Faraday + Efield, current_fp_plasma, Jfield, Bfield, rhofield, + *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } -void HybridPICModel::CalculateElectronPressure() +void HybridPICModel::CalculateElectronPressure() const { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) @@ -489,22 +495,27 @@ void HybridPICModel::CalculateElectronPressure() } } -void HybridPICModel::CalculateElectronPressure(const int lev) +void HybridPICModel::CalculateElectronPressure(const int lev) const { 
WARPX_PROFILE("WarpX::CalculateElectronPressure()"); auto& warpx = WarpX::GetInstance(); + ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); + ablastr::fields::ScalarField rho_fp = warpx.m_fields.get(FieldType::rho_fp, lev); + // Calculate the electron pressure using rho^{n+1}. FillElectronPressureMF( - electron_pressure_fp[lev], warpx.getFieldPointer(FieldType::rho_fp, lev) + *electron_pressure_fp, + *rho_fp ); warpx.ApplyElectronPressureBoundary(lev, PatchType::fine); - electron_pressure_fp[lev]->FillBoundary(warpx.Geom(lev).periodicity()); + electron_pressure_fp->FillBoundary(warpx.Geom(lev).periodicity()); } void HybridPICModel::FillElectronPressureMF ( - std::unique_ptr const& Pe_field, - amrex::MultiFab* const& rho_field ) const + amrex::MultiFab& Pe_field, + amrex::MultiFab const& rho_field +) const { const auto n0_ref = m_n0_ref; const auto elec_temp = m_elec_temp; @@ -514,11 +525,11 @@ void HybridPICModel::FillElectronPressureMF ( #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for ( MFIter mfi(*Pe_field, TilingIfNotGPU()); mfi.isValid(); ++mfi ) + for ( MFIter mfi(Pe_field, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { // Extract field data for this grid/tile - Array4 const& rho = rho_field->const_array(mfi); - Array4 const& Pe = Pe_field->array(mfi); + Array4 const& rho = rho_field.const_array(mfi); + Array4 const& Pe = Pe_field.array(mfi); // Extract tileboxes for which to loop const Box& tilebox = mfi.tilebox(); @@ -532,11 +543,11 @@ void HybridPICModel::FillElectronPressureMF ( } void HybridPICModel::BfieldEvolveRK ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& 
Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -551,11 +562,11 @@ void HybridPICModel::BfieldEvolveRK ( } void HybridPICModel::BfieldEvolveRK ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, int lev, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -664,18 +675,18 @@ void HybridPICModel::BfieldEvolveRK ( } void HybridPICModel::FieldPush ( - amrex::Vector, 3>>& Bfield, - amrex::Vector, 3>>& Efield, - amrex::Vector, 3>> const& Jfield, - amrex::Vector> const& rhofield, - amrex::Vector, 3>> const& edge_lengths, + ablastr::fields::MultiLevelVectorField const& Bfield, + ablastr::fields::MultiLevelVectorField const& Efield, + ablastr::fields::MultiLevelVectorField const& Jfield, + ablastr::fields::MultiLevelScalarField const& rhofield, + ablastr::fields::MultiLevelVectorField const& edge_lengths, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& warpx = WarpX::GetInstance(); - // Calculate J = curl x B / mu0 - CalculateCurrentAmpere(Bfield, edge_lengths); + // Calculate J = curl x B / mu0 - J_ext + CalculatePlasmaCurrent(Bfield, edge_lengths); // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); warpx.FillBoundaryE(ng, nodal_sync); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index baeaf7a6c18..76fedbf4dea 
100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -24,9 +24,9 @@ using namespace amrex; void FiniteDifferenceSolver::CalculateCurrentAmpere ( - std::array< std::unique_ptr, 3>& Jfield, - std::array< std::unique_ptr, 3> const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField & Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ) { // Select algorithm (The choice of algorithm is a runtime option, @@ -59,9 +59,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpere ( #ifdef WARPX_DIM_RZ template void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ) { @@ -242,9 +242,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( template void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& edge_lengths, int lev ) { @@ -351,14 +351,13 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( void FiniteDifferenceSolver::HybridPICSolveE ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 >& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 
3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday) { @@ -368,14 +367,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( #ifdef WARPX_DIM_RZ HybridPICSolveECylindrical ( - Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, + Efield, Jfield, Jifield, Bfield, rhofield, Pefield, edge_lengths, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( - Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, + Efield, Jfield, Jifield, Bfield, rhofield, Pefield, edge_lengths, lev, hybrid_model, solve_for_Faraday ); @@ -389,14 +388,13 @@ void FiniteDifferenceSolver::HybridPICSolveE ( #ifdef WARPX_DIM_RZ template void FiniteDifferenceSolver::HybridPICSolveECylindrical ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -449,8 +447,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Also note that enE_nodal_mf does not need to have any guard cells since // these values will be 
interpolated to the Yee mesh which is contained // by the nodal mesh. - auto const& ba = convert(rhofield->boxArray(), IntVect::TheNodeVector()); - MultiFab enE_nodal_mf(ba, rhofield->DistributionMap(), 3, IntVect::TheZeroVector()); + auto const& ba = convert(rhofield.boxArray(), IntVect::TheNodeVector()); + MultiFab enE_nodal_mf(ba, rhofield.DistributionMap(), 3, IntVect::TheZeroVector()); // Loop through the grids, and over the tiles within each grid for the // initial, nodal calculation of E @@ -471,9 +469,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Jir = Jifield[0]->const_array(mfi); Array4 const& Jit = Jifield[1]->const_array(mfi); Array4 const& Jiz = Jifield[2]->const_array(mfi); - Array4 const& Jextr = Jextfield[0]->const_array(mfi); - Array4 const& Jextt = Jextfield[1]->const_array(mfi); - Array4 const& Jextz = Jextfield[2]->const_array(mfi); Array4 const& Br = Bfield[0]->const_array(mfi); Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); @@ -498,16 +493,16 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // calculate enE = (J - Ji) x B enE_nodal(i, j, 0, 0) = ( - (jt_interp - jit_interp - Jextt(i, j, 0)) * Bz_interp - - (jz_interp - jiz_interp - Jextz(i, j, 0)) * Bt_interp + (jt_interp - jit_interp) * Bz_interp + - (jz_interp - jiz_interp) * Bt_interp ); enE_nodal(i, j, 0, 1) = ( - (jz_interp - jiz_interp - Jextz(i, j, 0)) * Br_interp - - (jr_interp - jir_interp - Jextr(i, j, 0)) * Bz_interp + (jz_interp - jiz_interp) * Br_interp + - (jr_interp - jir_interp) * Bz_interp ); enE_nodal(i, j, 0, 2) = ( - (jr_interp - jir_interp - Jextr(i, j, 0)) * Bt_interp - - (jt_interp - jit_interp - Jextt(i, j, 0)) * Br_interp + (jr_interp - jir_interp) * Bt_interp + - (jt_interp - jit_interp) * Br_interp ); }); @@ -539,8 +534,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Jt = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); 
Array4 const& enE = enE_nodal_mf.const_array(mfi); - Array4 const& rho = rhofield->const_array(mfi); - Array4 const& Pe = Pefield->array(mfi); + Array4 const& rho = rhofield.const_array(mfi); + Array4 const& Pe = Pefield.const_array(mfi); amrex::Array4 lr, lz; if (EB::enabled()) { @@ -704,14 +699,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( template void FiniteDifferenceSolver::HybridPICSolveECartesian ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& Jifield, - std::array< std::unique_ptr, 3 > const& Jextfield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::unique_ptr const& rhofield, - std::unique_ptr const& Pefield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -758,8 +752,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Also note that enE_nodal_mf does not need to have any guard cells since // these values will be interpolated to the Yee mesh which is contained // by the nodal mesh. 
- auto const& ba = convert(rhofield->boxArray(), IntVect::TheNodeVector()); - MultiFab enE_nodal_mf(ba, rhofield->DistributionMap(), 3, IntVect::TheZeroVector()); + auto const& ba = convert(rhofield.boxArray(), IntVect::TheNodeVector()); + MultiFab enE_nodal_mf(ba, rhofield.DistributionMap(), 3, IntVect::TheZeroVector()); // Loop through the grids, and over the tiles within each grid for the // initial, nodal calculation of E @@ -780,9 +774,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Jix = Jifield[0]->const_array(mfi); Array4 const& Jiy = Jifield[1]->const_array(mfi); Array4 const& Jiz = Jifield[2]->const_array(mfi); - Array4 const& Jextx = Jextfield[0]->const_array(mfi); - Array4 const& Jexty = Jextfield[1]->const_array(mfi); - Array4 const& Jextz = Jextfield[2]->const_array(mfi); Array4 const& Bx = Bfield[0]->const_array(mfi); Array4 const& By = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); @@ -790,7 +781,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // interpolate the total current to a nodal grid + // interpolate the total plasma current to a nodal grid auto const jx_interp = Interp(Jx, Jx_stag, nodal, coarsen, i, j, k, 0); auto const jy_interp = Interp(Jy, Jy_stag, nodal, coarsen, i, j, k, 0); auto const jz_interp = Interp(Jz, Jz_stag, nodal, coarsen, i, j, k, 0); @@ -807,16 +798,16 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // calculate enE = (J - Ji) x B enE_nodal(i, j, k, 0) = ( - (jy_interp - jiy_interp - Jexty(i, j, k)) * Bz_interp - - (jz_interp - jiz_interp - Jextz(i, j, k)) * By_interp + (jy_interp - jiy_interp) * Bz_interp + - (jz_interp - jiz_interp) * By_interp ); enE_nodal(i, j, k, 1) = ( - (jz_interp - jiz_interp - Jextz(i, j, k)) * Bx_interp - - (jx_interp - jix_interp - Jextx(i, j, k)) * Bz_interp + (jz_interp - jiz_interp) 
* Bx_interp + - (jx_interp - jix_interp) * Bz_interp ); enE_nodal(i, j, k, 2) = ( - (jx_interp - jix_interp - Jextx(i, j, k)) * By_interp - - (jy_interp - jiy_interp - Jexty(i, j, k)) * Bx_interp + (jx_interp - jix_interp) * By_interp + - (jy_interp - jiy_interp) * Bx_interp ); }); @@ -848,8 +839,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Jy = Jfield[1]->const_array(mfi); Array4 const& Jz = Jfield[2]->const_array(mfi); Array4 const& enE = enE_nodal_mf.const_array(mfi); - Array4 const& rho = rhofield->const_array(mfi); - Array4 const& Pe = Pefield->array(mfi); + Array4 const& rho = rhofield.const_array(mfi); + Array4 const& Pe = Pefield.array(mfi); amrex::Array4 lx, ly, lz; if (EB::enabled()) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 46e4d3efa06..708728c4e5b 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -34,12 +34,13 @@ #include using namespace amrex; +using namespace ablastr::fields; void FiniteDifferenceSolver::MacroscopicEvolveE ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + VectorField const& edge_lengths, amrex::Real const dt, std::unique_ptr const& macroscopic_properties) { @@ -99,10 +100,10 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( template void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( - std::array< std::unique_ptr, 3 >& Efield, - std::array< std::unique_ptr, 3 > const& Bfield, - std::array< std::unique_ptr, 3 > const& Jfield, - std::array< std::unique_ptr, 3 > const& edge_lengths, + 
ablastr::fields::VectorField const& Efield, + ablastr::fields::VectorField const& Bfield, + ablastr::fields::VectorField const& Jfield, + ablastr::fields::VectorField const& edge_lengths, amrex::Real const dt, std::unique_ptr const& macroscopic_properties) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp index a6a389fe056..18c010d9385 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.cpp @@ -1,6 +1,6 @@ #include "MacroscopicProperties.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" @@ -23,7 +23,6 @@ #include using namespace amrex; -using namespace warpx::fields; MacroscopicProperties::MacroscopicProperties () { diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index 33e2ced7b53..2236118a30c 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -7,7 +7,7 @@ #include "SemiImplicitEM.H" #include "WarpX.H" -using namespace warpx::fields; +using warpx::fields::FieldType; using namespace amrex::literals; void SemiImplicitEM::Define ( WarpX* a_WarpX ) @@ -20,7 +20,7 @@ void SemiImplicitEM::Define ( WarpX* a_WarpX ) m_WarpX = a_WarpX; // Define E and Eold vectors - m_E.Define( m_WarpX, FieldType::Efield_fp ); + m_E.Define( m_WarpX, "Efield_fp" ); m_Eold.Define( m_E ); // Parse implicit solver parameters diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H index 009c2c7e546..aba66782154 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H +++ 
b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H @@ -98,14 +98,6 @@ private: */ WarpXSolverVec m_E, m_Eold; - /** - * \brief B is a derived variable from E. Need to save Bold to update B during - * the iterative nonlinear solve for E. Bold is owned here, but only used by WarpX. - * It is not used directly by the nonlinear solver, nor is it the same size as the - * solver vector (size E), and so it should not be WarpXSolverVec type. - */ - amrex::Vector, 3 > > m_Bold; - /** * \brief Update the E and B fields owned by WarpX */ diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 3d74ddfde69..4cd5de4f24f 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -4,11 +4,11 @@ * * License: BSD-3-Clause-LBNL */ -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "ThetaImplicitEM.H" #include "WarpX.H" -using namespace warpx::fields; +using warpx::fields::FieldType; using namespace amrex::literals; void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) @@ -21,20 +21,21 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) m_WarpX = a_WarpX; // Define E and Eold vectors - m_E.Define( m_WarpX, FieldType::Efield_fp ); + m_E.Define( m_WarpX, "Efield_fp" ); m_Eold.Define( m_E ); - // Define Bold MultiFab + // Define B_old MultiFabs + using ablastr::fields::Direction; const int num_levels = 1; - m_Bold.resize(num_levels); // size is number of levels for (int lev = 0; lev < num_levels; ++lev) { - for (int n=0; n<3; n++) { - const amrex::MultiFab& Bfp = m_WarpX->getField( FieldType::Bfield_fp,lev,n); - m_Bold[lev][n] = std::make_unique( Bfp.boxArray(), - Bfp.DistributionMap(), - Bfp.nComp(), - Bfp.nGrowVect() ); - } + const auto& ba_Bx = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->boxArray(); + const auto& ba_By = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->boxArray(); + 
const auto& ba_Bz = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->boxArray(); + const auto& dm = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->DistributionMap(); + const amrex::IntVect ngb = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->nGrowVect(); + m_WarpX->m_fields.alloc_init(FieldType::B_old, Direction{0}, lev, ba_Bx, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::B_old, Direction{1}, lev, ba_By, dm, 1, ngb, 0.0_rt); + m_WarpX->m_fields.alloc_init(FieldType::B_old, Direction{2}, lev, ba_Bz, dm, 1, ngb, 0.0_rt); } // Parse theta-implicit solver specific parameters @@ -88,12 +89,13 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, // Save Eg at the start of the time step m_Eold.Copy( FieldType::Efield_fp ); - const int num_levels = static_cast(m_Bold.size()); + const int num_levels = 1; for (int lev = 0; lev < num_levels; ++lev) { - for (int n=0; n<3; n++) { - const amrex::MultiFab& Bfp = m_WarpX->getField(FieldType::Bfield_fp,lev,n); - amrex::MultiFab& Bold = *m_Bold[lev][n]; - amrex::MultiFab::Copy(Bold, Bfp, 0, 0, 1, Bold.nGrowVect()); + const ablastr::fields::VectorField Bfp = m_WarpX->m_fields.get_alldirs(FieldType::Bfield_fp, lev); + ablastr::fields::VectorField B_old = m_WarpX->m_fields.get_alldirs(FieldType::B_old, lev); + for (int n = 0; n < 3; ++n) { + amrex::MultiFab::Copy(*B_old[n], *Bfp[n], 0, 0, B_old[n]->nComp(), + B_old[n]->nGrowVect() ); } } @@ -145,7 +147,8 @@ void ThetaImplicitEM::UpdateWarpXFields ( const WarpXSolverVec& a_E, m_WarpX->SetElectricFieldAndApplyBCs( a_E ); // Update Bfield_fp owned by WarpX - m_WarpX->UpdateMagneticFieldAndApplyBCs( m_Bold, m_theta*a_dt ); + ablastr::fields::MultiLevelVectorField const& B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0); + m_WarpX->UpdateMagneticFieldAndApplyBCs(B_old, m_theta * a_dt ); } @@ -160,6 +163,7 @@ void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real a_new_time ) const amrex::Real c1 = 
1._rt - c0; m_E.linComb( c0, m_E, c1, m_Eold ); m_WarpX->SetElectricFieldAndApplyBCs( m_E ); - m_WarpX->FinishMagneticFieldAndApplyBCs( m_Bold, m_theta ); + ablastr::fields::MultiLevelVectorField const & B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0); + m_WarpX->FinishMagneticFieldAndApplyBCs(B_old, m_theta ); } diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index 8dd97ed5525..806c3412990 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -11,7 +11,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include "Parallelization/GuardCellManager.H" #include "Particles/MultiParticleContainer.H" @@ -73,7 +73,10 @@ WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) a_E.getArrayVecType()==warpx::fields::FieldType::Efield_fp, "WarpX::SetElectricFieldAndApplyBCs() must be called with Efield_fp type"); - const amrex::Vector, 3 > >& Evec = a_E.getArrayVec(); + using warpx::fields::FieldType; + + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level); + const ablastr::fields::MultiLevelVectorField& Evec = a_E.getArrayVec(); amrex::MultiFab::Copy(*Efield_fp[0][0], *Evec[0][0], 0, 0, ncomps, Evec[0][0]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][1], *Evec[0][1], 0, 0, ncomps, Evec[0][1]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][2], *Evec[0][2], 0, 0, ncomps, Evec[0][2]->nGrowVect()); @@ -82,21 +85,29 @@ WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) } void -WarpX::UpdateMagneticFieldAndApplyBCs( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_thetadt ) +WarpX::UpdateMagneticFieldAndApplyBCs( 
ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_thetadt ) { - amrex::MultiFab::Copy(*Bfield_fp[0][0], *a_Bn[0][0], 0, 0, ncomps, a_Bn[0][0]->nGrowVect()); - amrex::MultiFab::Copy(*Bfield_fp[0][1], *a_Bn[0][1], 0, 0, ncomps, a_Bn[0][1]->nGrowVect()); - amrex::MultiFab::Copy(*Bfield_fp[0][2], *a_Bn[0][2], 0, 0, ncomps, a_Bn[0][2]->nGrowVect()); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + for (int lev = 0; lev <= finest_level; ++lev) { + ablastr::fields::VectorField Bfp = m_fields.get_alldirs(FieldType::Bfield_fp, lev); + amrex::MultiFab::Copy(*Bfp[0], *a_Bn[lev][0], 0, 0, ncomps, a_Bn[lev][0]->nGrowVect()); + amrex::MultiFab::Copy(*Bfp[1], *a_Bn[lev][1], 0, 0, ncomps, a_Bn[lev][1]->nGrowVect()); + amrex::MultiFab::Copy(*Bfp[2], *a_Bn[lev][2], 0, 0, ncomps, a_Bn[lev][2]->nGrowVect()); + } EvolveB(a_thetadt, DtType::Full); ApplyMagneticFieldBCs(); } void -WarpX::FinishMagneticFieldAndApplyBCs( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_theta ) +WarpX::FinishMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_theta ) { - FinishImplicitField(Bfield_fp, a_Bn, a_theta); + using warpx::fields::FieldType; + + FinishImplicitField(m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, 0), a_Bn, a_theta); ApplyMagneticFieldBCs(); } @@ -248,9 +259,9 @@ WarpX::FinishImplicitParticleUpdate () } void -WarpX::FinishImplicitField( amrex::Vector, 3 > >& Field_fp, - const amrex::Vector, 3 > >& Field_n, - amrex::Real theta ) +WarpX::FinishImplicitField( ablastr::fields::MultiLevelVectorField const& Field_fp, + ablastr::fields::MultiLevelVectorField const& Field_n, + amrex::Real theta ) { using namespace amrex::literals; @@ -335,15 +346,17 @@ WarpX::ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real a_dt, War // a_Erhs_vec storing only the RHS of the update equation. 
I.e., // c^2*dt*(curl(B^{n+theta} - mu0*J^{n+1/2}) if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveE( a_Erhs_vec.getArrayVec()[lev], Bfield_fp[lev], - current_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_fp[lev], lev, a_dt ); + m_fdtd_solver_fp[lev]->EvolveE( m_fields, + lev, + patch_type, + a_Erhs_vec.getArrayVec()[lev], + a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveE( a_Erhs_vec.getArrayVec()[lev], Bfield_cp[lev], - current_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_cp[lev], lev, a_dt ); + m_fdtd_solver_cp[lev]->EvolveE( m_fields, + lev, + patch_type, + a_Erhs_vec.getArrayVec()[lev], + a_dt ); } // Compute Efield_rhs in PML cells by calling EvolveEPML diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H index f884f5fa623..29c808b48cd 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H @@ -8,10 +8,11 @@ #define WarpXSolverVec_H_ #include "Utils/TextMsg.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include #include +#include #include #include @@ -59,7 +60,7 @@ public: WarpXSolverVec(const WarpXSolverVec&) = delete; - ~WarpXSolverVec() = default; + ~WarpXSolverVec(); using value_type = amrex::Real; using RT = value_type; @@ -67,16 +68,16 @@ public: [[nodiscard]] inline bool IsDefined () const { return m_is_defined; } void Define ( WarpX* a_WarpX, - warpx::fields::FieldType a_array_type, - warpx::fields::FieldType a_scalar_type = warpx::fields::FieldType::None ); + const std::string& a_vector_type_name, + const std::string& a_scalar_type_name = "none" ); inline void Define ( const WarpXSolverVec& a_solver_vec ) { assertIsDefined( a_solver_vec ); Define( WarpXSolverVec::m_WarpX, - a_solver_vec.getArrayVecType(), - a_solver_vec.getScalarVecType() ); + a_solver_vec.getVectorType(), + a_solver_vec.getScalarType() ); } [[nodiscard]] RT 
dotProduct( const WarpXSolverVec& a_X ) const; @@ -94,13 +95,13 @@ public: for (int lev = 0; lev < m_num_amr_levels; ++lev) { if (m_array_type != warpx::fields::FieldType::None) { for (int n = 0; n < 3; ++n) { - const std::unique_ptr& this_field = a_solver_vec.getArrayVec()[lev][n]; + const amrex::MultiFab* this_field = a_solver_vec.getArrayVec()[lev][n]; amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_field, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } if (m_scalar_type != warpx::fields::FieldType::None) { - const std::unique_ptr& this_scalar = a_solver_vec.getScalarVec()[lev]; + const amrex::MultiFab* this_scalar = a_solver_vec.getScalarVec()[lev]; amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_scalar, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } @@ -270,26 +271,34 @@ public: return std::sqrt(norm); } - [[nodiscard]] const amrex::Vector,3>>& getArrayVec() const {return m_array_vec;} - amrex::Vector,3>>& getArrayVec() {return m_array_vec;} + [[nodiscard]] const ablastr::fields::MultiLevelVectorField& getArrayVec() const {return m_array_vec;} + ablastr::fields::MultiLevelVectorField& getArrayVec() {return m_array_vec;} - [[nodiscard]] const amrex::Vector>& getScalarVec() const {return m_scalar_vec;} - amrex::Vector>& getScalarVec() {return m_scalar_vec;} + [[nodiscard]] const ablastr::fields::MultiLevelScalarField& getScalarVec() const {return m_scalar_vec;} + ablastr::fields::MultiLevelScalarField& getScalarVec() {return m_scalar_vec;} // solver vector types are type warpx::fields::FieldType [[nodiscard]] warpx::fields::FieldType getArrayVecType () const { return m_array_type; } [[nodiscard]] warpx::fields::FieldType getScalarVecType () const { return m_scalar_type; } + // solver vector type names + [[nodiscard]] std::string getVectorType () const { return m_vector_type_name; } + [[nodiscard]] std::string getScalarType () const { return m_scalar_type_name; } + + private: bool m_is_defined = false; - amrex::Vector,3>> m_array_vec; - 
amrex::Vector> m_scalar_vec; + ablastr::fields::MultiLevelVectorField m_array_vec; + ablastr::fields::MultiLevelScalarField m_scalar_vec; warpx::fields::FieldType m_array_type = warpx::fields::FieldType::None; warpx::fields::FieldType m_scalar_type = warpx::fields::FieldType::None; + std::string m_vector_type_name = "none"; + std::string m_scalar_type_name = "none"; + static constexpr int m_ncomp = 1; static constexpr int m_num_amr_levels = 1; diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp index f2a88d82d42..6a0e6bb8a91 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp @@ -7,11 +7,22 @@ #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" #include "WarpX.H" -using namespace warpx::fields; +using warpx::fields::FieldType; -void WarpXSolverVec::Define ( WarpX* a_WarpX, - FieldType a_array_type, - FieldType a_scalar_type ) +WarpXSolverVec::~WarpXSolverVec () +{ + for (auto & lvl : m_array_vec) + { + for (int i =0; i<3; ++i) + { + delete lvl[i]; + } + } +} + +void WarpXSolverVec::Define ( WarpX* a_WarpX, + const std::string& a_vector_type_name, + const std::string& a_scalar_type_name ) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !IsDefined(), @@ -23,8 +34,33 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, m_warpx_ptr_defined = true; } - m_array_type = a_array_type; - m_scalar_type = a_scalar_type; + m_vector_type_name = a_vector_type_name; + m_scalar_type_name = a_scalar_type_name; + + if (m_vector_type_name=="Efield_fp") { + m_array_type = FieldType::Efield_fp; + } + else if (m_vector_type_name=="Bfield_fp") { + m_array_type = FieldType::Bfield_fp; + } + else if (m_vector_type_name=="vector_potential_fp_nodal") { + m_array_type = FieldType::vector_potential_fp; + } + else if (m_vector_type_name!="none") { + WARPX_ABORT_WITH_MESSAGE(a_vector_type_name + +"is not a valid option for array type used in Definining" + +"a 
WarpXSolverVec. Valid array types are: Efield_fp, Bfield_fp," + +"and vector_potential_fp_nodal"); + } + + if (m_scalar_type_name=="phi_fp") { + m_scalar_type = FieldType::phi_fp; + } + else if (m_scalar_type_name!="none") { + WARPX_ABORT_WITH_MESSAGE(a_scalar_type_name + +"is not a valid option for scalar type used in Definining" + +"a WarpXSolverVec. Valid scalar types are: phi_fp"); + } m_array_vec.resize(m_num_amr_levels); m_scalar_vec.resize(m_num_amr_levels); @@ -37,13 +73,12 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, "WarpXSolverVec::Define() called with array_type not an array field"); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - using arr_mf_type = std::array; - const arr_mf_type this_array = m_WarpX->getFieldPointerArray(m_array_type, lev); + const ablastr::fields::VectorField this_array = m_WarpX->m_fields.get_alldirs(m_vector_type_name, lev); for (int n = 0; n < 3; n++) { - m_array_vec[lev][n] = std::make_unique( this_array[n]->boxArray(), - this_array[n]->DistributionMap(), - this_array[n]->nComp(), - amrex::IntVect::TheZeroVector() ); + m_array_vec[lev][n] = new amrex::MultiFab( this_array[n]->boxArray(), + this_array[n]->DistributionMap(), + this_array[n]->nComp(), + amrex::IntVect::TheZeroVector() ); } } @@ -57,11 +92,11 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, "WarpXSolverVec::Define() called with scalar_type not a scalar field "); for (int lev = 0; lev < m_num_amr_levels; ++lev) { - const amrex::MultiFab* this_mf = m_WarpX->getFieldPointer(m_scalar_type,lev,0); - m_scalar_vec[lev] = std::make_unique( this_mf->boxArray(), - this_mf->DistributionMap(), - this_mf->nComp(), - amrex::IntVect::TheZeroVector() ); + const amrex::MultiFab* this_mf = m_WarpX->m_fields.get(m_scalar_type_name,lev); + m_scalar_vec[lev] = new amrex::MultiFab( this_mf->boxArray(), + this_mf->DistributionMap(), + this_mf->nComp(), + amrex::IntVect::TheZeroVector() ); } } @@ -87,16 +122,15 @@ void WarpXSolverVec::Copy ( FieldType a_array_type, for (int lev = 
0; lev < m_num_amr_levels; ++lev) { if (m_array_type != FieldType::None) { - using arr_mf_type = std::array; - const arr_mf_type this_array = m_WarpX->getFieldPointerArray(m_array_type, lev); + const ablastr::fields::VectorField this_array = m_WarpX->m_fields.get_alldirs(m_vector_type_name, lev); for (int n = 0; n < 3; ++n) { amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_array[n], 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } if (m_scalar_type != FieldType::None) { - const amrex::MultiFab* this_scalar = m_WarpX->getFieldPointer(m_scalar_type,lev,0); - amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_scalar, 0, 0, m_ncomp, + const amrex::MultiFab* this_mf = m_WarpX->m_fields.get(m_scalar_type_name,lev); + amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_mf, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H index a8bbc954e29..c07551c165c 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H @@ -7,6 +7,8 @@ #ifndef WARPX_MAGNETOSTATICSOLVER_H_ #define WARPX_MAGNETOSTATICSOLVER_H_ +#include + #include #include #include @@ -34,23 +36,23 @@ namespace MagnetostaticSolver { */ class EBCalcBfromVectorPotentialPerLevel { private: - const amrex::Vector, 3>>& m_b_field; - const amrex::Vector, 3>>& m_grad_buf_e_stag; - const amrex::Vector, 3>>& m_grad_buf_b_stag; + ablastr::fields::MultiLevelVectorField m_b_field; + ablastr::fields::MultiLevelVectorField m_grad_buf_e_stag; + ablastr::fields::MultiLevelVectorField m_grad_buf_b_stag; public: - EBCalcBfromVectorPotentialPerLevel(const amrex::Vector, 3>>& b_field, - const amrex::Vector, 3>>& grad_buf_e_stag, - const amrex::Vector, 3>>& grad_buf_b_stag) + EBCalcBfromVectorPotentialPerLevel (ablastr::fields::MultiLevelVectorField const & b_field, + ablastr::fields::MultiLevelVectorField const & 
grad_buf_e_stag, + ablastr::fields::MultiLevelVectorField const & grad_buf_b_stag) : m_b_field(b_field), m_grad_buf_e_stag(grad_buf_e_stag), m_grad_buf_b_stag(grad_buf_b_stag) {} - void operator()(amrex::Array,3> & mlmg, int lev); + void operator() (amrex::Array, 3> & mlmg, int lev); // Function to perform interpolation from cell edges to cell faces - void doInterp(const std::unique_ptr &src, const std::unique_ptr &dst); + void doInterp (amrex::MultiFab & src, amrex::MultiFab & dst); }; } // namespace MagnetostaticSolver diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 031bc915afc..5c28ff1f3c7 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -6,6 +6,7 @@ */ #include "WarpX.H" +#include "Fields.H" #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H" #include "EmbeddedBoundary/Enabled.H" #include "Parallelization/GuardCellManager.H" @@ -19,6 +20,7 @@ #include "Utils/WarpXProfilerWrapper.H" #include "Parallelization/WarpXComm_K.H" +#include #include #include #include @@ -71,6 +73,9 @@ WarpX::ComputeMagnetostaticField() void WarpX::AddMagnetostaticFieldLabFrame() { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + WARPX_PROFILE("WarpX::AddMagnetostaticFieldLabFrame"); // Store the boundary conditions for the field solver if they haven't been @@ -87,7 +92,7 @@ WarpX::AddMagnetostaticFieldLabFrame() // reset current_fp before depositing current density for this step for (int lev = 0; lev <= max_level; lev++) { for (int dim=0; dim < 3; dim++) { - current_fp[lev][dim]->setVal(0.); + m_fields.get(FieldType::current_fp, Direction{dim}, lev)->setVal(0.); } } @@ -95,22 +100,26 @@ WarpX::AddMagnetostaticFieldLabFrame() for (int ispecies=0; ispeciesnSpecies(); ispecies++){ WarpXParticleContainer& species = mypc->GetParticleContainer(ispecies); if 
(!species.do_not_deposit) { - species.DepositCurrent(current_fp, dt[0], 0.); + species.DepositCurrent( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + dt[0], 0.); } } #ifdef WARPX_DIM_RZ for (int lev = 0; lev <= max_level; lev++) { - ApplyInverseVolumeScalingToCurrentDensity(current_fp[lev][0].get(), - current_fp[lev][1].get(), - current_fp[lev][2].get(), lev); + ApplyInverseVolumeScalingToCurrentDensity( + m_fields.get(FieldType::current_fp, Direction{0}, lev), + m_fields.get(FieldType::current_fp, Direction{1}, lev), + m_fields.get(FieldType::current_fp, Direction{2}, lev), + lev ); } #endif - SyncCurrent(current_fp, current_cp, current_buf); // Apply filter, perform MPI exchange, interpolate across levels + SyncCurrent("current_fp"); // set the boundary and current density potentials - setVectorPotentialBC(vector_potential_fp_nodal); + setVectorPotentialBC(m_fields.get_mr_levels_alldirs(FieldType::vector_potential_fp_nodal, finest_level)); // Compute the vector potential A, by solving the Poisson equation WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !IsPythonCallbackInstalled("poissonsolver"), @@ -123,9 +132,11 @@ WarpX::AddMagnetostaticFieldLabFrame() const int self_fields_max_iters = 200; const int self_fields_verbosity = 2; - computeVectorPotential( current_fp, vector_potential_fp_nodal, self_fields_required_precision, - magnetostatic_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity ); + computeVectorPotential( + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::vector_potential_fp_nodal, finest_level), + self_fields_required_precision, magnetostatic_absolute_tolerance, self_fields_max_iters, + self_fields_verbosity); } /* Compute the vector potential `A` by solving the Poisson equation with `J` as @@ -145,37 +156,43 @@ WarpX::AddMagnetostaticFieldLabFrame() \param[in] verbosity The verbosity setting for the MLMG solver */ void -WarpX::computeVectorPotential (const 
amrex::Vector,3> >& curr, - amrex::Vector,3> >& A, - Real const required_precision, - Real absolute_tolerance, - int const max_iters, - int const verbosity) const +WarpX::computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr, + ablastr::fields::MultiLevelVectorField const& A, + Real const required_precision, + Real absolute_tolerance, + int const max_iters, + int const verbosity) // const // This breaks non-const m_fields.get_mr_levels_alldirs { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // create a vector to our fields, sorted by level amrex::Vector> sorted_curr; amrex::Vector> sorted_A; for (int lev = 0; lev <= finest_level; ++lev) { - sorted_curr.emplace_back(amrex::Array ({curr[lev][0].get(), - curr[lev][1].get(), - curr[lev][2].get()})); - sorted_A.emplace_back(amrex::Array ({A[lev][0].get(), - A[lev][1].get(), - A[lev][2].get()})); + sorted_curr.emplace_back(amrex::Array ({curr[lev][0], + curr[lev][1], + curr[lev][2]})); + sorted_A.emplace_back(amrex::Array ({A[lev][0], + A[lev][1], + A[lev][2]})); } -#if defined(AMREX_USE_EB) - const std::optional post_A_calculation({Bfield_fp, - vector_potential_grad_buf_e_stag, - vector_potential_grad_buf_b_stag}); + const ablastr::fields::MultiLevelVectorField Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + const std::optional post_A_calculation( + { + Bfield_fp, + m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_e_stag, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_b_stag, finest_level) + }); +#if defined(AMREX_USE_EB) amrex::Vector factories; for (int lev = 0; lev <= finest_level; ++lev) { factories.push_back(&WarpX::fieldEBFactory(lev)); } const std::optional > eb_farray_box_factory({factories}); #else - const std::optional post_A_calculation; const std::optional > eb_farray_box_factory; #endif @@ -210,8 +227,10 @@ WarpX::computeVectorPotential (const amrex::Vector,3>>& A ) const 
+WarpX::setVectorPotentialBC (ablastr::fields::MultiLevelVectorField const& A) const { + using ablastr::fields::Direction; + // check if any dimension has non-periodic boundary conditions if (!m_vector_poisson_boundary_handler.has_non_periodic) { return; } @@ -226,11 +245,11 @@ WarpX::setVectorPotentialBC ( amrex::Vectorarray(mfi); + auto A_arr = A[lev][Direction{adim}]->array(mfi); // Extract tileboxes for which to loop - const Box& tb = mfi.tilebox( A[lev][adim]->ixType().toIntVect()); + const Box& tb = mfi.tilebox( A[lev][Direction{adim}]->ixType().toIntVect()); // loop over dimensions for (int idim=0; idim &src, - const std::unique_ptr &dst) +void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::doInterp (amrex::MultiFab & src, + amrex::MultiFab & dst) { WarpX &warpx = WarpX::GetInstance(); @@ -373,20 +392,20 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::doInterp(const std amrex::Real const * stencil_coeffs_z = warpx.device_field_centering_stencil_coeffs_z.data(); // Synchronize the ghost cells, do halo exchange - ablastr::utils::communication::FillBoundary(*src, - src->nGrowVect(), + ablastr::utils::communication::FillBoundary(src, + src.nGrowVect(), WarpX::do_single_precision_comms); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*dst, TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(dst, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - IntVect const src_stag = src->ixType().toIntVect(); - IntVect const dst_stag = dst->ixType().toIntVect(); + IntVect const src_stag = src.ixType().toIntVect(); + IntVect const dst_stag = dst.ixType().toIntVect(); - Array4 const& src_arr = src->const_array(mfi); - Array4 const& dst_arr = dst->array(mfi); + Array4 const& src_arr = src.const_array(mfi); + Array4 const& dst_arr = dst.array(mfi); const Box bx = mfi.tilebox(); @@ -408,12 +427,12 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: const amrex::Array 
buf_ptr = { #if defined(WARPX_DIM_3D) - m_grad_buf_e_stag[lev][0].get(), - m_grad_buf_e_stag[lev][1].get(), - m_grad_buf_e_stag[lev][2].get() + m_grad_buf_e_stag[lev][0], + m_grad_buf_e_stag[lev][1], + m_grad_buf_e_stag[lev][2] #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - m_grad_buf_e_stag[lev][0].get(), - m_grad_buf_e_stag[lev][2].get() + m_grad_buf_e_stag[lev][0], + m_grad_buf_e_stag[lev][2] #endif }; @@ -421,13 +440,13 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: mlmg[0]->getGradSolution({buf_ptr}); // Interpolate dAx/dz to By grid buffer, then add to By - this->doInterp(m_grad_buf_e_stag[lev][2], - m_grad_buf_b_stag[lev][1]); + this->doInterp(*m_grad_buf_e_stag[lev][2], + *m_grad_buf_b_stag[lev][1]); MultiFab::Add(*(m_b_field[lev][1]), *(m_grad_buf_b_stag[lev][1]), 0, 0, 1, 0 ); // Interpolate dAx/dy to Bz grid buffer, then subtract from Bz - this->doInterp(m_grad_buf_e_stag[lev][1], - m_grad_buf_b_stag[lev][2]); + this->doInterp(*m_grad_buf_e_stag[lev][1], + *m_grad_buf_b_stag[lev][2]); m_grad_buf_b_stag[lev][2]->mult(-1._rt); MultiFab::Add(*(m_b_field[lev][2]), *(m_grad_buf_b_stag[lev][2]), 0, 0, 1, 0 ); @@ -435,13 +454,13 @@ void MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: mlmg[1]->getGradSolution({buf_ptr}); // Interpolate dAy/dx to Bz grid buffer, then add to Bz - this->doInterp(m_grad_buf_e_stag[lev][0], - m_grad_buf_b_stag[lev][2]); + this->doInterp(*m_grad_buf_e_stag[lev][0], + *m_grad_buf_b_stag[lev][2]); MultiFab::Add(*(m_b_field[lev][2]), *(m_grad_buf_b_stag[lev][2]), 0, 0, 1, 0 ); // Interpolate dAy/dz to Bx grid buffer, then subtract from Bx - this->doInterp(m_grad_buf_e_stag[lev][2], - m_grad_buf_b_stag[lev][0]); + this->doInterp(*m_grad_buf_e_stag[lev][2], + *m_grad_buf_b_stag[lev][0]); m_grad_buf_b_stag[lev][0]->mult(-1._rt); MultiFab::Add(*(m_b_field[lev][0]), *(m_grad_buf_b_stag[lev][0]), 0, 0, 1, 0 ); @@ -449,13 +468,13 @@ void 
MagnetostaticSolver::EBCalcBfromVectorPotentialPerLevel::operator()(amrex:: mlmg[2]->getGradSolution({buf_ptr}); // Interpolate dAz/dy to Bx grid buffer, then add to Bx - this->doInterp(m_grad_buf_e_stag[lev][1], - m_grad_buf_b_stag[lev][0]); + this->doInterp(*m_grad_buf_e_stag[lev][1], + *m_grad_buf_b_stag[lev][0]); MultiFab::Add(*(m_b_field[lev][0]), *(m_grad_buf_b_stag[lev][0]), 0, 0, 1, 0 ); // Interpolate dAz/dx to By grid buffer, then subtract from By - this->doInterp(m_grad_buf_e_stag[lev][0], - m_grad_buf_b_stag[lev][1]); + this->doInterp(*m_grad_buf_e_stag[lev][0], + *m_grad_buf_b_stag[lev][1]); m_grad_buf_b_stag[lev][1]->mult(-1._rt); MultiFab::Add(*(m_b_field[lev][1]), *(m_grad_buf_b_stag[lev][1]), 0, 0, 1, 0 ); } diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H index c72e7db250d..462bce23c23 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.H @@ -13,6 +13,7 @@ #include "FieldSolver/SpectralSolver/SpectralFieldData_fwd.H" #include "FieldSolver/SpectralSolver/SpectralFieldData.H" +#include #include #include @@ -74,7 +75,7 @@ class SpectralBaseAlgorithm */ void ComputeSpectralDivE ( int lev, SpectralFieldData& field_data, - const std::array,3>& Efield, + ablastr::fields::VectorField const& Efield, amrex::MultiFab& divE ); protected: // Meant to be used in the subclasses diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp index b3f18dd6912..069b724f96c 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithm.cpp @@ -9,6 +9,8 @@ #include "FieldSolver/SpectralSolver/SpectralFieldData.H" 
#include "Utils/WarpX_Complex.H" +#include + #include #include #include @@ -58,8 +60,9 @@ void SpectralBaseAlgorithm::ComputeSpectralDivE ( const int lev, SpectralFieldData& field_data, - const std::array,3>& Efield, - amrex::MultiFab& divE ) + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE +) { const SpectralFieldIndex& Idx = m_spectral_index; diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H index 8e03a2a2559..9f6b5b09219 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.H @@ -10,6 +10,7 @@ #include "FieldSolver/SpectralSolver/SpectralKSpaceRZ.H" #include "FieldSolver/SpectralSolver/SpectralFieldDataRZ.H" +#include #include @@ -66,7 +67,7 @@ class SpectralBaseAlgorithmRZ */ void ComputeSpectralDivE ( int lev, SpectralFieldDataRZ& field_data, - const std::array,3>& Efield, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE ); /** diff --git a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp index f8ef0ef4730..3e556363a6f 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralAlgorithms/SpectralBaseAlgorithmRZ.cpp @@ -6,6 +6,8 @@ */ #include "SpectralBaseAlgorithmRZ.H" +#include + #include using namespace amrex; @@ -18,7 +20,7 @@ void SpectralBaseAlgorithmRZ::ComputeSpectralDivE ( const int lev, SpectralFieldDataRZ& field_data, - const std::array,3>& Efield, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE ) { using amrex::operator""_rt; diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index 
1aa1e540711..bcd80e421a8 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -11,9 +11,9 @@ #include "SpectralAlgorithms/SpectralBaseAlgorithm.H" #include "SpectralFieldData.H" - #include "Utils/WarpXAlgorithmSelection.H" +#include #include #include @@ -127,9 +127,12 @@ class SpectralSolver * \brief Public interface to call the member function ComputeSpectralDivE * of the base class SpectralBaseAlgorithm from objects of class SpectralSolver */ - void ComputeSpectralDivE ( int lev, - const std::array,3>& Efield, - amrex::MultiFab& divE ) { + void ComputeSpectralDivE ( + int lev, + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE + ) + { algorithm->ComputeSpectralDivE( lev, field_data, Efield, divE ); } diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H index 004255e4d72..61cf64036eb 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H @@ -12,6 +12,7 @@ #include "SpectralAlgorithms/SpectralBaseAlgorithmRZ.H" #include "SpectralFieldDataRZ.H" +#include #include @@ -95,7 +96,8 @@ class SpectralSolverRZ * \brief Public interface to call the member function ComputeSpectralDivE * of the base class SpectralBaseAlgorithmRZ from objects of class SpectralSolverRZ */ - void ComputeSpectralDivE (int lev, const std::array,3>& Efield, + void ComputeSpectralDivE (int lev, + ablastr::fields::VectorField const & Efield, amrex::MultiFab& divE); /** diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp index 7eb3f2c3ae6..9a8cff9f1f3 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp @@ -142,8 +142,10 @@ SpectralSolverRZ::pushSpectralFields (const bool doing_pml) { */ void SpectralSolverRZ::ComputeSpectralDivE 
(const int lev, - const std::array,3>& Efield, - amrex::MultiFab& divE) { + ablastr::fields::VectorField const & Efield, + amrex::MultiFab& divE +) +{ algorithm->ComputeSpectralDivE(lev, field_data, Efield, divE); } diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 602a2666b27..fd786dc65ba 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -10,6 +10,7 @@ #include "BoundaryConditions/PML.H" #include "Evolve/WarpXDtType.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #if defined(WARPX_USE_FFT) # include "FieldSolver/SpectralSolver/SpectralFieldData.H" @@ -53,6 +54,7 @@ #include using namespace amrex; +using warpx::fields::FieldType; #ifdef WARPX_USE_FFT namespace { @@ -64,7 +66,7 @@ namespace { #else SpectralSolver& solver, #endif - const std::array,3>& vector_field, + const ablastr::fields::VectorField& vector_field, const int compx, const int compy, const int compz) { #ifdef WARPX_DIM_RZ @@ -84,7 +86,7 @@ namespace { #else SpectralSolver& solver, #endif - const std::array,3>& vector_field, + const ablastr::fields::VectorField& vector_field, const int compx, const int compy, const int compz, const amrex::IntVect& fill_guards) { @@ -100,63 +102,93 @@ namespace { } } -void WarpX::PSATDForwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp) +void WarpX::PSATDForwardTransformEB () { const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; + const std::string Efield_fp_string = "Efield_fp"; + const std::string Efield_cp_string = "Efield_cp"; + const std::string Bfield_fp_string = "Bfield_fp"; + const std::string Bfield_cp_string = "Bfield_cp"; + for (int lev = 0; lev <= finest_level; ++lev) { - ForwardTransformVect(lev, *spectral_solver_fp[lev], E_fp[lev], Idx.Ex, Idx.Ey, Idx.Ez); - ForwardTransformVect(lev, 
*spectral_solver_fp[lev], B_fp[lev], Idx.Bx, Idx.By, Idx.Bz); + if (m_fields.has_vector(Efield_fp_string, lev)) { + ablastr::fields::VectorField const E_fp = m_fields.get_alldirs(Efield_fp_string, lev); + ForwardTransformVect(lev, *spectral_solver_fp[lev], E_fp, Idx.Ex, Idx.Ey, Idx.Ez); + } + if (m_fields.has_vector(Bfield_fp_string, lev)) { + ablastr::fields::VectorField const B_fp = m_fields.get_alldirs(Bfield_fp_string, lev); + ForwardTransformVect(lev, *spectral_solver_fp[lev], B_fp, Idx.Bx, Idx.By, Idx.Bz); + } if (spectral_solver_cp[lev]) { - ForwardTransformVect(lev, *spectral_solver_cp[lev], E_cp[lev], Idx.Ex, Idx.Ey, Idx.Ez); - ForwardTransformVect(lev, *spectral_solver_cp[lev], B_cp[lev], Idx.Bx, Idx.By, Idx.Bz); + if (m_fields.has_vector(Efield_cp_string, lev)) { + ablastr::fields::VectorField const E_cp = m_fields.get_alldirs(Efield_cp_string, lev); + ForwardTransformVect(lev, *spectral_solver_cp[lev], E_cp, Idx.Ex, Idx.Ey, Idx.Ez); + } + if (m_fields.has_vector(Bfield_cp_string, lev)) { + ablastr::fields::VectorField const B_cp = m_fields.get_alldirs(Bfield_cp_string, lev); + ForwardTransformVect(lev, *spectral_solver_cp[lev], B_cp, Idx.Bx, Idx.By, Idx.Bz); + } } } } -void WarpX::PSATDBackwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp) +void WarpX::PSATDBackwardTransformEB () { const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; + const std::string Efield_fp_string = "Efield_fp"; + const std::string Efield_cp_string = "Efield_cp"; + const std::string Bfield_fp_string = "Bfield_fp"; + const std::string Bfield_cp_string = "Bfield_cp"; + for (int lev = 0; lev <= finest_level; ++lev) { - BackwardTransformVect(lev, *spectral_solver_fp[lev], E_fp[lev], - Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); - BackwardTransformVect(lev, *spectral_solver_fp[lev], B_fp[lev], - Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + if 
(m_fields.has_vector(Efield_fp_string, lev)) { + ablastr::fields::VectorField const E_fp = m_fields.get_alldirs(Efield_fp_string, lev); + BackwardTransformVect(lev, *spectral_solver_fp[lev], E_fp, + Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); + } + if (m_fields.has_vector(Bfield_fp_string, lev)) { + ablastr::fields::VectorField const B_fp = m_fields.get_alldirs(Bfield_fp_string, lev); + BackwardTransformVect(lev, *spectral_solver_fp[lev], B_fp, + Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + } if (spectral_solver_cp[lev]) { - BackwardTransformVect(lev, *spectral_solver_cp[lev], E_cp[lev], - Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); - BackwardTransformVect(lev, *spectral_solver_cp[lev], B_cp[lev], - Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + if (m_fields.has_vector(Efield_cp_string, lev)) { + ablastr::fields::VectorField const E_cp = m_fields.get_alldirs(Efield_cp_string, lev); + BackwardTransformVect(lev, *spectral_solver_cp[lev], E_cp, + Idx.Ex, Idx.Ey, Idx.Ez, m_fill_guards_fields); + } + if (m_fields.has_vector(Bfield_cp_string, lev)) { + ablastr::fields::VectorField const B_cp = m_fields.get_alldirs(Bfield_cp_string, lev); + BackwardTransformVect(lev, *spectral_solver_cp[lev], B_cp, + Idx.Bx, Idx.By, Idx.Bz, m_fill_guards_fields); + } } } // Damp the fields in the guard cells for (int lev = 0; lev <= finest_level; ++lev) { - DampFieldsInGuards(lev, E_fp[lev], B_fp[lev]); + if (m_fields.has_vector(Efield_fp_string, lev) && m_fields.has_vector(Bfield_fp_string, lev)) { + ablastr::fields::VectorField const E_fp = m_fields.get_alldirs(Efield_fp_string, lev); + ablastr::fields::VectorField const B_fp = m_fields.get_alldirs(Bfield_fp_string, lev); + DampFieldsInGuards(lev, E_fp, B_fp); + } } } void WarpX::PSATDBackwardTransformEBavg ( - const amrex::Vector,3>>& E_avg_fp, - const amrex::Vector,3>>& B_avg_fp, - const amrex::Vector,3>>& E_avg_cp, - const amrex::Vector,3>>& B_avg_cp) + ablastr::fields::MultiLevelVectorField const& E_avg_fp, + 
ablastr::fields::MultiLevelVectorField const& B_avg_fp, + ablastr::fields::MultiLevelVectorField const& E_avg_cp, + ablastr::fields::MultiLevelVectorField const& B_avg_cp) { const SpectralFieldIndex& Idx = spectral_solver_fp[0]->m_spectral_index; @@ -184,11 +216,15 @@ WarpX::PSATDForwardTransformF () for (int lev = 0; lev <= finest_level; ++lev) { - if (F_fp[lev]) { spectral_solver_fp[lev]->ForwardTransform(lev, *F_fp[lev], Idx.F); } + if (m_fields.has(FieldType::F_fp, lev)) { + spectral_solver_fp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::F_fp, lev), Idx.F); + } if (spectral_solver_cp[lev]) { - if (F_cp[lev]) { spectral_solver_cp[lev]->ForwardTransform(lev, *F_cp[lev], Idx.F); } + if (m_fields.has(FieldType::F_cp, lev)) { + spectral_solver_cp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::F_cp, lev), Idx.F); + } } } } @@ -201,17 +237,17 @@ WarpX::PSATDBackwardTransformF () for (int lev = 0; lev <= finest_level; ++lev) { #ifdef WARPX_DIM_RZ - if (F_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], Idx.F); } + if (m_fields.has(FieldType::F_fp, lev)) { spectral_solver_fp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_fp, lev), Idx.F); } #else - if (F_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *F_fp[lev], Idx.F, m_fill_guards_fields); } + if (m_fields.has(FieldType::F_fp, lev)) { spectral_solver_fp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_fp, lev), Idx.F, m_fill_guards_fields); } #endif if (spectral_solver_cp[lev]) { #ifdef WARPX_DIM_RZ - if (F_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], Idx.F); } + if (m_fields.has(FieldType::F_cp, lev)) { spectral_solver_cp[lev]->BackwardTransform(lev, *m_fields.get(FieldType::F_cp, lev), Idx.F); } #else - if (F_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *F_cp[lev], Idx.F, m_fill_guards_fields); } + if (m_fields.has(FieldType::F_cp, lev)) { spectral_solver_cp[lev]->BackwardTransform(lev, 
*m_fields.get(FieldType::F_cp, lev), Idx.F, m_fill_guards_fields); } #endif } } @@ -219,7 +255,7 @@ WarpX::PSATDBackwardTransformF () // Damp the field in the guard cells for (int lev = 0; lev <= finest_level; ++lev) { - DampFieldsInGuards(lev, F_fp[lev]); + DampFieldsInGuards(lev, m_fields.get(FieldType::F_fp, lev)); } } @@ -230,11 +266,15 @@ WarpX::PSATDForwardTransformG () for (int lev = 0; lev <= finest_level; ++lev) { - if (G_fp[lev]) { spectral_solver_fp[lev]->ForwardTransform(lev, *G_fp[lev], Idx.G); } + if (m_fields.has(FieldType::G_fp, lev)) { + spectral_solver_fp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::G_fp, lev), Idx.G); + } if (spectral_solver_cp[lev]) { - if (G_cp[lev]) { spectral_solver_cp[lev]->ForwardTransform(lev, *G_cp[lev], Idx.G); } + if (m_fields.has(FieldType::G_cp, lev)) { + spectral_solver_fp[lev]->ForwardTransform(lev, *m_fields.get(FieldType::G_cp, lev), Idx.G); + } } } } @@ -246,34 +286,38 @@ WarpX::PSATDBackwardTransformG () for (int lev = 0; lev <= finest_level; ++lev) { + if (m_fields.has(FieldType::G_fp, lev)) { + MultiFab* G_fp = m_fields.get(FieldType::G_fp, lev); #ifdef WARPX_DIM_RZ - if (G_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], Idx.G); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp, Idx.G); #else - if (G_fp[lev]) { spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp[lev], Idx.G, m_fill_guards_fields); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_fp, Idx.G, m_fill_guards_fields); #endif + DampFieldsInGuards(lev, G_fp); + } + if (spectral_solver_cp[lev]) { + if (m_fields.has(FieldType::G_cp, lev)) { + MultiFab* G_cp = m_fields.get(FieldType::G_cp, lev); #ifdef WARPX_DIM_RZ - if (G_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], Idx.G); } + spectral_solver_fp[lev]->BackwardTransform(lev, *G_cp, Idx.G); #else - if (G_cp[lev]) { spectral_solver_cp[lev]->BackwardTransform(lev, *G_cp[lev], Idx.G, m_fill_guards_fields); } + 
spectral_solver_fp[lev]->BackwardTransform(lev, *G_cp, Idx.G, m_fill_guards_fields); #endif + } } } - - // Damp the field in the guard cells - for (int lev = 0; lev <= finest_level; ++lev) - { - DampFieldsInGuards(lev, G_fp[lev]); - } } void WarpX::PSATDForwardTransformJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + std::string const & J_fp_string, + std::string const & J_cp_string, const bool apply_kspace_filter) { + if (!m_fields.has_vector(J_fp_string, 0)) { return; } + SpectralFieldIndex Idx; int idx_jx, idx_jy, idx_jz; @@ -285,7 +329,10 @@ void WarpX::PSATDForwardTransformJ ( idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); idx_jz = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); - ForwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], idx_jx, idx_jy, idx_jz); + if (m_fields.has_vector(J_fp_string, lev)) { + ablastr::fields::VectorField const J_fp = m_fields.get_alldirs(J_fp_string, lev); + ForwardTransformVect(lev, *spectral_solver_fp[lev], J_fp, idx_jx, idx_jy, idx_jz); + } if (spectral_solver_cp[lev]) { @@ -295,7 +342,10 @@ void WarpX::PSATDForwardTransformJ ( idx_jy = (J_in_time == JInTime::Linear) ? static_cast(Idx.Jy_new) : static_cast(Idx.Jy_mid); idx_jz = (J_in_time == JInTime::Linear) ? 
static_cast(Idx.Jz_new) : static_cast(Idx.Jz_mid); - ForwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], idx_jx, idx_jy, idx_jz); + if (m_fields.has_vector(J_cp_string, lev)) { + ablastr::fields::VectorField const J_cp = m_fields.get_alldirs(J_cp_string, lev); + ForwardTransformVect(lev, *spectral_solver_cp[lev], J_cp, idx_jx, idx_jy, idx_jz); + } } } @@ -331,9 +381,11 @@ void WarpX::PSATDForwardTransformJ ( } void WarpX::PSATDBackwardTransformJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp) + std::string const & J_fp_string, + std::string const & J_cp_string) { + if (!m_fields.has_vector(J_fp_string, 0)) { return; } + SpectralFieldIndex Idx; int idx_jx, idx_jy, idx_jz; @@ -347,8 +399,11 @@ void WarpX::PSATDBackwardTransformJ ( idx_jy = static_cast(Idx.Jy_mid); idx_jz = static_cast(Idx.Jz_mid); - BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp[lev], - idx_jx, idx_jy, idx_jz, m_fill_guards_current); + if (m_fields.has_vector(J_fp_string, lev)) { + ablastr::fields::VectorField const J_fp = m_fields.get_alldirs(J_fp_string, lev); + BackwardTransformVect(lev, *spectral_solver_fp[lev], J_fp, + idx_jx, idx_jy, idx_jz, m_fill_guards_current); + } if (spectral_solver_cp[lev]) { @@ -360,26 +415,35 @@ void WarpX::PSATDBackwardTransformJ ( idx_jy = static_cast(Idx.Jy_mid); idx_jz = static_cast(Idx.Jz_mid); - BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp[lev], - idx_jx, idx_jy, idx_jz, m_fill_guards_current); + if (m_fields.has_vector(J_cp_string, lev)) { + ablastr::fields::VectorField const J_cp = m_fields.get_alldirs(J_cp_string, lev); + BackwardTransformVect(lev, *spectral_solver_cp[lev], J_cp, + idx_jx, idx_jy, idx_jz, m_fill_guards_current); + } } } } void WarpX::PSATDForwardTransformRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, + std::string const & charge_fp_string, + std::string const & charge_cp_string, const int icomp, const int dcomp, const bool apply_kspace_filter) { - if 
(charge_fp[0] == nullptr) { return; } + if (!m_fields.has(charge_fp_string, 0)) { return; } for (int lev = 0; lev <= finest_level; ++lev) { - if (charge_fp[lev]) { spectral_solver_fp[lev]->ForwardTransform(lev, *charge_fp[lev], dcomp, icomp); } + if (m_fields.has(charge_fp_string, lev)) { + amrex::MultiFab const & charge_fp = *m_fields.get(charge_fp_string, lev); + spectral_solver_fp[lev]->ForwardTransform(lev, charge_fp, dcomp, icomp); + } if (spectral_solver_cp[lev]) { - if (charge_cp[lev]) { spectral_solver_cp[lev]->ForwardTransform(lev, *charge_cp[lev], dcomp, icomp); } + if (m_fields.has(charge_cp_string, lev)) { + amrex::MultiFab const & charge_cp = *m_fields.get(charge_cp_string, lev); + spectral_solver_cp[lev]->ForwardTransform(lev, charge_cp, dcomp, icomp); + } } } @@ -430,6 +494,8 @@ void WarpX::PSATDVayDeposition () void WarpX::PSATDSubtractCurrentPartialSumsAvg () { + using ablastr::fields::Direction; + // Subtraction of cumulative sum for Vay deposition // implemented only in 2D and 3D Cartesian geometry #if !defined (WARPX_DIM_1D_Z) && !defined (WARPX_DIM_RZ) @@ -441,15 +507,15 @@ void WarpX::PSATDSubtractCurrentPartialSumsAvg () { const std::array& dx = WarpX::CellSize(lev); - amrex::MultiFab const& Dx = *current_fp_vay[lev][0]; - amrex::MultiFab const& Dy = *current_fp_vay[lev][1]; - amrex::MultiFab const& Dz = *current_fp_vay[lev][2]; + amrex::MultiFab const& Dx = *m_fields.get(FieldType::current_fp_vay, Direction{0}, lev); + amrex::MultiFab const& Dy = *m_fields.get(FieldType::current_fp_vay, Direction{1}, lev); + amrex::MultiFab const& Dz = *m_fields.get(FieldType::current_fp_vay, Direction{2}, lev); #if defined (WARPX_DIM_XZ) amrex::ignore_unused(Dy); #endif - amrex::MultiFab& Jx = *current_fp[lev][0]; + amrex::MultiFab& Jx = *m_fields.get(FieldType::current_fp, Direction{0}, lev); #ifdef AMREX_USE_OMP @@ -480,7 +546,7 @@ void WarpX::PSATDSubtractCurrentPartialSumsAvg () #if defined (WARPX_DIM_3D) // Subtract average of cumulative sum from Jy - 
amrex::MultiFab& Jy = *current_fp[lev][1]; + amrex::MultiFab& Jy = *m_fields.get(FieldType::current_fp, Direction{1}, lev); for (amrex::MFIter mfi(Jy); mfi.isValid(); ++mfi) { const amrex::Box& bx = mfi.fabbox(); @@ -505,7 +571,7 @@ void WarpX::PSATDSubtractCurrentPartialSumsAvg () #endif // Subtract average of cumulative sum from Jz - amrex::MultiFab& Jz = *current_fp[lev][2]; + amrex::MultiFab& Jz = *m_fields.get(FieldType::current_fp, Direction{2}, lev); for (amrex::MFIter mfi(Jz); mfi.isValid(); ++mfi) { const amrex::Box& bx = mfi.fabbox(); @@ -658,46 +724,55 @@ WarpX::PushPSATD () const int rho_old = spectral_solver_fp[0]->m_spectral_index.rho_old; const int rho_new = spectral_solver_fp[0]->m_spectral_index.rho_new; + std::string const rho_fp_string = "rho_fp"; + std::string const rho_cp_string = "rho_cp"; + + const ablastr::fields::MultiLevelVectorField current_fp = m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level); + std::string current_fp_string = "current_fp"; + std::string const current_cp_string = "current_cp"; + if (fft_periodic_single_box) { if (current_correction) { // FFT of J and rho - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); // Correct J in k-space PSATDCurrentCorrection(); // Inverse FFT of J - PSATDBackwardTransformJ(current_fp, current_cp); + PSATDBackwardTransformJ(current_fp_string, current_cp_string); } else if (current_deposition_algo == CurrentDepositionAlgo::Vay) { // FFT of D and rho (if used) // TODO Replace current_cp with current_cp_vay once Vay deposition is implemented with MR - PSATDForwardTransformJ(current_fp_vay, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - 
PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + current_fp_string = "current_fp_vay"; + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); // Compute J from D in k-space PSATDVayDeposition(); // Inverse FFT of J, subtract cumulative sums of D - PSATDBackwardTransformJ(current_fp, current_cp); + current_fp_string = "current_fp"; + PSATDBackwardTransformJ(current_fp_string, current_cp_string); // TODO Cumulative sums need to be fixed with periodic single box PSATDSubtractCurrentPartialSumsAvg(); // FFT of J after subtraction of cumulative sums - PSATDForwardTransformJ(current_fp, current_cp); + PSATDForwardTransformJ(current_fp_string, current_cp_string); } else // no current correction, no Vay deposition { // FFT of J and rho (if used) - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); } } else // no periodic single box @@ -709,35 +784,37 @@ WarpX::PushPSATD () // In RZ geometry, do not apply filtering here, since it is // applied in the subsequent calls to these functions (below) const bool apply_kspace_filter = false; - PSATDForwardTransformJ(current_fp, current_cp, apply_kspace_filter); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old, apply_kspace_filter); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new, apply_kspace_filter); + PSATDForwardTransformJ(current_fp_string, current_cp_string, apply_kspace_filter); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old, apply_kspace_filter); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new, 
apply_kspace_filter); #else - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); #endif // Correct J in k-space PSATDCurrentCorrection(); // Inverse FFT of J - PSATDBackwardTransformJ(current_fp, current_cp); + PSATDBackwardTransformJ(current_fp_string, current_cp_string); // Synchronize J and rho - SyncCurrent(current_fp, current_cp, current_buf); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncCurrent("current_fp"); + SyncRho(); } else if (current_deposition_algo == CurrentDepositionAlgo::Vay) { // FFT of D - PSATDForwardTransformJ(current_fp_vay, current_cp); + current_fp_string = "current_fp_vay"; + PSATDForwardTransformJ(current_fp_string, current_cp_string); // Compute J from D in k-space PSATDVayDeposition(); // Inverse FFT of J, subtract cumulative sums of D - PSATDBackwardTransformJ(current_fp, current_cp); + current_fp_string = "current_fp"; + PSATDBackwardTransformJ(current_fp_string, current_cp_string); PSATDSubtractCurrentPartialSumsAvg(); // Synchronize J and rho (if used). 
@@ -747,17 +824,17 @@ WarpX::PushPSATD () // TODO This works only without mesh refinement const int lev = 0; SumBoundaryJ(current_fp, lev, Geom(lev).periodicity()); - SyncRho(rho_fp, rho_cp, charge_buf); + SyncRho(); } // FFT of J and rho (if used) - PSATDForwardTransformJ(current_fp, current_cp); - PSATDForwardTransformRho(rho_fp, rho_cp, 0, rho_old); - PSATDForwardTransformRho(rho_fp, rho_cp, 1, rho_new); + PSATDForwardTransformJ(current_fp_string, current_cp_string); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 0, rho_old); + PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); } // FFT of E and B - PSATDForwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDForwardTransformEB(); #ifdef WARPX_DIM_RZ if (pml_rz[0]) { pml_rz[0]->PushPSATD(0); } @@ -771,8 +848,12 @@ WarpX::PushPSATD () PSATDPushSpectralFields(); // Inverse FFT of E, B, F, and G - PSATDBackwardTransformEB(Efield_fp, Bfield_fp, Efield_cp, Bfield_cp); + PSATDBackwardTransformEB(); if (WarpX::fft_do_time_averaging) { + auto Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); + auto Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); + auto Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); + auto Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); PSATDBackwardTransformEBavg(Efield_avg_fp, Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp); } if (WarpX::do_dive_cleaning) { PSATDBackwardTransformF(); } @@ -783,7 +864,7 @@ WarpX::PushPSATD () { if (pml[lev] && pml[lev]->ok()) { - pml[lev]->PushPSATD(lev); + pml[lev]->PushPSATD(m_fields, lev); } ApplyEfieldBoundary(lev, PatchType::fine); if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse); } @@ -818,26 +899,27 @@ WarpX::EvolveB (int lev, amrex::Real a_dt, DtType a_dt_type) void WarpX::EvolveB (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_type) { - // 
Evolve B field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveB(Bfield_fp[lev], Efield_fp[lev], G_fp[lev], - m_face_areas[lev], m_area_mod[lev], ECTRhofield[lev], Venl[lev], - m_flag_info_face[lev], m_borrowing[lev], lev, a_dt); + m_fdtd_solver_fp[lev]->EvolveB( m_fields, + lev, + patch_type, + m_flag_info_face[lev], m_borrowing[lev], a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveB(Bfield_cp[lev], Efield_cp[lev], G_cp[lev], - m_face_areas[lev], m_area_mod[lev], ECTRhofield[lev], Venl[lev], - m_flag_info_face[lev], m_borrowing[lev], lev, a_dt); + m_fdtd_solver_cp[lev]->EvolveB( m_fields, + lev, + patch_type, + m_flag_info_face[lev], m_borrowing[lev], a_dt ); } // Evolve B field in PML cells if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveBPML( - pml[lev]->GetB_fp(), pml[lev]->GetE_fp(), a_dt, WarpX::do_dive_cleaning); + m_fields, patch_type, lev, a_dt, WarpX::do_dive_cleaning); } else { m_fdtd_solver_cp[lev]->EvolveBPML( - pml[lev]->GetB_cp(), pml[lev]->GetE_cp(), a_dt, WarpX::do_dive_cleaning); + m_fields, patch_type, lev, a_dt, WarpX::do_dive_cleaning); } } @@ -873,31 +955,33 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { // Evolve E field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveE(Efield_fp[lev], Bfield_fp[lev], - current_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_fp[lev], lev, a_dt ); + m_fdtd_solver_fp[lev]->EvolveE( m_fields, + lev, + patch_type, + m_fields.get_alldirs(FieldType::Efield_fp, lev), + a_dt ); } else { - m_fdtd_solver_cp[lev]->EvolveE(Efield_cp[lev], Bfield_cp[lev], - current_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], - F_cp[lev], lev, a_dt ); + m_fdtd_solver_cp[lev]->EvolveE( m_fields, + lev, + patch_type, + m_fields.get_alldirs(FieldType::Efield_cp, lev), + a_dt ); } // Evolve E field in PML cells if (do_pml && pml[lev]->ok()) { if 
(patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveEPML( - pml[lev]->GetE_fp(), pml[lev]->GetB_fp(), - pml[lev]->Getj_fp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_fp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_fp(), a_dt, pml_has_particles ); } else { m_fdtd_solver_cp[lev]->EvolveEPML( - pml[lev]->GetE_cp(), pml[lev]->GetB_cp(), - pml[lev]->Getj_cp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_cp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_cp(), a_dt, pml_has_particles ); } @@ -910,11 +994,17 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) #ifdef AMREX_USE_EB if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveECTRho(Efield_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + m_fdtd_solver_fp[lev]->EvolveECTRho( m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev ); } else { - m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + m_fdtd_solver_cp[lev]->EvolveECTRho( m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev); } } #endif @@ -952,21 +1042,27 @@ WarpX::EvolveF (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_typ // Evolve F field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveF( F_fp[lev], Efield_fp[lev], - rho_fp[lev], rhocomp, a_dt ); + m_fdtd_solver_fp[lev]->EvolveF( m_fields.get(FieldType::F_fp, lev), + m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get(FieldType::rho_fp,lev), rhocomp, a_dt ); } else { - 
m_fdtd_solver_cp[lev]->EvolveF( F_cp[lev], Efield_cp[lev], - rho_cp[lev], rhocomp, a_dt ); + m_fdtd_solver_cp[lev]->EvolveF( m_fields.get(FieldType::F_cp, lev), + m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_fields.get(FieldType::rho_cp,lev), rhocomp, a_dt ); } // Evolve F field in PML cells if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveFPML( - pml[lev]->GetF_fp(), pml[lev]->GetE_fp(), a_dt ); + m_fields.get(FieldType::pml_F_fp, lev), + m_fields.get_alldirs(FieldType::pml_E_fp, lev), + a_dt ); } else { m_fdtd_solver_cp[lev]->EvolveFPML( - pml[lev]->GetF_cp(), pml[lev]->GetE_cp(), a_dt ); + m_fields.get(FieldType::pml_F_cp, lev), + m_fields.get_alldirs(FieldType::pml_E_cp, lev), + a_dt ); } } } @@ -1005,11 +1101,17 @@ WarpX::EvolveG (int lev, PatchType patch_type, amrex::Real a_dt, DtType /*a_dt_t // Evolve G field in regular cells if (patch_type == PatchType::fine) { - m_fdtd_solver_fp[lev]->EvolveG(G_fp[lev], Bfield_fp[lev], a_dt); + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + m_fdtd_solver_fp[lev]->EvolveG( + m_fields.get(FieldType::G_fp, lev), + Bfield_fp[lev], a_dt); } else // coarse patch { - m_fdtd_solver_cp[lev]->EvolveG(G_cp[lev], Bfield_cp[lev], a_dt); + ablastr::fields::MultiLevelVectorField const& Bfield_cp_new = m_fields.get_mr_levels_alldirs(FieldType::Bfield_cp, finest_level); + m_fdtd_solver_cp[lev]->EvolveG( + m_fields.get(FieldType::G_cp, lev), + Bfield_cp_new[lev], a_dt); } // TODO Evolution in PML cells will go here @@ -1045,23 +1147,25 @@ WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { ); m_fdtd_solver_fp[lev]->MacroscopicEvolveE( - Efield_fp[lev], Bfield_fp[lev], - current_fp[lev], m_edge_lengths[lev], + m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get_alldirs(FieldType::Bfield_fp, lev), + m_fields.get_alldirs(FieldType::current_fp, lev), + 
m_fields.get_alldirs(FieldType::edge_lengths, lev), a_dt, m_macroscopic_properties); if (do_pml && pml[lev]->ok()) { if (patch_type == PatchType::fine) { m_fdtd_solver_fp[lev]->EvolveEPML( - pml[lev]->GetE_fp(), pml[lev]->GetB_fp(), - pml[lev]->Getj_fp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_fp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_fp(), a_dt, pml_has_particles ); } else { m_fdtd_solver_cp[lev]->EvolveEPML( - pml[lev]->GetE_cp(), pml[lev]->GetB_cp(), - pml[lev]->Getj_cp(), pml[lev]->Get_edge_lengths(), - pml[lev]->GetF_cp(), + m_fields, + patch_type, + lev, pml[lev]->GetMultiSigmaBox_cp(), a_dt, pml_has_particles ); } @@ -1072,8 +1176,8 @@ WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { void WarpX::DampFieldsInGuards(const int lev, - const std::array,3>& Efield, - const std::array,3>& Bfield) { + const ablastr::fields::VectorField& Efield, + const ablastr::fields::VectorField& Bfield) { // Loop over dimensions for (int dampdir = 0 ; dampdir < AMREX_SPACEDIM ; dampdir++) @@ -1169,7 +1273,7 @@ WarpX::DampFieldsInGuards(const int lev, } } -void WarpX::DampFieldsInGuards(const int lev, std::unique_ptr& mf) +void WarpX::DampFieldsInGuards(const int lev, amrex::MultiFab* mf) { // Loop over dimensions for (int dampdir = 0; dampdir < AMREX_SPACEDIM; dampdir++) diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index c16f0193b8d..be2d40459ac 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -7,6 +7,7 @@ * License: BSD-3-Clause-LBNL */ #include "Evolve/WarpXDtType.H" +#include "Fields.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Particles/MultiParticleContainer.H" #include "Utils/TextMsg.H" @@ -15,10 +16,16 @@ #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" +#include + + using namespace amrex; void WarpX::HybridPICEvolveFields () 
{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + WARPX_PROFILE("WarpX::HybridPICEvolveFields()"); // The below deposition is hard coded for a single level simulation @@ -28,15 +35,19 @@ void WarpX::HybridPICEvolveFields () // The particles have now been pushed to their t_{n+1} positions. // Perform charge deposition in component 0 of rho_fp at t_{n+1}. - mypc->DepositCharge(rho_fp, 0._rt); + mypc->DepositCharge(m_fields.get_mr_levels(FieldType::rho_fp, finest_level), 0._rt); // Perform current deposition at t_{n+1/2}. - mypc->DepositCurrent(current_fp, dt[0], -0.5_rt * dt[0]); + mypc->DepositCurrent(m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), dt[0], -0.5_rt * dt[0]); // Deposit cold-relativistic fluid charge and current if (do_fluid_species) { int const lev = 0; - myfl->DepositCharge(lev, *rho_fp[lev]); - myfl->DepositCurrent(lev, *current_fp[lev][0], *current_fp[lev][1], *current_fp[lev][2]); + myfl->DepositCharge(m_fields, *m_fields.get(FieldType::rho_fp, lev), lev); + myfl->DepositCurrent(m_fields, + *m_fields.get(FieldType::current_fp, Direction{0}, lev), + *m_fields.get(FieldType::current_fp, Direction{1}, lev), + *m_fields.get(FieldType::current_fp, Direction{2}, lev), + lev); } // Synchronize J and rho: @@ -49,7 +60,7 @@ void WarpX::HybridPICEvolveFields () // a nodal grid for (int lev = 0; lev <= finest_level; ++lev) { for (int idim = 0; idim < 3; ++idim) { - current_fp[lev][idim]->FillBoundary(Geom(lev).periodicity()); + m_fields.get(FieldType::current_fp, Direction{idim}, lev)->FillBoundary(Geom(lev).periodicity()); } } @@ -57,11 +68,12 @@ void WarpX::HybridPICEvolveFields () const int sub_steps = m_hybrid_pic_model->m_substeps; // Get the external current - m_hybrid_pic_model->GetCurrentExternal(m_edge_lengths); + m_hybrid_pic_model->GetCurrentExternal( + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); // Reference hybrid-PIC multifabs - auto& rho_fp_temp = 
m_hybrid_pic_model->rho_fp_temp; - auto& current_fp_temp = m_hybrid_pic_model->current_fp_temp; + ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); + ablastr::fields::MultiLevelVectorField current_fp_temp = m_fields.get_mr_levels_alldirs(FieldType::hybrid_current_fp_temp, finest_level); // During the above deposition the charge and current density were updated // so that, at this time, we have rho^{n} in rho_fp_temp, rho{n+1} in the @@ -82,7 +94,7 @@ void WarpX::HybridPICEvolveFields () MultiFab::LinComb( *current_fp_temp[lev][idim], 0.5_rt, *current_fp_temp[lev][idim], 0, - 0.5_rt, *current_fp[lev][idim], 0, + 0.5_rt, *m_fields.get(FieldType::current_fp, Direction{idim}, lev), 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect() ); } @@ -94,8 +106,11 @@ void WarpX::HybridPICEvolveFields () for (int sub_step = 0; sub_step < sub_steps; sub_step++) { m_hybrid_pic_model->BfieldEvolveRK( - Bfield_fp, Efield_fp, current_fp_temp, rho_fp_temp, - m_edge_lengths, 0.5_rt/sub_steps*dt[0], + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), + current_fp_temp, rho_fp_temp, + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), + 0.5_rt/sub_steps*dt[0], DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -109,7 +124,7 @@ void WarpX::HybridPICEvolveFields () // the result into the 0'th index of `rho_fp_temp[lev]` MultiFab::LinComb( *rho_fp_temp[lev], 0.5_rt, *rho_fp_temp[lev], 0, - 0.5_rt, *rho_fp[lev], 0, 0, 1, rho_fp_temp[lev]->nGrowVect() + 0.5_rt, *m_fields.get(FieldType::rho_fp, lev), 0, 0, 1, rho_fp_temp[lev]->nGrowVect() ); } @@ -117,8 +132,12 @@ void WarpX::HybridPICEvolveFields () for (int sub_step = 0; sub_step < sub_steps; sub_step++) { m_hybrid_pic_model->BfieldEvolveRK( - Bfield_fp, Efield_fp, current_fp, rho_fp_temp, - m_edge_lengths, 0.5_rt/sub_steps*dt[0], + 
m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), + rho_fp_temp, + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), + 0.5_rt/sub_steps*dt[0], DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points ); @@ -136,7 +155,7 @@ void WarpX::HybridPICEvolveFields () MultiFab::LinComb( *current_fp_temp[lev][idim], -1._rt, *current_fp_temp[lev][idim], 0, - 2._rt, *current_fp[lev][idim], 0, + 2._rt, *m_fields.get(FieldType::current_fp, Direction{idim}, lev), 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect() ); } @@ -146,9 +165,15 @@ void WarpX::HybridPICEvolveFields () m_hybrid_pic_model->CalculateElectronPressure(); // Update the E field to t=n+1 using the extrapolated J_i^n+1 value - m_hybrid_pic_model->CalculateCurrentAmpere(Bfield_fp, m_edge_lengths); + m_hybrid_pic_model->CalculatePlasmaCurrent( + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); m_hybrid_pic_model->HybridPICSolveE( - Efield_fp, current_fp_temp, Bfield_fp, rho_fp, m_edge_lengths, false + m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), + current_fp_temp, + m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), false ); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); @@ -158,10 +183,10 @@ void WarpX::HybridPICEvolveFields () for (int lev = 0; lev <= finest_level; ++lev) { // copy 1 component value starting at index 0 to index 0 - MultiFab::Copy(*rho_fp_temp[lev], *rho_fp[lev], + MultiFab::Copy(*rho_fp_temp[lev], *m_fields.get(FieldType::rho_fp, lev), 0, 0, 1, rho_fp_temp[lev]->nGrowVect()); for (int idim = 0; idim < 3; ++idim) { - 
MultiFab::Copy(*current_fp_temp[lev][idim], *current_fp[lev][idim], + MultiFab::Copy(*current_fp_temp[lev][idim], *m_fields.get(FieldType::current_fp, Direction{idim}, lev), 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect()); } } @@ -169,12 +194,14 @@ void WarpX::HybridPICEvolveFields () void WarpX::HybridPICDepositInitialRhoAndJ () { - auto& rho_fp_temp = m_hybrid_pic_model->rho_fp_temp; - auto& current_fp_temp = m_hybrid_pic_model->current_fp_temp; + using warpx::fields::FieldType; + + ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); + ablastr::fields::MultiLevelVectorField current_fp_temp = m_fields.get_mr_levels_alldirs(FieldType::hybrid_current_fp_temp, finest_level); mypc->DepositCharge(rho_fp_temp, 0._rt); mypc->DepositCurrent(current_fp_temp, dt[0], 0._rt); - SyncRho(rho_fp_temp, rho_cp, charge_buf); - SyncCurrent(current_fp_temp, current_cp, current_buf); + SyncRho(rho_fp_temp, m_fields.get_mr_levels(FieldType::rho_cp, finest_level), m_fields.get_mr_levels(FieldType::rho_buf, finest_level)); + SyncCurrent("hybrid_current_fp_temp"); for (int lev=0; lev <= finest_level; ++lev) { // SyncCurrent does not include a call to FillBoundary, but it is needed // for the hybrid-PIC solver since current values are interpolated to @@ -183,12 +210,12 @@ void WarpX::HybridPICDepositInitialRhoAndJ () current_fp_temp[lev][1]->FillBoundary(Geom(lev).periodicity()); current_fp_temp[lev][2]->FillBoundary(Geom(lev).periodicity()); - ApplyRhofieldBoundary(lev, rho_fp_temp[lev].get(), PatchType::fine); + ApplyRhofieldBoundary(lev, rho_fp_temp[lev], PatchType::fine); // Set current density at PEC boundaries, if needed. 
ApplyJfieldBoundary( - lev, current_fp_temp[lev][0].get(), - current_fp_temp[lev][1].get(), - current_fp_temp[lev][2].get(), + lev, current_fp_temp[lev][0], + current_fp_temp[lev][1], + current_fp_temp[lev][2], PatchType::fine ); } diff --git a/Source/FieldSolver/WarpXSolveFieldsES.cpp b/Source/FieldSolver/WarpXSolveFieldsES.cpp index 42a537b5c2a..6194570cd2d 100644 --- a/Source/FieldSolver/WarpXSolveFieldsES.cpp +++ b/Source/FieldSolver/WarpXSolveFieldsES.cpp @@ -7,24 +7,29 @@ * License: BSD-3-Clause-LBNL */ #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" + +#include "Fields.H" #include "Utils/WarpXProfilerWrapper.H" #include "WarpX.H" + void WarpX::ComputeSpaceChargeField (bool const reset_fields) { WARPX_PROFILE("WarpX::ComputeSpaceChargeField"); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + if (reset_fields) { // Reset all E and B fields to 0, before calculating space-charge fields WARPX_PROFILE("WarpX::ComputeSpaceChargeField::reset_fields"); for (int lev = 0; lev <= max_level; lev++) { for (int comp=0; comp<3; comp++) { - Efield_fp[lev][comp]->setVal(0); - Bfield_fp[lev][comp]->setVal(0); + m_fields.get(FieldType::Efield_fp, Direction{comp}, lev)->setVal(0); + m_fields.get(FieldType::Bfield_fp, Direction{comp}, lev)->setVal(0); } } } m_electrostatic_solver->ComputeSpaceChargeField( - rho_fp, rho_cp, charge_buf, phi_fp, *mypc, myfl.get(), Efield_fp, Bfield_fp - ); + m_fields, *mypc, myfl.get(), max_level ); } diff --git a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp index 9741d9b667b..1ff1d1f866d 100644 --- a/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp +++ b/Source/FieldSolver/WarpX_QED_Field_Pushers.cpp @@ -6,6 +6,7 @@ */ #include "WarpX.H" +#include "Fields.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" @@ -69,36 +70,41 @@ WarpX::Hybrid_QED_Push (int lev, amrex::Real a_dt) void 
WarpX::Hybrid_QED_Push (int lev, PatchType patch_type, amrex::Real a_dt) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + const int patch_level = (patch_type == PatchType::fine) ? lev : lev-1; const std::array& dx_vec= WarpX::CellSize(patch_level); const Real dx = dx_vec[0]; const Real dy = dx_vec[1]; const Real dz = dx_vec[2]; + using ablastr::fields::Direction; + MultiFab *Ex, *Ey, *Ez, *Bx, *By, *Bz, *Jx, *Jy, *Jz; if (patch_type == PatchType::fine) { - Ex = Efield_fp[lev][0].get(); - Ey = Efield_fp[lev][1].get(); - Ez = Efield_fp[lev][2].get(); - Bx = Bfield_fp[lev][0].get(); - By = Bfield_fp[lev][1].get(); - Bz = Bfield_fp[lev][2].get(); - Jx = current_fp[lev][0].get(); - Jy = current_fp[lev][1].get(); - Jz = current_fp[lev][2].get(); + Ex = m_fields.get(FieldType::Efield_fp, Direction{0}, lev); + Ey = m_fields.get(FieldType::Efield_fp, Direction{1}, lev); + Ez = m_fields.get(FieldType::Efield_fp, Direction{2}, lev); + Bx = m_fields.get(FieldType::Bfield_fp, Direction{0}, lev); + By = m_fields.get(FieldType::Bfield_fp, Direction{1}, lev); + Bz = m_fields.get(FieldType::Bfield_fp, Direction{2}, lev); + Jx = m_fields.get(FieldType::current_fp, Direction{0}, lev); + Jy = m_fields.get(FieldType::current_fp, Direction{1}, lev); + Jz = m_fields.get(FieldType::current_fp, Direction{2}, lev); } else { - Ex = Efield_cp[lev][0].get(); - Ey = Efield_cp[lev][1].get(); - Ez = Efield_cp[lev][2].get(); - Bx = Bfield_cp[lev][0].get(); - By = Bfield_cp[lev][1].get(); - Bz = Bfield_cp[lev][2].get(); - Jx = current_cp[lev][0].get(); - Jy = current_cp[lev][1].get(); - Jz = current_cp[lev][2].get(); + Ex = m_fields.get(FieldType::Efield_cp, Direction{0}, lev); + Ey = m_fields.get(FieldType::Efield_cp, Direction{1}, lev); + Ez = m_fields.get(FieldType::Efield_cp, Direction{2}, lev); + Bx = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev); + By = m_fields.get(FieldType::Bfield_cp, Direction{1}, lev); + Bz = m_fields.get(FieldType::Bfield_cp, 
Direction{2}, lev); + Jx = m_fields.get(FieldType::current_cp, Direction{0}, lev); + Jy = m_fields.get(FieldType::current_cp, Direction{1}, lev); + Jz = m_fields.get(FieldType::current_cp, Direction{2}, lev); } amrex::LayoutData* cost = WarpX::getCosts(lev); diff --git a/Source/Fields.H b/Source/Fields.H new file mode 100644 index 00000000000..b07661254c4 --- /dev/null +++ b/Source/Fields.H @@ -0,0 +1,137 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + * Authors: Luca Fedeli, Justin Angus, Remi Lehe, Axel Huebl + */ +#ifndef WARPX_FIELDS_H_ +#define WARPX_FIELDS_H_ + +#include + +#include + +#include +#include + + +namespace warpx::fields +{ + /** Unique identifiers for WarpX scalar and vector fields. + * + * These are implemented as amrex::MultiFab (one or one per component "direction", + * respectively) and stored in the ablastr::fields::MultiFabRegister . + */ + AMREX_ENUM(FieldType, + None, + Efield_aux, /**< Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData */ + Bfield_aux, /**< Field that the particles gather from. Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData */ + Efield_fp, /**< The field that is updated by the field solver at each timestep */ + Bfield_fp, /**< The field that is updated by the field solver at each timestep */ + Efield_fp_external, /**< Stores grid particle fields provided by the user as through an openPMD file */ + Bfield_fp_external, /**< Stores grid particle fields provided by the user as through an openPMD file */ + current_fp, /**< The current that is used as a source for the field solver */ + current_fp_nodal, /**< Only used when using nodal current deposition */ + current_fp_vay, /**< Only used when using Vay current deposition */ + current_buf, /**< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. 
*/ + current_store, /**< Only used when doing subcycling with mesh refinement, for book-keeping of currents */ + rho_buf, /**< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. */ + rho_fp, /**< The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) */ + F_fp, /**< Used for divE cleaning */ + G_fp, /**< Used for divB cleaning */ + phi_fp, /**< Obtained by the Poisson solver, for labframe electrostatic */ + vector_potential_fp, /**< Obtained by the magnetostatic solver */ + vector_potential_fp_nodal, + vector_potential_grad_buf_e_stag, + vector_potential_grad_buf_b_stag, + hybrid_electron_pressure_fp, /**< Used with Ohm's law solver. Stores the electron pressure */ + hybrid_rho_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated charge density */ + hybrid_current_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated current density */ + hybrid_current_fp_plasma, /**< Used with Ohm's law solver. Stores plasma current calculated as J_plasma = curl x B / mu0 - J_ext */ + hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ + Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */ + rho_cp, /**< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level */ + F_cp, /**< Only used with MR. Used for divE cleaning, on the coarse patch of each level */ + G_cp, /**< Only used with MR. 
Used for divB cleaning, on the coarse patch of each level */ + Efield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + Bfield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + E_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + B_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + distance_to_eb, /**< Only used with embedded boundaries (EB). Stores the distance to the nearest EB */ + edge_lengths, /**< Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units */ + face_areas, /**< Only used with embedded boundaries (EB). Indicates the area of the cell face that is covered by the EB, in SI units */ + area_mod, + pml_E_fp, + pml_B_fp, + pml_j_fp, + pml_F_fp, + pml_G_fp, + pml_E_cp, + pml_B_cp, + pml_j_cp, + pml_F_cp, + pml_G_cp, + pml_edge_lengths, + Efield_avg_fp, + Bfield_avg_fp, + Efield_avg_cp, + Bfield_avg_cp, + B_old, /**< Stores the value of B at the beginning of the timestep, for the implicit solver */ + ECTRhofield, + Venl + ); + + /** these are vector fields */ + constexpr FieldType ArrayFieldTypes[] = { + FieldType::Efield_aux, + FieldType::Bfield_aux, + FieldType::Efield_fp, + FieldType::Bfield_fp, + FieldType::current_fp, + FieldType::current_fp_nodal, + FieldType::current_fp_vay, + FieldType::current_buf, + FieldType::current_store, + FieldType::vector_potential_fp, + FieldType::vector_potential_fp_nodal, + FieldType::vector_potential_grad_buf_e_stag, + FieldType::vector_potential_grad_buf_b_stag, + FieldType::hybrid_current_fp_temp, + FieldType::hybrid_current_fp_plasma, + FieldType::hybrid_current_fp_external, + FieldType::Efield_cp, + FieldType::Bfield_cp, + FieldType::current_cp, + 
FieldType::Efield_cax, + FieldType::Bfield_cax, + FieldType::E_external_particle_field, + FieldType::B_external_particle_field, + FieldType::pml_E_fp, + FieldType::pml_B_fp, + FieldType::pml_j_fp, + FieldType::pml_E_cp, + FieldType::pml_B_cp, + FieldType::pml_j_cp, + FieldType::Efield_avg_fp, + FieldType::Bfield_avg_fp, + FieldType::Efield_avg_cp, + FieldType::Bfield_avg_cp, + FieldType::B_old, + FieldType::ECTRhofield, + FieldType::Venl + }; + + /** Returns true if a FieldType represents a vector field */ + inline bool + isFieldArray (const FieldType field_type) + { + return std::any_of( std::begin(ArrayFieldTypes), std::end(ArrayFieldTypes), + [field_type](const FieldType& f) { return f == field_type; }); + } + +} + +#endif //WARPX_FIELDS_H_ diff --git a/Source/Fluids/MultiFluidContainer.H b/Source/Fluids/MultiFluidContainer.H index 23f0c46590b..c2cdfc3e19f 100644 --- a/Source/Fluids/MultiFluidContainer.H +++ b/Source/Fluids/MultiFluidContainer.H @@ -10,6 +10,8 @@ #include "WarpXFluidContainer_fwd.H" +#include + #include #include @@ -34,7 +36,7 @@ class MultiFluidContainer public: - MultiFluidContainer (int nlevs_max); + MultiFluidContainer (); ~MultiFluidContainer() = default; @@ -52,25 +54,26 @@ public: } #endif - void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm); + void AllocateLevelMFs (ablastr::fields::MultiFabRegister& m_fields, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, int lev); - void InitData (int lev, amrex::Box init_box, amrex::Real cur_time); + void InitData (ablastr::fields::MultiFabRegister& m_fields, amrex::Box init_box, amrex::Real cur_time, int lev); /// /// This evolves all the fluids by one PIC time step, including current deposition, the /// field solve, and pushing the fluids, for all the species in the MultiFluidContainer. 
/// - void Evolve (int lev, - const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::MultiFab* rho, amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, - amrex::Real cur_time, bool skip_deposition=false); + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + std::string const& current_fp_string, + amrex::Real cur_time, + bool skip_deposition=false); [[nodiscard]] int nSpecies() const {return static_cast(species_names.size());} - void DepositCharge (int lev, amrex::MultiFab &rho); - void DepositCurrent (int lev, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz); + void DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev); + void DepositCurrent (ablastr::fields::MultiFabRegister& m_fields, + amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, + int lev); private: diff --git a/Source/Fluids/MultiFluidContainer.cpp b/Source/Fluids/MultiFluidContainer.cpp index 234cefb4f07..b160817d886 100644 --- a/Source/Fluids/MultiFluidContainer.cpp +++ b/Source/Fluids/MultiFluidContainer.cpp @@ -13,7 +13,7 @@ using namespace amrex; -MultiFluidContainer::MultiFluidContainer (int nlevs_max) +MultiFluidContainer::MultiFluidContainer () { const ParmParse pp_fluids("fluids"); pp_fluids.queryarr("species_names", species_names); @@ -22,52 +22,52 @@ MultiFluidContainer::MultiFluidContainer (int nlevs_max) allcontainers.resize(nspecies); for (int i = 0; i < nspecies; ++i) { - allcontainers[i] = std::make_unique(nlevs_max, i, species_names[i]); + allcontainers[i] = std::make_unique(i, species_names[i]); } } void -MultiFluidContainer::AllocateLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm) +MultiFluidContainer::AllocateLevelMFs (ablastr::fields::MultiFabRegister& m_fields, const BoxArray& ba, const DistributionMapping& dm, int lev) { for (auto& fl : allcontainers) { 
- fl->AllocateLevelMFs(lev, ba, dm); + fl->AllocateLevelMFs(m_fields, ba, dm, lev); } } void -MultiFluidContainer::InitData (int lev, amrex::Box init_box, amrex::Real cur_time) +MultiFluidContainer::InitData (ablastr::fields::MultiFabRegister& m_fields, amrex::Box init_box, amrex::Real cur_time, int lev) { for (auto& fl : allcontainers) { - fl->InitData(lev, init_box, cur_time); + fl->InitData(m_fields, init_box, cur_time, lev); } } void -MultiFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho) +MultiFluidContainer::DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev) { for (auto& fl : allcontainers) { - fl->DepositCharge(lev,rho); + fl->DepositCharge(m_fields,rho,lev); } } void -MultiFluidContainer::DepositCurrent (int lev, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz) +MultiFluidContainer::DepositCurrent (ablastr::fields::MultiFabRegister& m_fields, + amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, int lev) { for (auto& fl : allcontainers) { - fl->DepositCurrent(lev,jx,jy,jz); + fl->DepositCurrent(m_fields,jx,jy,jz,lev); } } void -MultiFluidContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab* rho, MultiFab& jx, MultiFab& jy, MultiFab& jz, - amrex::Real cur_time, bool skip_deposition) +MultiFluidContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + std::string const& current_fp_string, + amrex::Real cur_time, + bool skip_deposition) { for (auto& fl : allcontainers) { - fl->Evolve(lev, Ex, Ey, Ez, Bx, By, Bz, rho, jx, jy, jz, cur_time, skip_deposition); + fl->Evolve(fields, lev, current_fp_string, cur_time, skip_deposition); } } diff --git a/Source/Fluids/WarpXFluidContainer.H b/Source/Fluids/WarpXFluidContainer.H index 04ec4d9e80d..f3ea2d9a498 100644 --- a/Source/Fluids/WarpXFluidContainer.H +++ b/Source/Fluids/WarpXFluidContainer.H @@ -30,7 +30,7 @@ 
class WarpXFluidContainer public: friend MultiFluidContainer; - WarpXFluidContainer (int nlevs_max, int ispecies, const std::string& name); + WarpXFluidContainer (int ispecies, const std::string &name); ~WarpXFluidContainer() = default; WarpXFluidContainer (WarpXFluidContainer const &) = delete; @@ -38,20 +38,20 @@ public: WarpXFluidContainer(WarpXFluidContainer&& ) = default; WarpXFluidContainer& operator=(WarpXFluidContainer&& ) = default; - void AllocateLevelMFs (int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm); + void AllocateLevelMFs (ablastr::fields::MultiFabRegister& m_fields, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, int lev) const; - void InitData (int lev, amrex::Box init_box, amrex::Real cur_time); + void InitData (ablastr::fields::MultiFabRegister& m_fields, amrex::Box init_box, amrex::Real cur_time, int lev); void ReadParameters (); /** * Evolve updates a single timestep (dt) of the cold relativistic fluid equations */ - void Evolve (int lev, - const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::MultiFab* rho, amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, - amrex::Real cur_time, bool skip_deposition=false); + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real cur_time, + bool skip_deposition=false); /** * AdvectivePush_Muscl takes a single timestep (dt) of the cold relativistic fluid equations @@ -61,7 +61,7 @@ public: * * \param[in] lev refinement level */ - void AdvectivePush_Muscl (int lev); + void AdvectivePush_Muscl (ablastr::fields::MultiFabRegister& m_fields, int lev); /** @@ -72,7 +72,7 @@ public: * * \param[in] lev refinement level */ - void ApplyBcFluidsAndComms (int lev); + void ApplyBcFluidsAndComms (ablastr::fields::MultiFabRegister& m_fields, int lev); #if defined(WARPX_DIM_RZ) /** @@ 
-83,7 +83,7 @@ public: * * \param[in] lev refinement level */ - void centrifugal_source_rz (int lev); + void centrifugal_source_rz (ablastr::fields::MultiFabRegister& m_fields, int lev); #endif /** @@ -101,10 +101,10 @@ public: * \param[in] Bz Yee magnetic field (z) * \param[in] t Current time */ - void GatherAndPush (int lev, + void GatherAndPush (ablastr::fields::MultiFabRegister& m_fields, const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::Real t); + amrex::Real t, int lev); /** * DepositCurrent interpolates the fluid current density comps. onto the Yee grid and @@ -117,8 +117,8 @@ public: * \param[in,out] jy current density MultiFab y comp. * \param[in,out] jz current density MultiFab z comp. */ - void DepositCurrent (int lev, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz); + void DepositCurrent (ablastr::fields::MultiFabRegister& m_fields, + amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, int lev); /** * DepositCharge interpolates the fluid charge density onto the Yee grid and @@ -129,7 +129,7 @@ public: * \param[in] lev refinement level * \param[in,out] rho charge density MultiFab. 
*/ - void DepositCharge (int lev, amrex::MultiFab &rho, int icomp = 0); + void DepositCharge (ablastr::fields::MultiFabRegister& m_fields, amrex::MultiFab &rho, int lev, int icomp = 0); [[nodiscard]] amrex::Real getCharge () const {return charge;} [[nodiscard]] amrex::Real getMass () const {return mass;} @@ -185,9 +185,9 @@ protected: public: - // MultiFabs that contain the density (N) and momentum density (NU) of this fluid species, for each refinement level - amrex::Vector< std::unique_ptr > N; - amrex::Vector, 3 > > NU; + // Names of Multifabs that will be added to the mfs register + std::string name_mf_N = "fluid_density_"+species_name; + std::string name_mf_NU = "fluid_momentum_density_"+species_name; }; diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp index 99a1212ac90..326ce30c844 100644 --- a/Source/Fluids/WarpXFluidContainer.cpp +++ b/Source/Fluids/WarpXFluidContainer.cpp @@ -4,22 +4,25 @@ * * License: BSD-3-Clause-LBNL */ -#include "ablastr/coarsen/sample.H" +#include "Fields.H" #include "Particles/Pusher/UpdateMomentumHigueraCary.H" #include "Utils/WarpXProfilerWrapper.H" #include "MusclHancockUtils.H" #include "Fluids/WarpXFluidContainer.H" -#include "WarpX.H" -#include #include "Utils/Parser/ParserUtils.H" #include "Utils/WarpXUtil.H" #include "Utils/SpeciesUtils.H" +#include "WarpX.H" + +#include +#include using namespace ablastr::utils::communication; using namespace amrex; -WarpXFluidContainer::WarpXFluidContainer(int nlevs_max, int ispecies, const std::string &name): + +WarpXFluidContainer::WarpXFluidContainer(int ispecies, const std::string &name): species_id{ispecies}, species_name{name} { @@ -50,9 +53,6 @@ WarpXFluidContainer::WarpXFluidContainer(int nlevs_max, int ispecies, const std: } amrex::Gpu::synchronize(); - // Resize the list of MultiFabs for the right number of levels - N.resize(nlevs_max); - NU.resize(nlevs_max); } void WarpXFluidContainer::ReadParameters() @@ -139,31 +139,33 @@ void 
WarpXFluidContainer::ReadParameters() } } -void WarpXFluidContainer::AllocateLevelMFs(int lev, const BoxArray &ba, const DistributionMapping &dm) +void WarpXFluidContainer::AllocateLevelMFs(ablastr::fields::MultiFabRegister& fields, const BoxArray &ba, const DistributionMapping &dm, int lev) const { + using ablastr::fields::Direction; const int ncomps = 1; const amrex::IntVect nguards(AMREX_D_DECL(2, 2, 2)); - // set human-readable tag for each MultiFab - auto const tag = [lev](std::string tagname) - { - tagname.append("[l=").append(std::to_string(lev)).append("]"); - return tagname; - }; - - WarpX::AllocInitMultiFab(N[lev], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid density"), 0.0_rt); - - WarpX::AllocInitMultiFab(NU[lev][0], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid momentum density [x]"), 0.0_rt); - WarpX::AllocInitMultiFab(NU[lev][1], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid momentum density [y]"), 0.0_rt); - WarpX::AllocInitMultiFab(NU[lev][2], amrex::convert(ba, amrex::IntVect::TheNodeVector()), - dm, ncomps, nguards, lev, tag("fluid momentum density [z]"), 0.0_rt); + fields.alloc_init( + name_mf_N, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm, + ncomps, nguards, 0.0_rt); + + fields.alloc_init( + name_mf_NU, Direction{0}, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm, + ncomps, nguards, 0.0_rt); + + fields.alloc_init( + name_mf_NU, Direction{1}, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm, + ncomps, nguards, 0.0_rt); + + fields.alloc_init( + name_mf_NU, Direction{2}, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm, + ncomps, nguards, 0.0_rt); + } -void WarpXFluidContainer::InitData(int lev, amrex::Box init_box, amrex::Real cur_time) +void WarpXFluidContainer::InitData(ablastr::fields::MultiFabRegister& fields, amrex::Box init_box, amrex::Real cur_time, 
int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::InitData"); // Convert initialization box to nodal box @@ -186,14 +188,14 @@ void WarpXFluidContainer::InitData(int lev, amrex::Box init_box, amrex::Real cur #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Box const tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); - amrex::Array4 const &N_arr = N[lev]->array(mfi); - amrex::Array4 const &NUx_arr = NU[lev][0]->array(mfi); - amrex::Array4 const &NUy_arr = NU[lev][1]->array(mfi); - amrex::Array4 const &NUz_arr = NU[lev][2]->array(mfi); + amrex::Box const tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); + amrex::Array4 const &N_arr = fields.get(name_mf_N, lev)->array(mfi); + amrex::Array4 const &NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + amrex::Array4 const &NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + amrex::Array4 const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); // Return the intersection of all cells and the ones we wish to update amrex::Box const init_box_intersection = init_box & tile_box; @@ -253,54 +255,68 @@ void WarpXFluidContainer::InitData(int lev, amrex::Box init_box, amrex::Real cur void WarpXFluidContainer::Evolve( + ablastr::fields::MultiFabRegister& fields, int lev, - const amrex::MultiFab &Ex, const amrex::MultiFab &Ey, const amrex::MultiFab &Ez, - const amrex::MultiFab &Bx, const amrex::MultiFab &By, const amrex::MultiFab &Bz, - amrex::MultiFab* rho, amrex::MultiFab &jx, amrex::MultiFab &jy, amrex::MultiFab &jz, - amrex::Real cur_time, bool skip_deposition) + const std::string& current_fp_string, + amrex::Real cur_time, + bool skip_deposition) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; 
WARPX_PROFILE("WarpXFluidContainer::Evolve"); - if (rho && ! skip_deposition && ! do_not_deposit) { + if (fields.has(FieldType::rho_fp,lev) && ! skip_deposition && ! do_not_deposit) { // Deposit charge before particle push, in component 0 of MultiFab rho. - DepositCharge(lev, *rho, 0); + DepositCharge(fields, *fields.get(FieldType::rho_fp,lev), lev, 0); } // Step the Lorentz Term if(!do_not_gather){ - GatherAndPush(lev, Ex, Ey, Ez, Bx, By, Bz, cur_time); + GatherAndPush(fields, + *fields.get(FieldType::Efield_aux, Direction{0}, lev), + *fields.get(FieldType::Efield_aux, Direction{1}, lev), + *fields.get(FieldType::Efield_aux, Direction{2}, lev), + *fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *fields.get(FieldType::Bfield_aux, Direction{2}, lev), + cur_time, lev); } // Cylindrical centrifugal term if(!do_not_push){ #if defined(WARPX_DIM_RZ) - centrifugal_source_rz(lev); + centrifugal_source_rz(fields, lev); #endif // Apply (non-periodic) BC on the fluids (needed for spatial derivative), // and communicate N, NU at boundaries - ApplyBcFluidsAndComms(lev); + ApplyBcFluidsAndComms(fields, lev); // Step the Advective term - AdvectivePush_Muscl(lev); + AdvectivePush_Muscl(fields, lev); } // Deposit rho to the simulation mesh // Deposit charge (end of the step) - if (rho && ! skip_deposition && ! do_not_deposit) { - DepositCharge(lev, *rho, 1); + if (fields.has(FieldType::rho_fp,lev) && ! skip_deposition && ! do_not_deposit) { + DepositCharge(fields, *fields.get(FieldType::rho_fp,lev), lev, 1); } // Deposit J to the simulation mesh if (!skip_deposition && ! 
do_not_deposit) { - DepositCurrent(lev, jx, jy, jz); + DepositCurrent(fields, + *fields.get(current_fp_string, Direction{0}, lev), + *fields.get(current_fp_string, Direction{1}, lev), + *fields.get(current_fp_string, Direction{2}, lev), + lev); } } // Momentum source due to curvature -void WarpXFluidContainer::ApplyBcFluidsAndComms (int lev) +void WarpXFluidContainer::ApplyBcFluidsAndComms (ablastr::fields::MultiFabRegister& fields, int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::ApplyBcFluidsAndComms"); WarpX &warpx = WarpX::GetInstance(); @@ -315,15 +331,15 @@ void WarpXFluidContainer::ApplyBcFluidsAndComms (int lev) #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Box tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); + amrex::Box tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); - const amrex::Array4 N_arr = N[lev]->array(mfi); - const amrex::Array4 NUx_arr = NU[lev][0]->array(mfi); - const amrex::Array4 NUy_arr = NU[lev][1]->array(mfi); - const amrex::Array4 NUz_arr = NU[lev][2]->array(mfi); + const amrex::Array4 N_arr = fields.get(name_mf_N, lev)->array(mfi); + const amrex::Array4 NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + const amrex::Array4 NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + const amrex::Array4 NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); //Grow the tilebox tile_box.grow(1); @@ -395,15 +411,16 @@ void WarpXFluidContainer::ApplyBcFluidsAndComms (int lev) } // Fill guard cells - FillBoundary(*N[lev], N[lev]->nGrowVect(), WarpX::do_single_precision_comms, period); - FillBoundary(*NU[lev][0], NU[lev][0]->nGrowVect(), WarpX::do_single_precision_comms, period); - FillBoundary(*NU[lev][1], NU[lev][1]->nGrowVect(), 
WarpX::do_single_precision_comms, period); - FillBoundary(*NU[lev][2], NU[lev][2]->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_N, lev), fields.get(name_mf_N, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_NU, Direction{0}, lev), fields.get(name_mf_NU, Direction{0}, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_NU, Direction{1}, lev), fields.get(name_mf_NU, Direction{1}, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); + FillBoundary(*fields.get(name_mf_NU, Direction{2}, lev), fields.get(name_mf_NU, Direction{2}, lev)->nGrowVect(), WarpX::do_single_precision_comms, period); } // Muscl Advection Update -void WarpXFluidContainer::AdvectivePush_Muscl (int lev) +void WarpXFluidContainer::AdvectivePush_Muscl (ablastr::fields::MultiFabRegister& fields, int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::AdvectivePush_Muscl"); // Grab the grid spacing @@ -434,31 +451,31 @@ void WarpXFluidContainer::AdvectivePush_Muscl (int lev) const amrex::Real dt_over_dz_half = 0.5_rt*(dt/dx[0]); #endif - const amrex::BoxArray ba = N[lev]->boxArray(); + const amrex::BoxArray ba = fields.get(name_mf_N, lev)->boxArray(); // Temporary Half-step values #if defined(WARPX_DIM_3D) - amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_minus_y( amrex::convert(ba, IntVect(1,0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_y( amrex::convert(ba, IntVect(1,0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,1,0)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,1,0)), N[lev]->DistributionMap(), 4, 1); + amrex::MultiFab 
tmp_U_minus_x( amrex::convert(ba, IntVect(0,1,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_y( amrex::convert(ba, IntVect(1,0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_y( amrex::convert(ba, IntVect(1,0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,0)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,0)), N[lev]->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_x( amrex::convert(ba, IntVect(0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_x( amrex::convert(ba, IntVect(0,1)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(1,0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); #else - amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(0)), N[lev]->DistributionMap(), 4, 1); - amrex::MultiFab tmp_U_plus_z( amrex::convert(ba, IntVect(0)), N[lev]->DistributionMap(), 4, 1); + amrex::MultiFab tmp_U_minus_z( amrex::convert(ba, IntVect(0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); + amrex::MultiFab 
tmp_U_plus_z( amrex::convert(ba, IntVect(0)), fields.get(name_mf_N, lev)->DistributionMap(), 4, 1); #endif // Fill edge values of N and U at the half timestep for MUSCL #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { // Loop over a box with one extra gridpoint in the ghost region to avoid @@ -476,10 +493,10 @@ void WarpXFluidContainer::AdvectivePush_Muscl (int lev) return tt; }(); - amrex::Array4 const &N_arr = N[lev]->array(mfi); - amrex::Array4 const &NUx_arr = NU[lev][0]->array(mfi); - amrex::Array4 const &NUy_arr = NU[lev][1]->array(mfi); - amrex::Array4 const &NUz_arr = NU[lev][2]->array(mfi); + amrex::Array4 const &N_arr = fields.get(name_mf_N, lev)->array(mfi); + amrex::Array4 const &NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + amrex::Array4 const &NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + amrex::Array4 const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); // Boxes are computed to avoid going out of bounds. 
// Grow the entire domain @@ -741,13 +758,13 @@ void WarpXFluidContainer::AdvectivePush_Muscl (int lev) #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - const amrex::Box tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); - const amrex::Array4 N_arr = N[lev]->array(mfi); - const amrex::Array4 NUx_arr = NU[lev][0]->array(mfi); - const amrex::Array4 NUy_arr = NU[lev][1]->array(mfi); - const amrex::Array4 NUz_arr = NU[lev][2]->array(mfi); + const amrex::Box tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); + const amrex::Array4 N_arr = fields.get(name_mf_N, lev)->array(mfi); + const amrex::Array4 NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + const amrex::Array4 NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + const amrex::Array4 NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); #if defined(WARPX_DIM_3D) amrex::Array4 const &U_minus_x = tmp_U_minus_x.array(mfi); @@ -878,8 +895,9 @@ void WarpXFluidContainer::AdvectivePush_Muscl (int lev) // Momentum source due to curvature #if defined(WARPX_DIM_RZ) -void WarpXFluidContainer::centrifugal_source_rz (int lev) +void WarpXFluidContainer::centrifugal_source_rz (ablastr::fields::MultiFabRegister& fields, int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::centrifugal_source_rz"); WarpX &warpx = WarpX::GetInstance(); @@ -894,15 +912,15 @@ void WarpXFluidContainer::centrifugal_source_rz (int lev) #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); + amrex::Box const 
&tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); - amrex::Array4 const &N_arr = N[lev]->array(mfi); - const amrex::Array4 NUx_arr = NU[lev][0]->array(mfi); - const amrex::Array4 NUy_arr = NU[lev][1]->array(mfi); - amrex::Array4 const &NUz_arr = NU[lev][2]->array(mfi); + amrex::Array4 const &N_arr = fields.get(name_mf_N, lev)->array(mfi); + const amrex::Array4 NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + const amrex::Array4 NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + amrex::Array4 const &NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); amrex::ParallelFor(tile_box, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept @@ -947,11 +965,13 @@ void WarpXFluidContainer::centrifugal_source_rz (int lev) // Momentum source from fields void WarpXFluidContainer::GatherAndPush ( - int lev, + ablastr::fields::MultiFabRegister& fields, const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - Real t) + Real t, + int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::GatherAndPush"); WarpX &warpx = WarpX::GetInstance(); @@ -978,7 +998,7 @@ void WarpXFluidContainer::GatherAndPush ( auto Bz_type = amrex::GpuArray{0, 0, 0}; for (int i = 0; i < AMREX_SPACEDIM; ++i) { - Nodal_type[i] = N[lev]->ixType()[i]; + Nodal_type[i] = fields.get(name_mf_N, lev)->ixType()[i]; Ex_type[i] = Ex.ixType()[i]; Ey_type[i] = Ey.ixType()[i]; Ez_type[i] = Ez.ixType()[i]; @@ -1015,15 +1035,15 @@ void WarpXFluidContainer::GatherAndPush ( #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); + amrex::Box const &tile_box = 
mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); - amrex::Array4 const &N_arr = N[lev]->array(mfi); - const amrex::Array4 NUx_arr = NU[lev][0]->array(mfi); - const amrex::Array4 NUy_arr = NU[lev][1]->array(mfi); - const amrex::Array4 NUz_arr = NU[lev][2]->array(mfi); + amrex::Array4 const &N_arr = fields.get(name_mf_N, lev)->array(mfi); + const amrex::Array4 NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + const amrex::Array4 NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + const amrex::Array4 NUz_arr = fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); amrex::Array4 const& Ex_arr = Ex.array(mfi); amrex::Array4 const& Ey_arr = Ey.array(mfi); @@ -1218,7 +1238,7 @@ void WarpXFluidContainer::GatherAndPush ( } } -void WarpXFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho, int icomp) +void WarpXFluidContainer::DepositCharge (ablastr::fields::MultiFabRegister& fields, amrex::MultiFab &rho, int lev, int icomp) { WARPX_PROFILE("WarpXFluidContainer::DepositCharge"); @@ -1235,11 +1255,11 @@ void WarpXFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho, int icom #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); - amrex::Array4 const &N_arr = N[lev]->array(mfi); + amrex::Box const &tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); + amrex::Array4 const &N_arr = fields.get(name_mf_N, lev)->array(mfi); const amrex::Array4 rho_arr = rho.array(mfi); const amrex::Array4 owner_mask_rho_arr = owner_mask_rho->array(mfi); @@ -1255,15 +1275,17 @@ void WarpXFluidContainer::DepositCharge (int lev, amrex::MultiFab &rho, int icom void WarpXFluidContainer::DepositCurrent( - int lev, - amrex::MultiFab &jx, amrex::MultiFab &jy, 
amrex::MultiFab &jz) + ablastr::fields::MultiFabRegister& fields, + amrex::MultiFab &jx, amrex::MultiFab &jy, amrex::MultiFab &jz, + int lev) { + using ablastr::fields::Direction; WARPX_PROFILE("WarpXFluidContainer::DepositCurrent"); // Temporary nodal currents - amrex::MultiFab tmp_jx_fluid(N[lev]->boxArray(), N[lev]->DistributionMap(), 1, 0); - amrex::MultiFab tmp_jy_fluid(N[lev]->boxArray(), N[lev]->DistributionMap(), 1, 0); - amrex::MultiFab tmp_jz_fluid(N[lev]->boxArray(), N[lev]->DistributionMap(), 1, 0); + amrex::MultiFab tmp_jx_fluid(fields.get(name_mf_N, lev)->boxArray(), fields.get(name_mf_N, lev)->DistributionMap(), 1, 0); + amrex::MultiFab tmp_jy_fluid(fields.get(name_mf_N, lev)->boxArray(), fields.get(name_mf_N, lev)->DistributionMap(), 1, 0); + amrex::MultiFab tmp_jz_fluid(fields.get(name_mf_N, lev)->boxArray(), fields.get(name_mf_N, lev)->DistributionMap(), 1, 0); const amrex::Real inv_clight_sq = 1.0_prt / PhysConst::c / PhysConst::c; const amrex::Real q = getCharge(); @@ -1293,14 +1315,14 @@ void WarpXFluidContainer::DepositCurrent( #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { - amrex::Box const &tile_box = mfi.tilebox(N[lev]->ixType().toIntVect()); + amrex::Box const &tile_box = mfi.tilebox(fields.get(name_mf_N, lev)->ixType().toIntVect()); - amrex::Array4 const &N_arr = N[lev]->array(mfi); - amrex::Array4 const &NUx_arr = NU[lev][0]->array(mfi); - amrex::Array4 const &NUy_arr = NU[lev][1]->array(mfi); - amrex::Array4 const &NUz_arr = NU[lev][2]->array(mfi); + amrex::Array4 const &N_arr = fields.get(name_mf_N, lev)->array(mfi); + amrex::Array4 const &NUx_arr = fields.get(name_mf_NU, Direction{0}, lev)->array(mfi); + amrex::Array4 const &NUy_arr = fields.get(name_mf_NU, Direction{1}, lev)->array(mfi); + amrex::Array4 const &NUz_arr = 
fields.get(name_mf_NU, Direction{2}, lev)->array(mfi); const amrex::Array4 tmp_jx_fluid_arr = tmp_jx_fluid.array(mfi); const amrex::Array4 tmp_jy_fluid_arr = tmp_jy_fluid.array(mfi); @@ -1328,7 +1350,7 @@ void WarpXFluidContainer::DepositCurrent( #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (MFIter mfi(*N[lev], TilingIfNotGPU()); mfi.isValid(); ++mfi) + for (MFIter mfi(*fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi) { amrex::Box const &tile_box_x = mfi.tilebox(jx.ixType().toIntVect()); amrex::Box const &tile_box_y = mfi.tilebox(jy.ixType().toIntVect()); diff --git a/Source/Initialization/DivCleaner/ProjectionDivCleaner.H b/Source/Initialization/DivCleaner/ProjectionDivCleaner.H index 7ee1fe57048..2fedb83cd36 100644 --- a/Source/Initialization/DivCleaner/ProjectionDivCleaner.H +++ b/Source/Initialization/DivCleaner/ProjectionDivCleaner.H @@ -35,7 +35,7 @@ #include #include -#include +#include "Fields.H" #include "Utils/Parser/ParserUtils.H" namespace warpx::initialization { @@ -64,7 +64,7 @@ protected: amrex::Real m_rtol; amrex::Real m_atol; - warpx::fields::FieldType m_field_type; + std::string m_field_name; public: amrex::Vector< std::unique_ptr > m_solution; @@ -83,7 +83,7 @@ public: amrex::Gpu::DeviceVector m_stencil_coefs_z; // Default Constructor - ProjectionDivCleaner (warpx::fields::FieldType a_field_type); + ProjectionDivCleaner (std::string const& a_field_name); void ReadParameters (); diff --git a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp index 40326aadc3c..670f962f7c3 100644 --- a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp +++ b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp @@ -19,7 +19,7 @@ #else #include #endif -#include +#include "Fields.H" #include #include #include @@ -30,9 +30,10 @@ using namespace amrex; namespace warpx::initialization { 
-ProjectionDivCleaner::ProjectionDivCleaner(warpx::fields::FieldType a_field_type) : - m_field_type(a_field_type) +ProjectionDivCleaner::ProjectionDivCleaner(std::string const& a_field_name) : + m_field_name(a_field_name) { + using ablastr::fields::Direction; ReadParameters(); auto& warpx = WarpX::GetInstance(); @@ -48,7 +49,7 @@ ProjectionDivCleaner::ProjectionDivCleaner(warpx::fields::FieldType a_field_type m_source.resize(m_levels); const int ncomps = WarpX::ncomps; - auto const& ng = warpx.getFieldPointer(m_field_type, 0, 0)->nGrowVect(); + auto const& ng = warpx.m_fields.get(m_field_name, Direction{0}, 0)->nGrowVect(); for (int lev = 0; lev < m_levels; ++lev) { @@ -201,6 +202,8 @@ ProjectionDivCleaner::solve () void ProjectionDivCleaner::setSourceFromBfield () { + using ablastr::fields::Direction; + // Get WarpX object auto & warpx = WarpX::GetInstance(); const auto& geom = warpx.Geom(); @@ -211,7 +214,9 @@ ProjectionDivCleaner::setSourceFromBfield () WarpX::ComputeDivB( *m_source[ilev], 0, - warpx.getFieldPointerArray(m_field_type, ilev), + {warpx.m_fields.get(m_field_name, Direction{0}, ilev), + warpx.m_fields.get(m_field_name, Direction{1}, ilev), + warpx.m_fields.get(m_field_name, Direction{2}, ilev)}, WarpX::CellSize(0) ); @@ -228,6 +233,8 @@ ProjectionDivCleaner::setSourceFromBfield () void ProjectionDivCleaner::correctBfield () { + using ablastr::fields::Direction; + // Get WarpX object auto & warpx = WarpX::GetInstance(); const auto& geom = warpx.Geom(); @@ -236,9 +243,9 @@ ProjectionDivCleaner::correctBfield () for (int ilev = 0; ilev < m_levels; ++ilev) { // Grab B-field multifabs at this level - amrex::MultiFab* Bx = warpx.getFieldPointer(m_field_type, ilev, 0); - amrex::MultiFab* By = warpx.getFieldPointer(m_field_type, ilev, 1); - amrex::MultiFab* Bz = warpx.getFieldPointer(m_field_type, ilev, 2); + amrex::MultiFab* Bx = warpx.m_fields.get(m_field_name, Direction{0}, ilev); + amrex::MultiFab* By = warpx.m_fields.get(m_field_name, Direction{1}, 
ilev); + amrex::MultiFab* Bz = warpx.m_fields.get(m_field_name, Direction{2}, ilev); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -337,8 +344,8 @@ WarpX::ProjectionCleanDivB() { ablastr::warn_manager::WarnPriority::low); } - warpx::initialization::ProjectionDivCleaner dc( - warpx::fields::FieldType::Bfield_fp_external); + warpx::initialization::ProjectionDivCleaner dc("Bfield_fp_external"); + dc.setSourceFromBfield(); dc.solve(); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 0cf9496e63e..70bf20d0905 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -17,7 +17,7 @@ #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -36,6 +36,7 @@ #include "Utils/WarpXUtil.H" #include "Python/callbacks.H" +#include #include #include #include @@ -93,8 +94,15 @@ namespace * \brief Check that the number of guard cells is smaller than the number of valid cells, * for a given MultiFab, and abort otherwise. 
*/ - void CheckGuardCells(amrex::MultiFab const& mf) + void CheckGuardCells ( + ablastr::fields::MultiFabRegister& fields, + const std::string& mf_name, + int lev + ) { + if (!fields.has(mf_name, lev)) { return; } + auto & mf = *fields.get(mf_name, lev); + for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) { const amrex::IntVect vc = mfi.validbox().enclosedCells().size(); @@ -495,6 +503,10 @@ void WarpX::InitData () { WARPX_PROFILE("WarpX::InitData()"); + + using ablastr::fields::Direction; + using warpx::fields::FieldType; + ablastr::parallelization::check_mpi_thread_level(); #ifdef WARPX_QED @@ -546,9 +558,9 @@ WarpX::InitData () const int lev_zero = 0; m_macroscopic_properties->InitData( Geom(lev_zero), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,0).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,1).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,2).ixType().toIntVect() + m_fields.get(FieldType::Efield_fp, Direction{0}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev_zero)->ixType().toIntVect() ); } @@ -620,30 +632,36 @@ WarpX::InitData () } void -WarpX::AddExternalFields (int const lev) { +WarpX::AddExternalFields (int const lev) +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // FIXME: RZ multimode has more than one component for all these if (m_p_ext_field_params->E_ext_grid_type != ExternalFieldType::default_zero) { + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::constant) { Efield_fp[lev][0]->plus(m_p_ext_field_params->E_external_grid[0], guard_cells.ng_alloc_EB.min()); Efield_fp[lev][1]->plus(m_p_ext_field_params->E_external_grid[1], guard_cells.ng_alloc_EB.min()); 
Efield_fp[lev][2]->plus(m_p_ext_field_params->E_external_grid[2], guard_cells.ng_alloc_EB.min()); } else { - amrex::MultiFab::Add(*Efield_fp[lev][0], *Efield_fp_external[lev][0], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Efield_fp[lev][1], *Efield_fp_external[lev][1], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Efield_fp[lev][2], *Efield_fp_external[lev][2], 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Efield_fp[lev][0], *m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Efield_fp[lev][1], *m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Efield_fp[lev][2], *m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); } } if (m_p_ext_field_params->B_ext_grid_type != ExternalFieldType::default_zero) { + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, max_level); if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::constant) { Bfield_fp[lev][0]->plus(m_p_ext_field_params->B_external_grid[0], guard_cells.ng_alloc_EB.min()); Bfield_fp[lev][1]->plus(m_p_ext_field_params->B_external_grid[1], guard_cells.ng_alloc_EB.min()); Bfield_fp[lev][2]->plus(m_p_ext_field_params->B_external_grid[2], guard_cells.ng_alloc_EB.min()); } else { - amrex::MultiFab::Add(*Bfield_fp[lev][0], *Bfield_fp_external[lev][0], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Bfield_fp[lev][1], *Bfield_fp_external[lev][1], 0, 0, 1, guard_cells.ng_alloc_EB); - amrex::MultiFab::Add(*Bfield_fp[lev][2], *Bfield_fp_external[lev][2], 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Bfield_fp[lev][0], *m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Bfield_fp[lev][1], *m_fields.get(FieldType::Bfield_fp_external, 
Direction{1}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); + amrex::MultiFab::Add(*Bfield_fp[lev][2], *m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), 0, 0, 1, guard_cells.ng_alloc_EB); } } } @@ -703,7 +721,7 @@ WarpX::InitPML () do_pml_Hi[0][idim] = 1; // on level 0 } } - if (finest_level > 0) { do_pml = 1; } + if (max_level > 0) { do_pml = 1; } if (do_pml) { bool const eb_enabled = EB::enabled(); @@ -728,7 +746,7 @@ WarpX::InitPML () do_pml_Lo[0], do_pml_Hi[0]); #endif - for (int lev = 1; lev <= finest_level; ++lev) + for (int lev = 1; lev <= max_level; ++lev) { do_pml_Lo[lev] = amrex::IntVect::TheUnitVector(); do_pml_Hi[lev] = amrex::IntVect::TheUnitVector(); @@ -775,7 +793,7 @@ WarpX::ComputePMLFactors () { if (do_pml) { - for (int lev = 0; lev <= finest_level; ++lev) + for (int lev = 0; lev <= max_level; ++lev) { if (pml[lev]) { pml[lev]->ComputePMLFactors(dt[lev]); @@ -892,6 +910,9 @@ WarpX::PostRestart () void WarpX::InitLevelData (int lev, Real /*time*/) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // initialize the averaged fields only if the averaged algorithm // is activated ('psatd.do_time_averaging=1') const ParmParse pp_psatd("psatd"); @@ -907,14 +928,14 @@ WarpX::InitLevelData (int lev, Real /*time*/) if ( is_B_ext_const && (lev <= maxlevel_extEMfield_init) ) { if (fft_do_time_averaging) { - Bfield_avg_fp[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_avg_fp, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); } if (lev > 0) { - Bfield_aux[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); - Bfield_cp[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_aux, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); if (fft_do_time_averaging) { - 
Bfield_avg_cp[lev][i]->setVal(m_p_ext_field_params->B_external_grid[i]); + m_fields.get(FieldType::Bfield_avg_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->B_external_grid[i]); } } } @@ -927,14 +948,13 @@ WarpX::InitLevelData (int lev, Real /*time*/) if ( is_E_ext_const && (lev <= maxlevel_extEMfield_init) ) { if (fft_do_time_averaging) { - Efield_avg_fp[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_avg_fp, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); } - if (lev > 0) { - Efield_aux[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); - Efield_cp[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_aux, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); if (fft_do_time_averaging) { - Efield_avg_cp[lev][i]->setVal(m_p_ext_field_params->E_external_grid[i]); + m_fields.get(FieldType::Efield_avg_cp, Direction{i}, lev)->setVal(m_p_ext_field_params->E_external_grid[i]); } } } @@ -954,26 +974,26 @@ WarpX::InitLevelData (int lev, Real /*time*/) && (lev > 0) && (lev <= maxlevel_extEMfield_init)) { InitializeExternalFieldsOnGridUsingParser( - Bfield_aux[lev][0].get(), - Bfield_aux[lev][1].get(), - Bfield_aux[lev][2].get(), + m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), m_p_ext_field_params->Bxfield_parser->compile<3>(), m_p_ext_field_params->Byfield_parser->compile<3>(), m_p_ext_field_params->Bzfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'B', lev, PatchType::fine); InitializeExternalFieldsOnGridUsingParser( - Bfield_cp[lev][0].get(), - Bfield_cp[lev][1].get(), - 
Bfield_cp[lev][2].get(), + m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), m_p_ext_field_params->Bxfield_parser->compile<3>(), m_p_ext_field_params->Byfield_parser->compile<3>(), m_p_ext_field_params->Bzfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], 'B', lev, PatchType::coarse); } @@ -991,43 +1011,49 @@ WarpX::InitLevelData (int lev, Real /*time*/) // We initialize ECTRhofield consistently with the Efield if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { m_fdtd_solver_fp[lev]->EvolveECTRho( - Efield_fp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); + m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev); } } #endif if (lev > 0) { InitializeExternalFieldsOnGridUsingParser( - Efield_aux[lev][0].get(), - Efield_aux[lev][1].get(), - Efield_aux[lev][2].get(), + m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + m_fields.get(FieldType::Efield_aux, Direction{2}, lev), m_p_ext_field_params->Exfield_parser->compile<3>(), m_p_ext_field_params->Eyfield_parser->compile<3>(), m_p_ext_field_params->Ezfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'E', lev, PatchType::fine); InitializeExternalFieldsOnGridUsingParser( - Efield_cp[lev][0].get(), - Efield_cp[lev][1].get(), - Efield_cp[lev][2].get(), + m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + m_fields.get(FieldType::Efield_cp, 
Direction{1}, lev), + m_fields.get(FieldType::Efield_cp, Direction{2}, lev), m_p_ext_field_params->Exfield_parser->compile<3>(), m_p_ext_field_params->Eyfield_parser->compile<3>(), m_p_ext_field_params->Ezfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'E', lev, PatchType::coarse); #ifdef AMREX_USE_EB if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { // We initialize ECTRhofield consistently with the Efield - m_fdtd_solver_cp[lev]->EvolveECTRho(Efield_cp[lev], m_edge_lengths[lev], - m_face_areas[lev], ECTRhofield[lev], lev); - + m_fdtd_solver_cp[lev]->EvolveECTRho( + m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], + m_fields.get_alldirs(FieldType::ECTRhofield, lev), + lev); } } #endif @@ -1051,8 +1077,8 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( MultiFab *mfx, MultiFab *mfy, MultiFab *mfz, ParserExecutor<3> const& xfield_parser, ParserExecutor<3> const& yfield_parser, ParserExecutor<3> const& zfield_parser, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, [[maybe_unused]] const char field, const int lev, PatchType patch_type) { @@ -1209,66 +1235,44 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( void WarpX::CheckGuardCells() { - for (int lev = 0; lev <= finest_level; ++lev) + for (int lev = 0; lev <= max_level; ++lev) { for (int dim = 0; dim < 3; ++dim) { - ::CheckGuardCells(*Efield_fp[lev][dim]); - ::CheckGuardCells(*Bfield_fp[lev][dim]); - ::CheckGuardCells(*current_fp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_fp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, 
"Bfield_fp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "current_fp[" + std::to_string(dim) + "]", lev); if (WarpX::fft_do_time_averaging) { - ::CheckGuardCells(*Efield_avg_fp[lev][dim]); - ::CheckGuardCells(*Bfield_avg_fp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_avg_fp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_avg_fp[" + std::to_string(dim) + "]", lev); } } - if (rho_fp[lev]) - { - ::CheckGuardCells(*rho_fp[lev]); - } - - if (F_fp[lev]) - { - ::CheckGuardCells(*F_fp[lev]); - } - - if (G_fp[lev]) - { - ::CheckGuardCells(*G_fp[lev]); - } + ::CheckGuardCells(m_fields, "rho_fp", lev); + ::CheckGuardCells(m_fields, "F_fp", lev); + ::CheckGuardCells(m_fields, "G_fp", lev); // MultiFabs on coarse patch if (lev > 0) { for (int dim = 0; dim < 3; ++dim) { - ::CheckGuardCells(*Efield_cp[lev][dim]); - ::CheckGuardCells(*Bfield_cp[lev][dim]); - ::CheckGuardCells(*current_cp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_cp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_cp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "current_cp[" + std::to_string(dim) + "]", lev); if (WarpX::fft_do_time_averaging) { - ::CheckGuardCells(*Efield_avg_cp[lev][dim]); - ::CheckGuardCells(*Bfield_avg_cp[lev][dim]); + ::CheckGuardCells(m_fields, "Efield_avg_cp[" + std::to_string(dim) + "]", lev); + ::CheckGuardCells(m_fields, "Bfield_avg_cp[" + std::to_string(dim) + "]", lev); } } - if (rho_cp[lev]) - { - ::CheckGuardCells(*rho_cp[lev]); - } - - if (F_cp[lev]) - { - ::CheckGuardCells(*F_cp[lev]); - } - - if (G_cp[lev]) - { - ::CheckGuardCells(*G_cp[lev]); - } + ::CheckGuardCells(m_fields, "rho_cp", lev); + ::CheckGuardCells(m_fields, "F_cp", lev); + ::CheckGuardCells(m_fields, "G_cp", lev); } } } @@ -1286,14 +1290,19 @@ void WarpX::InitializeEBGridData (int lev) "particles are close to embedded boundaries"); } - if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) 
{ + if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) + { + using warpx::fields::FieldType; auto const eb_fact = fieldEBFactory(lev); - ComputeEdgeLengths(m_edge_lengths[lev], eb_fact); - ScaleEdges(m_edge_lengths[lev], CellSize(lev)); - ComputeFaceAreas(m_face_areas[lev], eb_fact); - ScaleAreas(m_face_areas[lev], CellSize(lev)); + auto edge_lengths_lev = m_fields.get_alldirs(FieldType::edge_lengths, lev); + ComputeEdgeLengths(edge_lengths_lev, eb_fact); + ScaleEdges(edge_lengths_lev, CellSize(lev)); + + auto face_areas_lev = m_fields.get_alldirs(FieldType::face_areas, lev); + ComputeFaceAreas(face_areas_lev, eb_fact); + ScaleAreas(face_areas_lev, CellSize(lev)); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { MarkCells(); @@ -1358,6 +1367,9 @@ void WarpX::CheckKnownIssues() void WarpX::LoadExternalFields (int const lev) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + // External fields from file are currently not compatible with the moving window // In order to support the moving window, the MultiFab containing the external // fields should be updated every time the window moves. 
@@ -1375,14 +1387,14 @@ WarpX::LoadExternalFields (int const lev) if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Bfield_fp_external with external function InitializeExternalFieldsOnGridUsingParser( - Bfield_fp_external[lev][0].get(), - Bfield_fp_external[lev][1].get(), - Bfield_fp_external[lev][2].get(), + m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), + m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), + m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), m_p_ext_field_params->Bxfield_parser->compile<3>(), m_p_ext_field_params->Byfield_parser->compile<3>(), m_p_ext_field_params->Bzfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'B', lev, PatchType::fine); } @@ -1390,27 +1402,27 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][0].get(), "B", "r"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][1].get(), "B", "t"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external,Direction{0},lev), "B", "r"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external,Direction{1},lev), "B", "t"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external,Direction{2},lev), "B", "z"); #else - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, 
Bfield_fp_external[lev][0].get(), "B", "x"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][1].get(), "B", "y"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Bfield_fp_external[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), "B", "x"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), "B", "y"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), "B", "z"); #endif } if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Efield_fp_external with external function InitializeExternalFieldsOnGridUsingParser( - Efield_fp_external[lev][0].get(), - Efield_fp_external[lev][1].get(), - Efield_fp_external[lev][2].get(), + m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), m_p_ext_field_params->Exfield_parser->compile<3>(), m_p_ext_field_params->Eyfield_parser->compile<3>(), m_p_ext_field_params->Ezfield_parser->compile<3>(), - m_edge_lengths[lev], - m_face_areas[lev], + m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_fields.get_alldirs(FieldType::face_areas, lev), 'E', lev, PatchType::fine); } @@ -1418,13 +1430,13 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][0].get(), "E", "r"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, 
Efield_fp_external[lev][1].get(), "E", "t"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external,Direction{0},lev), "E", "r"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external,Direction{1},lev), "E", "t"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external,Direction{2},lev), "E", "z"); #else - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][0].get(), "E", "x"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][1].get(), "E", "y"); - ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, Efield_fp_external[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), "E", "x"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), "E", "y"); + ReadExternalFieldFromFile(m_p_ext_field_params->external_fields_path, m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), "E", "z"); #endif } @@ -1441,13 +1453,25 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][0].get(), "B", "r"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][1].get(), "B", "t"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(external_fields_path, + 
m_fields.get(FieldType::B_external_particle_field, Direction{0}, lev), + "B", "r"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{1}, lev), + "B", "t"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{2}, lev), + "B", "z"); #else - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][0].get(), "B", "x"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][1].get(), "B", "y"); - ReadExternalFieldFromFile(external_fields_path, B_external_particle_field[lev][2].get(), "B", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{0}, lev), + "B", "x"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{1}, lev), + "B", "y"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::B_external_particle_field, Direction{2}, lev), + "B", "z"); #endif } if (mypc->m_E_ext_particle_s == "read_from_file") { @@ -1457,13 +1481,25 @@ WarpX::LoadExternalFields (int const lev) #if defined(WARPX_DIM_RZ) WARPX_ALWAYS_ASSERT_WITH_MESSAGE(n_rz_azimuthal_modes == 1, "External field reading is not implemented for more than one RZ mode (see #3829)"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][0].get(), "E", "r"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][1].get(), "E", "t"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{0}, lev), + "E", "r"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{1}, lev), + "E", "t"); + 
ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{2}, lev), + "E", "z"); #else - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][0].get(), "E", "x"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][1].get(), "E", "y"); - ReadExternalFieldFromFile(external_fields_path, E_external_particle_field[lev][2].get(), "E", "z"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{0}, lev), + "E", "x"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{1}, lev), + "E", "y"); + ReadExternalFieldFromFile(external_fields_path, + m_fields.get(FieldType::E_external_particle_field, Direction{2}, lev), + "E", "z"); #endif } } diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 6c44df061fd..d64632d964a 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -12,6 +12,7 @@ #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) # include "BoundaryConditions/PML_RZ.H" #endif +#include "Fields.H" #include "Filter/BilinearFilter.H" #include "Utils/TextMsg.H" #include "Utils/WarpXAlgorithmSelection.H" @@ -20,6 +21,7 @@ #include "WarpXSumGuardCells.H" #include "Particles/MultiParticleContainer.H" +#include #include #include @@ -49,13 +51,20 @@ #include using namespace amrex; +using warpx::fields::FieldType; void WarpX::UpdateAuxilaryData () { WARPX_PROFILE("WarpX::UpdateAuxilaryData()"); - if (Bfield_aux[0][0]->ixType() == Bfield_fp[0][0]->ixType()) { + using ablastr::fields::Direction; + + amrex::MultiFab *Bfield_aux_lvl0_0 = m_fields.get(FieldType::Bfield_aux, Direction{0}, 0); + + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + + if (Bfield_aux_lvl0_0->ixType() == 
Bfield_fp[0][0]->ixType()) { UpdateAuxilaryDataSameType(); } else { UpdateAuxilaryDataStagToNodal(); @@ -64,14 +73,18 @@ WarpX::UpdateAuxilaryData () // When loading particle fields from file: add the external fields: for (int lev = 0; lev <= finest_level; ++lev) { if (mypc->m_E_ext_particle_s == "read_from_file") { - amrex::MultiFab::Add(*Efield_aux[lev][0], *E_external_particle_field[lev][0], 0, 0, E_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Efield_aux[lev][1], *E_external_particle_field[lev][1], 0, 0, E_external_particle_field[lev][1]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Efield_aux[lev][2], *E_external_particle_field[lev][2], 0, 0, E_external_particle_field[lev][2]->nComp(), guard_cells.ng_FieldGather); + ablastr::fields::VectorField Efield_aux = m_fields.get_alldirs(FieldType::Efield_aux, lev); + const auto& E_ext_lev = m_fields.get_alldirs(FieldType::E_external_particle_field, lev); + amrex::MultiFab::Add(*Efield_aux[0], *E_ext_lev[0], 0, 0, E_ext_lev[0]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Efield_aux[1], *E_ext_lev[1], 0, 0, E_ext_lev[1]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Efield_aux[2], *E_ext_lev[2], 0, 0, E_ext_lev[2]->nComp(), guard_cells.ng_FieldGather); } if (mypc->m_B_ext_particle_s == "read_from_file") { - amrex::MultiFab::Add(*Bfield_aux[lev][0], *B_external_particle_field[lev][0], 0, 0, B_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Bfield_aux[lev][1], *B_external_particle_field[lev][1], 0, 0, B_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); - amrex::MultiFab::Add(*Bfield_aux[lev][2], *B_external_particle_field[lev][2], 0, 0, B_external_particle_field[lev][0]->nComp(), guard_cells.ng_FieldGather); + ablastr::fields::VectorField Bfield_aux = m_fields.get_alldirs(FieldType::Bfield_aux, lev); + const auto& B_ext_lev = 
m_fields.get_alldirs(FieldType::B_external_particle_field, lev); + amrex::MultiFab::Add(*Bfield_aux[0], *B_ext_lev[0], 0, 0, B_ext_lev[0]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Bfield_aux[1], *B_ext_lev[1], 0, 0, B_ext_lev[1]->nComp(), guard_cells.ng_FieldGather); + amrex::MultiFab::Add(*Bfield_aux[2], *B_ext_lev[2], 0, 0, B_ext_lev[2]->nComp(), guard_cells.ng_FieldGather); } } @@ -87,11 +100,21 @@ WarpX::UpdateAuxilaryDataStagToNodal () "WarpX build with spectral solver support."); } #endif - - amrex::Vector,3>> const & Bmf = WarpX::fft_do_time_averaging ? - Bfield_avg_fp : Bfield_fp; - amrex::Vector,3>> const & Emf = WarpX::fft_do_time_averaging ? - Efield_avg_fp : Efield_fp; + using ablastr::fields::Direction; + + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + ablastr::fields::MultiLevelVectorField const& Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level); + ablastr::fields::MultiLevelVectorField const& Efield_aux = m_fields.get_mr_levels_alldirs(FieldType::Efield_aux, finest_level); + ablastr::fields::MultiLevelVectorField const& Bfield_aux = m_fields.get_mr_levels_alldirs(FieldType::Bfield_aux, finest_level); + + ablastr::fields::MultiLevelVectorField const & Bmf = + WarpX::fft_do_time_averaging ? + m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level) : + Bfield_fp; + ablastr::fields::MultiLevelVectorField const & Emf = + WarpX::fft_do_time_averaging ? 
+ m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level) : + Efield_fp; const amrex::IntVect& Bx_stag = Bmf[0][0]->ixType().toIntVect(); const amrex::IntVect& By_stag = Bmf[0][1]->ixType().toIntVect(); @@ -173,10 +196,10 @@ WarpX::UpdateAuxilaryDataStagToNodal () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { Array,3> Btmp; - if (Bfield_cax[lev][0]) { + if (m_fields.has_vector(FieldType::Bfield_cax, lev)) { for (int i = 0; i < 3; ++i) { Btmp[i] = std::make_unique( - *Bfield_cax[lev][i], amrex::make_alias, 0, 1); + *m_fields.get(FieldType::Bfield_cax, Direction{i}, lev), amrex::make_alias, 0, 1); } } else { const IntVect ngtmp = Bfield_aux[lev-1][0]->nGrowVect(); @@ -200,13 +223,13 @@ WarpX::UpdateAuxilaryDataStagToNodal () const amrex::IntVect& refinement_ratio = refRatio(lev-1); - const amrex::IntVect& Bx_fp_stag = Bfield_fp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& By_fp_stag = Bfield_fp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Bz_fp_stag = Bfield_fp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Bx_fp_stag = m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& By_fp_stag = m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Bz_fp_stag = m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->ixType().toIntVect(); - const amrex::IntVect& Bx_cp_stag = Bfield_cp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& By_cp_stag = Bfield_cp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Bz_cp_stag = Bfield_cp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Bx_cp_stag = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& By_cp_stag = m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Bz_cp_stag = m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->ixType().toIntVect(); #ifdef 
AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -216,12 +239,12 @@ WarpX::UpdateAuxilaryDataStagToNodal () Array4 const& bx_aux = Bfield_aux[lev][0]->array(mfi); Array4 const& by_aux = Bfield_aux[lev][1]->array(mfi); Array4 const& bz_aux = Bfield_aux[lev][2]->array(mfi); - Array4 const& bx_fp = Bfield_fp[lev][0]->const_array(mfi); - Array4 const& by_fp = Bfield_fp[lev][1]->const_array(mfi); - Array4 const& bz_fp = Bfield_fp[lev][2]->const_array(mfi); - Array4 const& bx_cp = Bfield_cp[lev][0]->const_array(mfi); - Array4 const& by_cp = Bfield_cp[lev][1]->const_array(mfi); - Array4 const& bz_cp = Bfield_cp[lev][2]->const_array(mfi); + Array4 const& bx_fp = m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& by_fp = m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->const_array(mfi); + Array4 const& bz_fp = m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->const_array(mfi); + Array4 const& bx_cp = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->const_array(mfi); + Array4 const& by_cp = m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->const_array(mfi); + Array4 const& bz_cp = m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->const_array(mfi); Array4 const& bx_c = Btmp[0]->const_array(mfi); Array4 const& by_c = Btmp[1]->const_array(mfi); Array4 const& bz_c = Btmp[2]->const_array(mfi); @@ -267,10 +290,10 @@ WarpX::UpdateAuxilaryDataStagToNodal () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { Array,3> Etmp; - if (Efield_cax[lev][0]) { + if (m_fields.has_vector(FieldType::Efield_cax, lev)) { for (int i = 0; i < 3; ++i) { Etmp[i] = std::make_unique( - *Efield_cax[lev][i], amrex::make_alias, 0, 1); + *m_fields.get(FieldType::Efield_cax, Direction{i}, lev), amrex::make_alias, 0, 1); } } else { const IntVect ngtmp = Efield_aux[lev-1][0]->nGrowVect(); @@ -295,13 +318,13 @@ WarpX::UpdateAuxilaryDataStagToNodal () const amrex::IntVect& refinement_ratio = refRatio(lev-1); - 
const amrex::IntVect& Ex_fp_stag = Efield_fp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& Ey_fp_stag = Efield_fp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Ez_fp_stag = Efield_fp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Ex_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ey_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ez_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->ixType().toIntVect(); - const amrex::IntVect& Ex_cp_stag = Efield_cp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& Ey_cp_stag = Efield_cp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Ez_cp_stag = Efield_cp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Ex_cp_stag = m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ey_cp_stag = m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ez_cp_stag = m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->ixType().toIntVect(); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) @@ -311,12 +334,12 @@ WarpX::UpdateAuxilaryDataStagToNodal () Array4 const& ex_aux = Efield_aux[lev][0]->array(mfi); Array4 const& ey_aux = Efield_aux[lev][1]->array(mfi); Array4 const& ez_aux = Efield_aux[lev][2]->array(mfi); - Array4 const& ex_fp = Efield_fp[lev][0]->const_array(mfi); - Array4 const& ey_fp = Efield_fp[lev][1]->const_array(mfi); - Array4 const& ez_fp = Efield_fp[lev][2]->const_array(mfi); - Array4 const& ex_cp = Efield_cp[lev][0]->const_array(mfi); - Array4 const& ey_cp = Efield_cp[lev][1]->const_array(mfi); - Array4 const& ez_cp = Efield_cp[lev][2]->const_array(mfi); + Array4 const& ex_fp = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_fp = m_fields.get(FieldType::Efield_fp, Direction{1}, 
lev)->const_array(mfi); + Array4 const& ez_fp = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->const_array(mfi); + Array4 const& ex_cp = m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_cp = m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->const_array(mfi); + Array4 const& ez_cp = m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->const_array(mfi); Array4 const& ex_c = Etmp[0]->const_array(mfi); Array4 const& ey_c = Etmp[1]->const_array(mfi); Array4 const& ez_c = Etmp[2]->const_array(mfi); @@ -332,9 +355,9 @@ WarpX::UpdateAuxilaryDataStagToNodal () } } else { // electrostatic - const amrex::IntVect& Ex_fp_stag = Efield_fp[lev][0]->ixType().toIntVect(); - const amrex::IntVect& Ey_fp_stag = Efield_fp[lev][1]->ixType().toIntVect(); - const amrex::IntVect& Ez_fp_stag = Efield_fp[lev][2]->ixType().toIntVect(); + const amrex::IntVect& Ex_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ey_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->ixType().toIntVect(); + const amrex::IntVect& Ez_fp_stag = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->ixType().toIntVect(); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif @@ -343,9 +366,9 @@ WarpX::UpdateAuxilaryDataStagToNodal () Array4 const& ex_aux = Efield_aux[lev][0]->array(mfi); Array4 const& ey_aux = Efield_aux[lev][1]->array(mfi); Array4 const& ez_aux = Efield_aux[lev][2]->array(mfi); - Array4 const& ex_fp = Efield_fp[lev][0]->const_array(mfi); - Array4 const& ey_fp = Efield_fp[lev][1]->const_array(mfi); - Array4 const& ez_fp = Efield_fp[lev][2]->const_array(mfi); + Array4 const& ex_fp = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_fp = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->const_array(mfi); + Array4 const& ez_fp = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->const_array(mfi); 
const Box& bx = mfi.growntilebox(); amrex::ParallelFor(bx, @@ -367,17 +390,23 @@ WarpX::UpdateAuxilaryDataSameType () // Update aux field, including guard cells, up to ng_FieldGather const amrex::IntVect& ng_src = guard_cells.ng_FieldGather; + using ablastr::fields::Direction; + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); + ablastr::fields::MultiLevelVectorField Efield_aux = m_fields.get_mr_levels_alldirs(FieldType::Efield_aux, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_aux = m_fields.get_mr_levels_alldirs(FieldType::Bfield_aux, finest_level); + // Level 0: Copy from fine to aux // Note: in some configurations, Efield_aux/Bfield_aux and Efield_fp/Bfield_fp are simply aliases to the // same MultiFab object. MultiFab::Copy operation automatically detects this and does nothing in this case. 
if (WarpX::fft_do_time_averaging) { - MultiFab::Copy(*Efield_aux[0][0], *Efield_avg_fp[0][0], 0, 0, Efield_aux[0][0]->nComp(), ng_src); - MultiFab::Copy(*Efield_aux[0][1], *Efield_avg_fp[0][1], 0, 0, Efield_aux[0][1]->nComp(), ng_src); - MultiFab::Copy(*Efield_aux[0][2], *Efield_avg_fp[0][2], 0, 0, Efield_aux[0][2]->nComp(), ng_src); - MultiFab::Copy(*Bfield_aux[0][0], *Bfield_avg_fp[0][0], 0, 0, Bfield_aux[0][0]->nComp(), ng_src); - MultiFab::Copy(*Bfield_aux[0][1], *Bfield_avg_fp[0][1], 0, 0, Bfield_aux[0][1]->nComp(), ng_src); - MultiFab::Copy(*Bfield_aux[0][2], *Bfield_avg_fp[0][2], 0, 0, Bfield_aux[0][2]->nComp(), ng_src); + MultiFab::Copy(*Efield_aux[0][0], *m_fields.get(FieldType::Efield_avg_fp, Direction{0}, 0), 0, 0, Efield_aux[0][0]->nComp(), ng_src); + MultiFab::Copy(*Efield_aux[0][1], *m_fields.get(FieldType::Efield_avg_fp, Direction{1}, 0), 0, 0, Efield_aux[0][1]->nComp(), ng_src); + MultiFab::Copy(*Efield_aux[0][2], *m_fields.get(FieldType::Efield_avg_fp, Direction{2}, 0), 0, 0, Efield_aux[0][2]->nComp(), ng_src); + MultiFab::Copy(*Bfield_aux[0][0], *m_fields.get(FieldType::Bfield_avg_fp, Direction{0}, 0), 0, 0, Bfield_aux[0][0]->nComp(), ng_src); + MultiFab::Copy(*Bfield_aux[0][1], *m_fields.get(FieldType::Bfield_avg_fp, Direction{1}, 0), 0, 0, Bfield_aux[0][1]->nComp(), ng_src); + MultiFab::Copy(*Bfield_aux[0][2], *m_fields.get(FieldType::Bfield_avg_fp, Direction{2}, 0), 0, 0, Bfield_aux[0][2]->nComp(), ng_src); } else { @@ -391,16 +420,19 @@ WarpX::UpdateAuxilaryDataSameType () for (int lev = 1; lev <= finest_level; ++lev) { const amrex::Periodicity& crse_period = Geom(lev-1).periodicity(); - const IntVect& ng = Bfield_cp[lev][0]->nGrowVect(); - const DistributionMapping& dm = Bfield_cp[lev][0]->DistributionMap(); + const IntVect& ng = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->nGrowVect(); + const DistributionMapping& dm = m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->DistributionMap(); // B field { if 
(electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { - MultiFab dBx(Bfield_cp[lev][0]->boxArray(), dm, Bfield_cp[lev][0]->nComp(), ng); - MultiFab dBy(Bfield_cp[lev][1]->boxArray(), dm, Bfield_cp[lev][1]->nComp(), ng); - MultiFab dBz(Bfield_cp[lev][2]->boxArray(), dm, Bfield_cp[lev][2]->nComp(), ng); + MultiFab dBx(m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->boxArray(), dm, + m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab dBy(m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->boxArray(), dm, + m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab dBz(m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->boxArray(), dm, + m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->nComp(), ng); dBx.setVal(0.0); dBy.setVal(0.0); dBz.setVal(0.0); @@ -418,15 +450,18 @@ WarpX::UpdateAuxilaryDataSameType () Bfield_aux[lev - 1][2]->nComp(), ng_src, ng, WarpX::do_single_precision_comms, crse_period); - if (Bfield_cax[lev][0]) + if (m_fields.has_vector(FieldType::Bfield_cax, lev)) { - MultiFab::Copy(*Bfield_cax[lev][0], dBx, 0, 0, Bfield_cax[lev][0]->nComp(), ng); - MultiFab::Copy(*Bfield_cax[lev][1], dBy, 0, 0, Bfield_cax[lev][1]->nComp(), ng); - MultiFab::Copy(*Bfield_cax[lev][2], dBz, 0, 0, Bfield_cax[lev][2]->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{0}, lev), dBx, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{0}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{1}, lev), dBy, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{1}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{2}, lev), dBz, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{2}, lev)->nComp(), ng); } - MultiFab::Subtract(dBx, *Bfield_cp[lev][0], 0, 0, Bfield_cp[lev][0]->nComp(), ng); - MultiFab::Subtract(dBy, *Bfield_cp[lev][1], 0, 0, Bfield_cp[lev][1]->nComp(), ng); - MultiFab::Subtract(dBz, 
*Bfield_cp[lev][2], 0, 0, Bfield_cp[lev][2]->nComp(), ng); + MultiFab::Subtract(dBx, *m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + 0, 0, m_fields.get(FieldType::Bfield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab::Subtract(dBy, *m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + 0, 0, m_fields.get(FieldType::Bfield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab::Subtract(dBz, *m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), + 0, 0, m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)->nComp(), ng); const amrex::IntVect& refinement_ratio = refRatio(lev-1); @@ -475,9 +510,12 @@ WarpX::UpdateAuxilaryDataSameType () { if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None) { - MultiFab dEx(Efield_cp[lev][0]->boxArray(), dm, Efield_cp[lev][0]->nComp(), ng); - MultiFab dEy(Efield_cp[lev][1]->boxArray(), dm, Efield_cp[lev][1]->nComp(), ng); - MultiFab dEz(Efield_cp[lev][2]->boxArray(), dm, Efield_cp[lev][2]->nComp(), ng); + MultiFab dEx(m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->boxArray(), dm, + m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab dEy(m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->boxArray(), dm, + m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab dEz(m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->boxArray(), dm, + m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->nComp(), ng); dEx.setVal(0.0); dEy.setVal(0.0); dEz.setVal(0.0); @@ -497,15 +535,18 @@ WarpX::UpdateAuxilaryDataSameType () WarpX::do_single_precision_comms, crse_period); - if (Efield_cax[lev][0]) + if (m_fields.has_vector(FieldType::Efield_cax, lev)) { - MultiFab::Copy(*Efield_cax[lev][0], dEx, 0, 0, Efield_cax[lev][0]->nComp(), ng); - MultiFab::Copy(*Efield_cax[lev][1], dEy, 0, 0, Efield_cax[lev][1]->nComp(), ng); - MultiFab::Copy(*Efield_cax[lev][2], dEz, 0, 0, Efield_cax[lev][2]->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{0}, 
lev), dEx, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{0}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{1}, lev), dEy, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{1}, lev)->nComp(), ng); + MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{2}, lev), dEz, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{2}, lev)->nComp(), ng); } - MultiFab::Subtract(dEx, *Efield_cp[lev][0], 0, 0, Efield_cp[lev][0]->nComp(), ng); - MultiFab::Subtract(dEy, *Efield_cp[lev][1], 0, 0, Efield_cp[lev][1]->nComp(), ng); - MultiFab::Subtract(dEz, *Efield_cp[lev][2], 0, 0, Efield_cp[lev][2]->nComp(), ng); + MultiFab::Subtract(dEx, *m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + 0, 0, m_fields.get(FieldType::Efield_cp, Direction{0}, lev)->nComp(), ng); + MultiFab::Subtract(dEy, *m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + 0, 0, m_fields.get(FieldType::Efield_cp, Direction{1}, lev)->nComp(), ng); + MultiFab::Subtract(dEz, *m_fields.get(FieldType::Efield_cp, Direction{2}, lev), + 0, 0, m_fields.get(FieldType::Efield_cp, Direction{2}, lev)->nComp(), ng); const amrex::IntVect& refinement_ratio = refRatio(lev-1); @@ -521,9 +562,9 @@ WarpX::UpdateAuxilaryDataSameType () Array4 const& ex_aux = Efield_aux[lev][0]->array(mfi); Array4 const& ey_aux = Efield_aux[lev][1]->array(mfi); Array4 const& ez_aux = Efield_aux[lev][2]->array(mfi); - Array4 const& ex_fp = Efield_fp[lev][0]->const_array(mfi); - Array4 const& ey_fp = Efield_fp[lev][1]->const_array(mfi); - Array4 const& ez_fp = Efield_fp[lev][2]->const_array(mfi); + Array4 const& ex_fp = m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->const_array(mfi); + Array4 const& ey_fp = m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->const_array(mfi); + Array4 const& ez_fp = m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->const_array(mfi); Array4 const& ex_c = dEx.const_array(mfi); Array4 const& ey_c = dEy.const_array(mfi); Array4 const& ez_c = 
dEz.const_array(mfi); @@ -545,9 +586,9 @@ WarpX::UpdateAuxilaryDataSameType () } else // electrostatic { - MultiFab::Copy(*Efield_aux[lev][0], *Efield_fp[lev][0], 0, 0, Efield_aux[lev][0]->nComp(), Efield_aux[lev][0]->nGrowVect()); - MultiFab::Copy(*Efield_aux[lev][1], *Efield_fp[lev][1], 0, 0, Efield_aux[lev][1]->nComp(), Efield_aux[lev][1]->nGrowVect()); - MultiFab::Copy(*Efield_aux[lev][2], *Efield_fp[lev][2], 0, 0, Efield_aux[lev][2]->nComp(), Efield_aux[lev][2]->nGrowVect()); + MultiFab::Copy(*Efield_aux[lev][0], *m_fields.get(FieldType::Efield_fp, Direction{0}, lev), 0, 0, Efield_aux[lev][0]->nComp(), Efield_aux[lev][0]->nGrowVect()); + MultiFab::Copy(*Efield_aux[lev][1], *m_fields.get(FieldType::Efield_fp, Direction{1}, lev), 0, 0, Efield_aux[lev][1]->nComp(), Efield_aux[lev][1]->nGrowVect()); + MultiFab::Copy(*Efield_aux[lev][2], *m_fields.get(FieldType::Efield_fp, Direction{2}, lev), 0, 0, Efield_aux[lev][2]->nComp(), Efield_aux[lev][2]->nGrowVect()); } } } @@ -668,14 +709,20 @@ WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::In std::array mf; amrex::Periodicity period; + using ablastr::fields::Direction; + if (patch_type == PatchType::fine) { - mf = {Efield_fp[lev][0].get(), Efield_fp[lev][1].get(), Efield_fp[lev][2].get()}; + mf = {m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev)}; period = Geom(lev).periodicity(); } else // coarse patch { - mf = {Efield_cp[lev][0].get(), Efield_cp[lev][1].get(), Efield_cp[lev][2].get()}; + mf = {m_fields.get(FieldType::Efield_cp, Direction{0}, lev), + m_fields.get(FieldType::Efield_cp, Direction{1}, lev), + m_fields.get(FieldType::Efield_cp, Direction{2}, lev)}; period = Geom(lev-1).periodicity(); } @@ -686,16 +733,18 @@ WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::In if (pml[lev] && pml[lev]->ok()) { const std::array mf_pml = - (patch_type 
== PatchType::fine) ? pml[lev]->GetE_fp() : pml[lev]->GetE_cp(); + (patch_type == PatchType::fine) ? + m_fields.get_alldirs(FieldType::pml_E_fp, lev) : + m_fields.get_alldirs(FieldType::pml_E_cp, lev); pml[lev]->Exchange(mf_pml, mf, patch_type, do_pml_in_domain); - pml[lev]->FillBoundaryE(patch_type, nodal_sync); + pml[lev]->FillBoundary(mf_pml, patch_type, nodal_sync); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev]) { - pml_rz[lev]->FillBoundaryE(patch_type, nodal_sync); + pml_rz[lev]->FillBoundaryE(m_fields, patch_type, nodal_sync); } #endif } @@ -725,14 +774,20 @@ WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::In std::array mf; amrex::Periodicity period; + using ablastr::fields::Direction; + if (patch_type == PatchType::fine) { - mf = {Bfield_fp[lev][0].get(), Bfield_fp[lev][1].get(), Bfield_fp[lev][2].get()}; + mf = {m_fields.get(FieldType::Bfield_fp, Direction{0}, lev), + m_fields.get(FieldType::Bfield_fp, Direction{1}, lev), + m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)}; period = Geom(lev).periodicity(); } else // coarse patch { - mf = {Bfield_cp[lev][0].get(), Bfield_cp[lev][1].get(), Bfield_cp[lev][2].get()}; + mf = {m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), + m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), + m_fields.get(FieldType::Bfield_cp, Direction{2}, lev)}; period = Geom(lev-1).periodicity(); } @@ -743,16 +798,18 @@ WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::In if (pml[lev] && pml[lev]->ok()) { const std::array mf_pml = - (patch_type == PatchType::fine) ? pml[lev]->GetB_fp() : pml[lev]->GetB_cp(); + (patch_type == PatchType::fine) ? 
+ m_fields.get_alldirs(FieldType::pml_B_fp, lev) : + m_fields.get_alldirs(FieldType::pml_B_cp, lev); pml[lev]->Exchange(mf_pml, mf, patch_type, do_pml_in_domain); - pml[lev]->FillBoundaryB(patch_type, nodal_sync); + pml[lev]->FillBoundary(mf_pml, patch_type, nodal_sync); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev]) { - pml_rz[lev]->FillBoundaryB(patch_type, nodal_sync); + pml_rz[lev]->FillBoundaryB(m_fields, patch_type, nodal_sync); } #endif } @@ -786,9 +843,11 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng) WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented"); } + ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); + const amrex::Periodicity& period = Geom(lev).periodicity(); if ( safe_guard_cells ){ - const Vector mf{Efield_avg_fp[lev][0].get(),Efield_avg_fp[lev][1].get(),Efield_avg_fp[lev][2].get()}; + const Vector mf{Efield_avg_fp[lev][0],Efield_avg_fp[lev][1],Efield_avg_fp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period); } else { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -806,9 +865,11 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng) WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented"); } + ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); + const amrex::Periodicity& cperiod = Geom(lev-1).periodicity(); if ( safe_guard_cells ) { - const Vector mf{Efield_avg_cp[lev][0].get(),Efield_avg_cp[lev][1].get(),Efield_avg_cp[lev][2].get()}; + const Vector mf{Efield_avg_cp[lev][0],Efield_avg_cp[lev][1],Efield_avg_cp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, cperiod); } else { @@ -833,19 +894,24 @@ WarpX::FillBoundaryB_avg (int lev, IntVect ng) void WarpX::FillBoundaryB_avg (int lev, PatchType 
patch_type, IntVect ng) { + using ablastr::fields::Direction; + if (patch_type == PatchType::fine) { if (do_pml && pml[lev]->ok()) { WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented"); } + + ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); + const amrex::Periodicity& period = Geom(lev).periodicity(); if ( safe_guard_cells ) { - const Vector mf{Bfield_avg_fp[lev][0].get(),Bfield_avg_fp[lev][1].get(),Bfield_avg_fp[lev][2].get()}; + const Vector mf{Bfield_avg_fp[lev][0],Bfield_avg_fp[lev][1],Bfield_avg_fp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period); } else { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - ng.allLE(Bfield_fp[lev][0]->nGrowVect()), + ng.allLE(m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->nGrowVect()), "Error: in FillBoundaryB, requested more guard cells than allocated"); ablastr::utils::communication::FillBoundary(*Bfield_avg_fp[lev][0], ng, WarpX::do_single_precision_comms, period); ablastr::utils::communication::FillBoundary(*Bfield_avg_fp[lev][1], ng, WarpX::do_single_precision_comms, period); @@ -859,9 +925,11 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng) WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented"); } + ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); + const amrex::Periodicity& cperiod = Geom(lev-1).periodicity(); if ( safe_guard_cells ){ - const Vector mf{Bfield_avg_cp[lev][0].get(),Bfield_avg_cp[lev][1].get(),Bfield_avg_cp[lev][2].get()}; + const Vector mf{Bfield_avg_cp[lev][0],Bfield_avg_cp[lev][1],Bfield_avg_cp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, cperiod); } else { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -888,30 +956,38 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, 
std::optionalok()) { - if (F_fp[lev]) { pml[lev]->ExchangeF(patch_type, F_fp[lev].get(), do_pml_in_domain); } - pml[lev]->FillBoundaryF(patch_type, nodal_sync); + if (m_fields.has(FieldType::pml_F_fp, lev) && m_fields.has(FieldType::F_fp, lev)) { + pml[lev]->Exchange(m_fields.get(FieldType::pml_F_fp, lev), m_fields.get(FieldType::F_fp, lev), patch_type, do_pml_in_domain); + } + if (m_fields.has(FieldType::pml_F_fp, lev)) { + pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_F_fp, lev), patch_type, nodal_sync); + } } - if (F_fp[lev]) + if (m_fields.has(FieldType::F_fp, lev)) { const amrex::Periodicity& period = Geom(lev).periodicity(); - const amrex::IntVect& nghost = (safe_guard_cells) ? F_fp[lev]->nGrowVect() : ng; - ablastr::utils::communication::FillBoundary(*F_fp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync); + const amrex::IntVect& nghost = (safe_guard_cells) ? m_fields.get(FieldType::F_fp, lev)->nGrowVect() : ng; + ablastr::utils::communication::FillBoundary(*m_fields.get(FieldType::F_fp, lev), nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } else if (patch_type == PatchType::coarse) { if (do_pml && pml[lev] && pml[lev]->ok()) { - if (F_cp[lev]) { pml[lev]->ExchangeF(patch_type, F_cp[lev].get(), do_pml_in_domain); } - pml[lev]->FillBoundaryF(patch_type, nodal_sync); + if (m_fields.has(FieldType::pml_F_cp, lev) && m_fields.has(FieldType::F_cp, lev)) { + pml[lev]->Exchange(m_fields.get(FieldType::pml_F_cp, lev), m_fields.get(FieldType::F_cp, lev), patch_type, do_pml_in_domain); + } + if (m_fields.has(FieldType::pml_F_cp, lev)) { + pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_F_cp, lev), patch_type, nodal_sync); + } } - if (F_cp[lev]) + if (m_fields.has(FieldType::F_cp, lev)) { const amrex::Periodicity& period = Geom(lev-1).periodicity(); - const amrex::IntVect& nghost = (safe_guard_cells) ? 
F_cp[lev]->nGrowVect() : ng; - ablastr::utils::communication::FillBoundary(*F_cp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync); + const amrex::IntVect& nghost = (safe_guard_cells) ? m_fields.get(FieldType::F_cp, lev)->nGrowVect() : ng; + ablastr::utils::communication::FillBoundary(*m_fields.get(FieldType::F_cp, lev), nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } } @@ -932,30 +1008,40 @@ void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng, std::optio { if (do_pml && pml[lev] && pml[lev]->ok()) { - if (G_fp[lev]) { pml[lev]->ExchangeG(patch_type, G_fp[lev].get(), do_pml_in_domain); } - pml[lev]->FillBoundaryG(patch_type, nodal_sync); + if (m_fields.has(FieldType::pml_G_fp,lev) && m_fields.has(FieldType::G_fp,lev)) { + pml[lev]->Exchange(m_fields.get(FieldType::pml_G_fp, lev), m_fields.get(FieldType::G_fp, lev), patch_type, do_pml_in_domain); + } + if (m_fields.has(FieldType::pml_G_fp,lev)) { + pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_G_fp, lev), patch_type, nodal_sync); + } } - if (G_fp[lev]) + if (m_fields.has(FieldType::G_fp,lev)) { const amrex::Periodicity& period = Geom(lev).periodicity(); - const amrex::IntVect& nghost = (safe_guard_cells) ? G_fp[lev]->nGrowVect() : ng; - ablastr::utils::communication::FillBoundary(*G_fp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync); + MultiFab* G_fp = m_fields.get(FieldType::G_fp,lev); + const amrex::IntVect& nghost = (safe_guard_cells) ? 
G_fp->nGrowVect() : ng; + ablastr::utils::communication::FillBoundary(*G_fp, nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } else if (patch_type == PatchType::coarse) { if (do_pml && pml[lev] && pml[lev]->ok()) { - if (G_cp[lev]) { pml[lev]->ExchangeG(patch_type, G_cp[lev].get(), do_pml_in_domain); } - pml[lev]->FillBoundaryG(patch_type, nodal_sync); + if (m_fields.has(FieldType::pml_G_cp,lev) && m_fields.has(FieldType::G_cp,lev)) { + pml[lev]->Exchange(m_fields.get(FieldType::pml_G_cp, lev), m_fields.get(FieldType::G_cp, lev), patch_type, do_pml_in_domain); + } + if (m_fields.has(FieldType::pml_G_cp, lev)) { + pml[lev]->FillBoundary(*m_fields.get(FieldType::pml_G_cp, lev), patch_type, nodal_sync); + } } - if (G_cp[lev]) + if (m_fields.has(FieldType::G_cp,lev)) { const amrex::Periodicity& period = Geom(lev-1).periodicity(); - const amrex::IntVect& nghost = (safe_guard_cells) ? G_cp[lev]->nGrowVect() : ng; - ablastr::utils::communication::FillBoundary(*G_cp[lev], nghost, WarpX::do_single_precision_comms, period, nodal_sync); + MultiFab* G_cp = m_fields.get(FieldType::G_cp,lev); + const amrex::IntVect& nghost = (safe_guard_cells) ? 
G_cp->nGrowVect() : ng; + ablastr::utils::communication::FillBoundary(*G_cp, nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } } @@ -972,6 +1058,9 @@ WarpX::FillBoundaryAux (IntVect ng) void WarpX::FillBoundaryAux (int lev, IntVect ng) { + ablastr::fields::MultiLevelVectorField Efield_aux = m_fields.get_mr_levels_alldirs(FieldType::Efield_aux, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_aux = m_fields.get_mr_levels_alldirs(FieldType::Bfield_aux, finest_level); + const amrex::Periodicity& period = Geom(lev).periodicity(); ablastr::utils::communication::FillBoundary(*Efield_aux[lev][0], ng, WarpX::do_single_precision_comms, period); ablastr::utils::communication::FillBoundary(*Efield_aux[lev][1], ng, WarpX::do_single_precision_comms, period); @@ -982,23 +1071,26 @@ WarpX::FillBoundaryAux (int lev, IntVect ng) } void -WarpX::SyncCurrent ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, - const amrex::Vector,3>>& J_buffer) +WarpX::SyncCurrent (const std::string& current_fp_string) { + using ablastr::fields::Direction; + WARPX_PROFILE("WarpX::SyncCurrent()"); + ablastr::fields::MultiLevelVectorField const& J_fp = m_fields.get_mr_levels_alldirs(current_fp_string, finest_level); + // If warpx.do_current_centering = 1, center currents from nodal grid to staggered grid if (do_current_centering) { + ablastr::fields::MultiLevelVectorField const& J_fp_nodal = m_fields.get_mr_levels_alldirs(FieldType::current_fp_nodal, finest_level+1); + AMREX_ALWAYS_ASSERT_WITH_MESSAGE(finest_level <= 1, "warpx.do_current_centering=1 not supported with more than one fine levels"); for (int lev = 0; lev <= finest_level; lev++) { - WarpX::UpdateCurrentNodalToStag(*J_fp[lev][0], *current_fp_nodal[lev][0]); - WarpX::UpdateCurrentNodalToStag(*J_fp[lev][1], *current_fp_nodal[lev][1]); - WarpX::UpdateCurrentNodalToStag(*J_fp[lev][2], *current_fp_nodal[lev][2]); + WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{0}], 
*J_fp_nodal[lev][Direction{0}]); + WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{1}], *J_fp_nodal[lev][Direction{1}]); + WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{2}], *J_fp_nodal[lev][Direction{2}]); } } @@ -1073,7 +1165,7 @@ WarpX::SyncCurrent ( { for (int lev = finest_level; lev >= 0; --lev) { - const int ncomp = J_fp[lev][idim]->nComp(); + const int ncomp = J_fp[lev][Direction{idim}]->nComp(); auto const& period = Geom(lev).periodicity(); if (lev < finest_level) @@ -1081,8 +1173,8 @@ WarpX::SyncCurrent ( // On a coarse level, the data in mf_comm comes from the // coarse patch of the fine level. They are unfiltered and uncommunicated. // We need to add it to the fine patch of the current level. - MultiFab fine_lev_cp(J_fp[lev][idim]->boxArray(), - J_fp[lev][idim]->DistributionMap(), + MultiFab fine_lev_cp(J_fp[lev][Direction{idim}]->boxArray(), + J_fp[lev][Direction{idim}]->DistributionMap(), ncomp, 0); fine_lev_cp.setVal(0.0); fine_lev_cp.ParallelAdd(*mf_comm, 0, 0, ncomp, mf_comm->nGrowVect(), @@ -1091,7 +1183,7 @@ WarpX::SyncCurrent ( auto owner_mask = amrex::OwnerMask(fine_lev_cp, period); auto const& mma = owner_mask->const_arrays(); auto const& sma = fine_lev_cp.const_arrays(); - auto const& dma = J_fp[lev][idim]->arrays(); + auto const& dma = J_fp[lev][Direction{idim}]->arrays(); amrex::ParallelFor(fine_lev_cp, IntVect(0), ncomp, [=] AMREX_GPU_DEVICE (int bno, int i, int j, int k, int n) { @@ -1100,6 +1192,7 @@ WarpX::SyncCurrent ( } }); // Now it's safe to apply filter and sumboundary on J_cp + ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level); if (use_filter) { ApplyFilterJ(J_cp, lev+1, idim); @@ -1114,23 +1207,26 @@ WarpX::SyncCurrent ( // filtering depends on the level. This is also done before any // same-level communication because it's easier this way to // avoid double counting. 
- J_cp[lev][idim]->setVal(0.0); - ablastr::coarsen::average::Coarsen(*J_cp[lev][idim], - *J_fp[lev][idim], + ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level); + J_cp[lev][Direction{idim}]->setVal(0.0); + ablastr::coarsen::average::Coarsen(*J_cp[lev][Direction{idim}], + *J_fp[lev][Direction{idim}], refRatio(lev-1)); - if (J_buffer[lev][idim]) + if (m_fields.has(FieldType::current_buf, Direction{idim}, lev)) { - IntVect const& ng = J_cp[lev][idim]->nGrowVect(); - AMREX_ASSERT(ng.allLE(J_buffer[lev][idim]->nGrowVect())); - MultiFab::Add(*J_buffer[lev][idim], *J_cp[lev][idim], + ablastr::fields::MultiLevelVectorField const& J_buffer = m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level); + + IntVect const& ng = J_cp[lev][Direction{idim}]->nGrowVect(); + AMREX_ASSERT(ng.allLE(J_buffer[lev][Direction{idim}]->nGrowVect())); + MultiFab::Add(*J_buffer[lev][Direction{idim}], *J_cp[lev][Direction{idim}], 0, 0, ncomp, ng); mf_comm = std::make_unique - (*J_buffer[lev][idim], amrex::make_alias, 0, ncomp); + (*J_buffer[lev][Direction{idim}], amrex::make_alias, 0, ncomp); } else { mf_comm = std::make_unique - (*J_cp[lev][idim], amrex::make_alias, 0, ncomp); + (*J_cp[lev][Direction{idim}], amrex::make_alias, 0, ncomp); } } @@ -1145,14 +1241,24 @@ WarpX::SyncCurrent ( void WarpX::SyncRho () { - SyncRho(rho_fp, rho_cp, charge_buf); + const ablastr::fields::MultiLevelScalarField rho_fp = m_fields.has(FieldType::rho_fp, 0) ? + m_fields.get_mr_levels(FieldType::rho_fp, finest_level) : + ablastr::fields::MultiLevelScalarField{static_cast(finest_level+1)}; + const ablastr::fields::MultiLevelScalarField rho_cp = m_fields.has(FieldType::rho_cp, 1) ? + m_fields.get_mr_levels(FieldType::rho_cp, finest_level) : + ablastr::fields::MultiLevelScalarField{static_cast(finest_level+1)}; + const ablastr::fields::MultiLevelScalarField rho_buf = m_fields.has(FieldType::rho_buf, 1) ? 
+ m_fields.get_mr_levels(FieldType::rho_buf, finest_level) : + ablastr::fields::MultiLevelScalarField{static_cast(finest_level+1)}; + + SyncRho(rho_fp, rho_cp, rho_buf); } void WarpX::SyncRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - const amrex::Vector>& charge_buffer) + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, + ablastr::fields::MultiLevelScalarField const & charge_buffer) { WARPX_PROFILE("WarpX::SyncRho()"); @@ -1227,8 +1333,8 @@ WarpX::SyncRho ( * averaging the values of the current of the fine patch (on the same level). */ void WarpX::RestrictCurrentFromFineToCoarsePatch ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, const int lev) { J_cp[lev][0]->setVal(0.0); @@ -1237,23 +1343,25 @@ void WarpX::RestrictCurrentFromFineToCoarsePatch ( const IntVect& refinement_ratio = refRatio(lev-1); - std::array fine { J_fp[lev][0].get(), - J_fp[lev][1].get(), - J_fp[lev][2].get() }; - std::array< MultiFab*,3> crse { J_cp[lev][0].get(), - J_cp[lev][1].get(), - J_cp[lev][2].get() }; + std::array fine { J_fp[lev][0], + J_fp[lev][1], + J_fp[lev][2] }; + std::array< MultiFab*,3> crse { J_cp[lev][0], + J_cp[lev][1], + J_cp[lev][2] }; ablastr::coarsen::average::Coarsen(*crse[0], *fine[0], refinement_ratio ); ablastr::coarsen::average::Coarsen(*crse[1], *fine[1], refinement_ratio ); ablastr::coarsen::average::Coarsen(*crse[2], *fine[2], refinement_ratio ); } void WarpX::ApplyFilterJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, const int lev, const int idim) { - amrex::MultiFab& J = *current[lev][idim]; + using ablastr::fields::Direction; + + amrex::MultiFab& J = *current[lev][Direction{idim}]; const int ncomp = J.nComp(); const amrex::IntVect ngrow = J.nGrowVect(); @@ -1266,7 +1374,7 @@ void 
WarpX::ApplyFilterJ ( } void WarpX::ApplyFilterJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, const int lev) { for (int idim=0; idim<3; ++idim) @@ -1276,12 +1384,14 @@ void WarpX::ApplyFilterJ ( } void WarpX::SumBoundaryJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, const int lev, const int idim, const amrex::Periodicity& period) { - amrex::MultiFab& J = *current[lev][idim]; + using ablastr::fields::Direction; + + amrex::MultiFab& J = *current[lev][Direction{idim}]; const amrex::IntVect ng = J.nGrowVect(); amrex::IntVect ng_depos_J = get_ng_depos_J(); @@ -1314,7 +1424,7 @@ void WarpX::SumBoundaryJ ( } void WarpX::SumBoundaryJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, const int lev, const amrex::Periodicity& period) { @@ -1338,9 +1448,9 @@ void WarpX::SumBoundaryJ ( * patch (and buffer region) of `lev+1` */ void WarpX::AddCurrentFromFineLevelandSumBoundary ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, - const amrex::Vector,3>>& J_buffer, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, + const ablastr::fields::MultiLevelVectorField& J_buffer, const int lev) { const amrex::Periodicity& period = Geom(lev).periodicity(); @@ -1415,28 +1525,25 @@ void WarpX::AddCurrentFromFineLevelandSumBoundary ( } } -void WarpX::RestrictRhoFromFineToCoarsePatch ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - const int lev) +void WarpX::RestrictRhoFromFineToCoarsePatch ( const int lev ) { - if (charge_fp[lev]) { - charge_cp[lev]->setVal(0.0); + if (m_fields.has(FieldType::rho_fp, lev)) { + m_fields.get(FieldType::rho_cp, lev)->setVal(0.0); const IntVect& refinement_ratio = refRatio(lev-1); - ablastr::coarsen::average::Coarsen(*charge_cp[lev], *charge_fp[lev], refinement_ratio ); + 
ablastr::coarsen::average::Coarsen(*m_fields.get(FieldType::rho_cp, lev), *m_fields.get(FieldType::rho_fp, lev), refinement_ratio ); } } void WarpX::ApplyFilterandSumBoundaryRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, const int lev, PatchType patch_type, const int icomp, const int ncomp) { const int glev = (patch_type == PatchType::fine) ? lev : lev-1; - const std::unique_ptr& rho = (patch_type == PatchType::fine) ? + amrex::MultiFab* rho = (patch_type == PatchType::fine) ? charge_fp[lev] : charge_cp[lev]; if (rho == nullptr) { return; } ApplyFilterandSumBoundaryRho(lev, glev, *rho, icomp, ncomp); @@ -1474,9 +1581,9 @@ void WarpX::ApplyFilterandSumBoundaryRho (int /*lev*/, int glev, amrex::MultiFab * patch (and buffer region) of `lev+1` */ void WarpX::AddRhoFromFineLevelandSumBoundary ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - const amrex::Vector>& charge_buffer, + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, + ablastr::fields::MultiLevelScalarField const & charge_buffer, const int lev, const int icomp, const int ncomp) @@ -1555,8 +1662,8 @@ void WarpX::AddRhoFromFineLevelandSumBoundary ( } void WarpX::NodalSyncJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, const int lev, PatchType patch_type) { diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 0a3ab8d2099..a0a2d4929df 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -12,6 +12,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #include "EmbeddedBoundary/WarpXFaceInfoBox.H" +#include "Fields.H" 
#include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" #include "Initialization/ExternalField.H" #include "Particles/MultiParticleContainer.H" @@ -21,6 +22,8 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/WarpXProfilerWrapper.H" +#include + #include #include #include @@ -168,85 +171,30 @@ WarpX::LoadBalance () void WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const DistributionMapping& dm) { - - const auto RemakeMultiFab = [&](auto& mf, const bool redistribute){ - if (mf == nullptr) { return; } - const IntVect& ng = mf->nGrowVect(); - auto pmf = std::remove_reference_t{}; - AllocInitMultiFab(pmf, mf->boxArray(), dm, mf->nComp(), ng, lev, mf->tags()[0]); - if (redistribute) { pmf->Redistribute(*mf, 0, 0, mf->nComp(), ng); } - mf = std::move(pmf); - }; + using ablastr::fields::Direction; + using warpx::fields::FieldType; bool const eb_enabled = EB::enabled(); if (ba == boxArray(lev)) { if (ParallelDescriptor::NProcs() == 1) { return; } + m_fields.remake_level(lev, dm); + // Fine patch + ablastr::fields::MultiLevelVectorField const& Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level); for (int idim=0; idim < 3; ++idim) { - RemakeMultiFab(Bfield_fp[lev][idim], true); - RemakeMultiFab(Efield_fp[lev][idim], true); - if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::read_from_file) { - RemakeMultiFab(Bfield_fp_external[lev][idim], true); - } - if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::read_from_file) { - RemakeMultiFab(Efield_fp_external[lev][idim], true); - } - if (mypc->m_B_ext_particle_s == "read_from_file") { - RemakeMultiFab(B_external_particle_field[lev][idim], true); - } - if (mypc->m_E_ext_particle_s == "read_from_file") { - RemakeMultiFab(E_external_particle_field[lev][idim], true); - } - RemakeMultiFab(current_fp[lev][idim], false); - RemakeMultiFab(current_store[lev][idim], false); - if (current_deposition_algo == CurrentDepositionAlgo::Vay) { 
- RemakeMultiFab(current_fp_vay[lev][idim], false); - } - if (do_current_centering) { - RemakeMultiFab(current_fp_nodal[lev][idim], false); - } - if (fft_do_time_averaging) { - RemakeMultiFab(Efield_avg_fp[lev][idim], true); - RemakeMultiFab(Bfield_avg_fp[lev][idim], true); - } - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { - RemakeMultiFab(m_hybrid_pic_model->current_fp_temp[lev][idim], true); - RemakeMultiFab(m_hybrid_pic_model->current_fp_ampere[lev][idim], false); - RemakeMultiFab(m_hybrid_pic_model->current_fp_external[lev][idim],true); - } if (eb_enabled) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - RemakeMultiFab(m_edge_lengths[lev][idim], false); - RemakeMultiFab(m_face_areas[lev][idim], false); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - RemakeMultiFab(Venl[lev][idim], false); - RemakeMultiFab(m_flag_info_face[lev][idim], false); - RemakeMultiFab(m_flag_ext_face[lev][idim], false); - RemakeMultiFab(m_area_mod[lev][idim], false); - RemakeMultiFab(ECTRhofield[lev][idim], false); m_borrowing[lev][idim] = std::make_unique>(amrex::convert(ba, Bfield_fp[lev][idim]->ixType().toIntVect()), dm); } } } } - RemakeMultiFab(F_fp[lev], true); - RemakeMultiFab(rho_fp[lev], false); - // phi_fp should be redistributed since we use the solution from - // the last step as the initial guess for the next solve - RemakeMultiFab(phi_fp[lev], true); - - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { - RemakeMultiFab(m_hybrid_pic_model->rho_fp_temp[lev], true); - RemakeMultiFab(m_hybrid_pic_model->electron_pressure_fp[lev], false); - } - if (eb_enabled) { - RemakeMultiFab(m_distance_to_eb[lev], false); - #ifdef AMREX_USE_EB int const max_guard = guard_cells.ng_FieldSolver.max(); m_field_factory[lev] = amrex::makeEBFabFactory(Geom(lev), ba, dm, @@ -292,35 +240,8 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi } 
#endif - // Aux patch - if (lev == 0 && Bfield_aux[0][0]->ixType() == Bfield_fp[0][0]->ixType()) - { - for (int idim = 0; idim < 3; ++idim) { - Bfield_aux[lev][idim] = std::make_unique(*Bfield_fp[lev][idim], amrex::make_alias, 0, Bfield_aux[lev][idim]->nComp()); - Efield_aux[lev][idim] = std::make_unique(*Efield_fp[lev][idim], amrex::make_alias, 0, Efield_aux[lev][idim]->nComp()); - } - } else { - for (int idim=0; idim < 3; ++idim) - { - RemakeMultiFab(Bfield_aux[lev][idim], false); - RemakeMultiFab(Efield_aux[lev][idim], false); - } - } - // Coarse patch if (lev > 0) { - for (int idim=0; idim < 3; ++idim) - { - RemakeMultiFab(Bfield_cp[lev][idim], true); - RemakeMultiFab(Efield_cp[lev][idim], true); - RemakeMultiFab(current_cp[lev][idim], false); - if (fft_do_time_averaging) { - RemakeMultiFab(Efield_avg_cp[lev][idim], true); - RemakeMultiFab(Bfield_avg_cp[lev][idim], true); - } - } - RemakeMultiFab(F_cp[lev], true); - RemakeMultiFab(rho_cp[lev], false); #ifdef WARPX_USE_FFT if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { @@ -358,17 +279,6 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi } if (lev > 0 && (n_field_gather_buffer > 0 || n_current_deposition_buffer > 0)) { - for (int idim=0; idim < 3; ++idim) - { - RemakeMultiFab(Bfield_cax[lev][idim], false); - RemakeMultiFab(Efield_cax[lev][idim], false); - RemakeMultiFab(current_buf[lev][idim], false); - } - RemakeMultiFab(charge_buf[lev], false); - // we can avoid redistributing these since we immediately re-build the values via BuildBufferMasks() - RemakeMultiFab(current_buffer_masks[lev], false); - RemakeMultiFab(gather_buffer_masks[lev], false); - if (current_buffer_masks[lev] || gather_buffer_masks[lev]) { BuildBufferMasks(); } @@ -405,6 +315,9 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi void WarpX::ComputeCostsHeuristic (amrex::Vector > >& a_costs) { + using ablastr::fields::Direction; + using 
warpx::fields::FieldType; + for (int lev = 0; lev <= finest_level; ++lev) { const auto & mypc_ref = GetInstance().GetPartContainer(); @@ -423,7 +336,7 @@ WarpX::ComputeCostsHeuristic (amrex::Vectorupdate(t_lab); - BL_ASSERT(OnSameGrids(lev,jx)); + BL_ASSERT(OnSameGrids(lev, *fields.get(FieldType::current_fp, Direction{0}, lev))); amrex::LayoutData* cost = WarpX::getCosts(lev); - const bool has_buffer = cjx; + const bool has_rho = fields.has(FieldType::rho_fp, lev); + const bool has_buffer = fields.has_vector(FieldType::current_buf, lev); #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) @@ -626,11 +626,13 @@ LaserParticleContainer::Evolve (int lev, np_current = 0; } - if (rho && ! skip_deposition && ! do_not_deposit) { + if (has_rho && ! skip_deposition && ! do_not_deposit) { int* AMREX_RESTRICT ion_lev = nullptr; + amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); DepositCharge(pti, wp, ion_lev, rho, 0, 0, np_current, thread_num, lev, lev); if (has_buffer) { + amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev); DepositCharge(pti, wp, ion_lev, crho, 0, np_current, np-np_current, thread_num, lev, lev-1); } @@ -658,6 +660,7 @@ LaserParticleContainer::Evolve (int lev, WARPX_PROFILE_VAR_STOP(blp_pp); // Current Deposition + using ablastr::fields::Direction; if (!skip_deposition) { // Deposit at t_{n+1/2} @@ -665,13 +668,19 @@ LaserParticleContainer::Evolve (int lev, int* ion_lev = nullptr; // Deposit inside domains - DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, &jx, &jy, &jz, + amrex::MultiFab * jx = fields.get(current_fp_string, Direction{0}, lev); + amrex::MultiFab * jy = fields.get(current_fp_string, Direction{1}, lev); + amrex::MultiFab * jz = fields.get(current_fp_string, Direction{2}, lev); + DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, jx, jy, jz, 0, np_current, thread_num, lev, lev, dt, relative_time, push_type); if (has_buffer) { // Deposit in buffers + amrex::MultiFab * cjx = 
fields.get(FieldType::current_buf, Direction{0}, lev); + amrex::MultiFab * cjy = fields.get(FieldType::current_buf, Direction{1}, lev); + amrex::MultiFab * cjz = fields.get(FieldType::current_buf, Direction{2}, lev); DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, cjx, cjy, cjz, np_current, np-np_current, thread_num, lev, lev-1, dt, relative_time, push_type); @@ -679,11 +688,13 @@ LaserParticleContainer::Evolve (int lev, } - if (rho && ! skip_deposition && ! do_not_deposit) { + if (has_rho && ! skip_deposition && ! do_not_deposit) { int* AMREX_RESTRICT ion_lev = nullptr; + amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current, thread_num, lev, lev); if (has_buffer) { + amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev); DepositCharge(pti, wp, ion_lev, crho, 1, np_current, np-np_current, thread_num, lev, lev-1); } diff --git a/Source/Particles/MultiParticleContainer.H b/Source/Particles/MultiParticleContainer.H index 97e4e1bc4da..0e33b6bac3c 100644 --- a/Source/Particles/MultiParticleContainer.H +++ b/Source/Particles/MultiParticleContainer.H @@ -26,6 +26,8 @@ #include "WarpXParticleContainer.H" #include "ParticleBoundaries.H" +#include + #include #include #include @@ -102,16 +104,16 @@ public: * field solve, and pushing the particles, for all the species in the MultiParticleContainer. * This is the electromagnetic version. 
*/ - void Evolve (int lev, - const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, - amrex::MultiFab* cjx, amrex::MultiFab* cjy, amrex::MultiFab* cjz, - amrex::MultiFab* rho, amrex::MultiFab* crho, - const amrex::MultiFab* cEx, const amrex::MultiFab* cEy, const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, const amrex::MultiFab* cBy, const amrex::MultiFab* cBz, - amrex::Real t, amrex::Real dt, DtType a_dt_type=DtType::Full, bool skip_deposition=false, - PushType push_type=PushType::Explicit); + void Evolve ( + ablastr::fields::MultiFabRegister& fields, + int lev, + std::string const& current_fp_string, + amrex::Real t, + amrex::Real dt, + DtType a_dt_type=DtType::Full, + bool skip_deposition=false, + PushType push_type=PushType::Explicit + ); /** * \brief This pushes the particle positions by one time step for all the species in the @@ -147,7 +149,7 @@ public: * the time of the deposition. */ void - DepositCharge (amrex::Vector >& rho, + DepositCharge (const ablastr::fields::MultiLevelScalarField& rho, amrex::Real relative_time); /** @@ -162,7 +164,7 @@ public: * the time of the deposition. 
*/ void - DepositCurrent (amrex::Vector, 3 > >& J, + DepositCurrent (ablastr::fields::MultiLevelVectorField const& J, amrex::Real dt, amrex::Real relative_time); /// @@ -298,7 +300,7 @@ public: PhysicalParticleContainer& GetPCtmp () { return *pc_tmp; } - void ScrapeParticlesAtEB (const amrex::Vector& distance_to_eb); + void ScrapeParticlesAtEB (ablastr::fields::MultiLevelScalarField const& distance_to_eb); std::string m_B_ext_particle_s = "none"; std::string m_E_ext_particle_s = "none"; diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 23af4177228..619b54ed7ad 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -11,7 +11,7 @@ */ #include "MultiParticleContainer.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "Particles/ElementaryProcess/Ionization.H" #ifdef WARPX_QED # include "Particles/ElementaryProcess/QEDInternals/BreitWheelerEngineWrapper.H" @@ -44,6 +44,7 @@ #include "WarpX.H" +#include #include #include @@ -80,7 +81,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; namespace { @@ -457,30 +458,26 @@ MultiParticleContainer::InitMultiPhysicsModules () } void -MultiParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +MultiParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + std::string const& current_fp_string, Real t, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) { if (! 
skip_deposition) { - jx.setVal(0.0); - jy.setVal(0.0); - jz.setVal(0.0); - if (cjx) { cjx->setVal(0.0); } - if (cjy) { cjy->setVal(0.0); } - if (cjz) { cjz->setVal(0.0); } - if (rho) { rho->setVal(0.0); } - if (crho) { crho->setVal(0.0); } + using ablastr::fields::Direction; + + fields.get(current_fp_string, Direction{0}, lev)->setVal(0.0); + fields.get(current_fp_string, Direction{1}, lev)->setVal(0.0); + fields.get(current_fp_string, Direction{2}, lev)->setVal(0.0); + if (fields.has(FieldType::current_buf, Direction{0}, lev)) { fields.get(FieldType::current_buf, Direction{0}, lev)->setVal(0.0); } + if (fields.has(FieldType::current_buf, Direction{1}, lev)) { fields.get(FieldType::current_buf, Direction{1}, lev)->setVal(0.0); } + if (fields.has(FieldType::current_buf, Direction{2}, lev)) { fields.get(FieldType::current_buf, Direction{2}, lev)->setVal(0.0); } + if (fields.has(FieldType::rho_fp, lev)) { fields.get(FieldType::rho_fp, lev)->setVal(0.0); } + if (fields.has(FieldType::rho_buf, lev)) { fields.get(FieldType::rho_buf, lev)->setVal(0.0); } } for (auto& pc : allcontainers) { - pc->Evolve(lev, Ex, Ey, Ez, Bx, By, Bz, jx, jy, jz, cjx, cjy, cjz, - rho, crho, cEx, cEy, cEz, cBx, cBy, cBz, t, dt, a_dt_type, skip_deposition, push_type); + pc->Evolve(fields, lev, current_fp_string, t, dt, a_dt_type, skip_deposition, push_type); } } @@ -529,11 +526,11 @@ MultiParticleContainer::GetZeroChargeDensity (const int lev) void MultiParticleContainer::DepositCurrent ( - amrex::Vector, 3 > >& J, + ablastr::fields::MultiLevelVectorField const & J, const amrex::Real dt, const amrex::Real relative_time) { // Reset the J arrays - for (auto& J_lev : J) + for (const auto& J_lev : J) { J_lev[0]->setVal(0.0_rt); J_lev[1]->setVal(0.0_rt); @@ -550,18 +547,18 @@ MultiParticleContainer::DepositCurrent ( for (int lev = 0; lev < J.size(); ++lev) { WarpX::GetInstance().ApplyInverseVolumeScalingToCurrentDensity( - J[lev][0].get(), J[lev][1].get(), J[lev][2].get(), lev); + J[lev][0], 
J[lev][1], J[lev][2], lev); } #endif } void MultiParticleContainer::DepositCharge ( - amrex::Vector >& rho, + const ablastr::fields::MultiLevelScalarField& rho, const amrex::Real relative_time) { // Reset the rho array - for (auto& rho_lev : rho) + for (const auto& rho_lev : rho) { rho_lev->setVal(0.0_rt); } @@ -587,7 +584,7 @@ MultiParticleContainer::DepositCharge ( #ifdef WARPX_DIM_RZ for (int lev = 0; lev < rho.size(); ++lev) { - WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev].get(), lev); + WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho[lev], lev); } #endif } @@ -963,7 +960,8 @@ void MultiParticleContainer::CheckIonizationProductSpecies() } } -void MultiParticleContainer::ScrapeParticlesAtEB (const amrex::Vector& distance_to_eb) +void MultiParticleContainer::ScrapeParticlesAtEB ( + ablastr::fields::MultiLevelScalarField const& distance_to_eb) { for (auto& pc : allcontainers) { scrapeParticlesAtEB(*pc, distance_to_eb, ParticleBoundaryProcess::Absorb()); @@ -1358,12 +1356,13 @@ MultiParticleContainer::doQEDSchwinger () pc_product_ele->defineAllParticleTiles(); pc_product_pos->defineAllParticleTiles(); - const MultiFab & Ex = warpx.getField(FieldType::Efield_aux, level_0,0); - const MultiFab & Ey = warpx.getField(FieldType::Efield_aux, level_0,1); - const MultiFab & Ez = warpx.getField(FieldType::Efield_aux, level_0,2); - const MultiFab & Bx = warpx.getField(FieldType::Bfield_aux, level_0,0); - const MultiFab & By = warpx.getField(FieldType::Bfield_aux, level_0,1); - const MultiFab & Bz = warpx.getField(FieldType::Bfield_aux, level_0,2); + using ablastr::fields::Direction; + const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, level_0); + const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, level_0); + const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, level_0); + const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, 
level_0); + const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, level_0); + const MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, level_0); #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) diff --git a/Source/Particles/ParticleBoundaryBuffer.H b/Source/Particles/ParticleBoundaryBuffer.H index d33834309ab..24b388be00e 100644 --- a/Source/Particles/ParticleBoundaryBuffer.H +++ b/Source/Particles/ParticleBoundaryBuffer.H @@ -12,6 +12,8 @@ #include "Particles/PinnedMemoryParticleContainer.H" #include "Utils/export.H" +#include + #include @@ -41,7 +43,7 @@ public: void gatherParticlesFromDomainBoundaries (MultiParticleContainer& mypc); void gatherParticlesFromEmbeddedBoundaries ( - MultiParticleContainer& mypc, const amrex::Vector& distance_to_eb + MultiParticleContainer& mypc, ablastr::fields::MultiLevelScalarField const& distance_to_eb ); void redistribute (); diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index bc113e8e3a3..0391dcc6178 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -462,7 +462,7 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC } void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries ( - MultiParticleContainer& mypc, const amrex::Vector& distance_to_eb) + MultiParticleContainer& mypc, ablastr::fields::MultiLevelScalarField const& distance_to_eb) { if (EB::enabled()) { WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::EB"); diff --git a/Source/Particles/PhotonParticleContainer.H b/Source/Particles/PhotonParticleContainer.H index 34afac53482..485f56dba43 100644 --- a/Source/Particles/PhotonParticleContainer.H +++ b/Source/Particles/PhotonParticleContainer.H @@ -46,34 +46,16 @@ public: void InitData() override; - void Evolve (int lev, - const amrex::MultiFab& Ex, - const amrex::MultiFab& Ey, - const 
amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, - const amrex::MultiFab& By, - const amrex::MultiFab& Bz, - amrex::MultiFab& jx, - amrex::MultiFab& jy, - amrex::MultiFab& jz, - amrex::MultiFab* cjx, - amrex::MultiFab* cjy, - amrex::MultiFab* cjz, - amrex::MultiFab* rho, - amrex::MultiFab* crho, - const amrex::MultiFab* cEx, - const amrex::MultiFab* cEy, - const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, - const amrex::MultiFab* cBy, - const amrex::MultiFab* cBz, - amrex::Real t, - amrex::Real dt, - DtType a_dt_type=DtType::Full, - bool skip_deposition=false, - PushType push_type=PushType::Explicit) override; + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real t, + amrex::Real dt, + DtType a_dt_type=DtType::Full, + bool skip_deposition=false, + PushType push_type=PushType::Explicit) override; - void PushPX(WarpXParIter& pti, + void PushPX (WarpXParIter& pti, amrex::FArrayBox const * exfab, amrex::FArrayBox const * eyfab, amrex::FArrayBox const * ezfab, diff --git a/Source/Particles/PhotonParticleContainer.cpp b/Source/Particles/PhotonParticleContainer.cpp index 1f15d5210f5..47c426cd6ff 100644 --- a/Source/Particles/PhotonParticleContainer.cpp +++ b/Source/Particles/PhotonParticleContainer.cpp @@ -229,27 +229,17 @@ PhotonParticleContainer::PushPX (WarpXParIter& pti, } void -PhotonParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +PhotonParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, Real t, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) 
{ // This does gather, push and deposit. // Push and deposit have been re-written for photons - PhysicalParticleContainer::Evolve (lev, - Ex, Ey, Ez, - Bx, By, Bz, - jx, jy, jz, - cjx, cjy, cjz, - rho, crho, - cEx, cEy, cEz, - cBx, cBy, cBz, + PhysicalParticleContainer::Evolve (fields, + lev, + current_fp_string, t, dt, a_dt_type, skip_deposition, push_type); } diff --git a/Source/Particles/PhysicalParticleContainer.H b/Source/Particles/PhysicalParticleContainer.H index 8102fc96a91..18880239183 100644 --- a/Source/Particles/PhysicalParticleContainer.H +++ b/Source/Particles/PhysicalParticleContainer.H @@ -81,27 +81,9 @@ public: * \brief Evolve is the central function PhysicalParticleContainer that * advances plasma particles for a time dt (typically one timestep). * + * \param fields the WarpX field register * \param lev level on which particles are living - * \param Ex MultiFab from which field Ex is gathered - * \param Ey MultiFab from which field Ey is gathered - * \param Ez MultiFab from which field Ez is gathered - * \param Bx MultiFab from which field Bx is gathered - * \param By MultiFab from which field By is gathered - * \param Bz MultiFab from which field Bz is gathered - * \param jx MultiFab to which the particles' current jx is deposited - * \param jy MultiFab to which the particles' current jy is deposited - * \param jz MultiFab to which the particles' current jz is deposited - * \param cjx Same as jx (coarser, from lev-1), when using deposition buffers - * \param cjy Same as jy (coarser, from lev-1), when using deposition buffers - * \param cjz Same as jz (coarser, from lev-1), when using deposition buffers - * \param rho MultiFab to which the particles' charge is deposited - * \param crho Same as rho (coarser, from lev-1), when using deposition buffers - * \param cEx Same as Ex (coarser, from lev-1), when using gather buffers - * \param cEy Same as Ey (coarser, from lev-1), when using gather buffers - * \param cEz Same as Ez (coarser, from lev-1), 
when using gather buffers - * \param cBx Same as Bx (coarser, from lev-1), when using gather buffers - * \param cBy Same as By (coarser, from lev-1), when using gather buffers - * \param cBz Same as Bz (coarser, from lev-1), when using gather buffers + * \param current_fp_string current coarse or fine patch identifier in fields * \param t current physical time * \param dt time step by which particles are advanced * \param a_dt_type type of time step (used for sub-cycling) @@ -112,32 +94,14 @@ public: * field gather, particle push and current deposition for all particles * in the box. */ - void Evolve (int lev, - const amrex::MultiFab& Ex, - const amrex::MultiFab& Ey, - const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, - const amrex::MultiFab& By, - const amrex::MultiFab& Bz, - amrex::MultiFab& jx, - amrex::MultiFab& jy, - amrex::MultiFab& jz, - amrex::MultiFab* cjx, - amrex::MultiFab* cjy, - amrex::MultiFab* cjz, - amrex::MultiFab* rho, - amrex::MultiFab* crho, - const amrex::MultiFab* cEx, - const amrex::MultiFab* cEy, - const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, - const amrex::MultiFab* cBy, - const amrex::MultiFab* cBz, - amrex::Real t, - amrex::Real dt, - DtType a_dt_type=DtType::Full, - bool skip_deposition=false, - PushType push_type=PushType::Explicit) override; + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real t, + amrex::Real dt, + DtType a_dt_type=DtType::Full, + bool skip_deposition=false, + PushType push_type=PushType::Explicit) override; virtual void PushPX (WarpXParIter& pti, amrex::FArrayBox const * exfab, diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 23af57b9206..26f9fee38d3 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -10,6 +10,7 @@ */ #include "PhysicalParticleContainer.H" +#include "Fields.H" #include 
"Filter/NCIGodfreyFilter.H" #include "Initialization/InjectorDensity.H" #include "Initialization/InjectorMomentum.H" @@ -1342,8 +1343,12 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int #ifdef AMREX_USE_EB if (EB::enabled()) { - auto &distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB(*this, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + using warpx::fields::FieldType; + auto & warpx = WarpX::GetInstance(); + scrapeParticlesAtEB( + *this, + warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), + ParticleBoundaryProcess::Absorb()); } #endif @@ -1709,8 +1714,12 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, #ifdef AMREX_USE_EB if (EB::enabled()) { - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB(tmp_pc, amrex::GetVecOfConstPtrs(distance_to_eb), ParticleBoundaryProcess::Absorb()); + using warpx::fields::FieldType; + auto & warpx = WarpX::GetInstance(); + scrapeParticlesAtEB( + tmp_pc, + warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), + ParticleBoundaryProcess::Absorb()); } #endif @@ -1724,29 +1733,36 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } void -PhysicalParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, Real /*t*/, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) { + using 
ablastr::fields::Direction; + using warpx::fields::FieldType; WARPX_PROFILE("PhysicalParticleContainer::Evolve()"); WARPX_PROFILE_VAR_NS("PhysicalParticleContainer::Evolve::GatherAndPush", blp_fg); - BL_ASSERT(OnSameGrids(lev,jx)); + BL_ASSERT(OnSameGrids(lev, *fields.get(FieldType::current_fp, Direction{0}, lev))); amrex::LayoutData* cost = WarpX::getCosts(lev); const iMultiFab* current_masks = WarpX::CurrentBufferMasks(lev); const iMultiFab* gather_masks = WarpX::GatherBufferMasks(lev); - const bool has_buffer = cEx || cjx; + const bool has_rho = fields.has(FieldType::rho_fp, lev); + const bool has_J_buf = fields.has_vector(FieldType::current_buf, lev); + const bool has_E_cax = fields.has_vector(FieldType::Efield_cax, lev); + const bool has_buffer = has_E_cax || has_J_buf; + + amrex::MultiFab & Ex = *fields.get(FieldType::Efield_aux, Direction{0}, lev); + amrex::MultiFab & Ey = *fields.get(FieldType::Efield_aux, Direction{1}, lev); + amrex::MultiFab & Ez = *fields.get(FieldType::Efield_aux, Direction{2}, lev); + amrex::MultiFab & Bx = *fields.get(FieldType::Bfield_aux, Direction{0}, lev); + amrex::MultiFab & By = *fields.get(FieldType::Bfield_aux, Direction{1}, lev); + amrex::MultiFab & Bz = *fields.get(FieldType::Bfield_aux, Direction{2}, lev); if (m_do_back_transformed_particles) { @@ -1834,17 +1850,19 @@ PhysicalParticleContainer::Evolve (int lev, pti, lev, current_masks, gather_masks ); } - const long np_current = (cjx) ? nfine_current : np; + const long np_current = has_J_buf ? nfine_current : np; - if (rho && ! skip_deposition && ! do_not_deposit) { + if (has_rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge before particle push, in component 0 of MultiFab rho. const int* const AMREX_RESTRICT ion_lev = (do_field_ionization)? 
pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; + amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); DepositCharge(pti, wp, ion_lev, rho, 0, 0, np_current, thread_num, lev, lev); if (has_buffer){ + amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev); DepositCharge(pti, wp, ion_lev, crho, 0, np_current, np-np_current, thread_num, lev, lev-1); } @@ -1852,7 +1870,7 @@ PhysicalParticleContainer::Evolve (int lev, if (! do_not_push) { - const long np_gather = (cEx) ? nfine_gather : np; + const long np_gather = has_E_cax ? nfine_gather : np; int e_is_nodal = Ex.is_nodal() and Ey.is_nodal() and Ez.is_nodal(); @@ -1879,13 +1897,20 @@ PhysicalParticleContainer::Evolve (int lev, const IntVect& ref_ratio = WarpX::RefRatio(lev-1); const Box& cbox = amrex::coarsen(box,ref_ratio); + amrex::MultiFab & cEx = *fields.get(FieldType::Efield_cax, Direction{0}, lev); + amrex::MultiFab & cEy = *fields.get(FieldType::Efield_cax, Direction{1}, lev); + amrex::MultiFab & cEz = *fields.get(FieldType::Efield_cax, Direction{2}, lev); + amrex::MultiFab & cBx = *fields.get(FieldType::Bfield_cax, Direction{0}, lev); + amrex::MultiFab & cBy = *fields.get(FieldType::Bfield_cax, Direction{1}, lev); + amrex::MultiFab & cBz = *fields.get(FieldType::Bfield_cax, Direction{2}, lev); + // Data on the grid - FArrayBox const* cexfab = &(*cEx)[pti]; - FArrayBox const* ceyfab = &(*cEy)[pti]; - FArrayBox const* cezfab = &(*cEz)[pti]; - FArrayBox const* cbxfab = &(*cBx)[pti]; - FArrayBox const* cbyfab = &(*cBy)[pti]; - FArrayBox const* cbzfab = &(*cBz)[pti]; + FArrayBox const* cexfab = &cEx[pti]; + FArrayBox const* ceyfab = &cEy[pti]; + FArrayBox const* cezfab = &cEz[pti]; + FArrayBox const* cbxfab = &cBx[pti]; + FArrayBox const* cbyfab = &cBy[pti]; + FArrayBox const* cbzfab = &cBz[pti]; if (WarpX::use_fdtd_nci_corr) { @@ -1896,23 +1921,23 @@ PhysicalParticleContainer::Evolve (int lev, applyNCIFilter(lev-1, cbox, exeli, eyeli, ezeli, bxeli, byeli, bzeli, filtered_Ex, 
filtered_Ey, filtered_Ez, filtered_Bx, filtered_By, filtered_Bz, - (*cEx)[pti], (*cEy)[pti], (*cEz)[pti], - (*cBx)[pti], (*cBy)[pti], (*cBz)[pti], + cEx[pti], cEy[pti], cEz[pti], + cBx[pti], cBy[pti], cBz[pti], cexfab, ceyfab, cezfab, cbxfab, cbyfab, cbzfab); } // Field gather and push for particles in gather buffers - e_is_nodal = cEx->is_nodal() and cEy->is_nodal() and cEz->is_nodal(); + e_is_nodal = cEx.is_nodal() and cEy.is_nodal() and cEz.is_nodal(); if (push_type == PushType::Explicit) { PushPX(pti, cexfab, ceyfab, cezfab, cbxfab, cbyfab, cbzfab, - cEx->nGrowVect(), e_is_nodal, + cEx.nGrowVect(), e_is_nodal, nfine_gather, np-nfine_gather, lev, lev-1, dt, ScaleFields(false), a_dt_type); } else if (push_type == PushType::Implicit) { ImplicitPushXP(pti, cexfab, ceyfab, cezfab, cbxfab, cbyfab, cbzfab, - cEx->nGrowVect(), e_is_nodal, + cEx.nGrowVect(), e_is_nodal, nfine_gather, np-nfine_gather, lev, lev-1, dt, ScaleFields(false), a_dt_type); } @@ -1930,13 +1955,19 @@ PhysicalParticleContainer::Evolve (int lev, pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; // Deposit inside domains - DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, &jx, &jy, &jz, + amrex::MultiFab * jx = fields.get(current_fp_string, Direction{0}, lev); + amrex::MultiFab * jy = fields.get(current_fp_string, Direction{1}, lev); + amrex::MultiFab * jz = fields.get(current_fp_string, Direction{2}, lev); + DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, jx, jy, jz, 0, np_current, thread_num, lev, lev, dt, relative_time, push_type); if (has_buffer) { // Deposit in buffers + amrex::MultiFab * cjx = fields.get(FieldType::current_buf, Direction{0}, lev); + amrex::MultiFab * cjy = fields.get(FieldType::current_buf, Direction{1}, lev); + amrex::MultiFab * cjz = fields.get(FieldType::current_buf, Direction{2}, lev); DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, cjx, cjy, cjz, np_current, np-np_current, thread_num, lev, lev-1, dt, relative_time, push_type); @@ -1944,10 +1975,11 @@ 
PhysicalParticleContainer::Evolve (int lev, } // end of "if electrostatic_solver_id == ElectrostaticSolverAlgo::None" } // end of "if do_not_push" - if (rho && ! skip_deposition && ! do_not_deposit) { + if (has_rho && ! skip_deposition && ! do_not_deposit) { // Deposit charge after particle push, in component 1 of MultiFab rho. // (Skipped for electrostatic solver, as this may lead to out-of-bounds) if (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::None) { + amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(rho->nComp() >= 2, "Cannot deposit charge in rho component 1: only component 0 is allocated!"); @@ -1957,6 +1989,7 @@ PhysicalParticleContainer::Evolve (int lev, DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current, thread_num, lev, lev); if (has_buffer){ + amrex::MultiFab* crho = fields.get(FieldType::rho_buf, lev); DepositCharge(pti, wp, ion_lev, crho, 1, np_current, np-np_current, thread_num, lev, lev-1); } diff --git a/Source/Particles/RigidInjectedParticleContainer.H b/Source/Particles/RigidInjectedParticleContainer.H index bc20420ea6e..d3565dd2df6 100644 --- a/Source/Particles/RigidInjectedParticleContainer.H +++ b/Source/Particles/RigidInjectedParticleContainer.H @@ -61,32 +61,14 @@ public: virtual void RemapParticles(); - void Evolve (int lev, - const amrex::MultiFab& Ex, - const amrex::MultiFab& Ey, - const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, - const amrex::MultiFab& By, - const amrex::MultiFab& Bz, - amrex::MultiFab& jx, - amrex::MultiFab& jy, - amrex::MultiFab& jz, - amrex::MultiFab* cjx, - amrex::MultiFab* cjy, - amrex::MultiFab* cjz, - amrex::MultiFab* rho, - amrex::MultiFab* crho, - const amrex::MultiFab* cEx, - const amrex::MultiFab* cEy, - const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, - const amrex::MultiFab* cBy, - const amrex::MultiFab* cBz, - amrex::Real t, - amrex::Real dt, - DtType a_dt_type=DtType::Full, - bool skip_deposition=false, - PushType 
push_type=PushType::Explicit) override; + void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, + amrex::Real t, + amrex::Real dt, + DtType a_dt_type=DtType::Full, + bool skip_deposition=false, + PushType push_type=PushType::Explicit) override; void PushPX (WarpXParIter& pti, amrex::FArrayBox const * exfab, diff --git a/Source/Particles/RigidInjectedParticleContainer.cpp b/Source/Particles/RigidInjectedParticleContainer.cpp index c3ec4c41131..d1e1f48ab38 100644 --- a/Source/Particles/RigidInjectedParticleContainer.cpp +++ b/Source/Particles/RigidInjectedParticleContainer.cpp @@ -291,14 +291,9 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, } void -RigidInjectedParticleContainer::Evolve (int lev, - const MultiFab& Ex, const MultiFab& Ey, const MultiFab& Ez, - const MultiFab& Bx, const MultiFab& By, const MultiFab& Bz, - MultiFab& jx, MultiFab& jy, MultiFab& jz, - MultiFab* cjx, MultiFab* cjy, MultiFab* cjz, - MultiFab* rho, MultiFab* crho, - const MultiFab* cEx, const MultiFab* cEy, const MultiFab* cEz, - const MultiFab* cBx, const MultiFab* cBy, const MultiFab* cBz, +RigidInjectedParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, Real t, Real dt, DtType a_dt_type, bool skip_deposition, PushType push_type) { @@ -317,14 +312,9 @@ RigidInjectedParticleContainer::Evolve (int lev, done_injecting_lev = ((zinject_plane_levels[lev] < plo[WARPX_ZINDEX] && WarpX::moving_window_v + WarpX::beta_boost*PhysConst::c >= 0.) 
|| (zinject_plane_levels[lev] > phi[WARPX_ZINDEX] && WarpX::moving_window_v + WarpX::beta_boost*PhysConst::c <= 0.)); - PhysicalParticleContainer::Evolve (lev, - Ex, Ey, Ez, - Bx, By, Bz, - jx, jy, jz, - cjx, cjy, cjz, - rho, crho, - cEx, cEy, cEz, - cBx, cBy, cBz, + PhysicalParticleContainer::Evolve (fields, + lev, + current_fp_string, t, dt, a_dt_type, skip_deposition, push_type); } diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index 7e882c151e8..9c316b110ee 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -25,6 +25,8 @@ #include "MultiParticleContainer_fwd.H" #include "NamedComponentParticleContainer.H" +#include + #include #include #include @@ -145,14 +147,9 @@ public: * particles for a time dt (typically one timestep). It is a pure virtual * function for flexibility. */ - virtual void Evolve (int lev, - const amrex::MultiFab& Ex, const amrex::MultiFab& Ey, const amrex::MultiFab& Ez, - const amrex::MultiFab& Bx, const amrex::MultiFab& By, const amrex::MultiFab& Bz, - amrex::MultiFab& jx, amrex::MultiFab& jy, amrex::MultiFab& jz, - amrex::MultiFab* cjx, amrex::MultiFab* cjy, amrex::MultiFab* cjz, - amrex::MultiFab* rho, amrex::MultiFab* crho, - const amrex::MultiFab* cEx, const amrex::MultiFab* cEy, const amrex::MultiFab* cEz, - const amrex::MultiFab* cBx, const amrex::MultiFab* cBy, const amrex::MultiFab* cBz, + virtual void Evolve (ablastr::fields::MultiFabRegister& fields, + int lev, + const std::string& current_fp_string, amrex::Real t, amrex::Real dt, DtType a_dt_type=DtType::Full, bool skip_deposition=false, PushType push_type=PushType::Explicit) = 0; @@ -199,7 +196,7 @@ public: * the particle position will be temporarily modified to match * the time of the deposition. 
*/ - void DepositCurrent (amrex::Vector, 3 > >& J, + void DepositCurrent (ablastr::fields::MultiLevelVectorField const & J, amrex::Real dt, amrex::Real relative_time); /** @@ -212,12 +209,12 @@ public: * \param[in] interpolate_across_levels whether to average down from the fine patch to the coarse patch * \param[in] icomp component of the MultiFab where rho is deposited (old, new) */ - void DepositCharge (amrex::Vector >& rho, + void DepositCharge (const ablastr::fields::MultiLevelScalarField& rho, bool local = false, bool reset = false, bool apply_boundary_and_scale_volume = false, bool interpolate_across_levels = true, int icomp = 0); - void DepositCharge (std::unique_ptr& rho, int lev, + void DepositCharge (amrex::MultiFab* rho, int lev, bool local = false, bool reset = false, bool apply_boundary_and_scale_volume = false, int icomp = 0); diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 591190a7a19..36793c8619b 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -14,6 +14,7 @@ #include "Deposition/CurrentDeposition.H" #include "Deposition/SharedDepositionUtils.H" #include "EmbeddedBoundary/Enabled.H" +#include "Fields.H" #include "Pusher/GetAndSetPosition.H" #include "Pusher/UpdatePosition.H" #include "ParticleBoundaries_K.H" @@ -173,6 +174,7 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, long n, int uniqueparticles, amrex::Long id) { using namespace amrex::literals; + using warpx::fields::FieldType; WARPX_ALWAYS_ASSERT_WITH_MESSAGE((PIdx::nattribs + nattr_real - 1) <= NumRealComps(), "Too many real attributes specified"); @@ -302,8 +304,11 @@ WarpXParticleContainer::AddNParticles (int /*lev*/, long n, // Remove particles that are inside the embedded boundaries #ifdef AMREX_USE_EB if (EB::enabled()) { - auto & distance_to_eb = WarpX::GetInstance().GetDistanceToEB(); - scrapeParticlesAtEB( *this, amrex::GetVecOfConstPtrs(distance_to_eb), 
ParticleBoundaryProcess::Absorb()); + auto & warpx = WarpX::GetInstance(); + scrapeParticlesAtEB( + *this, + warpx.m_fields.get_mr_levels(FieldType::distance_to_eb, warpx.finestLevel()), + ParticleBoundaryProcess::Absorb()); deleteInvalidParticles(); } #endif @@ -823,7 +828,7 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, void WarpXParticleContainer::DepositCurrent ( - amrex::Vector, 3 > >& J, + ablastr::fields::MultiLevelVectorField const & J, const amrex::Real dt, const amrex::Real relative_time) { // Loop over the refinement levels @@ -853,7 +858,7 @@ WarpXParticleContainer::DepositCurrent ( } DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, - J[lev][0].get(), J[lev][1].get(), J[lev][2].get(), + J[lev][0], J[lev][1], J[lev][2], 0, np, thread_num, lev, lev, dt, relative_time, PushType::Explicit); } #ifdef AMREX_USE_OMP @@ -1170,7 +1175,7 @@ WarpXParticleContainer::DepositCharge (WarpXParIter& pti, RealVector const& wp, } void -WarpXParticleContainer::DepositCharge (amrex::Vector >& rho, +WarpXParticleContainer::DepositCharge (const ablastr::fields::MultiLevelScalarField& rho, const bool local, const bool reset, const bool apply_boundary_and_scale_volume, const bool interpolate_across_levels, @@ -1211,7 +1216,7 @@ WarpXParticleContainer::DepositCharge (amrex::Vector& rho, +WarpXParticleContainer::DepositCharge (amrex::MultiFab* rho, const int lev, const bool local, const bool reset, const bool apply_boundary_and_scale_volume, const int icomp) @@ -1245,7 +1250,7 @@ WarpXParticleContainer::DepositCharge (std::unique_ptr& rho, ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); } - DepositCharge(pti, wp, ion_lev, rho.get(), icomp, 0, np, thread_num, lev, lev); + DepositCharge(pti, wp, ion_lev, rho, icomp, 0, np, thread_num, lev, lev); } #ifdef AMREX_USE_OMP } @@ -1254,7 +1259,7 @@ WarpXParticleContainer::DepositCharge (std::unique_ptr& rho, #ifdef WARPX_DIM_RZ if (apply_boundary_and_scale_volume) { - 
WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho.get(), lev); + WarpX::GetInstance().ApplyInverseVolumeScalingToChargeDensity(rho, lev); } #endif @@ -1273,7 +1278,7 @@ WarpXParticleContainer::DepositCharge (std::unique_ptr& rho, if (apply_boundary_and_scale_volume) { // Reflect density over PEC boundaries, if needed. - WarpX::GetInstance().ApplyRhofieldBoundary(lev, rho.get(), PatchType::fine); + WarpX::GetInstance().ApplyRhofieldBoundary(lev, rho, PatchType::fine); } #endif } @@ -1300,7 +1305,7 @@ WarpXParticleContainer::GetChargeDensity (int lev, bool local) const int ng_rho = warpx.get_ng_depos_rho().max(); auto rho = std::make_unique(nba, dm, WarpX::ncomps,ng_rho); - DepositCharge(rho, lev, local, true, true, 0); + DepositCharge(rho.get(), lev, local, true, true, 0); return rho; } diff --git a/Source/Python/CMakeLists.txt b/Source/Python/CMakeLists.txt index 17a75301306..1b4ab90aade 100644 --- a/Source/Python/CMakeLists.txt +++ b/Source/Python/CMakeLists.txt @@ -13,6 +13,7 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(pyWarpX_${SD} PRIVATE # pybind11 + MultiFabRegister.cpp WarpX.cpp ) endif() diff --git a/Source/Python/MultiFabRegister.cpp b/Source/Python/MultiFabRegister.cpp new file mode 100644 index 00000000000..fcf38a1a6db --- /dev/null +++ b/Source/Python/MultiFabRegister.cpp @@ -0,0 +1,164 @@ +/* Copyright 2024 The WarpX Community + * + * Authors: Axel Huebl + * License: BSD-3-Clause-LBNL + */ +#include "Python/pyWarpX.H" + +#include + +#include +#include +#include + +void init_MultiFabRegister (py::module & m) +{ + using namespace ablastr::fields; + + py::class_(m, "Direction") + .def(py::init()); + + py::class_(m, "MultiFabRegister") + + .def("alloc_init", + py::overload_cast< + std::string, + int, + amrex::BoxArray const &, + amrex::DistributionMapping const &, + int, + amrex::IntVect const &, + std::optional, + bool, + bool + >(&MultiFabRegister::alloc_init), + py::arg("name"), + py::arg("level"), + py::arg("ba"), + py::arg("dm"), 
+ py::arg("ncomp"), + py::arg("ngrow"), + py::arg("initial_value"), + py::arg("redistribute"), + py::arg("redistribute_on_remake") + ) + + .def("alloc_init", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int, + amrex::BoxArray const &, + amrex::DistributionMapping const &, + int, + amrex::IntVect const &, + std::optional, + bool, + bool + >(&MultiFabRegister::alloc_init), + py::arg("name"), + py::arg("dir"), + py::arg("level"), + py::arg("ba"), + py::arg("dm"), + py::arg("ncomp"), + py::arg("ngrow"), + py::arg("initial_value"), + py::arg("redistribute"), + py::arg("redistribute_on_remake") + ) + + .def("alias_init", + py::overload_cast< + std::string, + std::string, + int, + std::optional + >(&MultiFabRegister::alias_init), + py::arg("new_name"), + py::arg("alias_name"), + py::arg("level"), + py::arg("initial_value") + ) + + .def("alias_init", + py::overload_cast< + std::string, + std::string, + ablastr::fields::Direction, + int, + std::optional + >(&MultiFabRegister::alias_init), + py::arg("new_name"), + py::arg("alias_name"), + py::arg("dir"), + py::arg("level"), + py::arg("initial_value") + ) + + .def("has", + py::overload_cast< + std::string, + int + >(&MultiFabRegister::has, py::const_), + py::arg("name"), + py::arg("level") + ) + + .def("has", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int + >(&MultiFabRegister::has, py::const_), + py::arg("name"), + py::arg("dir"), + py::arg("level") + ) + + .def("get", + py::overload_cast< + std::string, + int + >(&MultiFabRegister::get), + py::arg("name"), + py::arg("level") + ) + + .def("get", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int + >(&MultiFabRegister::get), + py::arg("name"), + py::arg("dir"), + py::arg("level") + ) + + //.def("list", + // &MultiFabRegister::list + // // "..." 
+ //) + + .def("erase", + py::overload_cast< + std::string, + int + >(&MultiFabRegister::erase), + py::arg("name"), + py::arg("level") + ) + + .def("erase", + py::overload_cast< + std::string, + ablastr::fields::Direction, + int + >(&MultiFabRegister::erase), + py::arg("name"), + py::arg("dir"), + py::arg("level") + ) + ; +} diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 2689b3115fa..0aab95f78f8 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -55,6 +55,8 @@ namespace warpx { void init_WarpX (py::module& m) { + using ablastr::fields::Direction; + // Expose the WarpX instance m.def("get_instance", [] () { return &WarpX::GetInstance(); }, @@ -110,17 +112,65 @@ void init_WarpX (py::module& m) //py::overload_cast< int >(&WarpX::boxArray, py::const_), py::arg("lev") ) + .def("field", + [](WarpX const & wx) { + return wx.multifab_map; + }, + py::return_value_policy::reference_internal, + R"doc(Registry to all WarpX MultiFab (fields).)doc" + ) + .def("multifab", + [](WarpX & wx, std::string internal_name) { + if (wx.m_fields.internal_has(internal_name)) { + return wx.m_fields.internal_get(internal_name); + } else { + throw std::runtime_error("MultiFab '" + internal_name + "' is unknown or is not allocated!"); + } + }, + py::arg("internal_name"), + py::return_value_policy::reference_internal, + R"doc(Return a MultiFab by its internal name (deprecated). + +The multifab('internal_name') signature is deprecated. +Please use: +- multifab('prefix', level=...) for scalar fields +- multifab('prefix', dir=..., level=...) 
for vector field components +where 'prefix' is the part of 'internal_name';' before the [])doc" + ) + .def("multifab", + [](WarpX & wx, std::string scalar_name, int level) { + if (wx.m_fields.has(scalar_name, level)) { + return wx.m_fields.get(scalar_name, level); + } else { + throw std::runtime_error("The scalar field '" + scalar_name + "' is unknown or is not allocated!"); + } + }, + py::arg("scalar_name"), + py::arg("level"), + py::return_value_policy::reference_internal, + R"doc(Return scalar fields (MultiFabs) by name and level, e.g., ``\"rho_fp\"``, ``\"phi_fp"``, ... + +The physical fields in WarpX have the following naming: + +- ``_fp`` are the "fine" patches, the regular resolution of a current mesh-refinement level +- ``_aux`` are temporary (auxiliar) patches at the same resolution as ``_fp``. + They usually include contributions from other levels and can be interpolated for gather routines of particles. +- ``_cp`` are "coarse" patches, at the same resolution (but not necessary values) as the ``_fp`` of ``level - 1`` + (only for level 1 and higher).)doc" + ) .def("multifab", - [](WarpX const & wx, std::string const multifab_name) { - if (wx.multifab_map.count(multifab_name) > 0) { - return wx.multifab_map.at(multifab_name); + [](WarpX & wx, std::string vector_name, Direction dir, int level) { + if (wx.m_fields.has(vector_name, dir, level)) { + return wx.m_fields.get(vector_name, dir, level); } else { - throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); + throw std::runtime_error("The vector field '" + vector_name + "' is unknown or is not allocated!"); } }, - py::arg("multifab_name"), + py::arg("vector_name"), + py::arg("dir"), + py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return MultiFabs by name, e.g., ``\"Efield_aux[x][level=0]\"``, ``\"Efield_cp[x][level=0]\"``, ... 
+ R"doc(Return the component of a vector field (MultiFab) by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... The physical fields in WarpX have the following naming: diff --git a/Source/Python/pyWarpX.cpp b/Source/Python/pyWarpX.cpp index 26f4c77502d..e128599abd0 100644 --- a/Source/Python/pyWarpX.cpp +++ b/Source/Python/pyWarpX.cpp @@ -32,6 +32,7 @@ // forward declarations of exposed classes void init_BoundaryBufferParIter (py::module&); void init_MultiParticleContainer (py::module&); +void init_MultiFabRegister (py::module&); void init_ParticleBoundaryBuffer (py::module&); void init_PinnedMemoryParticleContainer (py::module&); void init_WarpXParIter (py::module&); @@ -59,6 +60,7 @@ PYBIND11_MODULE(PYWARPX_MODULE_NAME, m) { )pbdoc"; // note: order from parent to child classes + init_MultiFabRegister(m); init_PinnedMemoryParticleContainer(m); init_WarpXParticleContainer(m); init_WarpXParIter(m); diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index 73696838cd4..d5cebd69254 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -14,6 +14,7 @@ #endif #include "Initialization/ExternalField.H" #include "Particles/MultiParticleContainer.H" +#include "Fields.H" #include "Fluids/MultiFluidContainer.H" #include "Fluids/WarpXFluidContainer.H" #include "Utils/TextMsg.H" @@ -139,6 +140,9 @@ WarpX::MoveWindow (const int step, bool move_j) { WARPX_PROFILE("WarpX::MoveWindow"); + using ablastr::fields::Direction; + using warpx::fields::FieldType; + if (step == start_moving_window_step) { amrex::Print() << Utils::TextMsg::Info("Starting moving window"); } @@ -234,69 +238,73 @@ WarpX::MoveWindow (const int step, bool move_j) if (dim == 1) { Efield_parser = m_p_ext_field_params->Eyfield_parser->compile<3>(); } if (dim == 2) { Efield_parser = m_p_ext_field_params->Ezfield_parser->compile<3>(); } } - shiftMF(*Bfield_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + 
shiftMF(*m_fields.get(FieldType::Bfield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Efield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); if (fft_do_time_averaging) { + ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_p_ext_field_params-> E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*current_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::current_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); } if (pml[lev] && pml[lev]->ok()) { - const std::array& pml_B = pml[lev]->GetB_fp(); - const std::array& pml_E = pml[lev]->GetE_fp(); - shiftMF(*pml_B[dim], geom[lev], num_shift, dir, lev, dont_update_cost); - shiftMF(*pml_E[dim], geom[lev], num_shift, dir, lev, dont_update_cost); + amrex::MultiFab* pml_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); + amrex::MultiFab* pml_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); + shiftMF(*pml_B, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_E, geom[lev], num_shift, dir, lev, dont_update_cost); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev] && dim < 2) { - const std::array& pml_rz_B = pml_rz[lev]->GetB_fp(); - 
const std::array& pml_rz_E = pml_rz[lev]->GetE_fp(); - shiftMF(*pml_rz_B[dim], geom[lev], num_shift, dir, lev, dont_update_cost); - shiftMF(*pml_rz_E[dim], geom[lev], num_shift, dir, lev, dont_update_cost); + amrex::MultiFab* pml_rz_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); + amrex::MultiFab* pml_rz_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); + shiftMF(*pml_rz_B, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_rz_E, geom[lev], num_shift, dir, lev, dont_update_cost); } #endif if (lev > 0) { // coarse grid - shiftMF(*Bfield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Bfield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Efield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); - shiftMF(*Bfield_aux[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); - shiftMF(*Efield_aux[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); if (fft_do_time_averaging) { + ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); 
shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*current_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::current_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } if (do_pml && pml[lev]->ok()) { - const std::array& pml_B = pml[lev]->GetB_cp(); - const std::array& pml_E = pml[lev]->GetE_cp(); - shiftMF(*pml_B[dim], geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); - shiftMF(*pml_E[dim], geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + amrex::MultiFab* pml_B_cp = m_fields.get(FieldType::pml_B_cp, Direction{dim}, lev); + amrex::MultiFab* pml_E_cp = m_fields.get(FieldType::pml_E_cp, Direction{dim}, lev); + shiftMF(*pml_B_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + shiftMF(*pml_E_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } } // Shift scalar field F with div(E) cleaning in valid domain // TODO: shift F from pml_rz for RZ geometry with PSATD, once implemented - if (F_fp[lev]) + if (m_fields.has(FieldType::F_fp, lev)) { // Fine grid - shiftMF(*F_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::F_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0) { // Coarse grid - shiftMF(*F_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::F_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } @@ -306,7 +314,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Fine grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_F = pml[lev]->GetF_fp(); + amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_fp, lev); shiftMF(*pml_F, geom[lev], num_shift, dir, lev, dont_update_cost); } if (lev > 0) @@ -314,7 +322,7 @@ WarpX::MoveWindow (const int step, bool move_j) // 
Coarse grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_F = pml[lev]->GetF_cp(); + amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_cp, lev); shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } @@ -322,14 +330,14 @@ WarpX::MoveWindow (const int step, bool move_j) // Shift scalar field G with div(B) cleaning in valid domain // TODO: shift G from pml_rz for RZ geometry with PSATD, once implemented - if (G_fp[lev]) + if (m_fields.has(FieldType::G_fp, lev)) { // Fine grid - shiftMF(*G_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::G_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0) { // Coarse grid - shiftMF(*G_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::G_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } @@ -339,7 +347,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Fine grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_G = pml[lev]->GetG_fp(); + amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_fp, lev); shiftMF(*pml_G, geom[lev], num_shift, dir, lev, dont_update_cost); } if (lev > 0) @@ -347,7 +355,7 @@ WarpX::MoveWindow (const int step, bool move_j) // Coarse grid if (do_pml && pml[lev]->ok()) { - amrex::MultiFab* pml_G = pml[lev]->GetG_cp(); + amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_cp, lev); shiftMF(*pml_G, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); } } @@ -355,12 +363,12 @@ WarpX::MoveWindow (const int step, bool move_j) // Shift scalar component rho if (move_j) { - if (rho_fp[lev]){ + if (m_fields.has(FieldType::rho_fp, lev)) { // Fine grid - shiftMF(*rho_fp[lev], geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::rho_fp,lev), geom[lev], num_shift, dir, lev, do_update_cost); if (lev > 0){ // Coarse grid - shiftMF(*rho_cp[lev], geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + 
shiftMF(*m_fields.get(FieldType::rho_cp,lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); } } } @@ -369,11 +377,11 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_fluid_species) { const int n_fluid_species = myfl->nSpecies(); for (int i=0; iGetFluidContainer(i); - shiftMF( *fl.N[lev], geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *fl.NU[lev][0], geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *fl.NU[lev][1], geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *fl.NU[lev][2], geom[lev], num_shift, dir, lev, do_update_cost ); + WarpXFluidContainer const& fl = myfl->GetFluidContainer(i); + shiftMF( *m_fields.get(fl.name_mf_N, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{0}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{1}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{2}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); } } } @@ -449,7 +457,7 @@ WarpX::MoveWindow (const int step, bool move_j) const amrex::Real cur_time = t_new[0]; for (int i=0; iGetFluidContainer(i); - fl.InitData( lev, injection_box, cur_time ); + fl.InitData( m_fields, injection_box, cur_time, lev ); } } @@ -458,9 +466,9 @@ WarpX::MoveWindow (const int step, bool move_j) const int lev_zero = 0; m_macroscopic_properties->InitData( Geom(lev_zero), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,0).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,1).ixType().toIntVect(), - getField(warpx::fields::FieldType::Efield_fp, lev_zero,2).ixType().toIntVect() + m_fields.get(FieldType::Efield_fp, Direction{0}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{1}, lev_zero)->ixType().toIntVect(), + m_fields.get(FieldType::Efield_fp, Direction{2}, lev_zero)->ixType().toIntVect() ); } diff --git 
a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 1de03eb61f0..46399b439d6 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -8,6 +8,8 @@ #ifndef WARPX_UTILS_H_ #define WARPX_UTILS_H_ +#include + #include #include #include @@ -31,6 +33,10 @@ void ParseGeometryInput(); void ReadBoostedFrameParameters(amrex::Real& gamma_boost, amrex::Real& beta_boost, amrex::Vector& boost_direction); +void ReadMovingWindowParameters( + int& do_moving_window, int& start_moving_window_step, int& end_moving_window_step, + int& moving_window_dir, amrex::Real& moving_window_v); + void ConvertLabParamsToBoost(); /** @@ -53,9 +59,53 @@ void CheckDims (); */ void CheckGriddingForRZSpectral (); -void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, - amrex::Real zmax); - +/** Function that sets the value of MultiFab MF to zero. + * + * \param[in] mf Pointer to the MultiFab + * \param[in] lev The mesh refinement level + * \param[in] zmin The minimum z of the range to be nullified + * \param[in] zmax The maximum z of the range to be nullified + */ +void NullifyMFinstance ( + amrex::MultiFab *mf, + int lev, + amrex::Real zmin, + amrex::Real zmax +); + +/** Function that sets the value of MultiFab MF to zero. + * + * \param[in] multifab_map Multifab registry + * \param[in] mf_name Name of Multifab to modify + * \param[in] lev The mesh refinement level + * \param[in] zmin The minimum z of the range to be nullified + * \param[in] zmax The maximum z of the range to be nullified + */ +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + int lev, + amrex::Real zmin, + amrex::Real zmax +); + +/** Function that sets the value of MultiFab MF to zero.
+ * \param[in] multifab_map Multifab registry + * \param[in] mf_name Name of Multifab to modify + * \param[in] dir Direction, for Multifabs that are components of vectors + * \param[in] lev The mesh refinement level + * \param[in] zmin The minimum z of the range to be nullified + * \param[in] zmax The maximum z of the range to be nullified + */ +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + ablastr::fields::Direction dir, + int lev, + amrex::Real zmin, + amrex::Real zmax +); namespace WarpXUtilIO{ /** diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index 4556d64684f..d6f465fa901 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -14,6 +14,7 @@ #include "WarpXProfilerWrapper.H" #include "WarpXUtil.H" +#include #include #include @@ -139,6 +140,43 @@ void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost, } } +void ReadMovingWindowParameters( + int& do_moving_window, int& start_moving_window_step, int& end_moving_window_step, + int& moving_window_dir, amrex::Real& moving_window_v) +{ + const ParmParse pp_warpx("warpx"); + pp_warpx.query("do_moving_window", do_moving_window); + if (do_moving_window) { + utils::parser::queryWithParser( + pp_warpx, "start_moving_window_step", start_moving_window_step); + utils::parser::queryWithParser( + pp_warpx, "end_moving_window_step", end_moving_window_step); + std::string s; + pp_warpx.get("moving_window_dir", s); + + if (s == "z" || s == "Z") { + moving_window_dir = WARPX_ZINDEX; + } +#if defined(WARPX_DIM_3D) + else if (s == "y" || s == "Y") { + moving_window_dir = 1; + } +#endif +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) + else if (s == "x" || s == "X") { + moving_window_dir = 0; + } +#endif + else { + WARPX_ABORT_WITH_MESSAGE("Unknown moving_window_dir: "+s); + } + + utils::parser::getWithParser( + pp_warpx, "moving_window_v", moving_window_v); + moving_window_v *= PhysConst::c; + } +} + void
ConvertLabParamsToBoost() { Real gamma_boost = 1., beta_boost = 0.; @@ -195,8 +233,11 @@ void ConvertLabParamsToBoost() { if (boost_direction[dim_map[idim]]) { amrex::Real convert_factor; - // Assume that the window travels with speed +c - convert_factor = 1._rt/( gamma_boost * ( 1 - beta_boost ) ); + amrex::Real beta_window = beta_boost; + if (WarpX::do_moving_window && idim == WarpX::moving_window_dir) { + beta_window = WarpX::moving_window_v / PhysConst::c; + } + convert_factor = 1._rt/( gamma_boost * ( 1 - beta_boost * beta_window ) ); prob_lo[idim] *= convert_factor; prob_hi[idim] *= convert_factor; if (max_level > 0){ @@ -221,16 +262,18 @@ void ConvertLabParamsToBoost() } -/* \brief Function that sets the value of MultiFab MF to zero for z between - * zmin and zmax. - */ -void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax){ - WARPX_PROFILE("WarpXUtil::NullifyMF()"); - int const ncomp = mf.nComp(); +void NullifyMFinstance ( + amrex::MultiFab *mf, + int lev, + amrex::Real zmin, + amrex::Real zmax +) +{ + int const ncomp = mf->nComp(); #ifdef AMREX_USE_OMP #pragma omp parallel if (Gpu::notInLaunchRegion()) #endif - for(amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi){ + for(amrex::MFIter mfi(*mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi){ const amrex::Box& bx = mfi.tilebox(); // Get box lower and upper physical z bound, and dz const amrex::Real zmin_box = WarpX::LowerCorner(bx, lev, 0._rt).z; @@ -246,7 +289,7 @@ void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax) #endif // Check if box intersect with [zmin, zmax] if ( (zmax>zmin_box && zmin<=zmax_box) ){ - const Array4 arr = mf[mfi].array(); + const Array4 arr = (*mf)[mfi].array(); // Set field to 0 between zmin and zmax ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) noexcept{ @@ -266,6 +309,39 @@ void NullifyMF(amrex::MultiFab& mf, int lev, amrex::Real zmin, amrex::Real zmax) } } +void NullifyMF ( + 
ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + int lev, + amrex::Real zmin, + amrex::Real zmax +) +{ + WARPX_PROFILE("WarpXUtil::NullifyMF()"); + if (!multifab_map.has(mf_name, lev)) { return; } + + auto * mf = multifab_map.get(mf_name, lev); + + NullifyMFinstance ( mf, lev, zmin, zmax); +} + +void NullifyMF ( + ablastr::fields::MultiFabRegister& multifab_map, + std::string const& mf_name, + ablastr::fields::Direction dir, + int lev, + amrex::Real zmin, + amrex::Real zmax +) +{ + WARPX_PROFILE("WarpXUtil::NullifyMF()"); + if (!multifab_map.has(mf_name, dir, lev)) { return; } + + auto * mf = multifab_map.get(mf_name, dir, lev); + + NullifyMFinstance ( mf, lev, zmin, zmax); +} + namespace WarpXUtilIO{ bool WriteBinaryDataOnFile(const std::string& filename, const amrex::Vector& data) { diff --git a/Source/WarpX.H b/Source/WarpX.H index 28bb6215a45..83b1880f2b1 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -39,7 +39,7 @@ #include "AcceleratorLattice/AcceleratorLattice.H" #include "Evolve/WarpXDtType.H" #include "Evolve/WarpXPushType.H" -#include "FieldSolver/Fields.H" +#include "Fields.H" #include "FieldSolver/MagnetostaticSolver/MagnetostaticSolver.H" #include "FieldSolver/ImplicitSolvers/ImplicitSolver.H" #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" @@ -49,6 +49,7 @@ #include "Utils/WarpXAlgorithmSelection.H" #include "Utils/export.H" +#include #include #include @@ -84,7 +85,6 @@ class WARPX_EXPORT WarpX : public amrex::AmrCore { public: - static WarpX& GetInstance (); static void ResetInstance (); @@ -104,9 +104,9 @@ public: WarpX& operator= ( WarpX const & ) = delete; /** Move constructor */ - WarpX ( WarpX && ) = default; + WarpX ( WarpX && ) = delete; /** Move operator */ - WarpX& operator= ( WarpX && ) = default; + WarpX& operator= ( WarpX && ) = delete; static std::string Version (); //!< Version of WarpX executable static std::string PicsarVersion (); //!< Version of PICSAR dependency @@ -117,6 +117,11 @@ 
public: void Evolve (int numsteps = -1); + /** Push momentum one half step forward to synchronize with position. + * Also sets is_synchronized to `true`. + */ + void Synchronize (); + // // Functions used by implicit solvers // @@ -127,14 +132,14 @@ public: void SaveParticlesAtImplicitStepStart (); void FinishImplicitParticleUpdate (); void SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ); - void UpdateMagneticFieldAndApplyBCs ( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_thetadt ); + void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_thetadt ); void ApplyMagneticFieldBCs (); - void FinishMagneticFieldAndApplyBCs ( const amrex::Vector, 3 > >& a_Bn, - amrex::Real a_theta ); - void FinishImplicitField ( amrex::Vector, 3 > >& Field_fp, - const amrex::Vector, 3 > >& Field_n, - amrex::Real theta ); + void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_theta ); + void FinishImplicitField ( const ablastr::fields::MultiLevelVectorField& Field_fp, + const ablastr::fields::MultiLevelVectorField& Field_n, + amrex::Real theta ); void ImplicitComputeRHSE ( amrex::Real dt, WarpXSolverVec& a_Erhs_vec); void ImplicitComputeRHSE (int lev, amrex::Real dt, WarpXSolverVec& a_Erhs_vec); void ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real dt, WarpXSolverVec& a_Erhs_vec); @@ -147,7 +152,10 @@ public: [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); } MultiDiagnostics& GetMultiDiags () {return *multi_diags;} #ifdef AMREX_USE_EB - amrex::Vector >& GetDistanceToEB () {return m_distance_to_eb;} + ablastr::fields::MultiLevelScalarField GetDistanceToEB () { + using warpx::fields::FieldType; + return m_fields.get_mr_levels(FieldType::distance_to_eb, finestLevel()); + } #endif ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } @@ -465,93 +473,12 @@ public: const 
std::string& name, std::optional initial_value); - /** - * \brief - * Allocate the MultiFab so that is like the specified MultiFab (same ba and dm) - * and optionally initialize it. This also adds the MultiFab - * to the map of MultiFabs (used to ease the access to MultiFabs from the Python - * interface - * - * \param mf[out] The MultiFab unique pointer to be allocated - * \param mf_model[in] The MultiFab to model - * \param name[in] The name of the MultiFab to use in the map - * \param initial_value[in] The optional initial value - */ - static void AllocInitMultiFabFromModel ( - std::unique_ptr& mf, - amrex::MultiFab& mf_model, - int level, - const std::string& name, - std::optional initial_value = {}); - // Maps of all of the MultiFabs and iMultiFabs used (this can include MFs from other classes) // This is a convenience for the Python interface, allowing all MultiFabs // to be easily referenced from Python. static std::map multifab_map; static std::map imultifab_map; - /** - * \brief - * Check if a field is initialized. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return true if the field is initialized, false otherwise - */ - [[nodiscard]] bool - isFieldInitialized (warpx::fields::FieldType field_type, int lev, int direction = 0) const; - - /** - * \brief - * Get a pointer to the field data. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return the pointer to an amrex::MultiFab containing the field data - */ - [[nodiscard]] amrex::MultiFab* - getFieldPointer (warpx::fields::FieldType field_type, int lev, int direction = 0) const; - - /** - * \brief - * For vector fields, get an array of three pointers to the field data. 
- * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * - * \return an array of three pointers amrex::MultiFab* containing the field data - */ - [[nodiscard]] std::array - getFieldPointerArray (warpx::fields::FieldType field_type, int lev) const; - - /** - * \brief - * Get a constant reference to the field data. - * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return a constant refernce to an amrex::MultiFab containing the field data - */ - [[nodiscard]] const amrex::MultiFab& - getField(warpx::fields::FieldType field_type, int lev, int direction = 0) const; - - /** - * \brief - * Get a constant reference to the specified vector field on the different MR levels - * - * \param field_type[in] the field type - * - * \return a vector (which one element per MR level) of arrays of three pointers (for 3 vector components) amrex::MultiFab* containing the field data - */ - [[nodiscard]] const amrex::Vector,3>>& - getMultiLevelField(warpx::fields::FieldType field_type) const; - /** * \brief * Get pointer to the amrex::MultiFab containing the dotMask for the specified field @@ -564,7 +491,7 @@ public: * Set the dotMask container */ void SetDotMask( std::unique_ptr& field_dotMask, - warpx::fields::FieldType field_type, int lev, int dir ) const; + std::string const & field_name, int lev, int dir ) const; [[nodiscard]] bool DoPML () const {return do_pml;} [[nodiscard]] bool DoFluidSpecies () const {return do_fluid_species;} @@ -606,6 +533,12 @@ public: /** Determine the timestep of the simulation. */ void ComputeDt (); + /** + * Determine the simulation timestep from the maximum speed of all particles + * Sets timestep so that a particle can only cross cfl*dx cells per timestep. 
+ */ + void UpdateDtFromParticleSpeeds (); + /** Print main PIC parameters to stdout */ void PrintMainPICparameters (); @@ -735,8 +668,8 @@ public: * when FieldBoundaryType is set to damped. Vector version. */ void DampFieldsInGuards (int lev, - const std::array,3>& Efield, - const std::array,3>& Bfield); + const ablastr::fields::VectorField& Efield, + const ablastr::fields::VectorField& Bfield); /** * \brief Private function for spectral solver @@ -745,7 +678,7 @@ public: * can appear in parallel simulations. This will be called * when FieldBoundaryType is set to damped. Scalar version. */ - void DampFieldsInGuards (int lev, std::unique_ptr& mf); + void DampFieldsInGuards (int lev, amrex::MultiFab* mf); #ifdef WARPX_DIM_RZ void ApplyInverseVolumeScalingToCurrentDensity(amrex::MultiFab* Jx, @@ -882,21 +815,16 @@ public: * Then, for each MR level, including level 0, apply filter and sum guard * cells across levels. * - * \param[in,out] J_fp reference to fine-patch current \c MultiFab (all MR levels) - * \param[in,out] J_cp reference to coarse-patch current \c MultiFab (all MR levels) - * \param[in,out] J_buffer reference to buffer current \c MultiFab (all MR levels) + * \param[in] current_fp_string the coarse of fine patch to use for current */ - void SyncCurrent ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, - const amrex::Vector,3>>& J_buffer); + void SyncCurrent (const std::string& current_fp_string); void SyncRho (); void SyncRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - const amrex::Vector>& charge_buffer); + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, + ablastr::fields::MultiLevelScalarField const & charge_buffer); [[nodiscard]] amrex::Vector getnsubsteps () const {return nsubsteps;} [[nodiscard]] int getnsubsteps (int lev) const {return nsubsteps[lev];} @@ -977,11 +905,11 @@ public: // these should be private, but can't due to Cuda 
limitations static void ComputeDivB (amrex::MultiFab& divB, int dcomp, - const std::array& B, + ablastr::fields::VectorField const & B, const std::array& dx); static void ComputeDivB (amrex::MultiFab& divB, int dcomp, - const std::array& B, + ablastr::fields::VectorField const & B, const std::array& dx, amrex::IntVect ngrow); void ComputeDivE(amrex::MultiFab& divE, int lev); @@ -1011,14 +939,14 @@ public: MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler; void ComputeMagnetostaticField (); void AddMagnetostaticFieldLabFrame (); - void computeVectorPotential (const amrex::Vector, 3> >& curr, - amrex::Vector, 3> >& A, + void computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr, + ablastr::fields::MultiLevelVectorField const& A, amrex::Real required_precision=amrex::Real(1.e-11), amrex::Real absolute_tolerance=amrex::Real(0.0), int max_iters=200, - int verbosity=2) const; + int verbosity=2); // const; - void setVectorPotentialBC (amrex::Vector, 3> >& A) const; + void setVectorPotentialBC (ablastr::fields::MultiLevelVectorField const& A) const; /** * \brief @@ -1046,8 +974,8 @@ public: amrex::ParserExecutor<3> const& xfield_parser, amrex::ParserExecutor<3> const& yfield_parser, amrex::ParserExecutor<3> const& zfield_parser, - std::array< std::unique_ptr, 3 > const& edge_lengths, - std::array< std::unique_ptr, 3 > const& face_areas, + ablastr::fields::VectorField const& edge_lengths, + ablastr::fields::VectorField const& face_areas, [[maybe_unused]] char field, int lev, PatchType patch_type); @@ -1127,24 +1055,24 @@ public: * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1]. * An edge of length 0 is fully covered. */ - static void ComputeEdgeLengths (std::array< std::unique_ptr, 3 >& edge_lengths, + static void ComputeEdgeLengths (ablastr::fields::VectorField& edge_lengths, const amrex::EBFArrayBoxFactory& eb_fact); /** * \brief Compute the area of the mesh faces. 
Here the area is a value in [0, 1]. * An edge of area 0 is fully covered. */ - static void ComputeFaceAreas (std::array< std::unique_ptr, 3 >& face_areas, + static void ComputeFaceAreas (ablastr::fields::VectorField& face_areas, const amrex::EBFArrayBoxFactory& eb_fact); /** * \brief Scale the edges lengths by the mesh width to obtain the real lengths. */ - static void ScaleEdges (std::array< std::unique_ptr, 3 >& edge_lengths, + static void ScaleEdges (ablastr::fields::VectorField& edge_lengths, const std::array& cell_size); /** * \brief Scale the edges areas by the mesh width to obtain the real areas. */ - static void ScaleAreas (std::array< std::unique_ptr, 3 >& face_areas, + static void ScaleAreas (ablastr::fields::VectorField& face_areas, const std::array& cell_size); /** * \brief Initialize information for cell extensions. @@ -1214,6 +1142,9 @@ public: FiniteDifferenceSolver * get_pointer_fdtd_solver_fp (int lev) { return m_fdtd_solver_fp[lev].get(); } + // Field container + ablastr::fields::MultiFabRegister m_fields; + protected: /** @@ -1313,53 +1244,50 @@ private: void OneStep_multiJ (amrex::Real cur_time); void RestrictCurrentFromFineToCoarsePatch ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, int lev); void AddCurrentFromFineLevelandSumBoundary ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, - const amrex::Vector,3>>& J_buffer, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, + const ablastr::fields::MultiLevelVectorField& J_buffer, int lev); void StoreCurrent (int lev); void RestoreCurrent (int lev); void ApplyFilterJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev, int idim); void ApplyFilterJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int 
lev); void SumBoundaryJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev, int idim, const amrex::Periodicity& period); void SumBoundaryJ ( - const amrex::Vector,3>>& current, + const ablastr::fields::MultiLevelVectorField& current, int lev, const amrex::Periodicity& period); void NodalSyncJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + const ablastr::fields::MultiLevelVectorField& J_fp, + const ablastr::fields::MultiLevelVectorField& J_cp, int lev, PatchType patch_type); - void RestrictRhoFromFineToCoarsePatch ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - int lev); + void RestrictRhoFromFineToCoarsePatch (int lev ); void ApplyFilterandSumBoundaryRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, int lev, PatchType patch_type, int icomp, int ncomp); void AddRhoFromFineLevelandSumBoundary ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, - const amrex::Vector>& charge_buffer, + const ablastr::fields::MultiLevelScalarField& charge_fp, + const ablastr::fields::MultiLevelScalarField& charge_cp, + ablastr::fields::MultiLevelScalarField const & charge_buffer, int lev, int icomp, int ncomp); @@ -1482,6 +1410,7 @@ private: amrex::Vector t_new; amrex::Vector t_old; amrex::Vector dt; + static utils::parser::IntervalsParser dt_update_interval; // How often to update the timestep when using adaptive timestepping // Particle container std::unique_ptr mypc; @@ -1495,22 +1424,6 @@ private: // Fields: First array for level, second for direction // - // Full solution - amrex::Vector, 3 > > Efield_aux; - amrex::Vector, 3 > > Bfield_aux; - - // Fine patch - amrex::Vector< std::unique_ptr > F_fp; - amrex::Vector< std::unique_ptr > G_fp; - amrex::Vector< std::unique_ptr > rho_fp; - amrex::Vector< std::unique_ptr > 
phi_fp; - amrex::Vector, 3 > > current_fp; - amrex::Vector, 3 > > current_fp_vay; - amrex::Vector, 3 > > Efield_fp; - amrex::Vector, 3 > > Bfield_fp; - amrex::Vector, 3 > > Efield_avg_fp; - amrex::Vector, 3 > > Bfield_avg_fp; - // Masks for computing dot product and global moments of fields when using grids that // have shared locations across different ranks (e.g., a Yee grid) mutable amrex::Vector,3 > > Efield_dotMask; @@ -1518,23 +1431,6 @@ private: mutable amrex::Vector,3 > > Afield_dotMask; mutable amrex::Vector< std::unique_ptr > phi_dotMask; - // Memory buffers for computing magnetostatic fields - // Vector Potential A and previous step. Time buffer needed for computing dA/dt to first order - amrex::Vector, 3 > > vector_potential_fp_nodal; - amrex::Vector, 3 > > vector_potential_grad_buf_e_stag; - amrex::Vector, 3 > > vector_potential_grad_buf_b_stag; - - // Same as Bfield_fp/Efield_fp for reading external field data - amrex::Vector, 3 > > Efield_fp_external; - amrex::Vector, 3 > > Bfield_fp_external; - amrex::Vector, 3 > > E_external_particle_field; - amrex::Vector, 3 > > B_external_particle_field; - - //! EB: Lengths of the mesh edges - amrex::Vector, 3 > > m_edge_lengths; - //! EB: Areas of the mesh faces - amrex::Vector, 3 > > m_face_areas; - /** EB: for every mesh face flag_info_face contains a: * * 0 if the face needs to be extended * * 1 if the face is large enough to lend area to other faces @@ -1549,71 +1445,16 @@ private: * and in WarpX::ComputeEightWaysExtensions * This is only used for the ECT solver.*/ amrex::Vector, 3 > > m_flag_ext_face; - /** EB: m_area_mod contains the modified areas of the mesh faces, i.e. if a face is enlarged it - * contains the area of the enlarged face - * This is only used for the ECT solver.*/ - amrex::Vector, 3 > > m_area_mod; + /** EB: m_borrowing contains the info about the enlarged cells, i.e. for every enlarged cell it * contains the info of which neighbors are being intruded (and the amount of borrowed area). 
* This is only used for the ECT solver.*/ amrex::Vector >, 3 > > m_borrowing; - /** ECTRhofield is needed only by the ect - * solver and it contains the electromotive force density for every mesh face. - * The name ECTRhofield has been used to comply with the notation of the paper - * https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4463918 (page 9, equation 4 - * and below). - * Although it's called rho it has nothing to do with the charge density! - * This is only used for the ECT solver.*/ - amrex::Vector, 3 > > ECTRhofield; - /** Venl contains the electromotive force for every mesh face, i.e. every entry is - * the corresponding entry in ECTRhofield multiplied by the total area (possibly with enlargement) - * This is only used for the ECT solver.*/ - amrex::Vector, 3 > > Venl; - - //EB level set - amrex::Vector > m_distance_to_eb; - - // store fine patch - amrex::Vector, 3 > > current_store; - - // Nodal MultiFab for nodal current deposition if warpx.do_current_centering = 1 - amrex::Vector,3>> current_fp_nodal; - - // Coarse patch - amrex::Vector< std::unique_ptr > F_cp; - amrex::Vector< std::unique_ptr > G_cp; - amrex::Vector< std::unique_ptr > rho_cp; - amrex::Vector, 3 > > current_cp; - amrex::Vector, 3 > > Efield_cp; - amrex::Vector, 3 > > Bfield_cp; - amrex::Vector, 3 > > Efield_avg_cp; - amrex::Vector, 3 > > Bfield_avg_cp; - // Copy of the coarse aux - amrex::Vector, 3 > > Efield_cax; - amrex::Vector, 3 > > Bfield_cax; amrex::Vector > current_buffer_masks; amrex::Vector > gather_buffer_masks; - // If charge/current deposition buffers are used - amrex::Vector, 3 > > current_buf; - amrex::Vector > charge_buf; - - /** - * \brief - * Get a pointer to the field data. Does not check if the pointer - * is not nullptr. 
- * - * \param field_type[in] the field type - * \param lev[in] the mesh refinement level - * \param direction[in] the field component (0 by default) - * - * \return the pointer to an amrex::MultiFab containing the field data - */ - [[nodiscard]] amrex::MultiFab* - getFieldPointerUnchecked (warpx::fields::FieldType field_type, int lev, int direction = 0) const; - // PML int do_pml = 0; int do_silver_mueller = 0; @@ -1643,7 +1484,9 @@ private: int num_injected_species = -1; amrex::Vector injected_plasma_species; + // Timestepping parameters std::optional m_const_dt; + std::optional m_max_dt; // Macroscopic properties std::unique_ptr m_macroscopic_properties; @@ -1730,17 +1573,11 @@ private: guardCellManager guard_cells; - //Slice Parameters + // Slice Parameters int slice_max_grid_size; int slice_plot_int = -1; amrex::RealBox slice_realbox; amrex::IntVect slice_cr_ratio; - amrex::Vector< std::unique_ptr > F_slice; - amrex::Vector< std::unique_ptr > G_slice; - amrex::Vector< std::unique_ptr > rho_slice; - amrex::Vector, 3 > > current_slice; - amrex::Vector, 3 > > Efield_slice; - amrex::Vector, 3 > > Bfield_slice; bool fft_periodic_single_box = false; int nox_fft = 16; @@ -1810,40 +1647,14 @@ private: /** * \brief Forward FFT of E,B on all mesh refinement levels - * - * \param E_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch electric field to be transformed - * \param B_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch magnetic field to be transformed - * \param E_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch electric field to be transformed - * \param B_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch magnetic field to be transformed - */ - void PSATDForwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp); + */ + void 
PSATDForwardTransformEB (); /** * \brief Backward FFT of E,B on all mesh refinement levels, * with field damping in the guard cells (if needed) - * - * \param E_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch electric field to be transformed - * \param B_fp Vector of three-dimensional arrays (for each level) - * storing the fine patch magnetic field to be transformed - * \param E_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch electric field to be transformed - * \param B_cp Vector of three-dimensional arrays (for each level) - * storing the coarse patch magnetic field to be transformed - */ - void PSATDBackwardTransformEB ( - const amrex::Vector,3>>& E_fp, - const amrex::Vector,3>>& B_fp, - const amrex::Vector,3>>& E_cp, - const amrex::Vector,3>>& B_cp); + */ + void PSATDBackwardTransformEB (); /** * \brief Backward FFT of averaged E,B on all mesh refinement levels @@ -1858,10 +1669,10 @@ private: * storing the coarse patch averaged magnetic field to be transformed */ void PSATDBackwardTransformEBavg ( - const amrex::Vector,3>>& E_avg_fp, - const amrex::Vector,3>>& B_avg_fp, - const amrex::Vector,3>>& E_avg_cp, - const amrex::Vector,3>>& B_avg_cp); + ablastr::fields::MultiLevelVectorField const& E_avg_fp, + ablastr::fields::MultiLevelVectorField const& B_avg_fp, + ablastr::fields::MultiLevelVectorField const& E_avg_cp, + ablastr::fields::MultiLevelVectorField const& B_avg_cp); /** * \brief Forward FFT of J on all mesh refinement levels, @@ -1875,8 +1686,8 @@ private: * (only used in RZ geometry to avoid double filtering) */ void PSATDForwardTransformJ ( - const amrex::Vector,3>>& J_fp, - const amrex::Vector,3>>& J_cp, + std::string const & J_fp_string, + std::string const & J_cp_string, bool apply_kspace_filter=true); /** @@ -1888,8 +1699,8 @@ private: * storing the coarse patch current to be transformed */ void PSATDBackwardTransformJ ( - const amrex::Vector,3>>& J_fp, - const 
amrex::Vector,3>>& J_cp); + std::string const & J_fp_string, + std::string const & J_cp_string); /** * \brief Forward FFT of rho on all mesh refinement levels, @@ -1903,8 +1714,8 @@ private: * (only used in RZ geometry to avoid double filtering) */ void PSATDForwardTransformRho ( - const amrex::Vector>& charge_fp, - const amrex::Vector>& charge_cp, + std::string const & charge_fp_string, + std::string const & charge_cp_string, int icomp, int dcomp, bool apply_kspace_filter=true); /** diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index ef1668de4c0..89254e05c98 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -92,7 +92,7 @@ #include using namespace amrex; -using namespace warpx::fields; +using warpx::fields::FieldType; int WarpX::do_moving_window = 0; int WarpX::start_moving_window_step = 0; @@ -182,6 +182,8 @@ bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; bool WarpX::safe_guard_cells = false; +utils::parser::IntervalsParser WarpX::dt_update_interval; + std::map WarpX::multifab_map; std::map WarpX::imultifab_map; @@ -198,6 +200,10 @@ void WarpX::MakeWarpX () { ParseGeometryInput(); + ReadMovingWindowParameters( + do_moving_window, start_moving_window_step, end_moving_window_step, + moving_window_dir, moving_window_v); + ConvertLabParamsToBoost(); ReadBCParams(); @@ -292,67 +298,17 @@ WarpX::WarpX () // Fluid Container if (do_fluid_species) { - myfl = std::make_unique(nlevs_max); + myfl = std::make_unique(); } - Efield_aux.resize(nlevs_max); - Bfield_aux.resize(nlevs_max); - - F_fp.resize(nlevs_max); - G_fp.resize(nlevs_max); - rho_fp.resize(nlevs_max); - phi_fp.resize(nlevs_max); - current_fp.resize(nlevs_max); - Efield_fp.resize(nlevs_max); - Bfield_fp.resize(nlevs_max); - Efield_dotMask.resize(nlevs_max); Bfield_dotMask.resize(nlevs_max); Afield_dotMask.resize(nlevs_max); phi_dotMask.resize(nlevs_max); - // Only allocate vector potential arrays when using the Magnetostatic Solver - if (electrostatic_solver_id == 
ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) - { - vector_potential_fp_nodal.resize(nlevs_max); - vector_potential_grad_buf_e_stag.resize(nlevs_max); - vector_potential_grad_buf_b_stag.resize(nlevs_max); - } - - if (fft_do_time_averaging) - { - Efield_avg_fp.resize(nlevs_max); - Bfield_avg_fp.resize(nlevs_max); - } - - // Same as Bfield_fp/Efield_fp for reading external field data - Bfield_fp_external.resize(nlevs_max); - Efield_fp_external.resize(nlevs_max); - B_external_particle_field.resize(1); - E_external_particle_field.resize(1); - - m_edge_lengths.resize(nlevs_max); - m_face_areas.resize(nlevs_max); - m_distance_to_eb.resize(nlevs_max); m_flag_info_face.resize(nlevs_max); m_flag_ext_face.resize(nlevs_max); m_borrowing.resize(nlevs_max); - m_area_mod.resize(nlevs_max); - - ECTRhofield.resize(nlevs_max); - Venl.resize(nlevs_max); - - current_store.resize(nlevs_max); - - if (do_current_centering) - { - current_fp_nodal.resize(nlevs_max); - } - - if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) - { - current_fp_vay.resize(nlevs_max); - } // Create Electrostatic Solver object if needed if ((WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) @@ -368,28 +324,11 @@ WarpX::WarpX () if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { // Create hybrid-PIC model object if needed - m_hybrid_pic_model = std::make_unique(nlevs_max); + m_hybrid_pic_model = std::make_unique(); } - F_cp.resize(nlevs_max); - G_cp.resize(nlevs_max); - rho_cp.resize(nlevs_max); - current_cp.resize(nlevs_max); - Efield_cp.resize(nlevs_max); - Bfield_cp.resize(nlevs_max); - - if (fft_do_time_averaging) - { - Efield_avg_cp.resize(nlevs_max); - Bfield_avg_cp.resize(nlevs_max); - } - - Efield_cax.resize(nlevs_max); - Bfield_cax.resize(nlevs_max); current_buffer_masks.resize(nlevs_max); gather_buffer_masks.resize(nlevs_max); - current_buf.resize(nlevs_max); - charge_buf.resize(nlevs_max); pml.resize(nlevs_max); #if (defined 
WARPX_DIM_RZ) && (defined WARPX_USE_FFT) @@ -688,42 +627,11 @@ WarpX::ReadParameters () pp_warpx.query("compute_max_step_from_btd", compute_max_step_from_btd); - pp_warpx.query("do_moving_window", do_moving_window); - if (do_moving_window) - { - utils::parser::queryWithParser( - pp_warpx, "start_moving_window_step", start_moving_window_step); - utils::parser::queryWithParser( - pp_warpx, "end_moving_window_step", end_moving_window_step); - std::string s; - pp_warpx.get("moving_window_dir", s); - - if (s == "z" || s == "Z") { - moving_window_dir = WARPX_ZINDEX; - } -#if defined(WARPX_DIM_3D) - else if (s == "y" || s == "Y") { - moving_window_dir = 1; - } -#endif -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) - else if (s == "x" || s == "X") { - moving_window_dir = 0; - } -#endif - - else { - WARPX_ABORT_WITH_MESSAGE("Unknown moving_window_dir: "+s); - } - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).isPeriodic(moving_window_dir) == 0, - "The problem must be non-periodic in the moving window direction"); - + if (do_moving_window) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + Geom(0).isPeriodic(moving_window_dir) == 0, + "The problem must be non-periodic in the moving window direction"); moving_window_x = geom[0].ProbLo(moving_window_dir); - - utils::parser::getWithParser( - pp_warpx, "moving_window_v", moving_window_v); - moving_window_v *= PhysConst::c; } m_p_ext_field_params = std::make_unique(pp_warpx); @@ -775,7 +683,12 @@ WarpX::ReadParameters () pp_boundary.query("verboncoeur_axis_correction", verboncoeur_axis_correction); #endif + // Read timestepping options utils::parser::queryWithParser(pp_warpx, "const_dt", m_const_dt); + utils::parser::queryWithParser(pp_warpx, "max_dt", m_max_dt); + std::vector dt_interval_vec = {"-1"}; + pp_warpx.queryarr("dt_update_interval", dt_interval_vec); + dt_update_interval = utils::parser::IntervalsParser(dt_interval_vec); // Filter currently not working with FDTD solver in RZ geometry: turn OFF by default // (see 
https://github.com/ECP-WarpX/WarpX/issues/1943) @@ -1448,8 +1361,9 @@ WarpX::ReadParameters () // Instead, if warpx.grid_type=collocated, the momentum-conserving and // energy conserving field gathering algorithms are equivalent (forces // gathered from the collocated grid) and no fields centering occurs. - if (WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving && - WarpX::grid_type != GridType::Collocated) + if ((WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving + && WarpX::grid_type != GridType::Collocated) + || WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { utils::parser::queryWithParser( pp_warpx, "field_centering_nox", field_centering_nox); @@ -2048,64 +1962,14 @@ WarpX::MakeNewLevelFromCoarse (int /*lev*/, amrex::Real /*time*/, const amrex::B void WarpX::ClearLevel (int lev) { - for (int i = 0; i < 3; ++i) { - Efield_aux[lev][i].reset(); - Bfield_aux[lev][i].reset(); - - current_fp[lev][i].reset(); - Efield_fp [lev][i].reset(); - Bfield_fp [lev][i].reset(); + m_fields.clear_level(lev); + for (int i = 0; i < 3; ++i) { Efield_dotMask [lev][i].reset(); Bfield_dotMask [lev][i].reset(); Afield_dotMask [lev][i].reset(); - - current_store[lev][i].reset(); - - if (do_current_centering) - { - current_fp_nodal[lev][i].reset(); - } - - if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) - { - current_fp_vay[lev][i].reset(); - } - - if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) - { - vector_potential_fp_nodal[lev][i].reset(); - vector_potential_grad_buf_e_stag[lev][i].reset(); - vector_potential_grad_buf_b_stag[lev][i].reset(); - } - - current_cp[lev][i].reset(); - Efield_cp [lev][i].reset(); - Bfield_cp [lev][i].reset(); - - Efield_cax[lev][i].reset(); - Bfield_cax[lev][i].reset(); - current_buf[lev][i].reset(); - } - - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) - { - m_hybrid_pic_model->ClearLevel(lev); } - 
charge_buf[lev].reset(); - - current_buffer_masks[lev].reset(); - gather_buffer_masks[lev].reset(); - - F_fp [lev].reset(); - G_fp [lev].reset(); - rho_fp[lev].reset(); - phi_fp[lev].reset(); - F_cp [lev].reset(); - G_cp [lev].reset(); - rho_cp[lev].reset(); - phi_dotMask[lev].reset(); #ifdef WARPX_USE_FFT @@ -2201,6 +2065,8 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm const IntVect& ngEB, IntVect& ngJ, const IntVect& ngRho, const IntVect& ngF, const IntVect& ngG, const bool aux_is_nodal) { + using ablastr::fields::Direction; + // Declare nodal flags IntVect Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag; IntVect Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag; @@ -2302,61 +2168,55 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // const std::array dx = CellSize(lev); - AllocInitMultiFab(Bfield_fp[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_fp[x]", 0.0_rt); - AllocInitMultiFab(Bfield_fp[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_fp[y]", 0.0_rt); - AllocInitMultiFab(Bfield_fp[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_fp[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_fp[x]", 0.0_rt); - AllocInitMultiFab(Efield_fp[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_fp[y]", 0.0_rt); - AllocInitMultiFab(Efield_fp[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp, 
Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(current_fp[lev][0], amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, lev, "current_fp[x]", 0.0_rt); - AllocInitMultiFab(current_fp[lev][1], amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, lev, "current_fp[y]", 0.0_rt); - AllocInitMultiFab(current_fp[lev][2], amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, lev, "current_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_fp, Direction{0}, lev, amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp, Direction{1}, lev, amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp, Direction{2}, lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); if (do_current_centering) { amrex::BoxArray const& nodal_ba = amrex::convert(ba, amrex::IntVect::TheNodeVector()); - AllocInitMultiFab(current_fp_nodal[lev][0], nodal_ba, dm, ncomps, ngJ, lev, "current_fp_nodal[x]", 0.0_rt); - AllocInitMultiFab(current_fp_nodal[lev][1], nodal_ba, dm, ncomps, ngJ, lev, "current_fp_nodal[y]", 0.0_rt); - AllocInitMultiFab(current_fp_nodal[lev][2], nodal_ba, dm, ncomps, ngJ, lev, "current_fp_nodal[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_nodal, Direction{0}, lev, nodal_ba, dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_nodal, Direction{1}, lev, nodal_ba, dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_nodal, Direction{2}, lev, nodal_ba, dm, ncomps, ngJ, 0.0_rt); } if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { - AllocInitMultiFab(current_fp_vay[lev][0], amrex::convert(ba, rho_nodal_flag), dm, ncomps, 
ngJ, lev, "current_fp_vay[x]", 0.0_rt); - AllocInitMultiFab(current_fp_vay[lev][1], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, lev, "current_fp_vay[y]", 0.0_rt); - AllocInitMultiFab(current_fp_vay[lev][2], amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, lev, "current_fp_vay[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_vay, Direction{0}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_vay, Direction{1}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_fp_vay, Direction{2}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngJ, 0.0_rt); } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { - AllocInitMultiFab(vector_potential_fp_nodal[lev][0], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "vector_potential_fp_nodal[x]", 0.0_rt); - AllocInitMultiFab(vector_potential_fp_nodal[lev][1], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "vector_potential_fp_nodal[y]", 0.0_rt); - AllocInitMultiFab(vector_potential_fp_nodal[lev][2], amrex::convert(ba, rho_nodal_flag), - dm, ncomps, ngRho, lev, "vector_potential_fp_nodal[z]", 0.0_rt); - - AllocInitMultiFab(vector_potential_grad_buf_e_stag[lev][0], amrex::convert(ba, Ex_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_e_stag[x]", 0.0_rt); - AllocInitMultiFab(vector_potential_grad_buf_e_stag[lev][1], amrex::convert(ba, Ey_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_e_stag[y]", 0.0_rt); - AllocInitMultiFab(vector_potential_grad_buf_e_stag[lev][2], amrex::convert(ba, Ez_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_e_stag[z]", 0.0_rt); - - AllocInitMultiFab(vector_potential_grad_buf_b_stag[lev][0], amrex::convert(ba, Bx_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_b_stag[x]", 0.0_rt); - 
AllocInitMultiFab(vector_potential_grad_buf_b_stag[lev][1], amrex::convert(ba, By_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_b_stag[y]", 0.0_rt); - AllocInitMultiFab(vector_potential_grad_buf_b_stag[lev][2], amrex::convert(ba, Bz_nodal_flag), - dm, ncomps, ngEB, lev, "vector_potential_grad_buf_b_stag[z]", 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_fp_nodal, Direction{0}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_fp_nodal, Direction{1}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_fp_nodal, Direction{2}, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + + // Memory buffers for computing magnetostatic fields + // Vector Potential A and previous step. Time buffer needed for computing dA/dt to first order + m_fields.alloc_init(FieldType::vector_potential_grad_buf_e_stag, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_e_stag, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_e_stag, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + + m_fields.alloc_init(FieldType::vector_potential_grad_buf_b_stag, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_b_stag, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::vector_potential_grad_buf_b_stag, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } // Allocate extra multifabs needed by the kinetic-fluid hybrid algorithm. 
if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) { m_hybrid_pic_model->AllocateLevelMFs( + m_fields, lev, ba, dm, ncomps, ngJ, ngRho, jx_nodal_flag, jy_nodal_flag, jz_nodal_flag, rho_nodal_flag ); @@ -2364,10 +2224,10 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // Allocate extra multifabs needed for fluids if (do_fluid_species) { - myfl->AllocateLevelMFs(lev, ba, dm); + myfl->AllocateLevelMFs(m_fields, ba, dm, lev); auto & warpx = GetInstance(); const amrex::Real cur_time = warpx.gett_new(lev); - myfl->InitData(lev, geom[lev].Domain(),cur_time); + myfl->InitData(m_fields, geom[lev].Domain(), cur_time, lev); } // Allocate extra multifabs for macroscopic properties of the medium @@ -2379,50 +2239,41 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm if (fft_do_time_averaging) { - AllocInitMultiFab(Bfield_avg_fp[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_fp[x]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_fp[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_fp[y]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_fp[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_fp, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_fp, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_fp, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_avg_fp[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[x]", 0.0_rt); - AllocInitMultiFab(Efield_avg_fp[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[y]", 0.0_rt); - AllocInitMultiFab(Efield_avg_fp[lev][2], amrex::convert(ba, 
Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_fp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_fp, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_fp, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_fp, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } if (EB::enabled()) { constexpr int nc_ls = 1; amrex::IntVect const ng_ls(2); - AllocInitMultiFab(m_distance_to_eb[lev], amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, lev, - "m_distance_to_eb"); + //EB level set + m_fields.alloc_init(FieldType::distance_to_eb, lev, amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, 0.0_rt); // EB info are needed only at the finest level if (lev == maxLevel()) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); + //! 
EB: Lengths of the mesh edges + m_fields.alloc_init(FieldType::edge_lengths, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::edge_lengths, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::edge_lengths, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + + //! EB: Areas of the mesh faces + m_fields.alloc_init(FieldType::face_areas, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::face_areas, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::face_areas, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); } if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - AllocInitMultiFab(m_edge_lengths[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[x]"); - AllocInitMultiFab(m_edge_lengths[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[y]"); - AllocInitMultiFab(m_edge_lengths[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_edge_lengths[z]"); - AllocInitMultiFab(m_face_areas[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[x]"); - AllocInitMultiFab(m_face_areas[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[y]"); - AllocInitMultiFab(m_face_areas[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_face_areas[z]"); AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, 
Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, @@ -2435,31 +2286,47 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[y]"); AllocInitMultiFab(m_flag_ext_face[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_ext_face[z]"); - AllocInitMultiFab(m_area_mod[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_area_mod[x]"); - AllocInitMultiFab(m_area_mod[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_area_mod[y]"); - AllocInitMultiFab(m_area_mod[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "m_area_mod[z]"); + + /** EB: area_mod contains the modified areas of the mesh faces, i.e. if a face is enlarged it + * contains the area of the enlarged face + * This is only used for the ECT solver.*/ + m_fields.alloc_init(FieldType::area_mod, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::area_mod, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::area_mod, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_borrowing[lev][0] = std::make_unique>( amrex::convert(ba, Bx_nodal_flag), dm); m_borrowing[lev][1] = std::make_unique>( amrex::convert(ba, By_nodal_flag), dm); m_borrowing[lev][2] = std::make_unique>( amrex::convert(ba, Bz_nodal_flag), dm); - AllocInitMultiFab(Venl[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "Venl[x]"); - AllocInitMultiFab(Venl[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - 
guard_cells.ng_FieldSolver, lev, "Venl[y]"); - AllocInitMultiFab(Venl[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "Venl[z]"); - - AllocInitMultiFab(ECTRhofield[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "ECTRhofield[x]", 0.0_rt); - AllocInitMultiFab(ECTRhofield[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "ECTRhofield[y]", 0.0_rt); - AllocInitMultiFab(ECTRhofield[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, - guard_cells.ng_FieldSolver, lev, "ECTRhofield[z]", 0.0_rt); + + /** Venl contains the electromotive force for every mesh face, i.e. every entry is + * the corresponding entry in ECTRhofield multiplied by the total area (possibly with enlargement) + * This is only used for the ECT solver.*/ + m_fields.alloc_init(FieldType::Venl, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::Venl, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::Venl, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + + /** ECTRhofield is needed only by the ect + * solver and it contains the electromotive force density for every mesh face. + * The name ECTRhofield has been used to comply with the notation of the paper + * https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4463918 (page 9, equation 4 + * and below). + * Although it's called rho it has nothing to do with the charge density! 
+ * This is only used for the ECT solver.*/ + m_fields.alloc_init(FieldType::ECTRhofield, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::ECTRhofield, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); + m_fields.alloc_init(FieldType::ECTRhofield, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); } } } @@ -2481,31 +2348,38 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } if (rho_ncomps > 0) { - AllocInitMultiFab(rho_fp[lev], amrex::convert(ba, rho_nodal_flag), dm, rho_ncomps, ngRho, lev, "rho_fp", 0.0_rt); + m_fields.alloc_init(FieldType::rho_fp, + lev, amrex::convert(ba, rho_nodal_flag), dm, + rho_ncomps, ngRho, 0.0_rt); } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { const IntVect ngPhi = IntVect( AMREX_D_DECL(1,1,1) ); - AllocInitMultiFab(phi_fp[lev], amrex::convert(ba, phi_nodal_flag), dm, ncomps, ngPhi, lev, "phi_fp", 0.0_rt); + m_fields.alloc_init(FieldType::phi_fp, lev, amrex::convert(ba, phi_nodal_flag), dm, + ncomps, ngPhi, 0.0_rt ); } if (do_subcycling && lev == 0) { - AllocInitMultiFab(current_store[lev][0], amrex::convert(ba,jx_nodal_flag),dm,ncomps,ngJ,lev, "current_store[x]"); - AllocInitMultiFab(current_store[lev][1], amrex::convert(ba,jy_nodal_flag),dm,ncomps,ngJ,lev, "current_store[y]"); - AllocInitMultiFab(current_store[lev][2], amrex::convert(ba,jz_nodal_flag),dm,ncomps,ngJ,lev, "current_store[z]"); + m_fields.alloc_init(FieldType::current_store, Direction{0}, lev, amrex::convert(ba,jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_store, Direction{1}, lev, amrex::convert(ba,jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_store, Direction{2}, 
lev, amrex::convert(ba,jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); } if (do_dive_cleaning) { - AllocInitMultiFab(F_fp[lev], amrex::convert(ba, F_nodal_flag), dm, ncomps, ngF, lev, "F_fp", 0.0_rt); + m_fields.alloc_init(FieldType::F_fp, + lev, amrex::convert(ba, F_nodal_flag), dm, + ncomps, ngF, 0.0_rt); } if (do_divb_cleaning) { - AllocInitMultiFab(G_fp[lev], amrex::convert(ba, G_nodal_flag), dm, ncomps, ngG, lev, "G_fp", 0.0_rt); + m_fields.alloc_init(FieldType::G_fp, + lev, amrex::convert(ba, G_nodal_flag), dm, + ncomps, ngG, 0.0_rt); } if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) @@ -2574,90 +2448,103 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // Create aux multifabs on Nodal Box Array BoxArray const nba = amrex::convert(ba,IntVect::TheNodeVector()); - AllocInitMultiFab(Bfield_aux[lev][0], nba, dm, ncomps, ngEB, lev, "Bfield_aux[x]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][1], nba, dm, ncomps, ngEB, lev, "Bfield_aux[y]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][2], nba, dm, ncomps, ngEB, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{0}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{1}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{2}, lev, nba, dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][0], nba, dm, ncomps, ngEB, lev, "Efield_aux[x]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][1], nba, dm, ncomps, ngEB, lev, "Efield_aux[y]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][2], nba, dm, ncomps, ngEB, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{0}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{1}, lev, nba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{2}, lev, nba, dm, ncomps, ngEB, 0.0_rt); } else if (lev == 0) { if 
(WarpX::fft_do_time_averaging) { - AliasInitMultiFab(Bfield_aux[lev][0], *Bfield_avg_fp[lev][0], 0, ncomps, lev, "Bfield_aux[x]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][1], *Bfield_avg_fp[lev][1], 0, ncomps, lev, "Bfield_aux[y]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][2], *Bfield_avg_fp[lev][2], 0, ncomps, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_avg_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_avg_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_avg_fp, Direction{2}, lev, 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][0], *Efield_avg_fp[lev][0], 0, ncomps, lev, "Efield_aux[x]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][1], *Efield_avg_fp[lev][1], 0, ncomps, lev, "Efield_aux[y]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][2], *Efield_avg_fp[lev][2], 0, ncomps, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_avg_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_avg_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_avg_fp, Direction{2}, lev, 0.0_rt); } else { if (mypc->m_B_ext_particle_s == "read_from_file") { - AllocInitMultiFab(Bfield_aux[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[x]"); - AllocInitMultiFab(Bfield_aux[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[y]"); - AllocInitMultiFab(Bfield_aux[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[z]"); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{2}, lev, 
amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } else { // In this case, the aux grid is simply an alias of the fp grid (most common case in WarpX) - AliasInitMultiFab(Bfield_aux[lev][0], *Bfield_fp[lev][0], 0, ncomps, lev, "Bfield_aux[x]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][1], *Bfield_fp[lev][1], 0, ncomps, lev, "Bfield_aux[y]", 0.0_rt); - AliasInitMultiFab(Bfield_aux[lev][2], *Bfield_fp[lev][2], 0, ncomps, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Bfield_aux, FieldType::Bfield_fp, Direction{2}, lev, 0.0_rt); } if (mypc->m_E_ext_particle_s == "read_from_file") { - AllocInitMultiFab(Efield_aux[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[x]"); - AllocInitMultiFab(Efield_aux[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[y]"); - AllocInitMultiFab(Efield_aux[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[z]"); + m_fields.alloc_init(FieldType::Efield_aux, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } else { // In this case, the aux grid is simply an alias of the fp grid (most common case in WarpX) - AliasInitMultiFab(Efield_aux[lev][0], *Efield_fp[lev][0], 0, ncomps, lev, "Efield_aux[x]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][1], *Efield_fp[lev][1], 0, ncomps, lev, "Efield_aux[y]", 0.0_rt); - AliasInitMultiFab(Efield_aux[lev][2], *Efield_fp[lev][2], 0, ncomps, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, 
FieldType::Efield_fp, Direction{0}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_fp, Direction{1}, lev, 0.0_rt); + m_fields.alias_init(FieldType::Efield_aux, FieldType::Efield_fp, Direction{2}, lev, 0.0_rt); } } } else { - AllocInitMultiFab(Bfield_aux[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[x]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[y]", 0.0_rt); - AllocInitMultiFab(Bfield_aux[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{0}, lev, amrex::convert(ba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{1}, lev, amrex::convert(ba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_aux, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[x]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[y]", 0.0_rt); - AllocInitMultiFab(Efield_aux[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_aux[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{1}, lev, amrex::convert(ba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_aux, Direction{2}, lev, amrex::convert(ba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } // The external fields that are read from file if (m_p_ext_field_params->B_ext_grid_type != ExternalFieldType::default_zero && m_p_ext_field_params->B_ext_grid_type != ExternalFieldType::constant) { // These fields will be added directly to the 
grid, i.e. to fp, and need to match the index type - AllocInitMultiFab(Bfield_fp_external[lev][0], amrex::convert(ba, Bfield_fp[lev][0]->ixType()), - dm, ncomps, ngEB, lev, "Bfield_fp_external[x]", 0.0_rt); - AllocInitMultiFab(Bfield_fp_external[lev][1], amrex::convert(ba, Bfield_fp[lev][1]->ixType()), - dm, ncomps, ngEB, lev, "Bfield_fp_external[y]", 0.0_rt); - AllocInitMultiFab(Bfield_fp_external[lev][2], amrex::convert(ba, Bfield_fp[lev][2]->ixType()), - dm, ncomps, ngEB, lev, "Bfield_fp_external[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp_external, Direction{0}, lev, + amrex::convert(ba, m_fields.get(FieldType::Bfield_fp,Direction{0},lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp_external, Direction{1}, lev, + amrex::convert(ba, m_fields.get(FieldType::Bfield_fp,Direction{1},lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_fp_external, Direction{2}, lev, + amrex::convert(ba, m_fields.get(FieldType::Bfield_fp,Direction{2},lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); } if (mypc->m_B_ext_particle_s == "read_from_file") { // These fields will be added to the fields that the particles see, and need to match the index type - AllocInitMultiFab(B_external_particle_field[lev][0], amrex::convert(ba, Bfield_aux[lev][0]->ixType()), - dm, ncomps, ngEB, lev, "B_external_particle_field[x]", 0.0_rt); - AllocInitMultiFab(B_external_particle_field[lev][1], amrex::convert(ba, Bfield_aux[lev][1]->ixType()), - dm, ncomps, ngEB, lev, "B_external_particle_field[y]", 0.0_rt); - AllocInitMultiFab(B_external_particle_field[lev][2], amrex::convert(ba, Bfield_aux[lev][2]->ixType()), - dm, ncomps, ngEB, lev, "B_external_particle_field[z]", 0.0_rt); + auto *Bfield_aux_levl_0 = m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + auto *Bfield_aux_levl_1 = m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + auto *Bfield_aux_levl_2 = m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); + 
+ // Same as Bfield_fp for reading external field data + m_fields.alloc_init(FieldType::B_external_particle_field, Direction{0}, lev, amrex::convert(ba, Bfield_aux_levl_0->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::B_external_particle_field, Direction{1}, lev, amrex::convert(ba, Bfield_aux_levl_1->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::B_external_particle_field, Direction{2}, lev, amrex::convert(ba, Bfield_aux_levl_2->ixType()), + dm, ncomps, ngEB, 0.0_rt); } if (m_p_ext_field_params->E_ext_grid_type != ExternalFieldType::default_zero && m_p_ext_field_params->E_ext_grid_type != ExternalFieldType::constant) { // These fields will be added directly to the grid, i.e. to fp, and need to match the index type - AllocInitMultiFab(Efield_fp_external[lev][0], amrex::convert(ba, Efield_fp[lev][0]->ixType()), - dm, ncomps, ngEB, lev, "Efield_fp_external[x]", 0.0_rt); - AllocInitMultiFab(Efield_fp_external[lev][1], amrex::convert(ba, Efield_fp[lev][1]->ixType()), - dm, ncomps, ngEB, lev, "Efield_fp_external[y]", 0.0_rt); - AllocInitMultiFab(Efield_fp_external[lev][2], amrex::convert(ba, Efield_fp[lev][2]->ixType()), - dm, ncomps, ngEB, lev, "Efield_fp_external[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp_external, Direction{0}, lev, amrex::convert(ba, m_fields.get(FieldType::Efield_fp, Direction{0}, lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp_external, Direction{1}, lev, amrex::convert(ba, m_fields.get(FieldType::Efield_fp, Direction{1}, lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_fp_external, Direction{2}, lev, amrex::convert(ba, m_fields.get(FieldType::Efield_fp, Direction{2}, lev)->ixType()), + dm, ncomps, ngEB, 0.0_rt); } if (mypc->m_E_ext_particle_s == "read_from_file") { // These fields will be added to the fields that the particles see, and need to match the index type - 
AllocInitMultiFab(E_external_particle_field[lev][0], amrex::convert(ba, Efield_aux[lev][0]->ixType()), - dm, ncomps, ngEB, lev, "E_external_particle_field[x]", 0.0_rt); - AllocInitMultiFab(E_external_particle_field[lev][1], amrex::convert(ba, Efield_aux[lev][1]->ixType()), - dm, ncomps, ngEB, lev, "E_external_particle_field[y]", 0.0_rt); - AllocInitMultiFab(E_external_particle_field[lev][2], amrex::convert(ba, Efield_aux[lev][2]->ixType()), - dm, ncomps, ngEB, lev, "E_external_particle_field[z]", 0.0_rt); + auto *Efield_aux_levl_0 = m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + auto *Efield_aux_levl_1 = m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + auto *Efield_aux_levl_2 = m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + + // Same as Efield_fp for reading external field data + m_fields.alloc_init(FieldType::E_external_particle_field, Direction{0}, lev, amrex::convert(ba, Efield_aux_levl_0->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::E_external_particle_field, Direction{1}, lev, amrex::convert(ba, Efield_aux_levl_1->ixType()), + dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::E_external_particle_field, Direction{2}, lev, amrex::convert(ba, Efield_aux_levl_2->ixType()), + dm, ncomps, ngEB, 0.0_rt); } // @@ -2670,49 +2557,57 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm const std::array cdx = CellSize(lev-1); // Create the MultiFabs for B - AllocInitMultiFab(Bfield_cp[lev][0], amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_cp[x]", 0.0_rt); - AllocInitMultiFab(Bfield_cp[lev][1], amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_cp[y]", 0.0_rt); - AllocInitMultiFab(Bfield_cp[lev][2], amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cp, Direction{0}, lev, amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + 
m_fields.alloc_init(FieldType::Bfield_cp, Direction{1}, lev, amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cp, Direction{2}, lev, amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); // Create the MultiFabs for E - AllocInitMultiFab(Efield_cp[lev][0], amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_cp[x]", 0.0_rt); - AllocInitMultiFab(Efield_cp[lev][1], amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_cp[y]", 0.0_rt); - AllocInitMultiFab(Efield_cp[lev][2], amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cp, Direction{0}, lev, amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cp, Direction{1}, lev, amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cp, Direction{2}, lev, amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); if (fft_do_time_averaging) { - AllocInitMultiFab(Bfield_avg_cp[lev][0], amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_cp[x]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_cp[lev][1], amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_cp[y]", 0.0_rt); - AllocInitMultiFab(Bfield_avg_cp[lev][2], amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, lev, "Bfield_avg_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_cp, Direction{0}, lev, amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_cp, Direction{1}, lev, amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_avg_cp, Direction{2}, lev, amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); - AllocInitMultiFab(Efield_avg_cp[lev][0], amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_cp[x]", 0.0_rt); - 
AllocInitMultiFab(Efield_avg_cp[lev][1], amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_cp[y]", 0.0_rt); - AllocInitMultiFab(Efield_avg_cp[lev][2], amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, lev, "Efield_avg_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_cp, Direction{0}, lev, amrex::convert(cba, Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_cp, Direction{1}, lev, amrex::convert(cba, Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_avg_cp, Direction{2}, lev, amrex::convert(cba, Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } // Create the MultiFabs for the current - AllocInitMultiFab(current_cp[lev][0], amrex::convert(cba, jx_nodal_flag), dm, ncomps, ngJ, lev, "current_cp[x]", 0.0_rt); - AllocInitMultiFab(current_cp[lev][1], amrex::convert(cba, jy_nodal_flag), dm, ncomps, ngJ, lev, "current_cp[y]", 0.0_rt); - AllocInitMultiFab(current_cp[lev][2], amrex::convert(cba, jz_nodal_flag), dm, ncomps, ngJ, lev, "current_cp[z]", 0.0_rt); + m_fields.alloc_init(FieldType::current_cp, Direction{0}, lev, amrex::convert(cba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_cp, Direction{1}, lev, amrex::convert(cba, jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_cp, Direction{2}, lev, amrex::convert(cba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); if (rho_ncomps > 0) { - AllocInitMultiFab(rho_cp[lev], amrex::convert(cba, rho_nodal_flag), dm, rho_ncomps, ngRho, lev, "rho_cp", 0.0_rt); + m_fields.alloc_init(FieldType::rho_cp, + lev, amrex::convert(cba, rho_nodal_flag), dm, + rho_ncomps, ngRho, 0.0_rt); } if (do_dive_cleaning) { - AllocInitMultiFab(F_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngF, lev, "F_cp", 0.0_rt); + m_fields.alloc_init(FieldType::F_cp, + lev, amrex::convert(cba, IntVect::TheUnitVector()), dm, + ncomps, ngF, 0.0_rt); } if (do_divb_cleaning) { if 
(grid_type == GridType::Collocated) { - AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheUnitVector()), dm, ncomps, ngG, lev, "G_cp", 0.0_rt); + m_fields.alloc_init(FieldType::G_cp, + lev, amrex::convert(cba, IntVect::TheUnitVector()), dm, + ncomps, ngG, 0.0_rt); } else // grid_type=staggered or grid_type=hybrid { - AllocInitMultiFab(G_cp[lev], amrex::convert(cba, IntVect::TheZeroVector()), dm, ncomps, ngG, lev, "G_cp", 0.0_rt); + m_fields.alloc_init(FieldType::G_cp, + lev, amrex::convert(cba, IntVect::TheZeroVector()), dm, + ncomps, ngG, 0.0_rt); } } @@ -2770,22 +2665,25 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm if (n_field_gather_buffer > 0 || mypc->nSpeciesGatherFromMainGrid() > 0) { if (aux_is_nodal) { BoxArray const& cnba = amrex::convert(cba,IntVect::TheNodeVector()); - AllocInitMultiFab(Bfield_cax[lev][0], cnba,dm,ncomps,ngEB,lev, "Bfield_cax[x]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][1], cnba,dm,ncomps,ngEB,lev, "Bfield_cax[y]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][2], cnba,dm,ncomps,ngEB,lev, "Bfield_cax[z]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][0], cnba,dm,ncomps,ngEB,lev, "Efield_cax[x]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][1], cnba,dm,ncomps,ngEB,lev, "Efield_cax[y]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][2], cnba,dm,ncomps,ngEB,lev, "Efield_cax[z]", 0.0_rt); + // Create the MultiFabs for B + m_fields.alloc_init(FieldType::Bfield_cax, Direction{0}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{1}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{2}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + + // Create the MultiFabs for E + m_fields.alloc_init(FieldType::Efield_cax, Direction{0}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{1}, lev, cnba, dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{2}, lev, 
cnba, dm, ncomps, ngEB, 0.0_rt); } else { // Create the MultiFabs for B - AllocInitMultiFab(Bfield_cax[lev][0], amrex::convert(cba,Bx_nodal_flag),dm,ncomps,ngEB,lev, "Bfield_cax[x]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][1], amrex::convert(cba,By_nodal_flag),dm,ncomps,ngEB,lev, "Bfield_cax[y]", 0.0_rt); - AllocInitMultiFab(Bfield_cax[lev][2], amrex::convert(cba,Bz_nodal_flag),dm,ncomps,ngEB,lev, "Bfield_cax[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{0}, lev, amrex::convert(cba, Bx_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{1}, lev, amrex::convert(cba, By_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Bfield_cax, Direction{2}, lev, amrex::convert(cba, Bz_nodal_flag), dm, ncomps, ngEB, 0.0_rt); // Create the MultiFabs for E - AllocInitMultiFab(Efield_cax[lev][0], amrex::convert(cba,Ex_nodal_flag),dm,ncomps,ngEB,lev, "Efield_cax[x]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][1], amrex::convert(cba,Ey_nodal_flag),dm,ncomps,ngEB,lev, "Efield_cax[y]", 0.0_rt); - AllocInitMultiFab(Efield_cax[lev][2], amrex::convert(cba,Ez_nodal_flag),dm,ncomps,ngEB,lev, "Efield_cax[z]", 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{0}, lev, amrex::convert(cba,Ex_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{1}, lev, amrex::convert(cba,Ey_nodal_flag), dm, ncomps, ngEB, 0.0_rt); + m_fields.alloc_init(FieldType::Efield_cax, Direction{2}, lev, amrex::convert(cba,Ez_nodal_flag), dm, ncomps, ngEB, 0.0_rt); } AllocInitMultiFab(gather_buffer_masks[lev], ba, dm, ncomps, amrex::IntVect(1), lev, "gather_buffer_masks"); @@ -2794,11 +2692,11 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } if (n_current_deposition_buffer > 0) { - AllocInitMultiFab(current_buf[lev][0], amrex::convert(cba,jx_nodal_flag),dm,ncomps,ngJ,lev, "current_buf[x]"); - AllocInitMultiFab(current_buf[lev][1], 
amrex::convert(cba,jy_nodal_flag),dm,ncomps,ngJ,lev, "current_buf[y]"); - AllocInitMultiFab(current_buf[lev][2], amrex::convert(cba,jz_nodal_flag),dm,ncomps,ngJ,lev, "current_buf[z]"); - if (rho_cp[lev]) { - AllocInitMultiFab(charge_buf[lev], amrex::convert(cba,rho_nodal_flag),dm,2*ncomps,ngRho,lev, "charge_buf"); + m_fields.alloc_init(FieldType::current_buf, Direction{0}, lev, amrex::convert(cba,jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_buf, Direction{1}, lev, amrex::convert(cba,jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + m_fields.alloc_init(FieldType::current_buf, Direction{2}, lev, amrex::convert(cba,jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); + if (m_fields.has(FieldType::rho_cp, lev)) { + m_fields.alloc_init(FieldType::rho_buf, lev, amrex::convert(cba,rho_nodal_flag), dm, 2*ncomps, ngRho, 0.0_rt); } AllocInitMultiFab(current_buffer_masks[lev], ba, dm, ncomps, amrex::IntVect(1), lev, "current_buffer_masks"); // Current buffer masks have 1 ghost cell, because of the fact @@ -3001,7 +2899,7 @@ WarpX::RefRatio (int lev) void WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp, - const std::array& B, + ablastr::fields::VectorField const& B, const std::array& dx) { ComputeDivB(divB, dcomp, B, dx, IntVect::TheZeroVector()); @@ -3009,7 +2907,7 @@ WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp, void WarpX::ComputeDivB (amrex::MultiFab& divB, int const dcomp, - const std::array& B, + ablastr::fields::VectorField const& B, const std::array& dx, IntVect const ngrow) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(grid_type != GridType::Collocated, @@ -3049,13 +2947,15 @@ WarpX::ComputeDivE(amrex::MultiFab& divE, const int lev) { if ( WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD ) { #ifdef WARPX_USE_FFT - spectral_solver_fp[lev]->ComputeSpectralDivE( lev, Efield_aux[lev], divE ); + const ablastr::fields::VectorField Efield_aux_lev = m_fields.get_alldirs(FieldType::Efield_aux, lev); + 
spectral_solver_fp[lev]->ComputeSpectralDivE(lev, Efield_aux_lev, divE); #else WARPX_ABORT_WITH_MESSAGE( "ComputeDivE: PSATD requested but not compiled"); #endif } else { - m_fdtd_solver_fp[lev]->ComputeDivE( Efield_aux[lev], divE ); + const ablastr::fields::VectorField Efield_aux_lev = m_fields.get_alldirs(FieldType::Efield_aux, lev); + m_fdtd_solver_fp[lev]->ComputeDivE(Efield_aux_lev, divE); } } @@ -3330,10 +3230,12 @@ WarpX::GatherBufferMasks (int lev) void WarpX::StoreCurrent (int lev) { + using ablastr::fields::Direction; for (int idim = 0; idim < 3; ++idim) { - if (current_store[lev][idim]) { - MultiFab::Copy(*current_store[lev][idim], *current_fp[lev][idim], - 0, 0, 1, current_store[lev][idim]->nGrowVect()); + if (m_fields.has(FieldType::current_store, Direction{idim},lev)) { + MultiFab::Copy(*m_fields.get(FieldType::current_store, Direction{idim}, lev), + *m_fields.get(FieldType::current_fp, Direction{idim}, lev), + 0, 0, 1, m_fields.get(FieldType::current_store, Direction{idim}, lev)->nGrowVect()); } } } @@ -3341,9 +3243,15 @@ WarpX::StoreCurrent (int lev) void WarpX::RestoreCurrent (int lev) { + using ablastr::fields::Direction; + using warpx::fields::FieldType; + for (int idim = 0; idim < 3; ++idim) { - if (current_store[lev][idim]) { - std::swap(current_fp[lev][idim], current_store[lev][idim]); + if (m_fields.has(FieldType::current_store, Direction{idim}, lev)) { + std::swap( + *m_fields.get(FieldType::current_fp, Direction{idim}, lev), + *m_fields.get(FieldType::current_store, Direction{idim}, lev) + ); } } } @@ -3434,155 +3342,6 @@ WarpX::AliasInitMultiFab ( multifab_map[name_with_suffix] = mf.get(); } -void -WarpX::AllocInitMultiFabFromModel ( - std::unique_ptr& mf, - amrex::MultiFab& mf_model, - const int level, - const std::string& name, - std::optional initial_value) -{ - const auto name_with_suffix = TagWithLevelSuffix(name, level); - const auto tag = amrex::MFInfo().SetTag(name_with_suffix); - mf = std::make_unique(mf_model.boxArray(), 
mf_model.DistributionMap(), - mf_model.nComp(), mf_model.nGrowVect(), tag); - if (initial_value) { - mf->setVal(*initial_value); - } - multifab_map[name_with_suffix] = mf.get(); -} - -amrex::MultiFab* -WarpX::getFieldPointerUnchecked (const FieldType field_type, const int lev, const int direction) const -{ - // This function does *not* check if the returned field pointer is != nullptr - - amrex::MultiFab* field_pointer = nullptr; - - switch(field_type) - { - case FieldType::Efield_aux : - field_pointer = Efield_aux[lev][direction].get(); - break; - case FieldType::Bfield_aux : - field_pointer = Bfield_aux[lev][direction].get(); - break; - case FieldType::Efield_fp : - field_pointer = Efield_fp[lev][direction].get(); - break; - case FieldType::Bfield_fp : - field_pointer = Bfield_fp[lev][direction].get(); - break; - case FieldType::Efield_fp_external : - field_pointer = Efield_fp_external[lev][direction].get(); - break; - case FieldType::Bfield_fp_external : - field_pointer = Bfield_fp_external[lev][direction].get(); - break; - case FieldType::current_fp : - field_pointer = current_fp[lev][direction].get(); - break; - case FieldType::current_fp_nodal : - field_pointer = current_fp_nodal[lev][direction].get(); - break; - case FieldType::rho_fp : - field_pointer = rho_fp[lev].get(); - break; - case FieldType::F_fp : - field_pointer = F_fp[lev].get(); - break; - case FieldType::G_fp : - field_pointer = G_fp[lev].get(); - break; - case FieldType::phi_fp : - field_pointer = phi_fp[lev].get(); - break; - case FieldType::vector_potential_fp : - field_pointer = vector_potential_fp_nodal[lev][direction].get(); - break; - case FieldType::Efield_cp : - field_pointer = Efield_cp[lev][direction].get(); - break; - case FieldType::Bfield_cp : - field_pointer = Bfield_cp[lev][direction].get(); - break; - case FieldType::current_cp : - field_pointer = current_cp[lev][direction].get(); - break; - case FieldType::rho_cp : - field_pointer = rho_cp[lev].get(); - break; - case 
FieldType::F_cp : - field_pointer = F_cp[lev].get(); - break; - case FieldType::G_cp : - field_pointer = G_cp[lev].get(); - break; - case FieldType::edge_lengths : - field_pointer = m_edge_lengths[lev][direction].get(); - break; - case FieldType::face_areas : - field_pointer = m_face_areas[lev][direction].get(); - break; - case FieldType::Efield_avg_fp : - field_pointer = Efield_avg_fp[lev][direction].get(); - break; - case FieldType::Bfield_avg_fp : - field_pointer = Bfield_avg_fp[lev][direction].get(); - break; - case FieldType::Efield_avg_cp : - field_pointer = Efield_avg_cp[lev][direction].get(); - break; - case FieldType::Bfield_avg_cp : - field_pointer = Bfield_avg_cp[lev][direction].get(); - break; - default: - WARPX_ABORT_WITH_MESSAGE("Invalid field type"); - break; - } - - return field_pointer; -} - -bool -WarpX::isFieldInitialized (const FieldType field_type, const int lev, const int direction) const -{ - const bool is_field_init = (getFieldPointerUnchecked(field_type, lev, direction) != nullptr); - return is_field_init; -} - -amrex::MultiFab* -WarpX::getFieldPointer (const FieldType field_type, const int lev, const int direction) const -{ - auto* const field_pointer = getFieldPointerUnchecked(field_type, lev, direction); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - field_pointer != nullptr, "Requested field is not initialized!"); - return field_pointer; -} - -std::array -WarpX::getFieldPointerArray (const FieldType field_type, const int lev) const -{ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - (field_type == FieldType::Efield_aux) || (field_type == FieldType::Bfield_aux) || - (field_type == FieldType::Efield_fp) || (field_type == FieldType::Bfield_fp) || - (field_type == FieldType::Efield_fp_external) || (field_type == FieldType::Bfield_fp_external) || - (field_type == FieldType::current_fp) || (field_type == FieldType::current_fp_nodal) || - (field_type == FieldType::Efield_cp) || (field_type == FieldType::Bfield_cp) || - (field_type == FieldType::current_cp), 
"Requested field type is not a vector."); - - return std::array{ - getFieldPointer(field_type, lev, 0), - getFieldPointer(field_type, lev, 1), - getFieldPointer(field_type, lev, 2)}; -} - -const amrex::MultiFab& -WarpX::getField(FieldType field_type, const int lev, const int direction) const -{ - return *getFieldPointer(field_type, lev, direction); -} - amrex::DistributionMapping WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba) { @@ -3609,55 +3368,22 @@ WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba) } } -const amrex::Vector,3>>& -WarpX::getMultiLevelField(warpx::fields::FieldType field_type) const -{ - switch(field_type) - { - case FieldType::Efield_aux : - return Efield_aux; - case FieldType::Bfield_aux : - return Bfield_aux; - case FieldType::Efield_fp : - return Efield_fp; - case FieldType::Efield_fp_external : - return Efield_fp_external; - case FieldType::Bfield_fp : - return Bfield_fp; - case FieldType::Bfield_fp_external : - return Bfield_fp_external; - case FieldType::current_fp : - return current_fp; - case FieldType::current_fp_nodal : - return current_fp_nodal; - case FieldType::Efield_cp : - return Efield_cp; - case FieldType::Bfield_cp : - return Bfield_cp; - case FieldType::current_cp : - return current_cp; - default: - WARPX_ABORT_WITH_MESSAGE("Invalid field type"); - return Efield_fp; - } -} - const amrex::iMultiFab* WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const { switch(field_type) { case FieldType::Efield_fp : - SetDotMask( Efield_dotMask[lev][dir], field_type, lev, dir ); + SetDotMask( Efield_dotMask[lev][dir], "Efield_fp", lev, dir ); return Efield_dotMask[lev][dir].get(); case FieldType::Bfield_fp : - SetDotMask( Bfield_dotMask[lev][dir], field_type, lev, dir ); + SetDotMask( Bfield_dotMask[lev][dir], "Bfield_fp", lev, dir ); return Bfield_dotMask[lev][dir].get(); case FieldType::vector_potential_fp : - SetDotMask( Afield_dotMask[lev][dir], field_type, lev, dir ); + SetDotMask( 
Afield_dotMask[lev][dir], "vector_potential_fp", lev, dir ); return Afield_dotMask[lev][dir].get(); case FieldType::phi_fp : - SetDotMask( phi_dotMask[lev], field_type, lev, 0 ); + SetDotMask( phi_dotMask[lev], "phi_fp", lev, 0 ); return phi_dotMask[lev].get(); default: WARPX_ABORT_WITH_MESSAGE("Invalid field type for dotMask"); @@ -3666,15 +3392,15 @@ WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const } void WarpX::SetDotMask( std::unique_ptr& field_dotMask, - FieldType field_type, int lev, int dir ) const + std::string const & field_name, int lev, int dir ) const { // Define the dot mask for this field_type needed to properly compute dotProduct() // for field values that have shared locations on different MPI ranks if (field_dotMask != nullptr) { return; } - const amrex::MultiFab* this_field = getFieldPointer(field_type,lev,dir); - const amrex::BoxArray& this_ba = this_field->boxArray(); - const amrex::MultiFab tmp( this_ba, this_field->DistributionMap(), + ablastr::fields::ConstVectorField const& this_field = m_fields.get_alldirs(field_name,lev); + const amrex::BoxArray& this_ba = this_field[dir]->boxArray(); + const amrex::MultiFab tmp( this_ba, this_field[dir]->DistributionMap(), 1, 0, amrex::MFInfo().SetAlloc(false) ); const amrex::Periodicity& period = Geom(lev).periodicity(); field_dotMask = tmp.OwnerMask(period); diff --git a/Source/ablastr/fields/CMakeLists.txt b/Source/ablastr/fields/CMakeLists.txt index 56acc678217..011d765a6bb 100644 --- a/Source/ablastr/fields/CMakeLists.txt +++ b/Source/ablastr/fields/CMakeLists.txt @@ -1,5 +1,11 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) + + target_sources(ablastr_${SD} + PRIVATE + MultiFabRegister.cpp + ) + if(ABLASTR_FFT AND D EQUAL 3) target_sources(ablastr_${SD} PRIVATE diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H index 97ffdb5ac36..28885e167a3 100644 --- 
a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H @@ -7,6 +7,8 @@ #ifndef ABLASTR_IGF_SOLVER_H #define ABLASTR_IGF_SOLVER_H +#include + #include #include #include @@ -47,6 +49,35 @@ namespace ablastr::fields return G; } + /** @brief add + * + * @param[in] x x-coordinate of given location + * @param[in] y y-coordinate of given location + * @param[in] z z-coordinate of given location + * + * @return the sum of integrated Green function G + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + amrex::Real + SumOfIntegratedPotential (amrex::Real x, amrex::Real y, amrex::Real z, amrex::Real dx, amrex::Real dy, amrex::Real dz) + { + using namespace amrex::literals; + + + amrex::Real const G_value = 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * ( + IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) + - IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) + - IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) + + IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) + - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) + + IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) + + IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) + - IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) + ); + + return G_value; + } + /** @brief Compute the electrostatic potential using the Integrated Green Function method * as in http://dx.doi.org/10.1103/PhysRevSTAB.9.044204 * diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index 0767ecfb2f3..40b36740ae5 100644 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -27,7 +27,9 @@ #include #include -#include +#if defined(ABLASTR_USE_FFT) && defined(ABLASTR_USE_HEFFTE) +#include +#endif namespace ablastr::fields { @@ -36,10 +38,16 @@ void 
computePhiIGF ( amrex::MultiFab const & rho, amrex::MultiFab & phi, std::array const & cell_size, - amrex::BoxArray const & ba ) + amrex::BoxArray const & ba) { using namespace amrex::literals; + BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: FFTs", timer_ffts); + BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: FFT plans", timer_plans); + BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: parallel copies", timer_pcopies); + + BL_PROFILE("ablastr::fields::computePhiIGF"); + // Define box that encompasses the full domain amrex::Box domain = ba.minimalBox(); domain.surroundingNodes(); // get nodal points, since `phi` and `rho` are nodal @@ -50,41 +58,87 @@ computePhiIGF ( amrex::MultiFab const & rho, int const nz = domain.length(2); // Allocate 2x wider arrays for the convolution of rho with the Green function - // This also defines the box arrays for the global FFT: contains only one box; amrex::Box const realspace_box = amrex::Box( {domain.smallEnd(0), domain.smallEnd(1), domain.smallEnd(2)}, {2*nx-1+domain.smallEnd(0), 2*ny-1+domain.smallEnd(1), 2*nz-1+domain.smallEnd(2)}, amrex::IntVect::TheNodeVector() ); + +#if !defined(ABLASTR_USE_HEFFTE) + // Without distributed FFTs (i.e. without heFFTe): + // allocate the 2x wider array on a single box amrex::BoxArray const realspace_ba = amrex::BoxArray( realspace_box ); - amrex::Box const spectralspace_box = amrex::Box( - {0,0,0}, - {nx, 2*ny-1, 2*nz-1}, - amrex::IntVect::TheNodeVector() ); - amrex::BoxArray const spectralspace_ba = amrex::BoxArray( spectralspace_box ); // Define a distribution mapping for the global FFT, with only one box amrex::DistributionMapping dm_global_fft; dm_global_fft.define( realspace_ba ); +#elif defined(ABLASTR_USE_HEFFTE) + // With distributed FFTs (i.e. 
with heFFTe): + // Define a new distribution mapping which is decomposed purely along z + // and has one box per MPI rank + int const nprocs = amrex::ParallelDescriptor::NProcs(); + amrex::BoxArray realspace_ba; + amrex::DistributionMapping dm_global_fft; + { + int realspace_nx = realspace_box.length(0); + int realspace_ny = realspace_box.length(1); + int realspace_nz = realspace_box.length(2); + int minsize_z = realspace_nz / nprocs; + int nleft_z = realspace_nz - minsize_z*nprocs; + + AMREX_ALWAYS_ASSERT(realspace_nz >= nprocs); + // We are going to split realspace_box in such a way that the first + // nleft boxes has minsize_z+1 nodes and the others minsize + // nodes. We do it this way instead of BoxArray::maxSize to make + // sure there are exactly nprocs boxes and there are no overlaps. + amrex::BoxList bl(amrex::IndexType::TheNodeType()); + for (int iproc = 0; iproc < nprocs; ++iproc) { + int zlo, zhi; + if (iproc < nleft_z) { + zlo = iproc*(minsize_z+1); + zhi = zlo + minsize_z; + + } else { + zlo = iproc*minsize_z + nleft_z; + zhi = zlo + minsize_z - 1; + + } + amrex::Box tbx(amrex::IntVect(0,0,zlo),amrex::IntVect(realspace_nx-1,realspace_ny-1,zhi),amrex::IntVect(1)); + + tbx.shift(realspace_box.smallEnd()); + bl.push_back(tbx); + } + realspace_ba.define(std::move(bl)); + amrex::Vector pmap(nprocs); + std::iota(pmap.begin(), pmap.end(), 0); + dm_global_fft.define(std::move(pmap)); + } +#endif + // Allocate required arrays amrex::MultiFab tmp_rho = amrex::MultiFab(realspace_ba, dm_global_fft, 1, 0); tmp_rho.setVal(0); amrex::MultiFab tmp_G = amrex::MultiFab(realspace_ba, dm_global_fft, 1, 0); tmp_G.setVal(0); - // Allocate corresponding arrays in Fourier space - using SpectralField = amrex::FabArray< amrex::BaseFab< amrex::GpuComplex< amrex::Real > > >; - SpectralField tmp_rho_fft = SpectralField( spectralspace_ba, dm_global_fft, 1, 0 ); - SpectralField tmp_G_fft = SpectralField( spectralspace_ba, dm_global_fft, 1, 0 ); - // Copy from rho to tmp_rho + 
BL_PROFILE_VAR_START(timer_pcopies); + // Copy from rho including its ghost cells to tmp_rho tmp_rho.ParallelCopy( rho, 0, 0, 1, amrex::IntVect::TheZeroVector(), amrex::IntVect::TheZeroVector() ); + BL_PROFILE_VAR_STOP(timer_pcopies); + +#if !defined(ABLASTR_USE_HEFFTE) + // Without distributed FFTs (i.e. without heFFTe): + // We loop over the original box (not the 2x wider one), and the other quadrants by periodicity + amrex::BoxArray const& igf_compute_box = amrex::BoxArray( domain ); +#else + // With distributed FFTs (i.e. with heFFTe): + // We loop over the full 2x wider box, since 1 MPI rank does not necessarily own the data for the other quadrants + amrex::BoxArray const& igf_compute_box = tmp_G.boxArray(); +#endif // Compute the integrated Green function - { - BL_PROFILE("Initialize Green function"); - amrex::BoxArray const domain_ba = amrex::BoxArray( domain ); #ifdef AMREX_USE_OMP #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) #endif - for (amrex::MFIter mfi(domain_ba, dm_global_fft,amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + for (amrex::MFIter mfi(igf_compute_box, dm_global_fft, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { amrex::Box const bx = mfi.tilebox(); @@ -95,6 +149,7 @@ computePhiIGF ( amrex::MultiFab const & rho, amrex::Real const dx = cell_size[0]; amrex::Real const dy = cell_size[1]; amrex::Real const dz = cell_size[2]; + amrex::Array4 const tmp_G_arr = tmp_G.array(mfi); amrex::ParallelFor( bx, [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept @@ -106,17 +161,9 @@ computePhiIGF ( amrex::MultiFab const & rho, amrex::Real const y = j0*dy; amrex::Real const z = k0*dz; - amrex::Real const G_value = 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * ( - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) 
- + IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) - + IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) - + IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) - ); - +#if !defined(ABLASTR_USE_HEFFTE) + // Without distributed FFTs (i.e. without heFFTe): + amrex::Real const G_value = SumOfIntegratedPotential(x , y , z , dx, dy, dz); tmp_G_arr(i,j,k) = G_value; // Fill the rest of the array by periodicity if (i0>0) {tmp_G_arr(hi[0]+1-i0, j , k ) = G_value;} @@ -126,71 +173,120 @@ computePhiIGF ( amrex::MultiFab const & rho, if ((j0>0)&&(k0>0)) {tmp_G_arr(i , hi[1]+1-j0, hi[2]+1-k0) = G_value;} if ((i0>0)&&(k0>0)) {tmp_G_arr(hi[0]+1-i0, j , hi[2]+1-k0) = G_value;} if ((i0>0)&&(j0>0)&&(k0>0)) {tmp_G_arr(hi[0]+1-i0, hi[1]+1-j0, hi[2]+1-k0) = G_value;} - } - ); - } +#else + // With distributed FFTs (i.e. with heFFTe): + amrex::Real x_hi = dx*(hi[0]+2); + amrex::Real y_hi = dy*(hi[1]+2); + amrex::Real z_hi = dz*(hi[2]+2); + if ((i0< nx)&&(j0< ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y , z , dx, dy, dz); } + if ((i0< nx)&&(j0> ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y_hi-y, z , dx, dy, dz); } + if ((i0< nx)&&(j0< ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y , z_hi-z, dx, dy, dz); } + if ((i0> nx)&&(j0> ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y_hi-y, z , dx, dy, dz); } + if ((i0< nx)&&(j0> ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y_hi-y, z_hi-z, dx, dy, dz); } + if ((i0> nx)&&(j0< ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y , z_hi-z, dx, dy, dz); } + if ((i0> nx)&&(j0> ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y_hi-y, z_hi-z, dx, dy, dz); } + if ((i0> nx)&&(j0< ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y , z , dx, dy, dz); } +#endif + } + ); } - // Perform forward FFTs - 
auto forward_plan_rho = ablastr::math::anyfft::FFTplans(spectralspace_ba, dm_global_fft); - auto forward_plan_G = ablastr::math::anyfft::FFTplans(spectralspace_ba, dm_global_fft); - // Loop over boxes perform FFTs - for ( amrex::MFIter mfi(realspace_ba, dm_global_fft); mfi.isValid(); ++mfi ){ - - // Note: the size of the real-space box and spectral-space box - // differ when using real-to-complex FFT. When initializing - // the FFT plan, the valid dimensions are those of the real-space box. - const amrex::IntVect fft_size = realspace_ba[mfi].length(); - - // FFT of rho - forward_plan_rho[mfi] = ablastr::math::anyfft::CreatePlan( - fft_size, tmp_rho[mfi].dataPtr(), - reinterpret_cast(tmp_rho_fft[mfi].dataPtr()), - ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM); - ablastr::math::anyfft::Execute(forward_plan_rho[mfi]); + // Prepare to perform global FFT + // Since there is 1 MPI rank per box, here each MPI rank obtains its local box and the associated boxid + const int local_boxid = amrex::ParallelDescriptor::MyProc(); // because of how we made the DistributionMapping + if (local_boxid < realspace_ba.size()) { + // When not using heFFTe, there is only one box (the global box) + // It is taken care of my MPI rank 0 ; other ranks have no work (hence the if condition) - // FFT of G - forward_plan_G[mfi] = ablastr::math::anyfft::CreatePlan( - fft_size, tmp_G[mfi].dataPtr(), - reinterpret_cast(tmp_G_fft[mfi].dataPtr()), - ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM); - ablastr::math::anyfft::Execute(forward_plan_G[mfi]); + const amrex::Box local_nodal_box = realspace_ba[local_boxid]; + amrex::Box local_box(local_nodal_box.smallEnd(), local_nodal_box.bigEnd()); + local_box.shift(-realspace_box.smallEnd()); // This simplifies the setup because the global lo is zero now + // Since we the domain decompostion is in the z-direction, setting up c_local_box is simple. 
+ amrex::Box c_local_box = local_box; + c_local_box.setBig(0, local_box.length(0)/2+1); - } + // Allocate array in spectral space + using SpectralField = amrex::BaseFab< amrex::GpuComplex< amrex::Real > > ; + SpectralField tmp_rho_fft(c_local_box, 1, amrex::The_Device_Arena()); + SpectralField tmp_G_fft(c_local_box, 1, amrex::The_Device_Arena()); + tmp_rho_fft.shift(realspace_box.smallEnd()); + tmp_G_fft.shift(realspace_box.smallEnd()); - // Multiply tmp_G_fft and tmp_rho_fft in spectral space - // Store the result in-place in Gtmp_G_fft, to save memory - amrex::Multiply( tmp_G_fft, tmp_rho_fft, 0, 0, 1, 0); + // Create FFT plans + BL_PROFILE_VAR_START(timer_plans); +#if !defined(ABLASTR_USE_HEFFTE) + const amrex::IntVect fft_size = realspace_ba[local_boxid].length(); + ablastr::math::anyfft::FFTplan forward_plan_rho = ablastr::math::anyfft::CreatePlan( + fft_size, tmp_rho[local_boxid].dataPtr(), + reinterpret_cast(tmp_rho_fft.dataPtr()), + ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM); + ablastr::math::anyfft::FFTplan forward_plan_G = ablastr::math::anyfft::CreatePlan( + fft_size, tmp_G[local_boxid].dataPtr(), + reinterpret_cast(tmp_G_fft.dataPtr()), + ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM); + ablastr::math::anyfft::FFTplan backward_plan = ablastr::math::anyfft::CreatePlan( + fft_size, tmp_G[local_boxid].dataPtr(), + reinterpret_cast( tmp_G_fft.dataPtr()), + ablastr::math::anyfft::direction::C2R, AMREX_SPACEDIM); +#elif defined(ABLASTR_USE_HEFFTE) +#if defined(AMREX_USE_CUDA) + heffte::fft3d_r2c fft +#elif defined(AMREX_USE_HIP) + heffte::fft3d_r2c fft +#else + heffte::fft3d_r2c fft +#endif + ({{local_box.smallEnd(0), local_box.smallEnd(1), local_box.smallEnd(2)}, + {local_box.bigEnd(0), local_box.bigEnd(1), local_box.bigEnd(2)}}, + {{c_local_box.smallEnd(0), c_local_box.smallEnd(1), c_local_box.smallEnd(2)}, + {c_local_box.bigEnd(0), c_local_box.bigEnd(1), c_local_box.bigEnd(2)}}, + 0, amrex::ParallelDescriptor::Communicator()); + using 
heffte_complex = typename heffte::fft_output::type; + heffte_complex* rho_fft_data = (heffte_complex*) tmp_rho_fft.dataPtr(); + heffte_complex* G_fft_data = (heffte_complex*) tmp_G_fft.dataPtr(); +#endif + BL_PROFILE_VAR_STOP(timer_plans); - // Perform inverse FFT - auto backward_plan = ablastr::math::anyfft::FFTplans(spectralspace_ba, dm_global_fft); - // Loop over boxes perform FFTs - for ( amrex::MFIter mfi(spectralspace_ba, dm_global_fft); mfi.isValid(); ++mfi ){ + // Perform forward FFTs + BL_PROFILE_VAR_START(timer_ffts); +#if !defined(ABLASTR_USE_HEFFTE) + ablastr::math::anyfft::Execute(forward_plan_rho); + ablastr::math::anyfft::Execute(forward_plan_G); +#elif defined(ABLASTR_USE_HEFFTE) + fft.forward(tmp_rho[local_boxid].dataPtr(), rho_fft_data); + fft.forward(tmp_G[local_boxid].dataPtr(), G_fft_data); +#endif + BL_PROFILE_VAR_STOP(timer_ffts); - // Note: the size of the real-space box and spectral-space box - // differ when using real-to-complex FFT. When initializing - // the FFT plan, the valid dimensions are those of the real-space box. 
- const amrex::IntVect fft_size = realspace_ba[mfi].length(); + // Multiply tmp_G_fft and tmp_rho_fft in spectral space + // Store the result in-place in Gtmp_G_fft, to save memory + tmp_G_fft.template mult(tmp_rho_fft, 0, 0, 1); + amrex::Gpu::streamSynchronize(); - // Inverse FFT: is done in-place, in the array of G - backward_plan[mfi] = ablastr::math::anyfft::CreatePlan( - fft_size, tmp_G[mfi].dataPtr(), - reinterpret_cast( tmp_G_fft[mfi].dataPtr()), - ablastr::math::anyfft::direction::C2R, AMREX_SPACEDIM); - ablastr::math::anyfft::Execute(backward_plan[mfi]); + // Perform backward FFT + BL_PROFILE_VAR_START(timer_ffts); +#if !defined(ABLASTR_USE_HEFFTE) + ablastr::math::anyfft::Execute(backward_plan); +#elif defined(ABLASTR_USE_HEFFTE) + fft.backward(G_fft_data, tmp_G[local_boxid].dataPtr()); +#endif + BL_PROFILE_VAR_STOP(timer_ffts); + +#if !defined(ABLASTR_USE_HEFFTE) + // Loop to destroy FFT plans + ablastr::math::anyfft::DestroyPlan(forward_plan_G); + ablastr::math::anyfft::DestroyPlan(forward_plan_rho); + ablastr::math::anyfft::DestroyPlan(backward_plan); +#endif } - // Normalize, since (FFT + inverse FFT) results in a factor N + + // Normalize, since (FFT + inverse FFT) results in a factor N const amrex::Real normalization = 1._rt / realspace_box.numPts(); tmp_G.mult( normalization ); + BL_PROFILE_VAR_START(timer_pcopies); // Copy from tmp_G to phi - phi.ParallelCopy( tmp_G, 0, 0, 1, amrex::IntVect::TheZeroVector(), phi.nGrowVect() ); - - // Loop to destroy FFT plans - for ( amrex::MFIter mfi(spectralspace_ba, dm_global_fft); mfi.isValid(); ++mfi ){ - ablastr::math::anyfft::DestroyPlan(forward_plan_G[mfi]); - ablastr::math::anyfft::DestroyPlan(forward_plan_rho[mfi]); - ablastr::math::anyfft::DestroyPlan(backward_plan[mfi]); - } + phi.ParallelCopy( tmp_G, 0, 0, 1, amrex::IntVect::TheZeroVector(), phi.nGrowVect()); + BL_PROFILE_VAR_STOP(timer_pcopies); } } // namespace ablastr::fields diff --git a/Source/ablastr/fields/Make.package 
b/Source/ablastr/fields/Make.package index 01392991559..727a17b6de8 100644 --- a/Source/ablastr/fields/Make.package +++ b/Source/ablastr/fields/Make.package @@ -1,4 +1,5 @@ ifeq ($(USE_FFT),TRUE) + CEXE_sources += MultiFabRegister.cpp ifeq ($(DIM),3) CEXE_sources += IntegratedGreenFunctionSolver.cpp endif diff --git a/Source/ablastr/fields/MultiFabRegister.H b/Source/ablastr/fields/MultiFabRegister.H new file mode 100644 index 00000000000..21df20c1678 --- /dev/null +++ b/Source/ablastr/fields/MultiFabRegister.H @@ -0,0 +1,826 @@ +/* Copyright 2024 The ABLASTR Community + * + * This file is part of ABLASTR. + * + * License: BSD-3-Clause-LBNL + * Authors: Axel Huebl + */ +#ifndef ABLASTR_FIELDS_MF_REGISTER_H +#define ABLASTR_FIELDS_MF_REGISTER_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace +{ + // type trait helpers in lieu of an amrex::is_amrex_enum + template > + struct is_castable_to_string : std::false_type {}; + + template + struct is_castable_to_string(std::declval()))>> : std::true_type {}; + + /** helper to either cast a string/char array to string to query an AMREX_ENUM */ + template + std::string getExtractedName (T name) + { + if constexpr(is_castable_to_string()) + { + // already a unique string key + return std::string(name); + } else + { + // user-defined AMREX_ENUM or compile error + return amrex::getEnumNameString(name); + } + } +} + +namespace ablastr::fields +{ + /** Components (base vector directions) of vector/tensor fields. + * + * Because of different staggering, the components of vector/tensor fields are stored + * in separate (i)MultiFab. 
+ * + * @todo: synchronize with AMReX "enum class Direction" + */ + struct Direction + { + int dir = 0; + + bool operator<(const Direction& other) const + { + return other.dir < this->dir; + } + + /* TODO: just temporary int compatibility */ + operator int() const { return dir; } + }; + + /** A scalar field (a MultiFab) + * + * Note: might still have components, e.g., for copies at different times. + */ + using ScalarField = amrex::MultiFab*; + + /** A read-only scalar field (a MultiFab) + * + * Note: might still have components, e.g., for copies at different times. + */ + using ConstScalarField = amrex::MultiFab const *; + + /** A vector field of three MultiFab + */ + //using VectorField = ablastr::utils::ConstMap; + using VectorField = std::array; + + /** A read-only vector field of three MultiFab + */ + //using VectorField = ablastr::utils::ConstMap; + using ConstVectorField = std::array; + + /** A multi-level scalar field + */ + using MultiLevelScalarField = amrex::Vector; + + /** A read-only multi-level scalar field + */ + using ConstMultiLevelScalarField = amrex::Vector; + + /** A multi-level vector field + */ + using MultiLevelVectorField = amrex::Vector; + + /** A read-only multi-level vector field + */ + using ConstMultiLevelVectorField = amrex::Vector; + + /** A class to control the lifetime and properties of a MultiFab (field). + * + * This class is used to own the lifetime of an amrex::MultiFab and to store + * associated information around it regarding unique naming, scalar/vector/tensor + * properties, aliasing, load balancing, etc. 
+ */ + struct MultiFabOwner + { + // TODO: also add iMultiFab via std::variant + + /** owned (i)MultiFab */ + amrex::MultiFab m_mf; + + /** Components (base vector directions) of this MultiFab */ + std::optional m_dir = std::nullopt; + + /** the MR level of this (i)MultiFab */ + int m_level = 0; + + /** remake distribution map on load balance, @see amrex::AmrCore::RemakeLevel */ + bool m_remake = true; + + /** redistribute on @see amrex::AmrCore::RemakeLevel */ + bool m_redistribute_on_remake = true; + + /** if m_mf is a non-owning alias, this string tracks the name of the owner */ + std::string m_owner; + + /** Is this part of a vector/tensor? */ + AMREX_INLINE + bool + is_vector () const { return m_dir.has_value(); } + + /** Is this an alias MultiFab? + * + * If yes, that means we do not own the memory. + */ + AMREX_INLINE + bool + is_alias () const { return !m_owner.empty(); } + }; + + /** This is a register of fields aka amrex::MultiFabs. + * + * This is owned by a simulation instance. All used fields should be registered here. + * Internally, this contains @see MultiFabOwner values. + */ + struct MultiFabRegister + { + // Avoid accidental copies when passing to member functions + MultiFabRegister() = default; + MultiFabRegister(MultiFabRegister const &) = delete; + MultiFabRegister(MultiFabRegister&&) = delete; + MultiFabRegister& operator=(MultiFabRegister const &) = delete; + MultiFabRegister& operator=(MultiFabRegister&&) = delete; + ~MultiFabRegister() = default; + + /** Allocate and optionally initialize a MultiFab (field) + * + * This registers a new MultiFab under a unique name, allocates it and + * optionally assigns it an initial value. 
+ * + * @param name a unique name for this field + * @param level the MR level to represent + * @param ba the list of boxes to cover the field + * @param dm the distribution mapping for load balancing with MPI + * @param ncomp the number of components of the field (all with the same staggering) + * @param ngrow the number of guard (ghost, halo) cells + * @param initial_value the optional initial value + * @param remake follow the default domain decomposition of the simulation + * @param redistribute_on_remake redistribute on @see amrex::AmrCore::RemakeLevel + * @return pointer to newly allocated MultiFab + */ + template + amrex::MultiFab* + alloc_init ( + T name, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ) + { + return internal_alloc_init( + getExtractedName(name), + level, + ba, + dm, + ncomp, + ngrow, + initial_value, + remake, + redistribute_on_remake + ); + } + + /** Allocate and optionally initialize a MultiFab (field) + * + * This registers a new MultiFab under a unique name, allocates it and + * optionally assigns it an initial value. 
+ * + * @param name a unique name for this field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level to represent + * @param ba the list of boxes to cover the field + * @param dm the distribution mapping for load balancing with MPI + * @param ncomp the number of components of the field (all with the same staggering) + * @param ngrow the number of guard (ghost, halo) cells + * @param initial_value the optional initial value + * @param remake follow the default domain decomposition of the simulation + * @param redistribute_on_remake redistribute on @see amrex::AmrCore::RemakeLevel + * @return pointer to newly allocated MultiFab + */ + template + amrex::MultiFab* + alloc_init ( + T name, + Direction dir, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ) + { + return internal_alloc_init( + getExtractedName(name), + dir, + level, + ba, + dm, + ncomp, + ngrow, + initial_value, + remake, + redistribute_on_remake + ); + } + + /** Create an alias of a MultiFab (field) + * + * Registers a new name for an existing MultiFab (field) and optionally assigning + * a value. + * + * @param new_name new name + * @param alias_name owner name to alias + * @param level the MR level to represent + * @param initial_value the optional value to assign + * @return the newly aliased MultiFab + */ + template + amrex::MultiFab* + alias_init ( + N new_name, + A alias_name, + int level, + std::optional initial_value = std::nullopt + ) + { + return internal_alias_init( + getExtractedName(new_name), + getExtractedName(alias_name), + level, + initial_value + ); + } + + /** Create an alias of a MultiFab (field) + * + * Registers a new name for an existing MultiFab (field) and optionally assigning + * a value. 
+ * + * @param new_name new name + * @param alias_name owner name to alias + * @param dir the field component for vector fields ("direction" of the unit vector) both in the alias and aliased + * @param level the MR level to represent + * @param initial_value the optional value to assign + * @return the newly aliased MultiFab + */ + template + amrex::MultiFab* + alias_init ( + N new_name, + A alias_name, + Direction dir, + int level, + std::optional initial_value = std::nullopt + ) + { + return internal_alias_init( + getExtractedName(new_name), + getExtractedName(alias_name), + dir, + level, + initial_value + ); + } + + /** Check if a scalar MultiFab (field) is registered. + * + * @param name the name to check if registered + * @param level the MR level to check + * @return true if contained, otherwise false + */ + template + [[nodiscard]] bool + has ( + T name, + int level + ) const + { + return internal_has( + getExtractedName(name), + level + ); + } + + /** Check if a MultiFab that is part of a vector/tensor field is registered. + * + * @param name the name to check if registered + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level to check + * @return true if contained, otherwise false + */ + template + [[nodiscard]] bool + has ( + T name, + Direction dir, + int level + ) const + { + return internal_has( + getExtractedName(name), + dir, + level + ); + } + + /** Check if a MultiFab vector field is registered. + * + * @param name the name to check if registered + * @param level the MR level to check + * @return true if contained, otherwise false + */ + template + [[nodiscard]] bool + has_vector ( + T name, + int level + ) const + { + return internal_has_vector( + getExtractedName(name), + level + ); + } + + /** Return a scalar MultiFab (field). + * + * This throws a runtime error if the requested field is not present. 
+ * + * @param name the name of the field + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab* + get ( + T name, + int level + ) + { + return internal_get( + getExtractedName(name), + level + ); + } + + /** Return a MultiFab that is part of a vector/tensor field. + * + * This throws a runtime error if the requested field is not present. + * + * @param name the name of the field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab* + get ( + T name, + Direction dir, + int level + ) + { + return internal_get( + getExtractedName(name), + dir, + level + ); + } + + /** Return a scalar MultiFab (field). + * + * This throws a runtime error if the requested field is not present. + * + * @param name the name of the field + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab const * + get ( + T name, + int level + ) const + { + return internal_get( + getExtractedName(name), + level + ); + } + + /** Return a MultiFab that is part of a vector/tensor field. + * + * This throws a runtime error if the requested field is not present. + * + * @param name the name of the field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level + * @return a non-owning pointer to the MultiFab (field) + */ + template + [[nodiscard]] amrex::MultiFab const * + get ( + T name, + Direction dir, + int level + ) const + { + return internal_get( + getExtractedName(name), + dir, + level + ); + } + + /** Return the MultiFab of a scalar field on all MR levels. + * + * This throws a runtime error if the requested field is not present. 
+ * + * @param name the name of the field + * @param finest_level the highest MR level to return + * @return non-owning pointers to the MultiFab (field) on all levels + */ + //@{ + template + [[nodiscard]] MultiLevelScalarField + get_mr_levels ( + T name, + int finest_level + ) + { + return internal_get_mr_levels( + getExtractedName(name), + finest_level + ); + } + template + [[nodiscard]] ConstMultiLevelScalarField + get_mr_levels ( + T name, + int finest_level + ) const + { + return internal_get_mr_levels( + getExtractedName(name), + finest_level + ); + } + //@} + + /** title + * + * Same as get above, but returns all levels for a name. + * + * @param name the name of the field + * @param level the MR level + * @return non-owning pointers to all components of a vector field + */ + //@{ + template + [[nodiscard]] VectorField + get_alldirs ( + T name, + int level + ) + { + return internal_get_alldirs( + getExtractedName(name), + level + ); + } + template + [[nodiscard]] ConstVectorField + get_alldirs ( + T name, + int level + ) const + { + return internal_get_alldirs( + getExtractedName(name), + level + ); + } + //@} + + /** Return a vector field on all MR levels. + * + * Out loop: MR levels. + * Inner loop: directions (components). + * + * @param name the name of the field + * @param finest_level the highest MR level to return + * @return non-owning pointers to all components of a vector field on all MR levels + */ + //@{ + template + [[nodiscard]] MultiLevelVectorField + get_mr_levels_alldirs ( + T name, + int finest_level + ) + { + return internal_get_mr_levels_alldirs( + getExtractedName(name), + finest_level + ); + } + template + [[nodiscard]] ConstMultiLevelVectorField + get_mr_levels_alldirs ( + T name, + int finest_level + ) const + { + return internal_get_mr_levels_alldirs( + getExtractedName(name), + finest_level + ); + } + //@} + + /** List the internal names of all registered fields. 
+ * + * @return all currently allocated and registered fields + */ + [[nodiscard]] std::vector + list () const; + + /** Deallocate and remove a scalar field. + * + * @param name the name of the field + * @param level the MR level + */ + template + void + erase ( + T name, + int level + ) + { + internal_erase(getExtractedName(name), level); + } + + /** Deallocate and remove a vector field component. + * + * @param name the name of the field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level + */ + template + void + erase ( + T name, + Direction dir, + int level + ) + { + internal_erase(getExtractedName(name), dir, level); + } + + /** Erase all MultiFabs on a specific MR level. + * + * Calls @see erase for all MultiFabs on a specific level. + * + * @param level the MR level to erase all MultiFabs from + */ + void + clear_level ( + int level + ); + + /** Remake all (i)MultiFab with a new distribution mapping. + * + * If redistribute is true, we also copy from the old data into the new. 
+ * + * @param level the MR level to erase all MultiFabs from + * @param new_dm new distribution mapping + */ + void + remake_level ( + int other_level, + amrex::DistributionMapping const & new_dm + ); + + /** Create the register name of scalar field and MR level + * + * @param name the name of the field + * @param level the MR level + * @return internal name of the field in the register + */ + [[nodiscard]] std::string + mf_name ( + std::string name, + int level + ) const; + + /** Create the register name of vector field, component direction and MR level + * + * @param name the name of the field + * @param dir the field component for vector fields ("direction" of the unit vector) + * @param level the MR level + * @return internal name of the field in the register + */ + [[nodiscard]] std::string + mf_name ( + std::string name, + Direction dir, + int level + ) const; + + /** Temporary test function for legacy Python bindings */ + [[nodiscard]] bool + internal_has ( + std::string const & internal_name + ); + [[nodiscard]] amrex::MultiFab * + internal_get ( + std::string const & internal_name + ); + + private: + + [[nodiscard]] amrex::MultiFab const * + internal_get ( + std::string const & internal_name + ) const; + + amrex::MultiFab* + internal_alloc_init ( + std::string const & name, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ); + amrex::MultiFab* + internal_alloc_init ( + std::string const & name, + Direction dir, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value = std::nullopt, + bool remake = true, + bool redistribute_on_remake = true + ); + + amrex::MultiFab* + internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + int level, + 
std::optional initial_value = std::nullopt + ); + amrex::MultiFab* + internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + Direction dir, + int level, + std::optional initial_value = std::nullopt + ); + + [[nodiscard]] bool + internal_has ( + std::string const & name, + int level + ) const; + [[nodiscard]] bool + internal_has ( + std::string const & name, + Direction dir, + int level + ) const; + [[nodiscard]] bool + internal_has_vector ( + std::string const & name, + int level + ) const; + + [[nodiscard]] amrex::MultiFab * + internal_get ( + std::string const & name, + int level + ); + [[nodiscard]] amrex::MultiFab const * + internal_get ( + std::string const & name, + int level + ) const; + [[nodiscard]] amrex::MultiFab * + internal_get ( + std::string const & name, + Direction dir, + int level + ); + [[nodiscard]] amrex::MultiFab const * + internal_get ( + std::string const & name, + Direction dir, + int level + ) const; + [[nodiscard]] MultiLevelScalarField + internal_get_mr_levels ( + std::string const & name, + int finest_level + ); + [[nodiscard]] ConstMultiLevelScalarField + internal_get_mr_levels ( + std::string const & name, + int finest_level + ) const; + [[nodiscard]] VectorField + internal_get_alldirs ( + std::string const & name, + int level + ); + [[nodiscard]] ConstVectorField + internal_get_alldirs ( + std::string const & name, + int level + ) const; + [[nodiscard]] MultiLevelVectorField + internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ); + [[nodiscard]] ConstMultiLevelVectorField + internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ) const; + + void + internal_erase ( + std::string const & name, + int level + ); + void + internal_erase ( + std::string const & name, + Direction dir, + int level + ); + + /** data storage: ownership and lifetime control */ + std::map< + std::string, + MultiFabOwner + > m_mf_register; + + /** the three directions of a 
vector field */ + std::vector m_all_dirs = {Direction{0}, Direction{1}, Direction{2}}; + }; + + /** Little temporary helper function to pass temporary MultiFabs as VectorField. + * + * @return pointers to externally managed vector field components (3 MultiFab) + */ + VectorField + a2m ( + std::array< std::unique_ptr, 3 > const & old_vectorfield + ); + +} // namespace ablastr::fields + +#endif // ABLASTR_FIELDS_MF_REGISTER_H diff --git a/Source/ablastr/fields/MultiFabRegister.cpp b/Source/ablastr/fields/MultiFabRegister.cpp new file mode 100644 index 00000000000..2c384a90089 --- /dev/null +++ b/Source/ablastr/fields/MultiFabRegister.cpp @@ -0,0 +1,634 @@ +/* Copyright 2024 The ABLASTR Community + * + * This file is part of ABLASTR. + * + * License: BSD-3-Clause-LBNL + * Authors: Axel Huebl + */ +#include "MultiFabRegister.H" + +#include + +#include +#include +#include +#include +#include +#include +#include + + +namespace ablastr::fields +{ + amrex::MultiFab* + MultiFabRegister::internal_alloc_init ( + std::string const & name, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value, + bool remake, + bool redistribute_on_remake + ) + { + // checks + if (has(name, level)) { + throw std::runtime_error("MultiFabRegister::alloc_init failed because " + name + " already exists."); + } + + // fully qualified name + std::string const internal_name = mf_name(name, level); + + // allocate + const auto tag = amrex::MFInfo().SetTag(internal_name); + auto [it, success] = m_mf_register.emplace( + internal_name, + MultiFabOwner{ + {ba, dm, ncomp, ngrow, tag}, + std::nullopt, // scalar: no direction + level, + remake, + redistribute_on_remake, + "" // we own the memory + } + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alloc_init failed for " + internal_name); + } + + // a shorthand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // 
initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_alloc_init ( + std::string const & name, + Direction dir, + int level, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm, + int ncomp, + amrex::IntVect const & ngrow, + std::optional initial_value, + bool remake, + bool redistribute_on_remake + ) + { + // checks + if (has(name, dir, level)) { + throw std::runtime_error( + "MultiFabRegister::alloc_init failed because " + + mf_name(name, dir, level) + + " already exists." + ); + } + + // fully qualified name + std::string const internal_name = mf_name(name, dir, level); + + // allocate + const auto tag = amrex::MFInfo().SetTag(internal_name); + auto [it, success] = m_mf_register.emplace( + internal_name, + MultiFabOwner{ + {ba, dm, ncomp, ngrow, tag}, + dir, + level, + remake, + redistribute_on_remake, + "" // we own the memory + } + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alloc_init failed for " + internal_name); + } + + // a shorthand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + int level, + std::optional initial_value + ) + { + // checks + if (has(new_name, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(new_name, level) + + " already exists." + ); + } + if (!has(alias_name, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(alias_name, level) + + " does not exist." 
+ ); + } + + // fully qualified name + std::string const internal_new_name = mf_name(new_name, level); + std::string const internal_alias_name = mf_name(alias_name, level); + + MultiFabOwner & alias = m_mf_register[internal_alias_name]; + amrex::MultiFab & mf_alias = alias.m_mf; + + // allocate + auto [it, success] = m_mf_register.emplace( + internal_new_name, + MultiFabOwner{ + {mf_alias, amrex::make_alias, 0, mf_alias.nComp()}, + std::nullopt, // scalar: no direction + level, + alias.m_remake, + alias.m_redistribute_on_remake, + internal_alias_name + } + + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alias_init failed for " + internal_new_name); + } + + // a shorthand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_alias_init ( + std::string const & new_name, + std::string const & alias_name, + Direction dir, + int level, + std::optional initial_value + ) + { + // checks + if (has(new_name, dir, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(new_name, dir, level) + + " already exists." + ); + } + if (!has(alias_name, dir, level)) { + throw std::runtime_error( + "MultiFabRegister::alias_init failed because " + + mf_name(alias_name, dir, level) + + " does not exist." 
+ ); + } + + // fully qualified name + std::string const internal_new_name = mf_name(new_name, dir, level); + std::string const internal_alias_name = mf_name(alias_name, dir, level); + + MultiFabOwner & alias = m_mf_register[internal_alias_name]; + amrex::MultiFab & mf_alias = alias.m_mf; + + // allocate + auto [it, success] = m_mf_register.emplace( + internal_new_name, + MultiFabOwner{ + {mf_alias, amrex::make_alias, 0, mf_alias.nComp()}, + dir, + level, + alias.m_remake, + alias.m_redistribute_on_remake, + internal_alias_name + } + ); + if (!success) { + throw std::runtime_error("MultiFabRegister::alias_init failed for " + internal_new_name); + } + + // a short-hand alias for the code below + amrex::MultiFab & mf = it->second.m_mf; + + // initialize with value + if (initial_value) { + mf.setVal(*initial_value); + } + + return &mf; + } + + void + MultiFabRegister::remake_level ( + int level, + amrex::DistributionMapping const & new_dm + ) + { + // Owning MultiFabs + for (auto & element : m_mf_register ) + { + MultiFabOwner & mf_owner = element.second; + + // keep distribution map as it is? + if (!mf_owner.m_remake) { + continue; + } + + // remake MultiFab with new distribution map + if (mf_owner.m_level == level && !mf_owner.is_alias()) { + const amrex::MultiFab & mf = mf_owner.m_mf; + amrex::IntVect const & ng = mf.nGrowVect(); + const auto tag = amrex::MFInfo().SetTag(mf.tags()[0]); + amrex::MultiFab new_mf(mf.boxArray(), new_dm, mf.nComp(), ng, tag); + + // copy data to new MultiFab: Only done for persistent data like E and B field, not for + // temporary things like currents, etc. + if (mf_owner.m_redistribute_on_remake) { + new_mf.Redistribute(mf, 0, 0, mf.nComp(), ng); + } + + // replace old MultiFab with new one, deallocate old one + mf_owner.m_mf = std::move(new_mf); + } + } + + // Aliases + for (auto & element : m_mf_register ) + { + MultiFabOwner & mf_owner = element.second; + + // keep distribution map as it is? 
+ if (!mf_owner.m_remake) { + continue; + } + + if (mf_owner.m_level == level && mf_owner.is_alias()) { + const amrex::MultiFab & mf = m_mf_register[mf_owner.m_owner].m_mf; + amrex::MultiFab new_mf(mf, amrex::make_alias, 0, mf.nComp()); + + // no copy via Redistribute: the owner was already redistributed + + // replace old MultiFab with new one, deallocate old one + mf_owner.m_mf = std::move(new_mf); + } + } + } + + bool + MultiFabRegister::internal_has ( + std::string const & name, + int level + ) const + { + std::string const internal_name = mf_name(name, level); + + return m_mf_register.count(internal_name) > 0; + } + + bool + MultiFabRegister::internal_has ( + std::string const & name, + Direction dir, + int level + ) const + { + std::string const internal_name = mf_name(name, dir, level); + + return m_mf_register.count(internal_name) > 0; + } + + bool + MultiFabRegister::internal_has_vector ( + std::string const & name, + int level + ) const + { + unsigned long count = 0; + for (Direction const & dir : m_all_dirs) + { + std::string const internal_name = mf_name(name, dir, level); + count += m_mf_register.count(internal_name); + } + + return count == 3; + } + + bool + MultiFabRegister::internal_has ( + std::string const & internal_name + ) + { + return m_mf_register.count(internal_name) > 0; + } + + amrex::MultiFab* + MultiFabRegister::internal_get ( + std::string const & internal_name + ) + { + if (m_mf_register.count(internal_name) == 0) { + // FIXME: temporary, throw a std::runtime_error + // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); + return nullptr; + } + amrex::MultiFab & mf = m_mf_register.at(internal_name).m_mf; + + return &mf; + } + + amrex::MultiFab const * + MultiFabRegister::internal_get ( + std::string const & internal_name + ) const + { + if (m_mf_register.count(internal_name) == 0) { + // FIXME: temporary, throw a std::runtime_error + // throw std::runtime_error("MultiFabRegister::get name does not 
exist in register: " + internal_name); + return nullptr; + } + amrex::MultiFab const & mf = m_mf_register.at(internal_name).m_mf; + + return &mf; + } + + amrex::MultiFab* + MultiFabRegister::internal_get ( + std::string const & name, + int level + ) + { + std::string const internal_name = mf_name(name, level); + return internal_get(internal_name); + } + + amrex::MultiFab* + MultiFabRegister::internal_get ( + std::string const & name, + Direction dir, + int level + ) + { + std::string const internal_name = mf_name(name, dir, level); + return internal_get(internal_name); + } + + amrex::MultiFab const * + MultiFabRegister::internal_get ( + std::string const & name, + int level + ) const + { + std::string const internal_name = mf_name(name, level); + return internal_get(internal_name); + } + + amrex::MultiFab const * + MultiFabRegister::internal_get ( + std::string const & name, + Direction dir, + int level + ) const + { + std::string const internal_name = mf_name(name, dir, level); + return internal_get(internal_name); + } + + MultiLevelScalarField + MultiFabRegister::internal_get_mr_levels ( + std::string const & name, + int finest_level + ) + { + MultiLevelScalarField field_on_level; + field_on_level.reserve(finest_level+1); + for (int lvl = 0; lvl <= finest_level; lvl++) + { + field_on_level.push_back(internal_get(name, lvl)); + } + return field_on_level; + } + + ConstMultiLevelScalarField + MultiFabRegister::internal_get_mr_levels ( + std::string const & name, + int finest_level + ) const + { + ConstMultiLevelScalarField field_on_level; + field_on_level.reserve(finest_level+1); + for (int lvl = 0; lvl <= finest_level; lvl++) + { + field_on_level.push_back(internal_get(name, lvl)); + } + return field_on_level; + } + + VectorField + MultiFabRegister::internal_get_alldirs ( + std::string const & name, + int level + ) + { + // insert a new level + VectorField vectorField; + + // insert components + for (Direction const & dir : m_all_dirs) + { + vectorField[dir] = 
internal_get(name, dir, level); + } + return vectorField; + } + + ConstVectorField + MultiFabRegister::internal_get_alldirs ( + std::string const & name, + int level + ) const + { + // insert a new level + ConstVectorField vectorField; + + // insert components + for (Direction const & dir : m_all_dirs) + { + vectorField[dir] = internal_get(name, dir, level); + } + return vectorField; + } + + MultiLevelVectorField + MultiFabRegister::internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ) + { + MultiLevelVectorField field_on_level; + field_on_level.reserve(finest_level+1); + + for (int lvl = 0; lvl <= finest_level; lvl++) + { + // insert a new level + field_on_level.push_back(VectorField{}); + + // insert components + for (Direction const & dir : m_all_dirs) + { + field_on_level[lvl][dir] = internal_get(name, dir, lvl); + } + } + return field_on_level; + } + + ConstMultiLevelVectorField + MultiFabRegister::internal_get_mr_levels_alldirs ( + std::string const & name, + int finest_level + ) const + { + ConstMultiLevelVectorField field_on_level; + field_on_level.reserve(finest_level+1); + + for (int lvl = 0; lvl <= finest_level; lvl++) + { + // insert a new level + field_on_level.push_back(ConstVectorField{}); + + // insert components + for (Direction const & dir : m_all_dirs) + { + field_on_level[lvl][dir] = internal_get(name, dir, lvl); + } + } + return field_on_level; + } + + std::vector + MultiFabRegister::list () const + { + std::vector names; + names.reserve(m_mf_register.size()); + for (auto const & str : m_mf_register) { names.push_back(str.first); } + + return names; + } + + void + MultiFabRegister::internal_erase ( + std::string const & name, + int level + ) + { + std::string const internal_name = mf_name(name, level); + + if (m_mf_register.count(internal_name) != 1) { + throw std::runtime_error("MultiFabRegister::erase name does not exist in register: " + internal_name); + } + m_mf_register.erase(internal_name); + } + + void + 
MultiFabRegister::internal_erase ( + std::string const & name, + Direction dir, + int level + ) + { + std::string const internal_name = mf_name(name, dir, level); + + if (m_mf_register.count(internal_name) != 1) { + throw std::runtime_error("MultiFabRegister::erase name does not exist in register: " + internal_name); + } + m_mf_register.erase(internal_name); + } + + void + MultiFabRegister::clear_level ( + int level + ) + { + // C++20: Replace with std::erase_if + for (auto first = m_mf_register.begin(), last = m_mf_register.end(); first != last;) + { + if (first->second.m_level == level) { + first = m_mf_register.erase(first); + } else { + ++first; + } + } + } + + std::string + MultiFabRegister::mf_name ( + std::string name, + int level + ) const + { + // Add the suffix "[level=level]" + return name.append("[level=") + .append(std::to_string(level)) + .append("]"); + } + + std::string + MultiFabRegister::mf_name ( + std::string name, + Direction dir, + int level + ) const + { + // Add the suffix for the direction [x] or [y] or [z] + // note: since Cartesian is not correct for all our supported geometries, + // in the future we might want to break this to "[dir=0/1/2]". + // This will be a breaking change for (Python) users that rely on that string. 
+ constexpr int x_in_ascii = 120; + std::string const component_name{char(x_in_ascii + dir.dir)}; + return mf_name( + name + .append("[") + .append(component_name) + .append("]"), + level + ); + } + + VectorField + a2m ( + std::array< std::unique_ptr, 3 > const & old_vectorfield + ) + { + std::vector const all_dirs = {Direction{0}, Direction{1}, Direction{2}}; + + VectorField field_on_level; + + // insert components + for (auto const dir : {0, 1, 2}) + { + field_on_level[Direction{dir}] = old_vectorfield[dir].get(); + } + return field_on_level; + } +} // namespace ablastr::fields diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index c36c83bc336..d7eeecead1b 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -14,6 +14,7 @@ #include #include #include +#include #include #if defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D) @@ -53,6 +54,7 @@ #include #include +#include namespace ablastr::fields { @@ -66,7 +68,7 @@ namespace ablastr::fields { * \param[out] max_norm_rho The maximum L-infinity norm of `rho` across all levels */ inline amrex::Real getMaxNormRho ( - amrex::Vector const & rho, + ablastr::fields::ConstMultiLevelScalarField const& rho, int finest_level, amrex::Real & absolute_tolerance) { @@ -162,8 +164,8 @@ inline void interpolatePhiBetweenLevels ( * \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} * \f] * - * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \tparam T_PostPhiCalculationFunctor a calculation per level directly after phi was calculated + * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler (EB ONLY) * \tparam T_FArrayBoxFactory usually nothing or an amrex::EBFArrayBoxFactory (EB ONLY) * \param[in] rho The charge density a given species * \param[out] phi The potential to be 
computed by this function @@ -186,30 +188,31 @@ inline void interpolatePhiBetweenLevels ( * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none) */ template< - typename T_BoundaryHandler, typename T_PostPhiCalculationFunctor = std::nullopt_t, + typename T_BoundaryHandler = std::nullopt_t, typename T_FArrayBoxFactory = void > void -computePhi (amrex::Vector const & rho, - amrex::Vector & phi, - std::array const beta, - amrex::Real relative_tolerance, - amrex::Real absolute_tolerance, - int max_iters, - int verbosity, - amrex::Vector const& geom, - amrex::Vector const& dmap, - amrex::Vector const& grids, - utils::enums::GridType grid_type, - T_BoundaryHandler const boundary_handler, - bool is_solver_igf_on_lev0, - bool eb_enabled = false, - bool do_single_precision_comms = false, - std::optional > rel_ref_ratio = std::nullopt, - [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, - [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB - [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB +computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + std::array const beta, + amrex::Real relative_tolerance, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity, + amrex::Vector const& geom, + amrex::Vector const& dmap, + amrex::Vector const& grids, + utils::enums::GridType grid_type, + bool is_solver_igf_on_lev0, + bool eb_enabled = false, + bool do_single_precision_comms = false, + std::optional > rel_ref_ratio = std::nullopt, + [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, + [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, // only used for EB + [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB + [[maybe_unused]] std::optional > 
eb_farray_box_factory = std::nullopt // only used for EB ) { using namespace amrex::literals; @@ -250,7 +253,8 @@ computePhi (amrex::Vector const & rho, #endif // determine if rho is zero everywhere - const amrex::Real max_norm_b = getMaxNormRho(rho, finest_level, absolute_tolerance); + const amrex::Real max_norm_b = getMaxNormRho( + amrex::GetVecOfConstPtrs(rho), finest_level, absolute_tolerance); amrex::LPInfo info; @@ -273,7 +277,7 @@ computePhi (amrex::Vector const & rho, #endif // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately - rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); #ifdef WARPX_DIM_RZ constexpr bool is_rz = true; @@ -308,13 +312,17 @@ computePhi (amrex::Vector const & rho, auto linop_nodelap = std::make_unique(); if (eb_enabled) { #if defined(AMREX_USE_EB) - linop_nodelap->define( - amrex::Vector{geom[lev]}, - amrex::Vector{grids[lev]}, - amrex::Vector{dmap[lev]}, - info, - amrex::Vector{eb_farray_box_factory.value()[lev]} - ); + if constexpr(std::is_same_v) { + throw std::runtime_error("EB requested by eb_farray_box_factory not provided!"); + } else { + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info, + amrex::Vector{eb_farray_box_factory.value()[lev]} + ); + } #endif } else { @@ -341,12 +349,18 @@ computePhi (amrex::Vector const & rho, #endif #if defined(AMREX_USE_EB) if (eb_enabled) { - // if the EB potential only depends on time, the potential can be passed - // as a float instead of a callable - if (boundary_handler.phi_EB_only_t) { - linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); - } else { - linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + if constexpr (!std::is_same_v) { + // if the EB potential only depends on 
time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else { + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } + } else + { + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "EB Poisson solver enabled but no 'boundary_handler' passed!"); } } #endif @@ -364,9 +378,20 @@ computePhi (amrex::Vector const & rho, linop = std::move(linop_tenslap); } - // Solve the Poisson equation - linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + // Level 0 domain boundary + if constexpr (std::is_same_v) { + amrex::Array const lobc = {AMREX_D_DECL( + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet + )}; + amrex::Array const hibc = lobc; + linop->setDomainBC(lobc, hibc); + } else { + linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + } + // Solve the Poisson equation amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); @@ -407,6 +432,8 @@ computePhi (amrex::Vector const & rho, post_phi_calculation.value()(mlmg, lev); } } + rho[lev]->mult(-ablastr::constant::SI::ep0); // Multiply rho by epsilon again + } // loop over lev(els) } // computePhi } // namespace ablastr::fields diff --git a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example index 27b6a59592e..970dc980347 100644 --- a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example +++ b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example @@ -6,7 +6,7 @@ export MY_PROFILE=$(cd $(dirname $BASH_SOURCE) && pwd)"/"$(basename $BASH_SOURCE if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! 
Please edit its line 2 to continue!"; return; fi # required dependencies -module load cmake/3.22.1 # we need 3.24+ - installing via pipx until module is available +module load cmake/3.30.2 module load gcc/11.2.0 module load cuda/11.7.1 module load openmpi/4.1.2/gcc.11.2.0 diff --git a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh index 56f2bff4025..c4c31dd4066 100755 --- a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh +++ b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh @@ -119,7 +119,6 @@ python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel python3 -m pip install --upgrade setuptools python3 -m pip install --upgrade pipx -python3 -m pipx install --upgrade cmake python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh new file mode 100755 index 00000000000..cd29664a978 --- /dev/null +++ b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh @@ -0,0 +1,168 @@ +#!/bin/bash +# +# Copyright 2023 The WarpX Community +# +# This file is part of WarpX. +# +# Author: Axel Huebl +# License: BSD-3-Clause-LBNL + +# Exit on first error encountered ############################################# +# +set -eu -o pipefail + + +# Check: ###################################################################### +# +# Was lonestar6_warpx_a100.profile sourced and configured correctly? +if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your lonestar6_warpx_a100.profile file! 
Please edit its line 2 to continue!"; exit 1; fi + + +# Remove old dependencies ##################################################### +# +SW_DIR="${WORK}/sw/lonestar6/sw/lonestar6/a100" +rm -rf ${SW_DIR} +mkdir -p ${SW_DIR} + +# remove common user mistakes in python, located in .local instead of a venv +python3 -m pip uninstall -qq -y pywarpx +python3 -m pip uninstall -qq -y warpx +python3 -m pip uninstall -qqq -y mpi4py 2>/dev/null || true + + +# General extra dependencies ################################################## +# + +# tmpfs build directory: avoids issues often seen with $HOME and is faster +build_dir=$(mktemp -d) + +# c-blosc (I/O compression) +if [ -d $HOME/src/c-blosc ] +then + cd $HOME/src/c-blosc + git fetch --prune + git checkout v1.21.1 + cd - +else + git clone -b v1.21.1 https://github.com/Blosc/c-blosc.git $HOME/src/c-blosc +fi +rm -rf $HOME/src/c-blosc-a100-build +cmake -S $HOME/src/c-blosc -B ${build_dir}/c-blosc-a100-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.1 +cmake --build ${build_dir}/c-blosc-a100-build --target install --parallel 16 +rm -rf ${build_dir}/c-blosc-a100-build + +# ADIOS2 +if [ -d $HOME/src/adios2 ] +then + cd $HOME/src/adios2 + git fetch --prune + git checkout v2.8.3 + cd - +else + git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 +fi +rm -rf $HOME/src/adios2-a100-build +cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-a100-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake --build ${build_dir}/adios2-a100-build --target install -j 16 +rm -rf ${build_dir}/adios2-a100-build + +# BLAS++ (for PSATD+RZ) +if [ -d $HOME/src/blaspp ] +then + cd $HOME/src/blaspp + git fetch --prune + git checkout v2024.05.31 + cd - +else + git clone -b v2024.05.31 https://github.com/icl-utk-edu/blaspp.git $HOME/src/blaspp +fi +rm -rf 
$HOME/src/blaspp-a100-build +cmake -S $HOME/src/blaspp -B ${build_dir}/blaspp-a100-build -Duse_openmp=OFF -Dgpu_backend=cuda -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31 +cmake --build ${build_dir}/blaspp-a100-build --target install --parallel 16 +rm -rf ${build_dir}/blaspp-a100-build + +# LAPACK++ (for PSATD+RZ) +if [ -d $HOME/src/lapackpp ] +then + cd $HOME/src/lapackpp + git fetch --prune + git checkout v2024.05.31 + cd - +else + git clone -b v2024.05.31 https://github.com/icl-utk-edu/lapackpp.git $HOME/src/lapackpp +fi +rm -rf $HOME/src/lapackpp-a100-build +CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B ${build_dir}/lapackpp-a100-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31 +cmake --build ${build_dir}/lapackpp-a100-build --target install --parallel 16 +rm -rf ${build_dir}/lapackpp-a100-build + +# heFFTe +if [ -d $HOME/src/heffte ] +then + cd $HOME/src/heffte + git fetch --prune + git checkout v2.4.0 + cd - +else + git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${HOME}/src/heffte +fi +rm -rf ${HOME}/src/heffte-a100-build +cmake \ + -S ${HOME}/src/heffte \ + -B ${build_dir}/heffte-a100-build \ + -DBUILD_SHARED_LIBS=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=17 \ + -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \ + -DHeffte_DISABLE_GPU_AWARE_MPI=OFF \ + -DHeffte_ENABLE_AVX=OFF \ + -DHeffte_ENABLE_AVX512=OFF \ + -DHeffte_ENABLE_FFTW=OFF \ + -DHeffte_ENABLE_CUDA=ON \ + -DHeffte_ENABLE_ROCM=OFF \ + -DHeffte_ENABLE_ONEAPI=OFF \ + -DHeffte_ENABLE_MKL=OFF \ + -DHeffte_ENABLE_DOXYGEN=OFF \ + -DHeffte_SEQUENTIAL_TESTING=OFF \ + -DHeffte_ENABLE_TESTING=OFF \ + -DHeffte_ENABLE_TRACING=OFF \ + -DHeffte_ENABLE_PYTHON=OFF \ + -DHeffte_ENABLE_FORTRAN=OFF \ + -DHeffte_ENABLE_SWIG=OFF \ + -DHeffte_ENABLE_MAGMA=OFF +cmake --build ${build_dir}/heffte-a100-build --target 
install --parallel 16 +rm -rf ${build_dir}/heffte-a100-build + + +# Python ###################################################################### +# +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade virtualenv +python3 -m pip cache purge +rm -rf ${SW_DIR}/venvs/warpx-a100 +python3 -m venv ${SW_DIR}/venvs/warpx-a100 +source ${SW_DIR}/venvs/warpx-a100/bin/activate +python3 -m pip install --upgrade pip +python3 -m pip install --upgrade build +python3 -m pip install --upgrade packaging +python3 -m pip install --upgrade wheel +python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade cython +python3 -m pip install --upgrade numpy +python3 -m pip install --upgrade pandas +python3 -m pip install --upgrade scipy +python3 -m pip install --upgrade mpi4py --no-cache-dir --no-build-isolation --no-binary mpi4py +python3 -m pip install --upgrade openpmd-api +python3 -m pip install --upgrade matplotlib +python3 -m pip install --upgrade yt +# install or update WarpX dependencies +python3 -m pip install --upgrade -r $HOME/src/warpx/requirements.txt +#python3 -m pip install --upgrade cupy-cuda12x # CUDA 12 compatible wheel +# optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) +#python3 -m pip install --upgrade torch # CUDA 12 compatible wheel +#python3 -m pip install --upgrade optimas[all] + + +# remove build temporary directory +rm -rf ${build_dir} diff --git a/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch b/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch new file mode 100644 index 00000000000..bef40942ed6 --- /dev/null +++ b/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch @@ -0,0 +1,41 @@ +#!/bin/bash -l + +# Copyright 2021-2022 Axel Huebl, Kevin Gott +# +# This file is part of WarpX. 
+# +# License: BSD-3-Clause-LBNL + +#SBATCH -t 00:10:00 +#SBATCH -N 2 +#SBATCH -J WarpX +# note: must end on _g +#SBATCH -A +#SBATCH -q regular +#SBATCH -C gpu +#SBATCH --exclusive +#SBATCH --gpu-bind=none +#SBATCH --gpus-per-node=4 +#SBATCH -o WarpX.o%j +#SBATCH -e WarpX.e%j + +# executable & inputs file or python interpreter & PICMI script here +EXE=./warpx +INPUTS=inputs_small + +# pin to closest NIC to GPU +export MPICH_OFI_NIC_POLICY=GPU + +# threads for OpenMP and threaded compressors per MPI rank +export SRUN_CPUS_PER_TASK=32 + +# depends on https://github.com/ECP-WarpX/WarpX/issues/2009 +#GPU_AWARE_MPI="amrex.the_arena_is_managed=0 amrex.use_gpu_aware_mpi=1" +GPU_AWARE_MPI="" + +# CUDA visible devices are ordered inverse to local task IDs +# Reference: nvidia-smi topo -m +srun --cpu-bind=cores bash -c " + export CUDA_VISIBLE_DEVICES=\$((3-SLURM_LOCALID)); + ${EXE} ${INPUTS} ${GPU_AWARE_MPI}" \ + > output.txt diff --git a/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example new file mode 100644 index 00000000000..148299f281c --- /dev/null +++ b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example @@ -0,0 +1,59 @@ +# please set your project account +#export proj="" # change me + +# required dependencies +module purge +module load TACC +module load gcc/11.2.0 +module load cuda/12.2 +module load cmake +module load mvapich2 + +# optional: for QED support with detailed tables +module load boost/1.84 + +# optional: for openPMD and PSATD+RZ support +module load phdf5/1.10.4 + +SW_DIR="${WORK}/sw/lonestar6/sw/lonestar6/a100" +export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} +export 
CMAKE_PREFIX_PATH=${SW_DIR}/heffte-2.4.0:${CMAKE_PREFIX_PATH} + +export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/heffte-2.4.0/lib64:$LD_LIBRARY_PATH + +export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} + +# optional: CCache +#module load ccache # TODO: request from support + +# optional: for Python bindings or libEnsemble +module load python3/3.9.7 + +if [ -d "${SW_DIR}/venvs/warpx-a100" ] +then + source ${SW_DIR}/venvs/warpx-a100/bin/activate +fi + +# an alias to request an interactive batch node for one hour +# for parallel execution, start on the batch node: srun +alias getNode="salloc -N 1 --ntasks-per-node=2 -t 1:00:00 -p gpu-a100 --gpu-bind=single:1 -c 32 -G 2 -A $proj" +# an alias to run a command on a batch node for up to 30min +# usage: runNode +alias runNode="srun -N 1 --ntasks-per-node=2 -t 0:30:00 -p gpu-a100 --gpu-bind=single:1 -c 32 -G 2 -A $proj" + +# optimize CUDA compilation for A100 +export AMREX_CUDA_ARCH=8.0 + +# compiler environment hints +export CC=$(which gcc) +export CXX=$(which g++) +export FC=$(which gfortran) +export CUDACXX=$(which nvcc) +export CUDAHOSTCXX=${CXX} diff --git a/Tools/machines/lumi-csc/lumi_warpx.profile.example b/Tools/machines/lumi-csc/lumi_warpx.profile.example index 13fb6b1d81e..915f976f4ab 100644 --- a/Tools/machines/lumi-csc/lumi_warpx.profile.example +++ b/Tools/machines/lumi-csc/lumi_warpx.profile.example @@ -2,9 +2,9 @@ #export proj="project_..."
# required dependencies -module load LUMI/23.09 partition/G -module load rocm/5.2.3 # waiting for 5.5 for next bump -module load buildtools/23.09 +module load LUMI/24.03 partition/G +module load rocm/6.0.3 +module load buildtools/24.03 # optional: just an additional text editor module load nano @@ -27,7 +27,7 @@ export PATH=${SW_DIR}/hdf5-1.14.1.2/bin:${PATH} export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} # optional: for Python bindings or libEnsemble -module load cray-python/3.10.10 +module load cray-python/3.11.7 if [ -d "${SW_DIR}/venvs/warpx-lumi" ] then diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index e3682b69ff5..7524d919c61 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "028638564f7be0694b9898f8d4088cdbf9a6f9f5" +set(WarpX_amrex_branch "103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index e93851443c0..69711866f74 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "41c856b8a588c3c8b04bb35d2d05b56f6ce0dd7f" +set(WarpX_pyamrex_branch "1c66690f83244196c5655293f1381303a7d1589d" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)")