diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 1355dc2f647..a32ecb8fa24 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -6,6 +6,9 @@ pool: pr: autoCancel: true drafts: false + branches: + include: + - development jobs: - job: diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index 119a893eb72..067488911bb 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -1,6 +1,10 @@ name: 🧴 clang sanitizers -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangsanitizers @@ -69,7 +73,7 @@ jobs: export "ASAN_OPTIONS=detect_leaks=0" mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz - mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration + mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_base_1d mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d @@ -150,7 +154,7 @@ jobs: export OMP_NUM_THREADS=2 mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz warpx.serialize_initial_conditions = 0 - mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration warpx.serialize_initial_conditions = 0 + mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_base_1d warpx.serialize_initial_conditions = 0 mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d warpx.serialize_initial_conditions = 0 mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d warpx.serialize_initial_conditions = 0 diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 5a4f83f01f1..9088e3af134 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -1,6 +1,10 @@ name: 🧹 clang-tidy -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangtidy diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 5c36b9d9f21..e3549ae340a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -31,6 +31,11 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + name: Install Python + with: + python-version: '3.x' + - name: Install Packages (C++) if: ${{ matrix.language == 'cpp' }} run: | @@ -38,9 +43,10 @@ jobs: sudo apt-get install --yes cmake openmpi-bin libopenmpi-dev libhdf5-openmpi-dev libadios-openmpi-dev ccache python -m pip install --upgrade pip + python -m pip install --upgrade pipx python -m pip install --upgrade wheel python -m pip install --upgrade cmake - export CMAKE="$HOME/.local/bin/cmake" && echo "CMAKE=$CMAKE" >> $GITHUB_ENV + python -m pipx install cmake - name: Set Up Cache if: ${{ matrix.language == 'cpp' }} @@ -54,7 +60,7 @@ jobs: - name: Configure (C++) if: ${{ matrix.language == 'cpp' }} run: | - $CMAKE -S . -B build -DWarpX_OPENPMD=ON + cmake -S . 
-B build -DWarpX_OPENPMD=ON - name: Initialize CodeQL uses: github/codeql-action/init@v3 @@ -75,7 +81,7 @@ jobs: export CCACHE_MAXSIZE=100M ccache -z - $CMAKE --build build -j 4 + cmake --build build -j 4 ccache -s du -hs ~/.cache/ccache @@ -83,7 +89,7 @@ jobs: # Make sure CodeQL has something to do touch Source/Utils/WarpXVersion.cpp export CCACHE_DISABLE=1 - $CMAKE --build build -j 4 + cmake --build build -j 4 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 2bc5d35bb4a..2209f425d1f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -1,6 +1,10 @@ name: 🐧 CUDA -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-cuda @@ -131,7 +135,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770 && cd - + cd ../amrex && git checkout --detach e1222803739ed2342b9ff6fc2d57316ff0d6cb0c && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index ba537e776d4..12513caa19a 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -1,6 +1,10 @@ name: 🐧 HIP -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-hip diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index 35f16842935..0cc6a1ced5e 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -1,6 +1,10 @@ name: 🐧 In Situ Vis -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-insituvis diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 4d0b9ebe9c6..f27181c2e20 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -1,6 +1,10 @@ name: 🐧 Intel -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-intel @@ -180,6 +184,7 @@ jobs: -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_COMPUTE=SYCL \ -DWarpX_EB=ON \ + -DWarpX_FFT=ON \ -DWarpX_PYTHON=ON \ -DWarpX_MPI=OFF \ -DWarpX_OPENPMD=ON \ diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 596920a3911..0afaf6ea451 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,6 +1,10 @@ name: 🍏 macOS -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-macos @@ -18,13 +22,16 @@ jobs: #CMAKE_GENERATOR: Ninja steps: - uses: actions/checkout@v4 - - name: install dependencies + - uses: actions/setup-python@v5 + name: Install Python + with: + python-version: '3.x' + - name: install brew dependencies run: | set +e brew unlink gcc brew update brew upgrade || true - brew install --overwrite python brew install ccache brew install fftw brew install libomp @@ -35,12 +42,12 @@ jobs: set -e brew tap openpmd/openpmd brew install openpmd-api - - python3 -m venv py-venv - source py-venv/bin/activate + - name: install pip dependencies + run: | python3 -m pip install --upgrade pip python3 -m pip install --upgrade build packaging setuptools wheel 
python3 -m pip install --upgrade mpi4py + python3 -m pip install --upgrade -r Regression/requirements.txt - name: CCache Cache uses: actions/cache@v4 with: @@ -56,8 +63,6 @@ jobs: export CCACHE_SLOPPINESS=time_macros ccache -z - source py-venv/bin/activate - cmake -S . -B build_dp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_EB=OFF \ @@ -67,7 +72,6 @@ jobs: cmake -S . -B build_sp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DPython_EXECUTABLE=$(which python3) \ -DWarpX_EB=OFF \ -DWarpX_PYTHON=ON \ -DWarpX_OPENPMD=ON \ @@ -81,7 +85,6 @@ jobs: - name: run pywarpx run: | - source py-venv/bin/activate export OMP_NUM_THREADS=1 mpirun -n 2 Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index 7a2086cfdff..b97afe016c0 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ -6,7 +6,11 @@ name: 📜 Source -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-source diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 68d2b2156e9..6435ed7e66a 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -1,6 +1,10 @@ name: 🐧 OpenMP -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-ubuntu @@ -82,7 +86,7 @@ jobs: -DWarpX_QED_TOOLS=ON cmake --build build -j 4 - ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration + ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_base_1d ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d ccache -s diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index fc75ccb0141..1d8b0fd0495 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,6 +1,10 @@ name: 🪟 Windows -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-windows diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2b15b8af95..8ba600be560 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ exclude: '^share/openPMD/thirdParty' # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.6.9 hooks: # Run the linter - id: ruff diff --git a/CMakeLists.txt b/CMakeLists.txt index 3b4e9199f53..980b23183fd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 24.09) +project(WarpX VERSION 24.10) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) @@ -714,9 +714,9 @@ endforeach() # if(WarpX_PYTHON) set(PY_PIP_OPTIONS "-v" CACHE STRING - "Additional parameters to pass to `pip`") + "Additional parameters to pass to `pip` as ; separated list") set(PY_PIP_INSTALL_OPTIONS "" CACHE STRING - "Additional parameters to pass to `pip install`") + "Additional parameters to pass to `pip install` as ; separated 
list") # ensure all targets are built before we package them in a wheel set(pyWarpX_INSTALL_TARGET_NAMES) @@ -739,7 +739,8 @@ if(WarpX_PYTHON) ${CMAKE_COMMAND} -E rm -f -r warpx-whl COMMAND ${CMAKE_COMMAND} -E env PYWARPX_LIB_DIR=$ - ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} wheel --no-build-isolation --no-deps --wheel-dir=warpx-whl ${WarpX_SOURCE_DIR} + ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} wheel --no-build-isolation --no-deps --wheel-dir=warpx-whl "${WarpX_SOURCE_DIR}" + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS @@ -754,6 +755,7 @@ if(WarpX_PYTHON) endif() add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install_requirements ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} install ${PY_PIP_INSTALL_OPTIONS} -r "${WarpX_SOURCE_DIR}/${pyWarpX_REQUIREMENT_FILE}" + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} ) @@ -771,6 +773,7 @@ if(WarpX_PYTHON) add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install ${CMAKE_COMMAND} -E env WARPX_MPI=${WarpX_MPI} ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} install --force-reinstall --no-index --no-deps ${PY_PIP_INSTALL_OPTIONS} --find-links=warpx-whl pywarpx + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS @@ -784,6 +787,7 @@ if(WarpX_PYTHON) add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install_nodeps ${CMAKE_COMMAND} -E env WARPX_MPI=${WarpX_MPI} ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} install --force-reinstall --no-index --no-deps ${PY_PIP_INSTALL_OPTIONS} --find-links=warpx-whl pywarpx + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS diff --git a/Docs/source/conf.py b/Docs/source/conf.py index 9dfda6346f9..c1ad43197c5 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "24.08" +version = "24.10" # The full version, including alpha/beta/rc tags. -release = "24.08" +release = "24.10" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/Docs/source/dataanalysis/catalyst.rst b/Docs/source/dataanalysis/catalyst.rst index 97e634c5c6a..939b6b134bd 100644 --- a/Docs/source/dataanalysis/catalyst.rst +++ b/Docs/source/dataanalysis/catalyst.rst @@ -19,7 +19,7 @@ First, we build build `Catalyst 2 `_ using the conduit library created in the previous step. The latter can be achieved by adding the installation path of conduit to the environmental -variable `CMAKE_PREFIX_PATH` and setting `CATALYST_WITH_EXTERNAL_CONDUIT=ON` during the configuration step of Catalyst. +variable ``CMAKE_PREFIX_PATH`` and setting ``CATALYST_WITH_EXTERNAL_CONDUIT=ON`` during the configuration step of Catalyst. Then we build ParaView master (on a commit after 2024.07.01, tested on ``4ef351a54ff747ef7169e2e52e77d9703a9dfa77``) following the developer instructions provided `here `__ . @@ -27,7 +27,7 @@ A representative set of options for a headless ParaView installation is provided `here `__ Afterward, WarpX must be built with ``WarpX_CATALYST=ON``. Also, make sure to provide the installed paths of Conduit and Catalyst via -`CMAKE_PREFIX_PATH` before configuring WarpX. +``CMAKE_PREFIX_PATH`` before configuring WarpX. Inputs File Configuration ------------------------- @@ -41,7 +41,7 @@ In addition to configuring the diagnostics, the following parameters must be inc * ``catalyst.implementation_search_paths``: The locations to search for the given implementation. 
The specific file being searched for will be ``catalyst_{implementation}.so``. The latter two can also be given via the environmental variables -`CATALYST_IMPLEMENTATION_NAME` and `CATALYST_IMPLEMENTATION_PATHS` +``CATALYST_IMPLEMENTATION_NAME`` and ``CATALYST_IMPLEMENTATION_PATHS`` respectively. Because the scripts and implementations are global, Catalyst does not benefit from nor differentiate between multiple diagnostics. @@ -110,7 +110,7 @@ To generate the data dumps one must first set the environmental variable ``CATAL This will run the simulation and write the conduit nodes under ``CATALYST_DATA_DUMP_DIRECTORY``. -Afterward, one can replay the generated nodes by setting up the `CATALYST_IMPLEMENTATION_*` variables for the `catalyst_replay` executable (which can be found in the catalyst build directory) appropriately. For example: +Afterward, one can replay the generated nodes by setting up the ``CATALYST_IMPLEMENTATION_*`` variables for the ``catalyst_replay`` executable (which can be found in the catalyst build directory) appropriately. For example: .. code-block:: bash diff --git a/Docs/source/developers/checksum.rst b/Docs/source/developers/checksum.rst index 2452d074ba1..ccbea3408ef 100644 --- a/Docs/source/developers/checksum.rst +++ b/Docs/source/developers/checksum.rst @@ -1,32 +1,36 @@ .. _developers-checksum: -Checksum regression tests -========================= +Checksums on Tests +================== -WarpX has checksum regression tests: as part of CI testing, when running a given test, the checksum module computes one aggregated number per field (``Ex_checksum = np.sum(np.abs(Ex))``) and compares it to a reference (benchmark). This should be sensitive enough to make the test fail if your PR causes a significant difference, print meaningful error messages, and give you a chance to fix a bug or reset the benchmark if needed. +When running an automated test, we often compare the data of final time step of the test with expected values to catch accidental changes. +Instead of relying on reference files that we would have to store in their full size, we calculate an aggregate checksum. -The checksum module is located in ``Regression/Checksum/``, and the benchmarks are stored as human-readable `JSON `__ files in ``Regression/Checksum/benchmarks_json/``, with one file per benchmark (for instance, test ``Langmuir_2d`` has a corresponding benchmark ``Regression/Checksum/benchmarks_json/Langmuir_2d.json``). +For this purpose, the checksum Python module computes one aggregated number per field (e.g., the sum of the absolute values of the array elements) and compares it to a reference value (benchmark). +This should be sensitive enough to make the test fail if your PR causes a significant difference, print meaningful error messages, and give you a chance to fix a bug or reset the benchmark if needed. -For more details on the implementation, the Python files in ``Regression/Checksum/`` should be well documented. +The checksum module is located in ``Regression/Checksum/``, and the benchmarks are stored as human-readable `JSON `__ files in ``Regression/Checksum/benchmarks_json/``, with one file per benchmark (for example, the test ``test_2d_langmuir_multi`` has a corresponding benchmark ``Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json``). -From a user point of view, you should only need to use ``checksumAPI.py``. It contains Python functions that can be imported and used from an analysis Python script. It can also be executed directly as a Python script. 
Here are recipes for the main tasks related to checksum regression tests in WarpX CI. +For more details on the implementation, please refer to the Python implementation in ``Regression/Checksum/``. -Include a checksum regression test in an analysis Python script ---------------------------------------------------------------- +From a user point of view, you should only need to use ``checksumAPI.py``, which contains Python functions that can be imported and used from an analysis Python script or can also be executed directly as a Python script. + +How to compare checksums in your analysis script +------------------------------------------------ This relies on the function ``evaluate_checksum``: .. autofunction:: checksumAPI.evaluate_checksum -For an example, see +Here's an example: -.. literalinclude:: ../../../Examples/analysis_default_regression.py +.. literalinclude:: ../../../Examples/Tests/embedded_circle/analysis.py :language: python -This can also be included in an existing analysis script. Note that the plotfile must be ``_plt?????``, as is generated by the CI framework. +This can also be included as part of an existing analysis script. -Evaluate a checksum regression test from a bash terminal --------------------------------------------------------- +How to evaluate checksums from the command line +----------------------------------------------- You can execute ``checksumAPI.py`` as a Python script for that, and pass the plotfile that you want to evaluate, as well as the test name (so the script knows which benchmark to compare it to). @@ -41,11 +45,8 @@ See additional options * ``--rtol`` relative tolerance for the comparison * ``--atol`` absolute tolerance for the comparison (a sum of both is used by ``numpy.isclose()``) -Create/Reset a benchmark with new values that you know are correct ------------------------------------------------------------------- - -Create/Reset a benchmark from a plotfile generated locally -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +How to create or reset checksums with local benchmark values +------------------------------------------------------------ This is using ``checksumAPI.py`` as a Python script. @@ -65,8 +66,8 @@ Since this will automatically change the JSON file stored on the repo, make a se git add .json git commit -m "reset benchmark for because ..." --author="Tools " -Automated reset of a list of test benchmarks --------------------------------------------- +How to reset checksums for a list of tests with local benchmark values +---------------------------------------------------------------------- If you set the environment variable ``export CHECKSUM_RESET=ON`` before running tests that are compared against existing benchmarks, the test analysis will reset the benchmarks to the new values, skipping the comparison. @@ -80,8 +81,8 @@ With `CTest `__ (coming # ... check and commit changes ... -Reset a benchmark from the Azure pipeline output on Github -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +How to reset checksums for a list of tests with benchmark values from the Azure pipeline output +----------------------------------------------------------------------------------------------- Alternatively, the benchmarks can be reset using the output of the Azure continuous intergration (CI) tests on Github. 
 The output can be accessed by following the steps below:
diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst
index ee5c82aeea9..111e3e7d7cb 100644
--- a/Docs/source/developers/testing.rst
+++ b/Docs/source/developers/testing.rst
@@ -1,35 +1,45 @@
 .. _developers-testing:
 
-Testing the code
+Testing the Code
 ================
 
-When adding a new feature, you want to make sure that (i) you did not break the existing code and (ii) your contribution gives correct results. While the code is tested regularly remotely (on the cloud when commits are pushed to an open PR, and every night on local clusters), it can also be useful to run tests on your custom input file. This section details how to use both automated and custom tests.
+When proposing a code change, you want to make sure that
 
-Continuous Integration in WarpX
--------------------------------
+* the code change does not break the existing code;
+* the code change gives correct results (numerics, physics, etc.).
 
-Configuration
-^^^^^^^^^^^^^
+WarpX follows the continuous integration (CI) software development practice, where automated builds and tests are run after merging code changes into the main branch.
 
-Our regression tests are run with `CTest `__, an executable that comes with CMake.
-
-The test suite is ready to run once you have configured and built WarpX with CMake, following the instructions that you find in our :ref:`Users ` or :ref:`Developers ` sections.
-
-A test that requires a build option that was not configured and built will be skipped automatically. For example, if you configure and build WarpX in 1D only, any test of dimensionality other than 1D, which would require WarpX to be configured and built in the corresponding dimensionality, will be skipped automatically.
+While the code is tested regularly remotely (on the cloud when commits are pushed to an open PR, and every night on local clusters), it can also be useful to run tests on your custom input file.
 
 How to run pre-commit tests locally
 -----------------------------------
 
-When proposing code changes to Warpx, we perform a couple of automated stylistic and correctness checks on the code change.
-You can run those locally before you push to save some time, install them once like this:
+First, when proposing a code change, we perform a couple of automated style and correctness checks.
+
+If you install the ``pre-commit`` tool on your local machine via
 
 .. code-block:: sh
 
    python -m pip install -U pre-commit
    pre-commit install
 
+the style and correctness checks will run automatically on your local machine, after you commit the change and before you push.
+
+If you do not install the ``pre-commit`` tool on your local machine, these checks will run automatically as part of our CI workflows and a commit containing style and correctness changes might be added automatically to your branch.
+In that case, you will need to pull that automated commit before pushing further changes.
+
 See `pre-commit.com `__ and our ``.pre-commit-config.yaml`` file in the repository for more details.
 
+How to configure the automated tests
+------------------------------------
+
+Our regression tests are run with `CTest `__, an executable that comes with CMake.
+
+The test suite is ready to run once you have configured and built WarpX with CMake, following the instructions that you find in our :ref:`Users ` or :ref:`Developers ` sections.
+
+A test that requires a build option that was not configured and built will be skipped automatically. For example, if you configure and build WarpX in 1D only, any test of dimensionality other than 1D, which would require WarpX to be configured and built in the corresponding dimensionality, will be skipped automatically.
+
 How to run automated tests locally
 ----------------------------------
 
@@ -107,7 +117,15 @@ If you modify the code base locally and want to assess the effects of your code
 How to add automated tests
 --------------------------
 
-As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, either under `Physics_applications `__ or `Tests `__.
+An automated test typically consists of the following components:
+
+* input file or PICMI input script;
+* analysis script;
+* checksum file.
+
+To learn more about how to use checksums in automated tests, please see the corresponding section :ref:`Checksums on Tests `.
+
+As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, under either `Physics_applications `__ or `Tests `__.
 
 Each test directory must contain a file named ``CMakeLists.txt`` where all tests associated with the input files and scripts in that directory must be listed.
 
@@ -173,7 +191,8 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as
 
 If you need a new Python package dependency for testing, please add it in `Regression/requirements.txt `__.
 
-Sometimes two or more tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``.
+Sometimes two or more tests share a large number of input parameters.
+The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``.
 
 If the new test is added in a new directory that did not exist before, please add the name of that directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the new test directory is located.
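The checksum workflow that the two documentation diffs above describe is exactly the pattern the updated analysis scripts elsewhere in this patch implement. As a minimal sketch of that pattern (assuming the analysis script lives in its test directory under ``Examples/``, so that the relative path to ``Regression/Checksum/`` and the directory-derived test name hold; the ``rtol`` override is optional):

.. code-block:: python

   import os
   import sys

   # make checksumAPI importable; this relative path assumes the script runs from its test directory
   sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
   from checksumAPI import evaluate_checksum

   # compare the test output (first command-line argument) against the stored
   # JSON benchmark named after the current test directory
   evaluate_checksum(
       test_name=os.path.split(os.getcwd())[1],
       output_file=sys.argv[1],
       rtol=5e-3,  # optional per-test tolerance, as used in analysis_2d.py below
   )

For openPMD output, the same call additionally takes ``output_format="openpmd"``, as in ``analysis_fel.py`` further down in this patch.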
diff --git a/Docs/source/developers/workflows.rst b/Docs/source/developers/workflows.rst index 00279018e9d..f7c81ae70d8 100644 --- a/Docs/source/developers/workflows.rst +++ b/Docs/source/developers/workflows.rst @@ -8,7 +8,7 @@ Workflows profiling testing - documentation checksum - local_compile run_clang_tidy_locally + local_compile + documentation diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index 60d9eecc2b4..41e4c40bc85 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -77,9 +77,9 @@ For example, this builds WarpX in all geometries, enables Python bindings and Nv Build Options ------------- -============================= ============================================ ========================================================= +============================= ============================================ =========================================================== CMake Option Default & Values Description -============================= ============================================ ========================================================= +============================= ============================================ =========================================================== ``CMAKE_BUILD_TYPE`` RelWithDebInfo/**Release**/Debug `Type of build, symbols & optimizations `__ ``CMAKE_INSTALL_PREFIX`` system-dependent path `Install path prefix `__ ``CMAKE_VERBOSE_MAKEFILE`` ON/**OFF** `Print all compiler commands to the terminal during build `__ @@ -105,9 +105,9 @@ CMake Option Default & Values Descr ``WarpX_QED_TABLES_GEN_OMP`` **AUTO**/ON/OFF Enables OpenMP support for QED lookup tables generation ``WarpX_SENSEI`` ON/**OFF** SENSEI in situ visualization ``Python_EXECUTABLE`` (newest found) Path to Python executable -``PY_PIP_OPTIONS`` ``-v`` Additional options for ``pip``, e.g., ``-vvv`` -``PY_PIP_INSTALL_OPTIONS`` Additional options for ``pip install``, e.g., ``--user`` -============================= ============================================ ========================================================= +``PY_PIP_OPTIONS`` ``-v`` Additional options for ``pip``, e.g., ``-vvv;-q`` +``PY_PIP_INSTALL_OPTIONS`` Additional options for ``pip install``, e.g., ``--user;-q`` +============================= ============================================ =========================================================== WarpX can be configured in further detail with options from AMReX, which are documented in the AMReX manual: diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 29fb509608c..70b88a0abf8 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -445,6 +445,41 @@ @article{Vranic2015 doi = {https://doi.org/10.1016/j.cpc.2015.01.020}, } +@misc{Fallahi2020, + title={MITHRA 2.0: A Full-Wave Simulation Tool for Free Electron Lasers}, + author={Arya Fallahi}, + year={2020}, + eprint={2009.13645}, + archivePrefix={arXiv}, + primaryClass={physics.acc-ph}, + url={https://arxiv.org/abs/2009.13645}, +} + +@article{VayFELA2009, + title = {FULL ELECTROMAGNETIC SIMULATION OF FREE-ELECTRON LASER AMPLIFIER PHYSICS VIA THE LORENTZ-BOOSTED FRAME APPROACH}, + author = {Fawley, William M and Vay, Jean-Luc}, + abstractNote = {Numerical simulation of some systems containing charged particles with highly relativistic directed motion can by speeded up by orders of magnitude by choice of the proper Lorentz-boosted frame[1]. 
A particularly good example is that of short wavelength free-electron lasers (FELs) in which a high energy electron beam interacts with a static magnetic undulator. In the optimal boost frame with Lorentz factor gamma_F , the red-shifted FEL radiation and blue shifted undulator have identical wavelengths and the number of required time-steps (presuming the Courant condition applies) decreases by a factor of 2(gamma_F)**2 for fully electromagnetic simulation. We have adapted the WARP code [2]to apply this method to several FEL problems involving coherent spontaneous emission (CSE) from pre-bunched ebeams, including that in a biharmonic undulator.}, + url = {https://www.osti.gov/biblio/964405}, + place = {United States}, + year = {2009}, + month = {4}, +} + +@article{VayFELB2009, + author = {Fawley, W. M. and Vay, J.‐L.}, + title = "{Use of the Lorentz‐Boosted Frame Transformation to Simulate Free‐Electron Laser Amplifier Physics}", + journal = {AIP Conference Proceedings}, + volume = {1086}, + number = {1}, + pages = {346-350}, + year = {2009}, + month = {01}, + abstract = "{Recently [1] it has been pointed out that numerical simulation of some systems containing charged particles with highly relativistic directed motion can by speeded up by orders of magnitude by choice of the proper Lorentz boosted frame. A particularly good example is that of short wavelength free‐electron lasers (FELs) in which a high energy (E0⩾250 MeV) electron beam interacts with a static magnetic undulator. In the optimal boost frame with Lorentz factor γF, the red‐shifted FEL radiation and blue shifted undulator have identical wavelengths and the number of required time‐steps (presuming the Courant condition applies) decreases by a factor of γF2 for fully electromagnetic simulation.We have adapted the WARP code [2] to apply this method to several FEL problems including coherent spontaneous emission (CSE) from pre‐bunched e‐beams, and strong exponential gain in a single pass amplifier configuration. We discuss our results and compare with those from the “standard” FEL simulation approach which adopts the eikonal approximation for propagation of the radiation field.}", + issn = {0094-243X}, + doi = {10.1063/1.3080930}, + url = {https://doi.org/10.1063/1.3080930}, +} + @article{Rhee1987, author = {Rhee, M. J. and Schneider, R. F. and Weidman, D. 
J.}, title = "{Simple time‐resolving Thomson spectrometer}", diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index 6e042424283..fa3e674edd3 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -44,9 +44,9 @@ Particle Accelerator & Beam Physics examples/gaussian_beam/README.rst examples/beam_beam_collision/README.rst + examples/free_electron_laser/README.rst examples/thomson_parabola_spectrometer/README.rst - High Energy Astrophysical Plasma Physics ---------------------------------------- diff --git a/Docs/source/usage/examples/free_electron_laser b/Docs/source/usage/examples/free_electron_laser new file mode 120000 index 00000000000..1ce0fedd798 --- /dev/null +++ b/Docs/source/usage/examples/free_electron_laser @@ -0,0 +1 @@ +../../../../Examples/Physics_applications/free_electron_laser \ No newline at end of file diff --git a/Docs/source/usage/faq.rst b/Docs/source/usage/faq.rst index 67cea8d6621..4ed0f8fa6af 100644 --- a/Docs/source/usage/faq.rst +++ b/Docs/source/usage/faq.rst @@ -74,10 +74,10 @@ Several BTD quantities differ slightly from the lab frame domain described in th In the following discussion, we will use a subscript input (e.g. :math:`\Delta z_{\rm input}`) to denote properties of the lab frame domain. -- The first back-transformed diagnostic (BTD) snapshot may not occur at :math:`t=0`. Rather, it occurs at :math:`t_0=\frac{z_{max}}c \beta(1+\beta)\gamma^2`. This is the first time when the boosted frame can complete the snapshot. +- The first back-transformed diagnostic (BTD) snapshot may not occur at :math:`t=0`. Rather, it occurs at :math:`t_0=\frac{z_{max}}c \beta/(1 - \beta \beta_{mw})`, where :math:`\beta_{mw}` represents the speed of the moving window. This is the first time when the boosted frame can complete the snapshot. - The grid spacing of the BTD snapshot is different from the grid spacing indicated in the input script. It is given by :math:`\Delta z_{\rm grid,snapshot}=\frac{c\Delta t_{\rm boost}}{\gamma\beta}`. For a CFL-limited time step, :math:`\Delta z_{\rm grid,snapshot}\approx \frac{1+\beta}{\beta} \Delta z_{\rm input}\approx 2 \Delta z_{\rm input}`. Hence in many common use cases at large boost, it is expected that the BTD snapshot has a grid spacing twice what is expressed in the input script. - The effective length of the BTD snapshot may be longer than anticipated from the input script because the grid spacing is different. Additionally, the number of grid points in the BTD snapshot is a multiple of ``.buffer_size`` whereas the number of grid cells specified in the input deck may not be. -- The code may require longer than anticipated to complete a BTD snapshot. The code starts filling the :math:`i^{th}` snapshot around step :math:`j_{\rm BTD start}={\rm ceil}\left( i\gamma(1-\beta)\frac{\Delta t_{\rm snapshot}}{\Delta t_{\rm boost}}\right)`. The code then saves information for one BTD cell every time step in the boosted frame simulation. The :math:`i^{th}` snapshot is completed and saved :math:`n_{z,{\rm snapshot}}=n_{\rm buffers}\cdot ({\rm buffer\ size})` time steps after it begins, which is when the effective snapshot length is covered by the simulation. +- The code may require longer than anticipated to complete a BTD snapshot. The code starts filling the :math:`i^{th}` snapshot around step :math:`j_{\rm BTD start}={\rm ceil}\left( i\gamma(1-\beta\beta_{mw})\frac{\Delta t_{\rm snapshot}}{\Delta t_{\rm boost}}\right)`. 
The code then saves information for one BTD cell every time step in the boosted frame simulation. The :math:`i^{th}` snapshot is completed and saved :math:`n_{z,{\rm snapshot}}=n_{\rm buffers}\cdot ({\rm buffer\ size})` time steps after it begins, which is when the effective snapshot length is covered by the simulation. What kinds of RZ output do you support? --------------------------------------- diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index b9d82d5014a..a6ba9a2773d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -257,7 +257,7 @@ Overall simulation parameters ``warpx.self_fields_absolute_tolerance``). * ``fft``: Poisson's equation is solved using an Integrated Green Function method (which requires FFT calculations). - See these references for more details :cite:t:`QiangPhysRevSTAB2006`, :cite:t:`QiangPhysRevSTAB2006err`. + See these references for more details :cite:t:`param-QiangPhysRevSTAB2006`, :cite:t:`param-QiangPhysRevSTAB2006err`. It only works in 3D and it requires the compilation flag ``-DWarpX_FFT=ON``. If mesh refinement is enabled, this solver only works on the coarsest level. On the refined patches, the Poisson equation is solved with the multigrid solver. @@ -971,16 +971,21 @@ Particle initialization The ``external_file`` option is currently implemented for 2D, 3D and RZ geometries, with record components in the cartesian coordinates ``(x,y,z)`` for 3D and RZ, and ``(x,z)`` for 2D. For more information on the `openPMD format `__ and how to build WarpX with it, please visit :ref:`the install section `. - * ``NFluxPerCell``: Continuously inject a flux of macroparticles from a planar surface. + * ``NFluxPerCell``: Continuously inject a flux of macroparticles from a surface. The emitting surface can be chosen to be either a plane + defined by the user (using some of the parameters listed below), or the embedded boundary (see :ref:`Embedded Boundary Conditions `). This requires the additional parameters: * ``.flux_profile`` (see the description of this parameter further below) - * ``.surface_flux_pos`` (`double`, location of the injection plane [meter]) + * ``.inject_from_embedded_boundary`` (`0` or `1`, default `0` ; whether to inject from the embedded boundary or from a user-specified plane. + When injecting from the embedded boundary, the momentum distribution specified by the user along ``z`` (see e.g. ``uz_m``, ``uz_th`` below) is interpreted + as the momentum distribution along the local normal to the embedded boundary.) - * ``.flux_normal_axis`` (`x`, `y`, or `z` for 3D, `x` or `z` for 2D, or `r`, `t`, or `z` for RZ. When `flux_normal_axis` is `r` or `t`, the `x` and `y` components of the user-specified momentum distribution are interpreted as the `r` and `t` components respectively) + * ``.surface_flux_pos`` (only used when injecting from a plane, `double`, location of the injection plane [meter]) - * ``.flux_direction`` (`-1` or `+1`, direction of flux relative to the plane) + * ``.flux_normal_axis`` (only used when injecting from a plane, `x`, `y`, or `z` for 3D, `x` or `z` for 2D, or `r`, `t`, or `z` for RZ. 
When `flux_normal_axis` is `r` or `t`, the `x` and `y` components of the user-specified momentum distribution are interpreted as the `r` and `t` components respectively) + + * ``.flux_direction`` (only used when injecting from a plane, `-1` or `+1`, direction of flux relative to the plane) * ``.num_particles_per_cell`` (`double`) @@ -3466,14 +3471,15 @@ Reduced Diagnostics \frac{d\mathcal{L}}{d\mathcal{E}^*}(\mathcal{E}^*, t) = \int_0^t dt'\int d\boldsymbol{x}\,d\boldsymbol{p}_1 d\boldsymbol{p}_2\; \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\ f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(\mathcal{E}^* - \mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2)) - where :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2(m_1 m_2 c^4 - \gamma_1 \gamma_2 - \boldsymbol{p}_1\cdot\boldsymbol{p}_2 c^2)}` is the energy in the center-of-mass frame, - and :math:`f_i` is the distribution function of species :math:`i`. Note that, if :math:`\sigma^*(\mathcal{E}^*)` + where :math:`f_i` is the distribution function of species :math:`i` and + :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2 c^2{p_1}^\mu {p_2}_\mu}` + is the energy in the center-of-mass frame, where :math:`p^\mu = (\sqrt{m^2 c^2 + \boldsymbol{p}^2}, \boldsymbol{p})` + represents the 4-momentum. Note that, if :math:`\sigma^*(\mathcal{E}^*)` is the center-of-mass cross-section of a given collision process, then :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)` gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`). - The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations + The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}` can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams. 
diff --git a/Examples/Physics_applications/CMakeLists.txt b/Examples/Physics_applications/CMakeLists.txt index b68b782ee95..ed06a840501 100644 --- a/Examples/Physics_applications/CMakeLists.txt +++ b/Examples/Physics_applications/CMakeLists.txt @@ -3,6 +3,7 @@ add_subdirectory(beam_beam_collision) add_subdirectory(capacitive_discharge) +add_subdirectory(free_electron_laser) add_subdirectory(laser_acceleration) add_subdirectory(laser_ion) add_subdirectory(plasma_acceleration) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py index 82d98c38210..e9043e5dc01 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py @@ -2,8 +2,14 @@ # Copyright 2022 Modern Electron, David Grote +import os +import sys + import numpy as np +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + # fmt: off ref_density = np.array([ 1.27989677e+14, 2.23601330e+14, 2.55400265e+14, 2.55664972e+14, @@ -45,3 +51,9 @@ density_data = np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py index f52f69f4bf4..d4845ffb718 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py @@ -7,12 +7,15 @@ # solver that directly solves the Poisson equation using matrix inversion # rather than the iterative approach from the MLMG solver. 
+import os
 import sys
 
-sys.path.append("../../../../warpx/Regression/Checksum/")
+sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+from checksumAPI import evaluate_checksum
 
-import checksumAPI
-
-my_check = checksumAPI.evaluate_checksum(
-    "test_2d_background_mcc", "diags/diag1000050", do_particles=True, rtol=5e-3
+# compare checksums
+evaluate_checksum(
+    test_name=os.path.split(os.getcwd())[1],
+    output_file=sys.argv[1],
+    rtol=5e-3,
 )
diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py
index 505521fc1ca..1458924b35c 100755
--- a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py
+++ b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py
@@ -7,15 +7,8 @@
 
 import numpy as np
 
-sys.path.append("../../../../warpx/Regression/Checksum/")
-
-import checksumAPI
-
-# this will be the name of the plot file
-fn = sys.argv[1]
-test_name = os.path.split(os.getcwd())[1]
-
-my_check = checksumAPI.evaluate_checksum(test_name, fn, do_particles=True)
+sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+from checksumAPI import evaluate_checksum
 
 # fmt: off
 ref_density = np.array([
@@ -58,3 +51,9 @@
 density_data = np.load("ion_density_case_1.npy")
 print(repr(density_data))
 assert np.allclose(density_data, ref_density)
+
+# compare checksums
+evaluate_checksum(
+    test_name=os.path.split(os.getcwd())[1],
+    output_file=sys.argv[1],
+)
diff --git a/Examples/Physics_applications/free_electron_laser/CMakeLists.txt b/Examples/Physics_applications/free_electron_laser/CMakeLists.txt
new file mode 100644
index 00000000000..f5bc8d857d2
--- /dev/null
+++ b/Examples/Physics_applications/free_electron_laser/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+add_warpx_test(
+    test_1d_fel  # name
+    1  # dims
+    2  # nprocs
+    inputs_test_1d_fel  # inputs
+    analysis_fel.py  # analysis
+    diags/diag_labframe  # output
+    OFF  # dependency
+)
diff --git a/Examples/Physics_applications/free_electron_laser/README.rst b/Examples/Physics_applications/free_electron_laser/README.rst
new file mode 100644
index 00000000000..00d6ef2758c
--- /dev/null
+++ b/Examples/Physics_applications/free_electron_laser/README.rst
@@ -0,0 +1,46 @@
+.. _examples-free-electron-laser:
+
+Free-electron laser
+===================
+
+This example shows how to simulate the physics of a free-electron laser (FEL) using WarpX.
+In this example, a relativistic electron beam is sent through an undulator (represented by an external,
+oscillating magnetic field). The radiation emitted by the beam grows exponentially
+as the beam travels through the undulator, due to the Free-Electron-Laser instability.
+
+The parameters of the simulation are taken from section 5.1 of :cite:t:`ex-Fallahi2020`.
+
+The simulation is performed in 1D, and uses the boosted-frame technique as described in
+:cite:t:`ex-VayFELA2009` and :cite:t:`ex-VayFELB2009` to reduce the computational cost (the Lorentz frame of the simulation is moving at the average speed of the beam in the undulator).
+Even though the simulation is run in this boosted frame, the results are reconstructed in the
+laboratory frame, using WarpX's ``BackTransformed`` diagnostic.
+
+The effect of space-charge is intentionally turned off in this example, as it may not be properly modeled in 1D.
+This is achieved by initializing two species of opposite charge (electrons and positrons) to +represent the physical electron beam, as discussed in :cite:t:`ex-VayFELB2009`. + +Run +--- + +This example can be run with the WarpX executable using an input file: ``warpx.1d inputs_test_1d_fel``. For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. + +.. literalinclude:: inputs_test_1d_fel + :language: ini + :caption: You can copy this file from ``Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel``. + +Visualize +--------- + +The figure below shows the results of the simulation. The left panel shows the exponential growth of the radiation along the undulator (note that the vertical axis is plotted in log scale). The right panel shows a snapshot of the simulation, +1.6 m into the undulator. Microbunching of the beam is visible in the electron density (blue). One can also see the +emitted FEL radiation (red) slipping ahead of the beam. + +.. figure:: https://gist.githubusercontent.com/RemiLehe/871a1e24c69e353c5dbb4625cd636cd1/raw/7f4e3da7e0001cff6c592190fee8622580bbe37a/FEL.png + :alt: Results of the WarpX FEL simulation. + :width: 100% + +This figure was obtained with the script below, which can be run with ``python3 plot_sim.py``. + +.. literalinclude:: plot_sim.py + :language: ini + :caption: You can copy this file from ``Examples/Physics_applications/free_electron_laser/plot_sim.py``. diff --git a/Examples/Physics_applications/free_electron_laser/analysis_fel.py b/Examples/Physics_applications/free_electron_laser/analysis_fel.py new file mode 100755 index 00000000000..3ab80d195c0 --- /dev/null +++ b/Examples/Physics_applications/free_electron_laser/analysis_fel.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python + +""" +This script tests that the FEL is correctly modelled in the simulation. + +The physical parameters are the same as the ones from section 5.1 +of https://arxiv.org/pdf/2009.13645 + +The simulation uses the boosted-frame technique as described in +https://www.osti.gov/servlets/purl/940581 +In particular, the effect of space-charge is effectively turned off +by initializing an electron and positron beam on top of each other, +each having half the current of the physical beam. + +The script checks that the radiation wavelength and gain length +are the expected ones. The check is performed both in the +lab-frame diagnostics and boosted-frame diagnostics. 
+""" + +import os +import sys + +import numpy as np +from openpmd_viewer import OpenPMDTimeSeries +from scipy.constants import c, e, m_e + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + +# Physical parameters of the test +gamma_bunch = 100.6 +Bu = 0.5 +lambda_u = 3e-2 +k_u = 2 * np.pi / lambda_u +K = e * Bu / (m_e * c * k_u) # Undulator parameter +gamma_boost = ( + gamma_bunch / (1 + K * K / 2) ** 0.5 +) # Lorentz factor of the ponderomotive frame +beta_boost = (1 - 1.0 / gamma_boost**2) ** 0.5 + + +# Analyze the diagnostics showing quantities in the lab frame +filename = sys.argv[1] +ts_lab = OpenPMDTimeSeries(filename) + + +# Extract the growth of the peak electric field +def extract_peak_E_lab(iteration): + """ + Extract the position of the peak electric field + """ + Ex, info = ts_lab.get_field("E", "x", iteration=iteration) + Ex_max = abs(Ex).max() + z_max = info.z[abs(Ex).argmax()] + return z_max, Ex_max + + +# Loop through all iterations +# Since the radiation power is proportional to the square of the peak electric field, +# the log of the power is equal to the log of the square of the peak electric field, +# up to an additive constant. +z_lab_peak, E_lab_peak = ts_lab.iterate(extract_peak_E_lab) +log_P_peak = np.log(E_lab_peak**2) + +# Pick the iterations between which the growth of the log of the power is linear +# (i.e. the growth of the power is exponential) and fit a line to extract the +# gain length. +i_start = 6 +i_end = 23 +# Perform linear fit +p = np.polyfit(z_lab_peak[i_start:i_end], log_P_peak[i_start:i_end], 1) +# Extract the gain length +Lg = 1 / p[0] +Lg_expected = 0.22 # Expected gain length from https://arxiv.org/pdf/2009.13645 +print(f"Gain length: {Lg}") +assert abs(Lg - Lg_expected) / Lg_expected < 0.15 + +# Check that the radiation wavelength is the expected one +iteration_check = 14 +Ex, info = ts_lab.get_field("E", "x", iteration=iteration_check) +Nz = len(info.z) +fft_E = abs(np.fft.fft(Ex)) +lambd = 1.0 / np.fft.fftfreq(Nz, d=info.dz) +lambda_radiation_lab = lambd[fft_E[: Nz // 2].argmax()] +lambda_expected = lambda_u / (2 * gamma_boost**2) +print(f"lambda_radiation_lab: {lambda_radiation_lab}") +print(f"lambda_expected: {lambda_expected}") +assert abs(lambda_radiation_lab - lambda_expected) / lambda_expected < 0.01 + +# Analyze the diagnostics showing quantities in the boosted frame +ts = OpenPMDTimeSeries("diags/diag_boostedframe") + + +# Extract the growth of the peak electric field +def extract_peak_E_boost(iteration): + """ + Extract the peak electric field in a *boosted-frame* snapshot. + Also return the position of the peak in the lab frame. + """ + Ex, info = ts.get_field("E", "x", iteration=iteration) + By, info = ts.get_field("B", "y", iteration=iteration) + E_lab = gamma_boost * (Ex + c * beta_boost * By) + E_lab_peak = abs(E_lab).max() + z_boost_peak = info.z[abs(E_lab).argmax()] + t_boost_peak = ts.current_t + z_lab_peak = gamma_boost * (z_boost_peak + beta_boost * c * t_boost_peak) + return z_lab_peak, E_lab_peak + + +# Loop through all iterations +z_lab_peak, E_lab_peak = ts.iterate(extract_peak_E_boost) +log_P_peak = np.log(E_lab_peak**2) + +# Pick the iterations between which the growth of the log of the power is linear +# (i.e. the growth of the power is exponential) and fit a line to extract the +# gain length. 
+i_start = 16 +i_end = 25 +# Perform linear fit +p = np.polyfit(z_lab_peak[i_start:i_end], log_P_peak[i_start:i_end], 1) +# Extract the gain length +Lg = 1 / p[0] +Lg_expected = 0.22 # Expected gain length from https://arxiv.org/pdf/2009.13645 +print(f"Gain length: {Lg}") +assert abs(Lg - Lg_expected) / Lg_expected < 0.15 + +# Check that the radiation wavelength is the expected one +iteration_check = 2000 +Ex, info = ts.get_field("E", "x", iteration=iteration_check) +By, info = ts.get_field("B", "y", iteration=iteration_check) +E_lab = gamma_boost * (Ex + c * beta_boost * By) +Nz = len(info.z) +fft_E = abs(np.fft.fft(E_lab)) +lambd = 1.0 / np.fft.fftfreq(Nz, d=info.dz) +lambda_radiation_boost = lambd[fft_E[: Nz // 2].argmax()] +lambda_radiation_lab = lambda_radiation_boost / (2 * gamma_boost) +lambda_expected = lambda_u / (2 * gamma_boost**2) +assert abs(lambda_radiation_lab - lambda_expected) / lambda_expected < 0.01 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel b/Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel new file mode 100644 index 00000000000..79fdadab8ae --- /dev/null +++ b/Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel @@ -0,0 +1,92 @@ +my_constants.gamma_bunch=100.6 +my_constants.Bu = 0.5 +my_constants.lambda_u = 3e-2 +my_constants.k_u= 2*pi/lambda_u +my_constants.K = q_e*Bu/(m_e*clight*k_u) # Undulator parameter + +warpx.gamma_boost = gamma_bunch/sqrt(1+K*K/2) # Lorentz factor of the ponderomotive frame +warpx.boost_direction = z +algo.maxwell_solver = yee +algo.particle_shape = 2 +algo.particle_pusher = vay + +# geometry +geometry.dims = 1 +geometry.prob_hi = 0 +geometry.prob_lo = -192e-6 + +amr.max_grid_size = 1024 +amr.max_level = 0 +amr.n_cell = 1024 + +# boundary +boundary.field_hi = absorbing_silver_mueller +boundary.field_lo = absorbing_silver_mueller +boundary.particle_hi = absorbing +boundary.particle_lo = absorbing + +# diagnostics +diagnostics.diags_names = diag_labframe diag_boostedframe + +# Diagnostic that show quantities in the frame +# of the simulation (boosted-frame) +diag_boostedframe.diag_type = Full +diag_boostedframe.format = openpmd +diag_boostedframe.intervals = 100 + +# Diagnostic that show quantities +# reconstructed in the lab frame +diag_labframe.diag_type = BackTransformed +diag_labframe.num_snapshots_lab = 25 +diag_labframe.dz_snapshots_lab = 0.1 +diag_labframe.format = openpmd +diag_labframe.buffer_size = 64 + +# Run the simulation long enough for +# all backtransformed diagnostic to be complete +warpx.compute_max_step_from_btd = 1 + +particles.species_names = electrons positrons +particles.rigid_injected_species= electrons positrons + +electrons.charge = -q_e +electrons.injection_style = nuniformpercell +electrons.mass = m_e +electrons.momentum_distribution_type = constant +electrons.num_particles_per_cell_each_dim = 8 +electrons.profile = constant +electrons.density = 2.7e19/2 +electrons.ux = 0.0 +electrons.uy = 0.0 +electrons.uz = gamma_bunch +electrons.zmax = -25e-6 +electrons.zmin = -125e-6 +electrons.zinject_plane=0.0 +electrons.rigid_advance=0 + +positrons.charge = q_e +positrons.injection_style = nuniformpercell +positrons.mass = m_e +positrons.momentum_distribution_type = constant +positrons.num_particles_per_cell_each_dim = 8 +positrons.profile = constant +positrons.density = 2.7e19/2 +positrons.ux = 0.0 +positrons.uy = 
0.0 +positrons.uz = gamma_bunch +positrons.zmax = -25e-6 +positrons.zmin = -125e-6 +positrons.zinject_plane=0.0 +positrons.rigid_advance=0 + +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = sqrt(1-(1+K*K/2)/(gamma_bunch*gamma_bunch)) + +# Undulator field +particles.B_ext_particle_init_style = parse_B_ext_particle_function +particles.Bx_external_particle_function(x,y,z,t) = 0 +particles.By_external_particle_function(x,y,z,t) = if( z>0, Bu*cos(k_u*z), 0 ) +particles.Bz_external_particle_function(x,y,z,t) =0.0 + +warpx.cfl = 0.99 diff --git a/Examples/Physics_applications/free_electron_laser/plot_sim.py b/Examples/Physics_applications/free_electron_laser/plot_sim.py new file mode 100644 index 00000000000..e7635d65790 --- /dev/null +++ b/Examples/Physics_applications/free_electron_laser/plot_sim.py @@ -0,0 +1,52 @@ +import matplotlib.pyplot as plt +from openpmd_viewer import OpenPMDTimeSeries + +ts = OpenPMDTimeSeries("./diags/diag_labframe/") + + +def extract_peak_E(iteration): + """ + Extract peak electric field and its position + """ + Ex, info = ts.get_field("E", "x", iteration=iteration) + Ex_max = abs(Ex).max() + z_max = info.z[abs(Ex).argmax()] + return z_max, Ex_max + + +# Loop through the lab-frame snapshots and extract the peak electric field +z_max, Ex_max = ts.iterate(extract_peak_E) + +# Create a figure +plt.figure(figsize=(8, 4)) + +# Plot of the E field growth +plt.subplot(121) # Span all rows in the first column +plt.semilogy(z_max, Ex_max) +plt.ylim(2e7, 2e9) +plt.xlabel("z (m)") +plt.ylabel("Peak $E_x$ (V/m)") +plt.title("Growth of the radiation field\n along the undulator") + +# Plots of snapshot +iteration = 16 +plt.subplot(122) # Upper right panel + + +plt.ylabel("$E_x$ (V/m)") +plt.xlabel("") +ts.get_particle(["z"], iteration=iteration, nbins=300, species="electrons", plot=True) +plt.title("") +plt.ylim(0, 30e12) +plt.ylabel("Electron density (a. 
u.)", color="b") +plt.twinx() +Ex, info = ts.get_field("E", "x", iteration=iteration, plot=True) +plt.ylabel("$E_x$ (V/m)", color="r") +plt.plot(info.z, Ex, color="r") +plt.ylim(-0.6e9, 0.4e9) +plt.xlabel("z (m)") +plt.title("Snapshot 1.6 m into the undulator") + +plt.tight_layout() + +plt.savefig("FEL.png") diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py index 934d298c6b7..03369d48adf 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py @@ -25,7 +25,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -188,5 +188,8 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py index f136ffeb1d4..0e07ddf914c 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py @@ -1,10 +1,14 @@ #!/usr/bin/env python3 +import os import sys import numpy as np import openpmd_api as io +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + filename = sys.argv[1] series = io.Series(f"{filename}/openpmd_%T.h5", io.Access.read_only) @@ -63,3 +67,10 @@ assert ( (electron_meanz > 0) and (beam_meanz < 0) ), "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?" 
+ +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py index bc7fac15247..d481075c112 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -59,5 +59,8 @@ # Test uniformity up to 0.5% relative variation assert rho_slice.std() < 0.005 * abs(rho_slice.mean()) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/laser_acceleration/inputs_base_1d b/Examples/Physics_applications/laser_acceleration/inputs_base_1d new file mode 100644 index 00000000000..95e54c7d43e --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/inputs_base_1d @@ -0,0 +1,84 @@ +################################# +####### GENERAL PARAMETERS ###### +################################# +max_step = 100 +amr.n_cell = 256 +amr.max_grid_size = 64 # maximum size of each AMReX box, used to decompose the domain +amr.blocking_factor = 32 # minimum size of each AMReX box, used to decompose the domain +geometry.dims = 1 +geometry.prob_lo = -56.e-6 # physical domain +geometry.prob_hi = 12.e-6 +amr.max_level = 0 # Maximum level in hierarchy (1 might be unstable, >1 is not supported) + +################################# +####### Boundary condition ###### +################################# +boundary.field_lo = pec +boundary.field_hi = pec + +################################# +############ NUMERICS ########### +################################# +warpx.verbose = 1 +warpx.do_dive_cleaning = 0 +warpx.use_filter = 1 +warpx.cfl = 0.9 # if 1., the time step is set to its CFL limit +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = 1.0 # units of speed of light +warpx.do_dynamic_scheduling = 0 +warpx.serialize_initial_conditions = 1 + +# Order of particle shape factors +algo.particle_shape = 3 + +################################# +############ PLASMA ############# +################################# +particles.species_names = electrons + +electrons.species_type = electron +electrons.injection_style = "NUniformPerCell" +electrons.num_particles_per_cell_each_dim = 10 +electrons.zmin = 10.e-6 +electrons.profile = constant +electrons.density = 2.e23 # number of electrons per m^3 +electrons.momentum_distribution_type = "at_rest" +electrons.do_continuous_injection = 1 +electrons.addRealAttributes = orig_z +electrons.attribute.orig_z(x,y,z,ux,uy,uz,t) = "z" +electrons.addIntegerAttributes = regionofinterest +electrons.attribute.regionofinterest(x,y,z,ux,uy,uz,t) = " (z>12.0e-6) * (z<13.0e-6)" + +################################# +############ LASER ############## +################################# +lasers.names = laser1 +laser1.profile = Gaussian +laser1.position = 0. 0. 9.e-6 # This point is on the laser plane +laser1.direction = 0. 0. 1. # The plane normal direction +laser1.polarization = 0. 1. 0. 
# The main polarization vector +laser1.e_max = 16.e12 # Maximum amplitude of the laser field (in V/m) +laser1.profile_waist = 5.e-6 # The waist of the laser (in m) +laser1.profile_duration = 15.e-15 # The duration of the laser (in s) +laser1.profile_t_peak = 30.e-15 # Time at which the laser reaches its peak (in s) +laser1.profile_focal_distance = 100.e-6 # Focal distance from the antenna (in m) +laser1.wavelength = 0.8e-6 # The wavelength of the laser (in m) + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 100 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho + +# Reduced Diagnostics +warpx.reduced_diags_names = FP + +FP.type = FieldProbe +FP.intervals = 10 +FP.integrate = 0 +FP.probe_geometry = Line +FP.z_probe = -56e-6 +FP.z1_probe = 12e-6 +FP.resolution = 100 +FP.do_moving_window_FP = 1 diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration index 95e54c7d43e..190b458b397 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration @@ -1,84 +1,2 @@ -################################# -####### GENERAL PARAMETERS ###### -################################# -max_step = 100 -amr.n_cell = 256 -amr.max_grid_size = 64 # maximum size of each AMReX box, used to decompose the domain -amr.blocking_factor = 32 # minimum size of each AMReX box, used to decompose the domain -geometry.dims = 1 -geometry.prob_lo = -56.e-6 # physical domain -geometry.prob_hi = 12.e-6 -amr.max_level = 0 # Maximum level in hierarchy (1 might be unstable, >1 is not supported) - -################################# -####### Boundary condition ###### -################################# -boundary.field_lo = pec -boundary.field_hi = pec - -################################# -############ NUMERICS ########### -################################# -warpx.verbose = 1 -warpx.do_dive_cleaning = 0 -warpx.use_filter = 1 -warpx.cfl = 0.9 # if 1., the time step is set to its CFL limit -warpx.do_moving_window = 1 -warpx.moving_window_dir = z -warpx.moving_window_v = 1.0 # units of speed of light -warpx.do_dynamic_scheduling = 0 -warpx.serialize_initial_conditions = 1 - -# Order of particle shape factors -algo.particle_shape = 3 - -################################# -############ PLASMA ############# -################################# -particles.species_names = electrons - -electrons.species_type = electron -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 10 -electrons.zmin = 10.e-6 -electrons.profile = constant -electrons.density = 2.e23 # number of electrons per m^3 -electrons.momentum_distribution_type = "at_rest" -electrons.do_continuous_injection = 1 -electrons.addRealAttributes = orig_z -electrons.attribute.orig_z(x,y,z,ux,uy,uz,t) = "z" -electrons.addIntegerAttributes = regionofinterest -electrons.attribute.regionofinterest(x,y,z,ux,uy,uz,t) = " (z>12.0e-6) * (z<13.0e-6)" - -################################# -############ LASER ############## -################################# -lasers.names = laser1 -laser1.profile = Gaussian -laser1.position = 0. 0. 9.e-6 # This point is on the laser plane -laser1.direction = 0. 0. 1. # The plane normal direction -laser1.polarization = 0. 1. 0. 
# The main polarization vector -laser1.e_max = 16.e12 # Maximum amplitude of the laser field (in V/m) -laser1.profile_waist = 5.e-6 # The waist of the laser (in m) -laser1.profile_duration = 15.e-15 # The duration of the laser (in s) -laser1.profile_t_peak = 30.e-15 # Time at which the laser reaches its peak (in s) -laser1.profile_focal_distance = 100.e-6 # Focal distance from the antenna (in m) -laser1.wavelength = 0.8e-6 # The wavelength of the laser (in m) - -# Diagnostics -diagnostics.diags_names = diag1 -diag1.intervals = 100 -diag1.diag_type = Full -diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho - -# Reduced Diagnostics -warpx.reduced_diags_names = FP - -FP.type = FieldProbe -FP.intervals = 10 -FP.integrate = 0 -FP.probe_geometry = Line -FP.z_probe = -56e-6 -FP.z1_probe = 12e-6 -FP.resolution = 100 -FP.do_moving_window_FP = 1 +# base input parameters +FILE = inputs_base_1d diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py index 8819c435fb7..1795f5dfb6e 100755 --- a/Examples/Physics_applications/spacecraft_charging/analysis.py +++ b/Examples/Physics_applications/spacecraft_charging/analysis.py @@ -23,13 +23,9 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum -# Open plotfile specified in command line filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") - ts = OpenPMDTimeSeries(filename) dt = 1.27e-8 t = [] @@ -78,3 +74,10 @@ def func(x, v0, tau): assert (diff_v0 < tolerance_v0) and ( diff_tau < tolerance_tau ), "Test spacecraft_charging did not pass" + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/accelerator_lattice/analysis.py b/Examples/Tests/accelerator_lattice/analysis.py index 6f76fd86855..b208d086d8c 100755 --- a/Examples/Tests/accelerator_lattice/analysis.py +++ b/Examples/Tests/accelerator_lattice/analysis.py @@ -24,7 +24,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] ds = yt.load(filename) @@ -131,5 +131,8 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): "error in x particle velocity" ) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/boosted_diags/analysis.py b/Examples/Tests/boosted_diags/analysis.py index 62956133af6..0d4794a8894 100755 --- a/Examples/Tests/boosted_diags/analysis.py +++ b/Examples/Tests/boosted_diags/analysis.py @@ -27,7 +27,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -56,5 +56,8 @@ (w,) = ts.get_particle(["w"], species="beam", iteration=3) assert (400 < len(w)) & (len(w) < 600) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/boundaries/analysis.py b/Examples/Tests/boundaries/analysis.py index 
be76a728a1f..ce3251ea406 100755 --- a/Examples/Tests/boundaries/analysis.py +++ b/Examples/Tests/boundaries/analysis.py @@ -23,7 +23,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # The min and max size of the box along the three axis. dmin = -1.0 @@ -111,5 +111,8 @@ def do_periodic(x): np.abs((zz - zza) / zz) < 1.0e-15 ), "Periodic particle position not correct" -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/btd_rz/analysis.py b/Examples/Tests/btd_rz/analysis.py index 5002b4c80b3..87f74599105 100755 --- a/Examples/Tests/btd_rz/analysis.py +++ b/Examples/Tests/btd_rz/analysis.py @@ -17,7 +17,7 @@ from scipy.optimize import curve_fit sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum def gaussian_laser(z, a0, z0_phase, z0_prop, ctau, lambda0): @@ -58,6 +58,8 @@ def fit_function(z, z0_phase): ## Check that the a0 agrees within 5% of the predicted value assert np.allclose(Ex, Ex_fit, atol=0.18 * Ex.max()) -# Checksum regression analysis -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, plotfile) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collider_relevant_diags/analysis.py b/Examples/Tests/collider_relevant_diags/analysis.py index f6eb9de124f..232bc47af21 100755 --- a/Examples/Tests/collider_relevant_diags/analysis.py +++ b/Examples/Tests/collider_relevant_diags/analysis.py @@ -8,8 +8,8 @@ import pandas as pd from scipy.constants import c, e, hbar, m_e -sys.path.append("../../../../warpx/Regression/Checksum/") -import checksumAPI +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum sys.path.append("../../../../warpx/Tools/Parser/") from input_file_parser import parse_input_file @@ -180,7 +180,8 @@ def dL_dt(): dL_dt_cr = df[[col for col in df.columns if "dL_dt" in col]].to_numpy() assert np.allclose(dL_dt_cr, dL_dt(), rtol=1e-8) -# Checksum analysis -plotfile = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, plotfile) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_1d.py b/Examples/Tests/collision/analysis_collision_1d.py index 1888696953e..97ddee0591d 100755 --- a/Examples/Tests/collision/analysis_collision_1d.py +++ b/Examples/Tests/collision/analysis_collision_1d.py @@ -23,7 +23,7 @@ from scipy.constants import e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file last_fn = sys.argv[1] @@ -124,5 +124,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_2d.py b/Examples/Tests/collision/analysis_collision_2d.py index 7e1d74001a3..7ce3e4cdf2e 100755 --- 
a/Examples/Tests/collision/analysis_collision_2d.py +++ b/Examples/Tests/collision/analysis_collision_2d.py @@ -33,7 +33,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum test_name = os.path.split(os.getcwd())[1] @@ -121,4 +121,8 @@ last_fn, random_filter_fn, random_fraction, dim, species_name ) -checksumAPI.evaluate_checksum(test_name, last_fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_3d.py b/Examples/Tests/collision/analysis_collision_3d.py index 0a1b016a227..59c625d3cb8 100755 --- a/Examples/Tests/collision/analysis_collision_3d.py +++ b/Examples/Tests/collision/analysis_collision_3d.py @@ -33,7 +33,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.001 @@ -111,5 +111,8 @@ last_fn, random_filter_fn, random_fraction, dim, species_name ) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_3d_isotropization.py b/Examples/Tests/collision/analysis_collision_3d_isotropization.py index 6386ce74812..2cfe7f9fffd 100755 --- a/Examples/Tests/collision/analysis_collision_3d_isotropization.py +++ b/Examples/Tests/collision/analysis_collision_3d_isotropization.py @@ -19,7 +19,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum e = sc.e pi = sc.pi @@ -64,5 +64,8 @@ print(f"tolerance = {tolerance}") assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_rz.py b/Examples/Tests/collision/analysis_collision_rz.py index 168d8a8a7cf..2df2f6500d2 100755 --- a/Examples/Tests/collision/analysis_collision_rz.py +++ b/Examples/Tests/collision/analysis_collision_rz.py @@ -24,7 +24,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 1.0e-15 @@ -55,5 +55,9 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_fn, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/diff_lumi_diag/CMakeLists.txt b/Examples/Tests/diff_lumi_diag/CMakeLists.txt index 1651d74115e..481847a023d 100644 --- a/Examples/Tests/diff_lumi_diag/CMakeLists.txt +++ b/Examples/Tests/diff_lumi_diag/CMakeLists.txt @@ -2,10 +2,20 @@ # add_warpx_test( - test_3d_diff_lumi_diag # name + test_3d_diff_lumi_diag_leptons # name 3 # dims 2 # nprocs - inputs_test_3d_diff_lumi_diag # inputs + inputs_test_3d_diff_lumi_diag_leptons # inputs + analysis.py # analysis + diags/diag1000080 # output + OFF # dependency +) + +add_warpx_test( + test_3d_diff_lumi_diag_photons # name + 3 # dims + 2 # nprocs + inputs_test_3d_diff_lumi_diag_photons # inputs analysis.py # analysis diags/diag1000080 # output OFF # 
dependency diff --git a/Examples/Tests/diff_lumi_diag/analysis.py b/Examples/Tests/diff_lumi_diag/analysis.py index ef573fc4863..41501b1915d 100755 --- a/Examples/Tests/diff_lumi_diag/analysis.py +++ b/Examples/Tests/diff_lumi_diag/analysis.py @@ -11,7 +11,7 @@ from read_raw_data import read_reduced_diags_histogram sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Extract the differential luminosity from the file _, _, E_bin, bin_data = read_reduced_diags_histogram( @@ -37,16 +37,28 @@ * np.exp(-((E_bin - 2 * E_beam) ** 2) / (2 * sigma_E**2)) ) +# Extract test name from path +test_name = os.path.split(os.getcwd())[1] +print("test_name", test_name) + +# Pick tolerance +if "leptons" in test_name: + tol = 1e-2 +elif "photons" in test_name: + # In the photons case, the particles are + # initialized from a density distribution ; + # tolerance is larger due to lower particle statistics + tol = 6e-2 + # Check that the simulation result and analytical result match error = abs(dL_dE_sim - dL_dE_th).max() / abs(dL_dE_th).max() -tol = 1e-2 print("Relative error: ", error) print("Tolerance: ", tol) assert error < tol -# Get name of the test -fn = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2) +# compare checksums +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], + rtol=1e-2, +) diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag b/Examples/Tests/diff_lumi_diag/inputs_base_3d similarity index 81% rename from Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag rename to Examples/Tests/diff_lumi_diag/inputs_base_3d index e8854937b6e..ba3c823b52b 100644 --- a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag +++ b/Examples/Tests/diff_lumi_diag/inputs_base_3d @@ -6,12 +6,11 @@ my_constants.mc2_eV = m_e*clight*clight/q_e # BEAMS my_constants.beam_energy_eV = 125.e9 my_constants.beam_gamma = beam_energy_eV/(mc2_eV) -my_constants.beam_charge = 1.2e10*q_e +my_constants.beam_N = 1.2e10 my_constants.sigmax = 500e-9 my_constants.sigmay = 10e-9 my_constants.sigmaz = 300e-3 -my_constants.muz = -4*sigmaz -my_constants.nmacropart = 2e5 +my_constants.muz = 4*sigmaz # BOX my_constants.Lx = 8*sigmax @@ -62,17 +61,6 @@ warpx.poisson_solver = fft ################################# particles.species_names = beam1 beam2 -beam1.species_type = electron -beam1.injection_style = gaussian_beam -beam1.x_rms = sigmax -beam1.y_rms = sigmay -beam1.z_rms = sigmaz -beam1.x_m = 0 -beam1.y_m = 0 -beam1.z_m = muz -beam1.npart = nmacropart -beam1.q_tot = -beam_charge -beam1.z_cut = 4 beam1.momentum_distribution_type = gaussian beam1.uz_m = beam_gamma beam1.uy_m = 0.0 @@ -82,17 +70,6 @@ beam1.uy_th = 0 beam1.uz_th = 0.02*beam_gamma beam1.do_not_deposit = 1 -beam2.species_type = positron -beam2.injection_style = gaussian_beam -beam2.x_rms = sigmax -beam2.y_rms = sigmay -beam2.z_rms = sigmaz -beam2.x_m = 0 -beam2.y_m = 0 -beam2.z_m = -muz -beam2.npart = nmacropart -beam2.q_tot = beam_charge -beam2.z_cut = 4 beam2.momentum_distribution_type = gaussian beam2.uz_m = -beam_gamma beam2.uy_m = 0.0 @@ -108,7 +85,7 @@ beam2.do_not_deposit = 1 # FULL diagnostics.diags_names = diag1 -diag1.intervals = 1 +diag1.intervals = 80 diag1.diag_type = Full diag1.write_species = 1 diag1.fields_to_plot = rho_beam1 rho_beam2 diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons 
b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons
new file mode 100644
index 00000000000..1cded30d3af
--- /dev/null
+++ b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons
@@ -0,0 +1,31 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# Test with electrons/positrons: use gaussian beam distribution
+# by providing the total charge (q_tot)
+
+my_constants.nmacropart = 2e5
+
+beam1.species_type = electron
+beam1.injection_style = gaussian_beam
+beam1.x_rms = sigmax
+beam1.y_rms = sigmay
+beam1.z_rms = sigmaz
+beam1.x_m = 0
+beam1.y_m = 0
+beam1.z_m = -muz
+beam1.npart = nmacropart
+beam1.q_tot = -beam_N*q_e
+beam1.z_cut = 4
+
+beam2.species_type = positron
+beam2.injection_style = gaussian_beam
+beam2.x_rms = sigmax
+beam2.y_rms = sigmay
+beam2.z_rms = sigmaz
+beam2.x_m = 0
+beam2.y_m = 0
+beam2.z_m = muz
+beam2.npart = nmacropart
+beam2.q_tot = beam_N*q_e
+beam2.z_cut = 4
diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons
new file mode 100644
index 00000000000..f0ef254d911
--- /dev/null
+++ b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons
@@ -0,0 +1,28 @@
+# base input parameters
+FILE = inputs_base_3d
+
+# Test with photons: use parse_density_function
+
+beam1.species_type = photon
+beam1.injection_style = "NUniformPerCell"
+beam1.num_particles_per_cell_each_dim = 1 1 1
+beam1.profile = parse_density_function
+beam1.density_function(x,y,z) = "beam_N/(sqrt(2*pi)*2*pi*sigmax*sigmay*sigmaz)*exp(-x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay)-(z+muz)*(z+muz)/(2*sigmaz*sigmaz))"
+beam1.xmin = -4*sigmax
+beam1.xmax = 4*sigmax
+beam1.ymin = -4*sigmay
+beam1.ymax = 4*sigmay
+beam1.zmin =-muz-4*sigmaz
+beam1.zmax =-muz+4*sigmaz
+
+beam2.species_type = photon
+beam2.injection_style = "NUniformPerCell"
+beam2.num_particles_per_cell_each_dim = 1 1 1
+beam2.profile = parse_density_function
+beam2.xmin = -4*sigmax
+beam2.xmax = 4*sigmax
+beam2.ymin = -4*sigmay
+beam2.ymax = 4*sigmay
+beam2.zmin = muz-4*sigmaz
+beam2.zmax = muz+4*sigmaz
+beam2.density_function(x,y,z) = "beam_N/(sqrt(2*pi)*2*pi*sigmax*sigmay*sigmaz)*exp(-x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay)-(z-muz)*(z-muz)/(2*sigmaz*sigmaz))"
diff --git a/Examples/Tests/divb_cleaning/analysis.py b/Examples/Tests/divb_cleaning/analysis.py
index e534e5b0d59..d72226a01cc 100755
--- a/Examples/Tests/divb_cleaning/analysis.py
+++ b/Examples/Tests/divb_cleaning/analysis.py
@@ -15,9 +15,8 @@ import yt
 
 yt.funcs.mylog.setLevel(50)
 
-import re
 
-import checksumAPI
+from checksumAPI import evaluate_checksum
 from scipy.constants import c
 
 # Name of the last plotfile
@@ -53,9 +52,8 @@
 
 assert rel_error < tolerance
 
-test_name = os.path.split(os.getcwd())[1]
-
-if re.search("single_precision", fn):
-    checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3)
-else:
-    checksumAPI.evaluate_checksum(test_name, fn)
+# compare checksums
+evaluate_checksum(
+    test_name=os.path.split(os.getcwd())[1],
+    output_file=sys.argv[1],
+)
diff --git a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py
index 82fe061c3a8..1b8f6923c1c 100755
--- a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py
+++ b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py
@@ -14,10 +14,16 @@
 # Possible running time: ~ 19 s
 
 import glob
+import os
+import re
+import sys
 
 import numpy as np
 import yt
 
+sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+from 
checksumAPI import evaluate_checksum + files = sorted(glob.glob("diags/diag1*"))[1:] assert len(files) > 0 @@ -39,3 +45,11 @@ assert np.allclose(potentials_lo, expected_potentials_lo, rtol=0.1) assert np.allclose(potentials_hi, expected_potentials_hi, rtol=0.1) + +# compare checksums +test_name = os.path.split(os.getcwd())[1] +test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py index 33842058b0b..dd15a6492f1 100755 --- a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py +++ b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py @@ -29,7 +29,7 @@ from scipy.optimize import fsolve sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum yt.funcs.mylog.setLevel(0) @@ -193,5 +193,8 @@ def return_energies(iteration): Ek_i + Ep_i ) # Check conservation of energy -# Checksum regression analysis -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis.py b/Examples/Tests/electrostatic_sphere_eb/analysis.py index 71b3bfa3aa5..e12070119ac 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis.py @@ -8,10 +8,9 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - # Check reduced diagnostics for charge on EB import numpy as np +from checksumAPI import evaluate_checksum from scipy.constants import epsilon_0 # Theoretical charge on the embedded boundary, for sphere at potential phi_0 @@ -29,6 +28,8 @@ q_sim_eighth = data_eighth[1, 2] assert abs((q_sim_eighth - q_th / 8) / (q_th / 8)) < 0.06 -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py index b33f19488d0..e3976c95e68 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py @@ -24,7 +24,7 @@ from unyt import m sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.0041 @@ -67,5 +67,9 @@ print("tolerance = ", tolerance) assert errmax_phi < tolerance and errmax_Er < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py index 77710ca5f72..586b35fc7a4 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py @@ -19,7 +19,7 @@ from openpmd_viewer import OpenPMDTimeSeries sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum 
tolerance = 0.004 print(f"tolerance = {tolerance}") @@ -110,5 +110,9 @@ def get_error_per_lev(ts, level): for level in range(nlevels + 1): get_error_per_lev(ts, level) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd") +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_cube/analysis_fields.py index 49da1a76edd..3202ccfaca2 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields.py @@ -9,7 +9,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. @@ -110,4 +110,8 @@ rel_err_z = np.sqrt(np.sum(np.square(Bz_sim - Bz_th)) / np.sum(np.square(Bz_th))) assert rel_err_z < rel_tol_err -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py index 70a5b7d46c5..454d78169b7 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py @@ -8,7 +8,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. 
@@ -63,6 +63,8 @@ Ey_sim = data["Ey"].to_ndarray() rel_err_y = np.sqrt(np.sum(np.square(Ey_sim / c - By_th)) / np.sum(np.square(By_th))) -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py index 84dfacbb505..8f0b7818516 100755 --- a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py @@ -15,7 +15,7 @@ from scipy.ndimage import gaussian_filter1d sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] ts = OpenPMDTimeSeries(filename) @@ -42,6 +42,9 @@ def r_first_minimum(iz): theta_diffraction = np.arcsin(1.22 * 0.1 / 0.4) / 2 assert np.all(abs(r[50:] - theta_diffraction * info.z[50:]) < 0.03) -# Open the right plot file -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/embedded_boundary_python_api/analysis.py b/Examples/Tests/embedded_boundary_python_api/analysis.py index 09cc2accfea..7fda682f618 100755 --- a/Examples/Tests/embedded_boundary_python_api/analysis.py +++ b/Examples/Tests/embedded_boundary_python_api/analysis.py @@ -3,8 +3,17 @@ # This script just checks that the PICMI file executed successfully. # If it did there will be a plotfile for the final step. +import os import sys -step = int(sys.argv[1][-5:]) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum +step = int(sys.argv[1][-5:]) assert step == 2 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py index 6f3904e8764..451913fd54d 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py @@ -8,7 +8,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. 
@@ -68,6 +68,8 @@ rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) assert rel_err_y < rel_tol_err -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py index 968ebe395a5..838c9c82479 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py @@ -15,7 +15,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator rotated by pi/8. @@ -144,6 +144,8 @@ ) assert rel_err_z < rel_tol_err -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_circle/analysis.py b/Examples/Tests/embedded_circle/analysis.py index 569ca40dce4..d1bb04fedb6 100755 --- a/Examples/Tests/embedded_circle/analysis.py +++ b/Examples/Tests/embedded_circle/analysis.py @@ -4,13 +4,11 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2) +from checksumAPI import evaluate_checksum + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-2, +) diff --git a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py index 4cf7b4ff4e6..0d29f85e7eb 100755 --- a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py +++ b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py @@ -18,10 +18,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # Get energy as a function of time, from reduced diagnostics EFdata = np.genfromtxt("./diags/reducedfiles/EF.txt") # Field energy @@ -33,6 +30,8 @@ # Check that the energy is conserved to 0.3% assert np.all(abs(E - E[0]) / E[0] < 0.003) -# Checksum test -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/field_ionization/analysis.py b/Examples/Tests/field_ionization/analysis.py index 62d3f839941..a02c293601b 100755 --- a/Examples/Tests/field_ionization/analysis.py +++ b/Examples/Tests/field_ionization/analysis.py @@ -26,7 +26,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line, and get ion's ionization level. 
filename = sys.argv[1] @@ -107,5 +107,8 @@ except yt.utilities.exceptions.YTFieldNotFound: pass # The backtransformed diagnostic version of the test does not have orig_z -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/field_probe/analysis.py b/Examples/Tests/field_probe/analysis.py index 57085fb7cdc..e974e284b65 100755 --- a/Examples/Tests/field_probe/analysis.py +++ b/Examples/Tests/field_probe/analysis.py @@ -18,9 +18,15 @@ which can be solved analytically. """ +import os +import sys + import numpy as np import pandas as pd +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + filename = "diags/reducedfiles/FP_line.txt" # Open data file @@ -59,3 +65,9 @@ def I_envelope(x, lam=0.2e-6, a=0.3e-6, D=1.7e-6): print("Average error greater than 2.5%") assert averror < 2.5 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/flux_injection/CMakeLists.txt b/Examples/Tests/flux_injection/CMakeLists.txt index d09b83d7618..0929fc3d4c4 100644 --- a/Examples/Tests/flux_injection/CMakeLists.txt +++ b/Examples/Tests/flux_injection/CMakeLists.txt @@ -20,3 +20,33 @@ add_warpx_test( diags/diag1000120 # output OFF # dependency ) + +add_warpx_test( + test_3d_flux_injection_from_eb # name + 3 # dims + 2 # nprocs + inputs_test_3d_flux_injection_from_eb # inputs + analysis_flux_injection_from_eb.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_rz_flux_injection_from_eb # name + RZ # dims + 2 # nprocs + inputs_test_rz_flux_injection_from_eb # inputs + analysis_flux_injection_from_eb.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_2d_flux_injection_from_eb # name + 2 # dims + 2 # nprocs + inputs_test_2d_flux_injection_from_eb # inputs + analysis_flux_injection_from_eb.py # analysis + diags/diag1000010 # output + OFF # dependency +) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py index 3840bb72e74..dc89780703d 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py @@ -22,7 +22,6 @@ """ import os -import re import sys import matplotlib.pyplot as plt @@ -32,7 +31,7 @@ from scipy.special import erf sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum yt.funcs.mylog.setLevel(0) @@ -148,9 +147,8 @@ def compare_gaussian_flux(u, w, u_th, u_m, label=""): plt.tight_layout() plt.savefig("Distribution.png") -# Verify checksum -test_name = os.path.split(os.getcwd())[1] -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py new file mode 100755 index 00000000000..36ff50bea06 --- /dev/null +++ b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 +# +# Copyright 2024 Remi Lehe 
+#
+# This file is part of WarpX.
+#
+# License: BSD-3-Clause-LBNL
+
+"""
+This script tests the emission of particles from the embedded boundary.
+(In this case, the embedded boundary is a sphere in 3D and RZ, a cylinder in 2D.)
+We check that the embedded boundary emits the correct number of particles, and that
+the particle distributions are consistent with the expected distributions.
+"""
+
+import os
+import re
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+import yt
+from scipy.constants import c, m_e
+from scipy.special import erf
+
+sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+from checksumAPI import evaluate_checksum
+
+yt.funcs.mylog.setLevel(0)
+
+# Open plotfile specified in command line
+fn = sys.argv[1]
+ds = yt.load(fn)
+ad = ds.all_data()
+t_max = ds.current_time.item()  # time of simulation
+
+# Extract the dimensionality of the simulation
+with open("./warpx_used_inputs", "r") as f:
+    warpx_used_inputs = f.read()
+if re.search("geometry.dims = 2", warpx_used_inputs):
+    dims = "2D"
+elif re.search("geometry.dims = RZ", warpx_used_inputs):
+    dims = "RZ"
+elif re.search("geometry.dims = 3", warpx_used_inputs):
+    dims = "3D"
+
+# Total number of electrons expected:
+# Simulation parameters determine the total number of particles emitted (Ntot)
+flux = 1.0  # in m^-2.s^-1, from the input script
+R = 2.0  # in m, radius of the sphere
+if dims == "3D" or dims == "RZ":
+    emission_surface = 4 * np.pi * R**2  # in m^2
+elif dims == "2D":
+    emission_surface = 2 * np.pi * R  # in m
+Ntot = flux * emission_surface * t_max
+
+# Parameters of the histogram
+hist_bins = 50
+hist_range = [-0.5, 0.5]
+
+
+# Define function that histograms and checks the data
+def gaussian_dist(u, u_th):
+    return 1.0 / ((2 * np.pi) ** 0.5 * u_th) * np.exp(-(u**2) / (2 * u_th**2))
+
+
+def gaussian_flux_dist(u, u_th, u_m):
+    normalization_factor = u_th**2 * np.exp(-(u_m**2) / (2 * u_th**2)) + (
+        np.pi / 2
+    ) ** 0.5 * u_m * u_th * (1 + erf(u_m / (2**0.5 * u_th)))
+    result = (
+        1.0
+        / normalization_factor
+        * np.where(u > 0, u * np.exp(-((u - u_m) ** 2) / (2 * u_th**2)), 0)
+    )
+    return result
+
+
+def compare_gaussian(u, w, u_th, label=""):
+    du = (hist_range[1] - hist_range[0]) / hist_bins
+    w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w / du, range=hist_range)
+    u_hist = 0.5 * (u_hist[1:] + u_hist[:-1])
+    w_th = Ntot * gaussian_dist(u_hist, u_th)
+    plt.plot(u_hist, w_hist, label=label + ": simulation")
+    plt.plot(u_hist, w_th, "--", label=label + ": theory")
+    assert np.allclose(w_hist, w_th, atol=0.07 * w_th.max())
+
+
+def compare_gaussian_flux(u, w, u_th, u_m, label=""):
+    du = (hist_range[1] - hist_range[0]) / hist_bins
+    w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w / du, range=hist_range)
+    u_hist = 0.5 * (u_hist[1:] + u_hist[:-1])
+    w_th = Ntot * gaussian_flux_dist(u_hist, u_th, u_m)
+    plt.plot(u_hist, w_hist, label=label + ": simulation")
+    plt.plot(u_hist, w_th, "--", label=label + ": theory")
+    assert np.allclose(w_hist, w_th, atol=0.05 * w_th.max())
+
+
+# Load data and perform check
+
+plt.figure()
+
+if dims == "3D":
+    x = ad["electron", "particle_position_x"].to_ndarray()
+    y = ad["electron", "particle_position_y"].to_ndarray()
+    z = ad["electron", "particle_position_z"].to_ndarray()
+elif dims == "2D":
+    x = ad["electron", "particle_position_x"].to_ndarray()
+    y = np.zeros_like(x)
+    z = ad["electron", "particle_position_y"].to_ndarray()
+elif dims == "RZ":
+    theta = ad["electron", "particle_theta"].to_ndarray()
+    r = ad["electron", "particle_position_x"].to_ndarray()
+    x = r * np.cos(theta)
+    y = r * np.sin(theta)
+    z = ad["electron", "particle_position_y"].to_ndarray()
+ux = ad["electron", "particle_momentum_x"].to_ndarray() / (m_e * c)
+uy = ad["electron", "particle_momentum_y"].to_ndarray() / (m_e * c)
+uz = ad["electron", "particle_momentum_z"].to_ndarray() / (m_e * c)
+w = ad["electron", "particle_weight"].to_ndarray()
+
+# Check that the total number of particles emitted is correct
+Ntot_sim = np.sum(w)
+print("Ntot_sim = ", Ntot_sim)
+print("Ntot = ", Ntot)
+assert np.isclose(Ntot_sim, Ntot, rtol=0.01)
+
+# Check that none of the particles are inside the EB
+# A factor 0.98 is applied to accommodate
+# the cut-cell approximation of the sphere
+assert np.all(x**2 + y**2 + z**2 > (0.98 * R) ** 2)
+
+# Check that the normal component of the velocity is consistent with the expected distribution
+r = np.sqrt(x**2 + y**2 + z**2)
+nx = x / r
+ny = y / r
+nz = z / r
+u_n = ux * nx + uy * ny + uz * nz  # normal component
+compare_gaussian_flux(u_n, w, u_th=0.1, u_m=0.07, label="u_n")
+
+# Pick a direction that is orthogonal to the normal direction, and check the distribution
+vx = ny / np.sqrt(nx**2 + ny**2)
+vy = -nx / np.sqrt(nx**2 + ny**2)
+vz = 0
+u_perp = ux * vx + uy * vy + uz * vz
+compare_gaussian(u_perp, w, u_th=0.01, label="u_perp")
+
+# Pick the other perpendicular direction, and check the distribution
+# The third direction is obtained by the cross product (n x v)
+wx = ny * vz - nz * vy
+wy = nz * vx - nx * vz
+wz = nx * vy - ny * vx
+u_perp2 = ux * wx + uy * wy + uz * wz
+compare_gaussian(u_perp2, w, u_th=0.01, label="u_perp2")
+
+plt.tight_layout()
+plt.savefig("Distribution.png")
+
+# compare checksums
+test_name = os.path.split(os.getcwd())[1]
+evaluate_checksum(test_name=test_name, output_file=sys.argv[1])
diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py
index ad73fdb47af..33b487cc36b 100755
--- a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py
+++ b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py
@@ -26,14 +26,13 @@
 """
 
 import os
-import re
 import sys
 
 import numpy as np
 import yt
 
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-import checksumAPI
+from checksumAPI import evaluate_checksum
 
 yt.funcs.mylog.setLevel(0)
 
@@ -58,9 +57,8 @@
 # Check that the particles are at the right radius
 assert np.all((r >= 1.48) & (r <= 1.92))
 
-test_name = os.path.split(os.getcwd())[1]
-
-if re.search("single_precision", fn):
-    checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3)
-else:
-    checksumAPI.evaluate_checksum(test_name, fn)
+# compare checksums
+evaluate_checksum(
+    test_name=os.path.split(os.getcwd())[1],
+    output_file=sys.argv[1],
+)
diff --git a/Examples/Tests/flux_injection/inputs_base_from_eb b/Examples/Tests/flux_injection/inputs_base_from_eb
new file mode 100644
index 00000000000..3e32d8799b6
--- /dev/null
+++ b/Examples/Tests/flux_injection/inputs_base_from_eb
@@ -0,0 +1,42 @@
+# Maximum number of time steps
+max_step = 10
+
+# The lo and hi ends of grids are multipliers of blocking factor
+amr.blocking_factor = 8
+
+# Maximum allowable size of each subdomain in the problem domain;
+# this is used to decompose the domain for parallel calculations. 
+amr.max_grid_size = 8 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Deactivate Maxwell solver +algo.maxwell_solver = none +warpx.const_dt = 1e-9 + +# Embedded boundary +warpx.eb_implicit_function = "-(x**2+y**2+z**2-2**2)" + +# particles +particles.species_names = electron +algo.particle_shape = 3 + +electron.charge = -q_e +electron.mass = m_e +electron.injection_style = NFluxPerCell +electron.inject_from_embedded_boundary = 1 +electron.num_particles_per_cell = 100 +electron.flux_profile = parse_flux_function +electron.flux_function(x,y,z,t) = "1." +electron.momentum_distribution_type = gaussianflux +electron.ux_th = 0.01 +electron.uy_th = 0.01 +electron.uz_th = 0.1 +electron.uz_m = 0.07 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 10 +diag1.diag_type = Full +diag1.fields_to_plot = none diff --git a/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb new file mode 100644 index 00000000000..f2e6f177887 --- /dev/null +++ b/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb @@ -0,0 +1,13 @@ +FILE = inputs_base_from_eb + +# number of grid points +amr.n_cell = 16 16 + +# Geometry +geometry.dims = 2 +geometry.prob_lo = -4 -4 +geometry.prob_hi = 4 4 + +# Boundary condition +boundary.field_lo = periodic periodic +boundary.field_hi = periodic periodic diff --git a/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb new file mode 100644 index 00000000000..81ddc039977 --- /dev/null +++ b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb @@ -0,0 +1,13 @@ +FILE = inputs_base_from_eb + +# number of grid points +amr.n_cell = 16 16 16 + +# Geometry +geometry.dims = 3 +geometry.prob_lo = -4 -4 -4 +geometry.prob_hi = 4 4 4 + +# Boundary condition +boundary.field_lo = periodic periodic periodic +boundary.field_hi = periodic periodic periodic diff --git a/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb new file mode 100644 index 00000000000..4c970257f57 --- /dev/null +++ b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb @@ -0,0 +1,15 @@ +FILE = inputs_base_from_eb + +# number of grid points +amr.n_cell = 8 16 + +# Geometry +geometry.dims = RZ +geometry.prob_lo = 0 -4 +geometry.prob_hi = 4 4 + +# Boundary condition +boundary.field_lo = none periodic +boundary.field_hi = pec periodic + +electron.num_particles_per_cell = 300 diff --git a/Examples/Tests/gaussian_beam/analysis.py b/Examples/Tests/gaussian_beam/analysis.py index c2318d0cb7d..a2278b2cf7a 100755 --- a/Examples/Tests/gaussian_beam/analysis.py +++ b/Examples/Tests/gaussian_beam/analysis.py @@ -14,8 +14,7 @@ from scipy.constants import c, eV, m_e, micro, nano sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - -import checksumAPI +from checksumAPI import evaluate_checksum from openpmd_viewer import OpenPMDTimeSeries GeV = 1e9 * eV @@ -39,8 +38,6 @@ def s(z, sigma0, emit): return np.sqrt(sigma0**2 + emit**2 * (z - focal_distance) ** 2 / sigma0**2) -filename = sys.argv[1] - ts = OpenPMDTimeSeries("./diags/openpmd/") ( @@ -71,5 +68,8 @@ def s(z, sigma0, emit): assert np.allclose(sx, sx_theory, rtol=0.051, atol=0) assert np.allclose(sy, sy_theory, rtol=0.038, atol=0) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, 
filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/implicit/analysis_1d.py b/Examples/Tests/implicit/analysis_1d.py index bbbbb8db9b2..665fcaac951 100755 --- a/Examples/Tests/implicit/analysis_1d.py +++ b/Examples/Tests/implicit/analysis_1d.py @@ -16,10 +16,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) @@ -41,5 +38,8 @@ assert max_delta_E < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py index 3c962eb91ea..29a2c870574 100755 --- a/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py +++ b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py @@ -18,7 +18,7 @@ from scipy.constants import e, epsilon_0 sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -66,5 +66,8 @@ assert drho_rms < tolerance_rel_charge -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/initial_distribution/analysis.py b/Examples/Tests/initial_distribution/analysis.py index 6d23c5da1e4..834934df255 100755 --- a/Examples/Tests/initial_distribution/analysis.py +++ b/Examples/Tests/initial_distribution/analysis.py @@ -27,9 +27,7 @@ from read_raw_data import read_reduced_diags, read_reduced_diags_histogram sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -filename = sys.argv[1] +from checksumAPI import evaluate_checksum # print tolerance tolerance = 0.02 @@ -451,6 +449,8 @@ def Gaussian(mean, sigma, u): assert f9_error < tolerance - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/initial_plasma_profile/analysis.py b/Examples/Tests/initial_plasma_profile/analysis.py index f5fc75ee578..d372bd30a93 100755 --- a/Examples/Tests/initial_plasma_profile/analysis.py +++ b/Examples/Tests/initial_plasma_profile/analysis.py @@ -9,16 +9,13 @@ import os import sys -import yt - -yt.funcs.mylog.setLevel(50) - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# Name of the plotfile -fn = sys.argv[1] - -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-4, do_particles=False) +from checksumAPI import evaluate_checksum + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-4, + do_particles=False, +) diff --git a/Examples/Tests/ion_stopping/analysis.py b/Examples/Tests/ion_stopping/analysis.py index e343bd23fdd..45983538025 100755 --- a/Examples/Tests/ion_stopping/analysis.py +++ 
b/Examples/Tests/ion_stopping/analysis.py @@ -19,7 +19,7 @@ from scipy.constants import e, epsilon_0, k, m_e, m_p sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Define constants using the WarpX names for the evals below q_e = e @@ -194,5 +194,8 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy): assert np.all(error3 < tolerance) assert np.all(error4 < tolerance) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_1d.py b/Examples/Tests/langmuir/analysis_1d.py index d041ca03b36..8eefd95b4f7 100755 --- a/Examples/Tests/langmuir/analysis_1d.py +++ b/Examples/Tests/langmuir/analysis_1d.py @@ -27,7 +27,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # test name test_name = os.path.split(os.getcwd())[1] @@ -126,4 +126,8 @@ def get_theoretical_field(field, t): print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_2d.py b/Examples/Tests/langmuir/analysis_2d.py index ac98354c73b..31995e896a5 100755 --- a/Examples/Tests/langmuir/analysis_2d.py +++ b/Examples/Tests/langmuir/analysis_2d.py @@ -27,7 +27,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # test name test_name = os.path.split(os.getcwd())[1] @@ -163,4 +163,8 @@ def get_theoretical_field(field, t): print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_3d.py b/Examples/Tests/langmuir/analysis_3d.py index 9f4b2cc1f93..05f1c585ec0 100755 --- a/Examples/Tests/langmuir/analysis_3d.py +++ b/Examples/Tests/langmuir/analysis_3d.py @@ -27,7 +27,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # test name test_name = os.path.split(os.getcwd())[1] @@ -212,7 +212,8 @@ def get_theoretical_field(field, t): print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -if re.search("single_precision", test_name): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_rz.py b/Examples/Tests/langmuir/analysis_rz.py index dd26fd29db7..64f8cfb6313 100755 --- a/Examples/Tests/langmuir/analysis_rz.py +++ b/Examples/Tests/langmuir/analysis_rz.py @@ -30,7 +30,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -183,4 +183,8 @@ def Ez(z, 
r, epsilon, k0, w0, wp, t): fn, random_filter_fn, random_fraction, dim, species_name ) -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_1d.py b/Examples/Tests/langmuir_fluids/analysis_1d.py index fa4566b6173..c448303783f 100755 --- a/Examples/Tests/langmuir_fluids/analysis_1d.py +++ b/Examples/Tests/langmuir_fluids/analysis_1d.py @@ -26,7 +26,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -148,5 +148,8 @@ def get_theoretical_rho_field(field, t): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_2d.py b/Examples/Tests/langmuir_fluids/analysis_2d.py index d7ecca986e4..d8ba50a9df1 100755 --- a/Examples/Tests/langmuir_fluids/analysis_2d.py +++ b/Examples/Tests/langmuir_fluids/analysis_2d.py @@ -26,7 +26,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -178,5 +178,8 @@ def get_theoretical_rho_field(field, t): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_3d.py b/Examples/Tests/langmuir_fluids/analysis_3d.py index 321b528b6cb..899dc72424b 100755 --- a/Examples/Tests/langmuir_fluids/analysis_3d.py +++ b/Examples/Tests/langmuir_fluids/analysis_3d.py @@ -14,7 +14,6 @@ # $$ E_y = \epsilon \,\frac{m_e c^2 k_y}{q_e}\cos(k_x x)\sin(k_y y)\cos(k_z z)\sin( \omega_p t)$$ # $$ E_z = \epsilon \,\frac{m_e c^2 k_z}{q_e}\cos(k_x x)\cos(k_y y)\sin(k_z z)\sin( \omega_p t)$$ import os -import re import sys import matplotlib.pyplot as plt @@ -27,7 +26,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -213,9 +212,8 @@ def get_theoretical_rho_field(field, t): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_rz.py b/Examples/Tests/langmuir_fluids/analysis_rz.py index f629ddc6626..0e918a6ab31 100755 --- a/Examples/Tests/langmuir_fluids/analysis_rz.py +++ b/Examples/Tests/langmuir_fluids/analysis_rz.py @@ -29,7 +29,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -226,4 +226,8 @@ def rho(z, r, 
epsilon, k0, w0, wp, t): assert error_rel < tolerance_rel -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection/analysis_1d.py b/Examples/Tests/laser_injection/analysis_1d.py index 9215125427d..5ce7065c967 100755 --- a/Examples/Tests/laser_injection/analysis_1d.py +++ b/Examples/Tests/laser_injection/analysis_1d.py @@ -24,7 +24,7 @@ from scipy.signal import hilbert sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.05 @@ -192,8 +192,11 @@ def main(): check_laser(filename_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/laser_injection/analysis_2d.py b/Examples/Tests/laser_injection/analysis_2d.py index c6548e8be1d..5e2d9ebf280 100755 --- a/Examples/Tests/laser_injection/analysis_2d.py +++ b/Examples/Tests/laser_injection/analysis_2d.py @@ -30,7 +30,7 @@ from scipy.signal import hilbert sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.05 @@ -225,8 +225,11 @@ def main(): check_laser(filename_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/laser_injection/analysis_3d.py b/Examples/Tests/laser_injection/analysis_3d.py index bf2a03e342c..153b721b526 100755 --- a/Examples/Tests/laser_injection/analysis_3d.py +++ b/Examples/Tests/laser_injection/analysis_3d.py @@ -18,10 +18,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # you can save an image to be displayed on the website t = np.arange(0.0, 2.0, 0.01) @@ -29,5 +26,8 @@ plt.plot(t, s) plt.savefig("laser_analysis.png") -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d.py b/Examples/Tests/laser_injection_from_file/analysis_1d.py index 1b5f209cb91..c6542ed1ac8 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_1d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_1d.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -113,5 +113,8 @@ def gauss_env(T, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git 
a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py index 89c0ea3c57c..e410369cb45 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py +++ b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -113,6 +113,8 @@ def gauss_env(T, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -# Do the checksum test -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d.py b/Examples/Tests/laser_injection_from_file/analysis_2d.py index ab5649e968f..1e6704f55a5 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_2d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_2d.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -139,5 +139,8 @@ def gauss_env(T, X, Y, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py index bcb13bba410..7fc14824471 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py +++ b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py @@ -27,7 +27,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -146,5 +146,8 @@ def gauss_env(T, XX, ZZ): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_3d.py b/Examples/Tests/laser_injection_from_file/analysis_3d.py index 7d30af28639..3921e3d5930 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_3d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_3d.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -145,5 +145,8 @@ def gauss_env(T, X, Y, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare 
checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py index 72575da96b4..f797ddb5d90 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py +++ b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py @@ -29,7 +29,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -147,5 +147,8 @@ def laguerre_env(T, X, Y, Z, p, m): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_rz.py b/Examples/Tests/laser_injection_from_file/analysis_rz.py index 90e392bcf25..c37c6d8b3c2 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_rz.py +++ b/Examples/Tests/laser_injection_from_file/analysis_rz.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -140,5 +140,8 @@ def gauss_env(T, X, Y, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/load_external_field/analysis_3d.py b/Examples/Tests/load_external_field/analysis_3d.py index 0865584d683..05cba3ea7bd 100755 --- a/Examples/Tests/load_external_field/analysis_3d.py +++ b/Examples/Tests/load_external_field/analysis_3d.py @@ -23,7 +23,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 1.0e-8 x0 = 0.12238072 @@ -44,5 +44,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/load_external_field/analysis_rz.py b/Examples/Tests/load_external_field/analysis_rz.py index 75d9c084718..7de160cdd50 100755 --- a/Examples/Tests/load_external_field/analysis_rz.py +++ b/Examples/Tests/load_external_field/analysis_rz.py @@ -23,7 +23,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 1.0e-8 r0 = 0.12402005 @@ -41,5 +41,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/magnetostatic_eb/analysis_rz.py b/Examples/Tests/magnetostatic_eb/analysis_rz.py index 
05aa4a3fe47..f31069ad230 100755 --- a/Examples/Tests/magnetostatic_eb/analysis_rz.py +++ b/Examples/Tests/magnetostatic_eb/analysis_rz.py @@ -1,20 +1,14 @@ #!/usr/bin/env python3 import os -import re import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6, do_particles=False) -else: - checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) +from checksumAPI import evaluate_checksum + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py index 7bfa47f3164..ad635bf0fbe 100755 --- a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py +++ b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py @@ -19,7 +19,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum fn = sys.argv[1] use_MR = re.search("nci_correctorMR", fn) is not None @@ -50,5 +50,8 @@ assert energy < energy_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nci_psatd_stability/analysis_galilean.py b/Examples/Tests/nci_psatd_stability/analysis_galilean.py index 40c74ecc5bf..99f14d91371 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_galilean.py +++ b/Examples/Tests/nci_psatd_stability/analysis_galilean.py @@ -23,7 +23,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -118,5 +118,9 @@ print(f"tol_charge = {tol_charge}") assert err_charge < tol_charge -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, rtol=1.0e-8) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-8, +) diff --git a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py index 2a438d5d22e..6dcfb6565fe 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py +++ b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py @@ -19,7 +19,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -50,5 +50,8 @@ print(f"tol_energy = {tol_energy}") assert err_energy < tol_energy -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nodal_electrostatic/analysis.py b/Examples/Tests/nodal_electrostatic/analysis.py index c8725ce5d95..f015d525280 100755 --- a/Examples/Tests/nodal_electrostatic/analysis.py +++ b/Examples/Tests/nodal_electrostatic/analysis.py @@ -6,10 +6,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - 
-# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # check that the maximum chi value is small fname = "diags/reducedfiles/ParticleExtrema_beam_p.txt" @@ -21,6 +18,8 @@ pho_num = np.loadtxt(fname)[:, 7] assert pho_num.all() == 0.0 -# Checksum regression analysis -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py index 22de371090c..8ae0e768815 100755 --- a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py +++ b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py @@ -29,10 +29,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# Name of the plotfile -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # Load data from reduced diagnostics (physical time and neutron weights) time = np.loadtxt("./reduced_diags/particle_number.txt", usecols=1) @@ -52,6 +49,8 @@ print("tolerance = ", tolerance) assert error < tolerance -# Compare checksums with benchmark -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py index 25e898c05be..c69080ac726 100755 --- a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py @@ -12,9 +12,9 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI import numpy as np import scipy.constants as scc +from checksumAPI import evaluate_checksum ## This script performs various checks for the proton boron nuclear fusion module. The simulation ## that we check is made of 5 different tests, each with different proton, boron and alpha species. @@ -880,8 +880,11 @@ def main(): rho_end = field_data_end["rho"].to_ndarray() check_charge_conservation(rho_start, rho_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py index be1fbb0702a..1a458a25e4a 100755 --- a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py @@ -12,9 +12,9 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI import numpy as np import scipy.constants as scc +from checksumAPI import evaluate_checksum ## This script performs various checks for the fusion module. The simulation ## that we check is made of 2 different tests, each with different reactant and product species. 
@@ -555,8 +555,11 @@ def main(): rho_end = field_data_end["rho"].to_ndarray() check_charge_conservation(rho_start, rho_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/ohm_solver_em_modes/analysis.py b/Examples/Tests/ohm_solver_em_modes/analysis.py index 36869623ac4..bee634415d9 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis.py @@ -353,9 +353,10 @@ def get_analytic_L_mode(w): import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py index 4d5bc2aa016..a1eb185bbf6 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py @@ -187,9 +187,11 @@ def process(it): import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-6) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-6, + ) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py index 700ad68fe87..620331cf13f 100755 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py @@ -117,9 +117,10 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py index 5bd9db3d91d..3b0a18f29d5 100755 --- a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py @@ -236,9 +236,10 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py index 93d574e5294..e7b41d4fbb4 100755 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py +++ 
b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py @@ -190,9 +190,10 @@ def animate(i): import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/open_bc_poisson_solver/analysis.py b/Examples/Tests/open_bc_poisson_solver/analysis.py index 8d5be875c7a..8ffd9ef52e2 100755 --- a/Examples/Tests/open_bc_poisson_solver/analysis.py +++ b/Examples/Tests/open_bc_poisson_solver/analysis.py @@ -9,7 +9,7 @@ from scipy.special import erf sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum sigmaz = 300e-6 sigmax = 516e-9 @@ -37,8 +37,6 @@ def evaluate_E(x, y, z): return E_complex.imag, E_complex.real -fn = sys.argv[1] - path = os.path.join("diags", "diag2") ts = OpenPMDTimeSeries(path) @@ -64,8 +62,9 @@ def evaluate_E(x, y, z): assert np.allclose(Ey_warpx, Ey_theory, rtol=0.029, atol=0) -# Get name of the test -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-2, +) diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index 3b9d2f12b84..d06200157d2 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -17,13 +17,10 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") - ts = OpenPMDTimeSeries(filename) it = ts.iterations @@ -52,3 +49,10 @@ assert ( (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance) ), "Test particle_boundary_interaction did not pass" + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/particle_boundary_process/CMakeLists.txt b/Examples/Tests/particle_boundary_process/CMakeLists.txt index a7081fe9090..499cf445da5 100644 --- a/Examples/Tests/particle_boundary_process/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_process/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_particle_reflection_picmi.py # inputs - analysis_reflection.py # analysis + analysis_default_regression.py # analysis diags/diag1000010 # output OFF # dependency ) diff --git a/Examples/Tests/particle_boundary_process/analysis_absorption.py b/Examples/Tests/particle_boundary_process/analysis_absorption.py index fdde2622684..79e8d0e4bc6 100755 --- a/Examples/Tests/particle_boundary_process/analysis_absorption.py +++ b/Examples/Tests/particle_boundary_process/analysis_absorption.py @@ -1,15 +1,19 @@ #!/usr/bin/env python3 -import sys - -import yt - # This test shoots a beam of electrons at cubic embedded boundary geometry # At time step 40, none of the particles have hit 
the boundary yet. At time # step 60, all of them should have been absorbed by the boundary. In the # absence of the cube, none of the particles would have had time to exit # the problem domain yet. +import os +import sys + +import yt + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + # all particles are still there ds40 = yt.load("diags/diag1000040") np40 = ds40.index.particle_headers["electrons"].num_particles @@ -20,3 +24,9 @@ ds60 = yt.load(filename) np60 = ds60.index.particle_headers["electrons"].num_particles assert np60 == 0 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/particle_boundary_process/analysis_default_regression.py b/Examples/Tests/particle_boundary_process/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_boundary_process/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_boundary_process/analysis_reflection.py b/Examples/Tests/particle_boundary_process/analysis_reflection.py deleted file mode 100755 index 1187a58e75d..00000000000 --- a/Examples/Tests/particle_boundary_process/analysis_reflection.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2021 Modern Electron -# -# License: BSD-3-Clause-LBNL - -# This script just checks that the PICMI file executed successfully. -# If it did there will be a plotfile for the final step. - -import yt - -plotfile = "Python_particle_reflection_plt000010" -ds = yt.load(plotfile) # noqa - -assert True diff --git a/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py b/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py index 0803bc05d59..ef1b7d45e1a 100755 --- a/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py +++ b/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py @@ -80,16 +80,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=10, - write_dir=".", - warpx_file_prefix="Python_particle_reflection_plt", ) field_diag = picmi.FieldDiagnostic( grid=grid, name="diag1", data_list=["E"], period=10, - write_dir=".", - warpx_file_prefix="Python_particle_reflection_plt", ) ########################## diff --git a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py index a7c84b05459..01a7436a787 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py @@ -20,7 +20,7 @@ from scipy.constants import c, e, m_e, m_p sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum def do_analysis(single_precision=False): @@ -248,5 +248,9 @@ def do_analysis(single_precision=False): assert error_opmd[k] < tolerance print(k, "relative error openPMD = ", error_opmd[k]) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=check_tolerance, + ) diff --git a/Examples/Tests/particle_pusher/analysis.py b/Examples/Tests/particle_pusher/analysis.py 
index acef0e819d3..9ed92507d4d 100755 --- a/Examples/Tests/particle_pusher/analysis.py +++ b/Examples/Tests/particle_pusher/analysis.py @@ -28,7 +28,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.001 @@ -41,5 +41,8 @@ print("tolerance = ", tolerance) assert abs(x) < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/particle_thermal_boundary/analysis.py b/Examples/Tests/particle_thermal_boundary/analysis.py index 49f33b5b805..621bf2032be 100755 --- a/Examples/Tests/particle_thermal_boundary/analysis.py +++ b/Examples/Tests/particle_thermal_boundary/analysis.py @@ -20,7 +20,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum FE_rdiag = "./diags/reducedfiles/EF.txt" init_Fenergy = np.loadtxt(FE_rdiag)[1, 2] @@ -32,6 +32,9 @@ init_Penergy = np.loadtxt(PE_rdiag)[0, 2] final_Penergy = np.loadtxt(PE_rdiag)[-1, 2] assert abs(final_Penergy - init_Penergy) / init_Penergy < 0.02 -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py index df106976e78..8e7d95eda08 100755 --- a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py +++ b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py @@ -25,7 +25,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -67,5 +67,8 @@ print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pec/analysis_pec.py b/Examples/Tests/pec/analysis_pec.py index 12907bb7846..29d9a4e26f4 100755 --- a/Examples/Tests/pec/analysis_pec.py +++ b/Examples/Tests/pec/analysis_pec.py @@ -12,7 +12,6 @@ # and as a result, the minimum and maximum value after reflection would be two times the value at initialization due to constructive interference. # Additionally, the value of Ey at the boundary must be equal to zero. 
import os -import re import sys import matplotlib @@ -26,7 +25,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -91,9 +90,8 @@ assert max_Ey_error_rel < tolerance_rel assert min_Ey_error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pec/analysis_pec_mr.py b/Examples/Tests/pec/analysis_pec_mr.py index 8361246b8dd..069a1d01afa 100755 --- a/Examples/Tests/pec/analysis_pec_mr.py +++ b/Examples/Tests/pec/analysis_pec_mr.py @@ -12,7 +12,6 @@ # and as a result, the minimum and maximum value after reflection would be two times the value at initialization due to constructive interference. # Additionally, the value of Ey at the boundary must be equal to zero. import os -import re import sys import matplotlib @@ -26,7 +25,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -91,9 +90,8 @@ assert max_Ey_error_rel < tolerance_rel assert min_Ey_error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/photon_pusher/analysis.py b/Examples/Tests/photon_pusher/analysis.py index 9135ad981ba..2a77e325bc5 100755 --- a/Examples/Tests/photon_pusher/analysis.py +++ b/Examples/Tests/photon_pusher/analysis.py @@ -14,7 +14,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This script checks if photons initialized with different momenta and # different initial directions propagate along straight lines at the speed of @@ -153,8 +153,11 @@ def check(): assert (max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) # This function generates the input file to test the photon pusher. 
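For reference, the recurring change in the analysis scripts above replaces positional checksumAPI.evaluate_checksum(test_name, fn) calls with an explicit keyword-argument footer that derives the test name from the working directory and takes the output file from the first command-line argument. Written out as a complete snippet, the standardized footer looks as follows; this is a sketch that assumes the relative Regression/Checksum path and the test working-directory layout used throughout these scripts:

import os
import sys

# make the shared checksum utilities importable (relative path used by the test working directories)
sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
from checksumAPI import evaluate_checksum

# compare checksums: test name = current directory name, output file = first command-line argument
evaluate_checksum(
    test_name=os.path.split(os.getcwd())[1],
    output_file=sys.argv[1],
)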
diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 11e2a084ac5..46036573940 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -25,7 +25,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] ds = yt.load(filename) @@ -195,8 +195,10 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): "error in y particle velocity" ) -# The PICMI and native input versions run the same test, so -# their results are compared to the same benchmark file +# compare checksums test_name = os.path.split(os.getcwd())[1] -test_name = re.sub("_picmi", "", test_name) -checksumAPI.evaluate_checksum(test_name, filename) +test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_ckc.py b/Examples/Tests/pml/analysis_pml_ckc.py index 4e6bff076c7..f6637e2d47b 100755 --- a/Examples/Tests/pml/analysis_pml_ckc.py +++ b/Examples/Tests/pml/analysis_pml_ckc.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -57,5 +57,8 @@ assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_psatd.py b/Examples/Tests/pml/analysis_pml_psatd.py index 00b867857f9..4f44c0f3432 100755 --- a/Examples/Tests/pml/analysis_pml_psatd.py +++ b/Examples/Tests/pml/analysis_pml_psatd.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -75,5 +75,8 @@ assert reflectivity < reflectivity_max -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_psatd_rz.py b/Examples/Tests/pml/analysis_pml_psatd_rz.py index 2d9d58734a1..fb662e36d40 100755 --- a/Examples/Tests/pml/analysis_pml_psatd_rz.py +++ b/Examples/Tests/pml/analysis_pml_psatd_rz.py @@ -24,7 +24,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -55,5 +55,8 @@ print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_yee.py b/Examples/Tests/pml/analysis_pml_yee.py index a24854af095..5f6d21e579c 100755 --- a/Examples/Tests/pml/analysis_pml_yee.py +++ b/Examples/Tests/pml/analysis_pml_yee.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -57,5 +57,8 @@ 
assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/point_of_contact_eb/analysis.py b/Examples/Tests/point_of_contact_eb/analysis.py index 3f42aa6eeca..1c9dbc85f4c 100755 --- a/Examples/Tests/point_of_contact_eb/analysis.py +++ b/Examples/Tests/point_of_contact_eb/analysis.py @@ -17,12 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# Open plotfile specified in command line -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") +from checksumAPI import evaluate_checksum ts_scraping = OpenPMDTimeSeries("./diags/diag2/particles_at_eb/") @@ -97,3 +92,10 @@ and (diff_ny < tolerance_n) and (np.abs(nz) < 1e-8) ), "Test point_of_contact did not pass" + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/projection_divb_cleaner/analysis.py b/Examples/Tests/projection_divb_cleaner/analysis.py index 256d1929d06..2324c370032 100755 --- a/Examples/Tests/projection_divb_cleaner/analysis.py +++ b/Examples/Tests/projection_divb_cleaner/analysis.py @@ -23,7 +23,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 4e-3 @@ -73,5 +73,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/qed/analysis_breit_wheeler_opmd.py b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py index 25547eda438..b88f00a85dc 100755 --- a/Examples/Tests/qed/analysis_breit_wheeler_opmd.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py @@ -13,7 +13,7 @@ import openpmd_api as io # sys.path.insert(1, '../../../../warpx/Regression/Checksum/') -# import checksumAPI +# from checksumAPI import evaluate_checksum # This script is a frontend for the analysis routines @@ -72,8 +72,12 @@ def main(): ac.check(dt, particle_data) - # test_name = os.path.split(os.getcwd())[1] - # checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + # evaluate_checksum( + # test_name=os.path.split(os.getcwd())[1], + # output_file=sys.argv[1], + # output_format="openpmd", + # ) if __name__ == "__main__": diff --git a/Examples/Tests/qed/analysis_breit_wheeler_yt.py b/Examples/Tests/qed/analysis_breit_wheeler_yt.py index 9836e3e8894..48c45c990b0 100755 --- a/Examples/Tests/qed/analysis_breit_wheeler_yt.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_yt.py @@ -14,7 +14,7 @@ sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import analysis_breit_wheeler_core as ac -import checksumAPI +from checksumAPI import evaluate_checksum # This script is a frontend for the analysis routines # in analysis_breit_wheeler_core.py (please refer to this file for @@ -58,8 +58,11 @@ def main(): ac.check(dt, particle_data) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + 
output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/qed/analysis_quantum_sync.py b/Examples/Tests/qed/analysis_quantum_sync.py index cf60d2ee647..531a0eac195 100755 --- a/Examples/Tests/qed/analysis_quantum_sync.py +++ b/Examples/Tests/qed/analysis_quantum_sync.py @@ -18,8 +18,8 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI import matplotlib.pyplot as plt +from checksumAPI import evaluate_checksum # This script performs detailed checks of the Quantum Synchrotron photon emission process. # Two electron populations and two positron populations are initialized with different momenta in different @@ -348,8 +348,11 @@ def check(): print("*************\n") - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) def main(): diff --git a/Examples/Tests/qed/analysis_schwinger.py b/Examples/Tests/qed/analysis_schwinger.py index 30a25e6a956..4ad21e3d518 100755 --- a/Examples/Tests/qed/analysis_schwinger.py +++ b/Examples/Tests/qed/analysis_schwinger.py @@ -19,7 +19,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # define some parameters @@ -161,5 +161,8 @@ def do_analysis(Ex, Ey, Ez, Bx, By, Bz): do_analysis(Ex_test, Ey_test, Ez_test, Bx_test, By_test, Bz_test) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/radiation_reaction/analysis.py b/Examples/Tests/radiation_reaction/analysis.py index e24129d3e38..74155a89cb3 100755 --- a/Examples/Tests/radiation_reaction/analysis.py +++ b/Examples/Tests/radiation_reaction/analysis.py @@ -37,7 +37,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Input filename inputname = "inputs" @@ -163,8 +163,11 @@ def check(): assert error_rel < tolerance_rel - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) def generate(): diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py index 64b726e5954..42916d34568 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py @@ -21,7 +21,7 @@ from scipy.constants import mu_0 as mu0 sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # gamma threshold to switch between the relativistic expression of # the kinetic energy and its Taylor expansion. 
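Several scripts in this patch pass extra options through the same keyword interface: rtol for a looser relative tolerance, do_particles=False to skip particle data, and output_format="openpmd" when the diagnostic is openPMD output rather than a plotfile. These options appear in different tests above; combining all three in one call below is only for illustration, with values taken from hunks earlier in this patch:

import os
import sys

sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
from checksumAPI import evaluate_checksum

# illustrative combination of the optional arguments used by individual tests in this patch
evaluate_checksum(
    test_name=os.path.split(os.getcwd())[1],
    output_file=sys.argv[1],
    rtol=1e-6,                # e.g. the RZ Ohm-solver EM-modes test
    do_particles=False,       # e.g. the RZ magnetostatic EB test
    output_format="openpmd",  # e.g. the particle_boundary_interaction test
)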
@@ -376,7 +376,9 @@ def do_analysis(single_precision=False): assert error[k] < tol print() - test_name = os.path.split(os.getcwd())[1] - - checksum_rtol = 2e-9 if single_precision else 1e-9 - checksumAPI.evaluate_checksum(test_name, fn, rtol=checksum_rtol) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-9, + ) diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py index 05f696e2fe6..49a0018baa5 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py @@ -24,7 +24,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Command line argument fn = sys.argv[1] @@ -77,8 +77,10 @@ def get_efficiency(i): # than non-load balanced case assert efficiency_before < efficiency_after -# The PICMI and native input versions run the same test, so -# their results are compared to the same benchmark file +# compare checksums test_name = os.path.split(os.getcwd())[1] -test_name = re.sub("_picmi", "", test_name) -checksumAPI.evaluate_checksum(test_name, fn) +test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], +) diff --git a/Examples/Tests/relativistic_space_charge_initialization/analysis.py b/Examples/Tests/relativistic_space_charge_initialization/analysis.py index 4828e3ddce5..ef0a87dce92 100755 --- a/Examples/Tests/relativistic_space_charge_initialization/analysis.py +++ b/Examples/Tests/relativistic_space_charge_initialization/analysis.py @@ -25,7 +25,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Parameters from the Simulation Qtot = -1.0e-20 @@ -95,5 +95,9 @@ def check(E, E_th, label): check(Ex_array, Ex_th, "Ex") -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/repelling_particles/analysis.py b/Examples/Tests/repelling_particles/analysis.py index 401ba7ba5d0..74bde7b68ca 100755 --- a/Examples/Tests/repelling_particles/analysis.py +++ b/Examples/Tests/repelling_particles/analysis.py @@ -35,6 +35,9 @@ yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + # Check plotfile name specified in command line last_filename = sys.argv[1] filename_radical = re.findall(r"(.*?)\d+/*$", last_filename)[0] @@ -76,9 +79,8 @@ assert np.allclose(beta1[1:], beta_th[1:], atol=0.01) assert np.allclose(-beta2[1:], beta_th[1:], atol=0.01) -# Run checksum regression test -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/resampling/analysis.py b/Examples/Tests/resampling/analysis.py index f55f3b996c5..40bad24d65e 100755 --- a/Examples/Tests/resampling/analysis.py +++ 
b/Examples/Tests/resampling/analysis.py @@ -17,7 +17,7 @@ from scipy.special import erf sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum fn_final = sys.argv[1] fn0 = fn_final[:-4] + "0000" @@ -171,5 +171,8 @@ # Check that particles with weight higher than level weight are unaffected by resampling. assert np.all(w[-numparts_unaffected:] == w0[-numparts_unaffected:]) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn_final) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/restart/CMakeLists.txt b/Examples/Tests/restart/CMakeLists.txt index bb3e90059c9..df5b1239a01 100644 --- a/Examples/Tests/restart/CMakeLists.txt +++ b/Examples/Tests/restart/CMakeLists.txt @@ -11,29 +11,24 @@ add_warpx_test( OFF # dependency ) -# TODO -# - Add checksums file -# - Enable analysis add_warpx_test( test_2d_runtime_components_picmi # name 2 # dims 1 # nprocs inputs_test_2d_runtime_components_picmi.py # inputs - OFF #analysis_default_regression.py # analysis - OFF #diags/diag1000010 # output + analysis_default_regression.py # analysis + diags/diag1000010 # output OFF # dependency ) -# TODO -# - Add checksums file -# - Enable analysis +# FIXME add_warpx_test( test_2d_runtime_components_picmi_restart # name 2 # dims 1 # nprocs "inputs_test_2d_runtime_components_picmi.py amr.restart='../test_2d_runtime_components_picmi/diags/chk000005'" # inputs - OFF #analysis_default_restart.py # analysis - OFF #diags/diag1000010 # output + OFF #analysis_default_restart.py # analysis + OFF #diags/diag1000010 # output test_2d_runtime_components_picmi # dependency ) diff --git a/Examples/Tests/restart/analysis_restart.py b/Examples/Tests/restart/analysis_restart.py index 4a4d198f63f..26a05da90f2 100755 --- a/Examples/Tests/restart/analysis_restart.py +++ b/Examples/Tests/restart/analysis_restart.py @@ -4,7 +4,7 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -14,7 +14,8 @@ check_restart(filename) -# Check-sum analysis -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py index 9b9054a4d42..759c211b42d 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py +++ b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py @@ -30,7 +30,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -81,5 +81,8 @@ print(f"tolerance = {tol}") assert err < tol -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py index 94b2a1ac07e..91e2bed1ed0 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py +++ 
b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py @@ -31,7 +31,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -102,5 +102,8 @@ def remove_rigid_lines(plotfile, nlines_if_rigid): assert np.array_equal(z, orig_z) assert np.array_equal(1 * (np.abs(x) < 5.0e-7), center) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/scraping/analysis_rz.py b/Examples/Tests/scraping/analysis_rz.py index 8bf86e320f3..aa0038dbcf5 100755 --- a/Examples/Tests/scraping/analysis_rz.py +++ b/Examples/Tests/scraping/analysis_rz.py @@ -28,7 +28,7 @@ from openpmd_viewer import OpenPMDTimeSeries sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0 @@ -83,6 +83,9 @@ def n_scraped_particles(iteration): np.sort(id_initial) == np.sort(id_final) ) # Sort because particles may not be in the same order -# Checksum test -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/silver_mueller/analysis.py b/Examples/Tests/silver_mueller/analysis.py index e1de7199aa0..aee27131bc9 100755 --- a/Examples/Tests/silver_mueller/analysis.py +++ b/Examples/Tests/silver_mueller/analysis.py @@ -20,7 +20,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -51,5 +51,8 @@ assert np.all(abs(Ey) < max_reflection_amplitude) assert np.all(abs(Ez) < max_reflection_amplitude) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/single_particle/analysis.py b/Examples/Tests/single_particle/analysis.py index 198d84c6bfd..4127663e14d 100755 --- a/Examples/Tests/single_particle/analysis.py +++ b/Examples/Tests/single_particle/analysis.py @@ -16,7 +16,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Build Jx without filter. 
This can be obtained by running this test without # a filter, e.g., execute @@ -66,5 +66,8 @@ assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/space_charge_initialization/analysis.py b/Examples/Tests/space_charge_initialization/analysis.py index 1d5c8b9cb78..d63ba8f7334 100755 --- a/Examples/Tests/space_charge_initialization/analysis.py +++ b/Examples/Tests/space_charge_initialization/analysis.py @@ -26,7 +26,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Parameters from the Simulation Qtot = -1.0e-20 @@ -124,5 +124,9 @@ def check(E, E_th, label): if ds.dimensionality == 3: check(Ez_array, Ez_th, "Ez") -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, do_particles=0) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/vay_deposition/analysis.py b/Examples/Tests/vay_deposition/analysis.py index 82776c34c42..ba428520660 100755 --- a/Examples/Tests/vay_deposition/analysis.py +++ b/Examples/Tests/vay_deposition/analysis.py @@ -16,7 +16,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Plotfile data set fn = sys.argv[1] @@ -35,6 +35,8 @@ print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -# Checksum analysis -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/analysis_default_openpmd_regression.py b/Examples/analysis_default_openpmd_regression.py index 03a0f1ede1f..6f38693f820 100755 --- a/Examples/analysis_default_openpmd_regression.py +++ b/Examples/analysis_default_openpmd_regression.py @@ -5,16 +5,22 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test test_name = os.path.split(os.getcwd())[1] +output_file = sys.argv[1] # Run checksum regression test -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd", rtol=2.0e-6) +if re.search("single_precision", output_file): + evaluate_checksum( + test_name=test_name, + output_file=output_file, + output_format="openpmd", + rtol=2e-6, + ) else: - checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd") + evaluate_checksum( + test_name=test_name, + output_file=output_file, + output_format="openpmd", + ) diff --git a/Examples/analysis_default_regression.py b/Examples/analysis_default_regression.py index 519bbeeea64..7c02f6904b2 100755 --- a/Examples/analysis_default_regression.py +++ b/Examples/analysis_default_regression.py @@ -5,17 +5,21 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test test_name = os.path.split(os.getcwd())[1] +output_file = sys.argv[1] # Run checksum 
regression test -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6) +if re.search("single_precision", output_file): + evaluate_checksum( + test_name=test_name, + output_file=output_file, + rtol=2e-6, + ) else: # using default relative tolerance - checksumAPI.evaluate_checksum(test_name, fn) + evaluate_checksum( + test_name=test_name, + output_file=output_file, + ) diff --git a/Examples/analysis_default_restart.py b/Examples/analysis_default_restart.py index 55bab253dbc..c019a0b5945 100755 --- a/Examples/analysis_default_restart.py +++ b/Examples/analysis_default_restart.py @@ -7,7 +7,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum def check_restart(filename, tolerance=1e-12): @@ -67,12 +67,17 @@ def check_restart(filename, tolerance=1e-12): print() -filename = sys.argv[1] +# test name (for checksums, remove "_restart") and output file name +test_name = os.path.split(os.getcwd())[1] +test_name = test_name.replace("_restart", "") +output_file = sys.argv[1] # compare restart results against original results -check_restart(filename) +check_restart(output_file) # compare restart checksums against original checksums -testname = os.path.split(os.getcwd())[1] -testname = testname.replace("_restart", "") -checksumAPI.evaluate_checksum(testname, filename, rtol=1e-12) +evaluate_checksum( + test_name=test_name, + output_file=output_file, + rtol=1e-12, +) diff --git a/GNUmakefile b/GNUmakefile index fe10983b780..1cc78403c7b 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -43,5 +43,8 @@ USE_RZ = FALSE USE_EB = FALSE +USE_LINEAR_SOLVERS_EM = TRUE +USE_LINEAR_SOLVERS_INCFLO = FALSE + WARPX_HOME := . include $(WARPX_HOME)/Source/Make.WarpX diff --git a/GOVERNANCE.rst b/GOVERNANCE.rst index b5253b80f9f..588e8b2df6e 100644 --- a/GOVERNANCE.rst +++ b/GOVERNANCE.rst @@ -16,7 +16,7 @@ Current Roster - Remi Lehe - Axel Huebl -See: `GitHub team `__ +See: `GitHub team `__ Role ^^^^ @@ -66,7 +66,7 @@ Current Roster - Weiqun Zhang - Edoardo Zoni -See: `GitHub team `__ +See: `GitHub team `__ Role ^^^^ diff --git a/Python/setup.py b/Python/setup.py index 86585bf8886..d57ebc65223 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="24.08", + version="24.10", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json b/Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json new file mode 100644 index 00000000000..029294deb66 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json @@ -0,0 +1,20 @@ +{ + "lev=0": { + "rho_electrons": 0.0044328572492614605, + "rho_he_ions": 0.005198609403474849 + }, + "electrons": { + "particle_momentum_x": 3.5020450942268976e-20, + "particle_momentum_y": 3.5342700024993965e-20, + "particle_momentum_z": 1.2596017960675146e-19, + "particle_position_x": 2139.5967568101983, + "particle_weight": 14577210937500.002 + }, + "he_ions": { + "particle_momentum_x": 2.770046913680294e-19, + "particle_momentum_y": 2.755651798947783e-19, + "particle_momentum_z": 3.619494241595636e-19, + "particle_position_x": 2200.218124999781, + "particle_weight": 17184714843750.002 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_1d_fel.json b/Regression/Checksum/benchmarks_json/test_1d_fel.json new file mode 100644 index 00000000000..2bd9c1fad80 --- /dev/null 
+++ b/Regression/Checksum/benchmarks_json/test_1d_fel.json @@ -0,0 +1,31 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 514.5044890273722, + "Bz": 0.0, + "Ex": 154245109024.33972, + "Ey": 0.0, + "Ez": 0.0, + "jx": 1161126105.5594487, + "jy": 0.0, + "jz": 0.0 + }, + "electrons": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 13607.569953355982, + "particle_momentum_x": 3.095483353687591e-19, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.5419514460764825e-16, + "particle_weight": 1349823909946836.0 + }, + "positrons": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 13607.569953355982, + "particle_momentum_x": 3.095483353687591e-19, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.5419514460764825e-16, + "particle_weight": 1349823909946836.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json new file mode 100644 index 00000000000..579f46d33ab --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json @@ -0,0 +1,22 @@ +{ + "electrons": { + "particle_momentum_x": 1.011638818664759e-18, + "particle_momentum_y": 2.81974298744432e-19, + "particle_momentum_z": 2.809194032519318e-19, + "particle_position_x": 17136.01865460215, + "particle_position_y": 936.3651769897449, + "particle_weight": 61113170379.63868 + }, + "he_ions": { + "particle_momentum_x": 2.883076633513297e-18, + "particle_momentum_y": 2.195704870583595e-18, + "particle_momentum_z": 2.198216553980008e-18, + "particle_position_x": 17607.42545752183, + "particle_position_y": 1100.024786059151, + "particle_weight": 71976747650.1465 + }, + "lev=0": { + "rho_electrons": 0.03558889419586454, + "rho_he_ions": 0.04176234095111594 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json b/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json new file mode 100644 index 00000000000..41567dc3bf2 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json @@ -0,0 +1,5 @@ +{ + "lev=0": { + "phi": 10817.97280547637 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_field_probe.json b/Regression/Checksum/benchmarks_json/test_2d_field_probe.json new file mode 100644 index 00000000000..cb82acfc067 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_field_probe.json @@ -0,0 +1,10 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 126826.78487921853, + "Bz": 0.0, + "Ex": 32517064310550.266, + "Ey": 0.0, + "Ez": 17321323003697.61 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json new file mode 100644 index 00000000000..dd489f16e05 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json @@ -0,0 +1,11 @@ +{ + "lev=0": {}, + "electron": { + "particle_momentum_x": 6.990772711451971e-19, + "particle_momentum_y": 5.4131306169803364e-20, + "particle_momentum_z": 6.997294931789925e-19, + "particle_position_x": 35518.95120597846, + "particle_position_y": 35517.855675902414, + "particle_weight": 1.25355e-07 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json new file mode 100644 index 00000000000..97d0c1f5e58 --- /dev/null +++ 
b/Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json @@ -0,0 +1,7 @@ +{ + "lev=0": { + "Ex": 4.865922376234882e-11, + "Ey": 0.0, + "Ez": 2.3293326580399806e-10 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json b/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json index 90cf134201f..9e876d5c23e 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json +++ b/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json @@ -1,22 +1,22 @@ { + "lev=0": { + "Bx": 3.719030475087696e-05, + "By": 0.004843257051761486, + "Bz": 5.522765606391185e-06, + "Ex": 1461264.5033270014, + "Ey": 11205.64142004876, + "Ez": 282020.7784731542, + "jx": 16437877.898892798, + "jy": 2492340.3149980744, + "jz": 215102423.57036853, + "rho": 0.7246235591902171 + }, "beam": { - "particle_momentum_x": 2.2080215038948936e-16, + "particle_momentum_x": 2.2080215038948934e-16, "particle_momentum_y": 2.18711072170811e-16, - "particle_momentum_z": 2.730924530737497e-15, - "particle_position_x": 0.0260823588888081, + "particle_momentum_z": 2.730924530737456e-15, + "particle_position_x": 0.026082358888808558, "particle_position_y": 0.5049438607316916, "particle_weight": 62415.090744607645 - }, - "lev=0": { - "Bx": 3.721807007218884e-05, - "By": 0.004860056238272468, - "Bz": 5.5335765596325185e-06, - "Ex": 1466447.517373168, - "Ey": 11214.10223280318, - "Ez": 283216.0961218869, - "jx": 16437877.898892513, - "jy": 2492340.3149980404, - "jz": 215102423.57036877, - "rho": 0.7246235591902177 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json new file mode 100644 index 00000000000..f1eb0047d49 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json @@ -0,0 +1,14 @@ +{ + "lev=0": { + "phi": 0.001516261626005395 + }, + "electrons": { + "particle_momentum_x": 7.75165529536844e-26, + "particle_momentum_y": 6.938526597814195e-26, + "particle_momentum_z": 6.572519525636007e-26, + "particle_newPid": 500.0, + "particle_position_x": 1.4999588764814886, + "particle_position_y": 1.4999551809410656, + "particle_weight": 200.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag.json b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_leptons.json similarity index 100% rename from Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag.json rename to Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_leptons.json diff --git a/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json new file mode 100644 index 00000000000..09b2031cdd2 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "rho_beam1": 656097367.2335038, + "rho_beam2": 656097367.2335038 + }, + "beam1": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.7512476113279403e-11, + "particle_position_x": 0.2621440000000001, + "particle_position_y": 0.005242880000000001, + "particle_position_z": 314572.79999473685, + "particle_weight": 11997744756.90957 + }, + "beam2": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.7513431895752007e-11, + "particle_position_x": 0.2621440000000001, + "particle_position_y": 
0.005242880000000001, + "particle_position_z": 314572.79999472946, + "particle_weight": 11997744756.909573 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json new file mode 100644 index 00000000000..f3483a544b5 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json @@ -0,0 +1,5 @@ +{ + "lev=0": { + "Ex": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json new file mode 100644 index 00000000000..e947a8af07b --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json @@ -0,0 +1,12 @@ +{ + "lev=0": {}, + "electron": { + "particle_momentum_x": 4.371688233196277e-18, + "particle_momentum_y": 4.368885079657374e-18, + "particle_momentum_z": 4.367429424105371e-18, + "particle_position_x": 219746.94401890738, + "particle_position_y": 219690.7015248918, + "particle_position_z": 219689.45580938633, + "particle_weight": 4.954974999999999e-07 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json b/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json new file mode 100644 index 00000000000..ce6e2fcf79b --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json @@ -0,0 +1,10 @@ +{ + "lev=0": { + "Bx": 202106.71291347666, + "By": 202106.71291347663, + "Bz": 3371.897999274175, + "Ex": 38304043178806.11, + "Ey": 38304043178806.11, + "Ez": 83057027925874.84 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json new file mode 100644 index 00000000000..23884de9725 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json @@ -0,0 +1,12 @@ +{ + "lev=0": {}, + "electron": { + "particle_momentum_x": 6.734984863106283e-19, + "particle_momentum_y": 6.786279785869023e-19, + "particle_momentum_z": 1.0527983828124758e-18, + "particle_position_x": 53309.270966506396, + "particle_position_y": 53302.3776094842, + "particle_theta": 58707.74469425615, + "particle_weight": 4.991396867417661e-07 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json b/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json new file mode 100644 index 00000000000..de631f4767a --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json @@ -0,0 +1,35 @@ +{ + "lev=0": { + "Bt": 4299.677335258863, + "Bz": 34749.512290662635, + "Er": 1343319090029.9607, + "jr": 5229952989213.152, + "jt": 9.287962600874053e+17, + "jz": 3712414162446391.5, + "part_per_cell": 6288.0, + "part_per_grid": 25755648.0, + "rho": 102920475.65331206, + "rho_beam": 12377109.352622943, + "rho_electrons": 90543366.3006891 + }, + "beam": { + "particle_position_x": 3.651481908823126e-05, + "particle_position_y": 4.275668879776449e-05, + "particle_position_z": 0.0025531549045483943, + "particle_momentum_x": 3.879691286254116e-20, + "particle_momentum_y": 5.0782566944104114e-20, + "particle_momentum_z": 1.3503182565048374e-17, + "particle_weight": 6241509.074460764 + }, + "electrons": { + "particle_origX": 0.03652440297475791, + "particle_origZ": 0.06924276562500002, + "particle_position_x": 
0.036524412900510936, + "particle_position_y": 0.03652445428108603, + "particle_position_z": 0.06924303765442104, + "particle_momentum_x": 5.508781425380743e-23, + "particle_momentum_y": 7.236141259605716e-21, + "particle_momentum_z": 4.4528442530356535e-22, + "particle_weight": 1118799420.1067173 + } +} diff --git a/Regression/Checksum/checksum.py b/Regression/Checksum/checksum.py index 4133d882a41..b2f327e36e3 100644 --- a/Regression/Checksum/checksum.py +++ b/Regression/Checksum/checksum.py @@ -243,9 +243,8 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): ) print("Benchmark: %s" % ref_benchmark.data.keys()) print("Test file: %s" % self.data.keys()) - print("\n----------------\nNew file for " + self.test_name + ":") + print(f"\nNew checksums file {self.test_name}.json:") print(json.dumps(self.data, indent=2)) - print("----------------") sys.exit(1) # Dictionaries have same inner keys (field and particle quantities)? @@ -261,9 +260,8 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): % (key1, ref_benchmark.data[key1].keys()) ) print("Test file inner keys in %s: %s" % (key1, self.data[key1].keys())) - print("\n----------------\nNew file for " + self.test_name + ":") + print(f"\nNew checksums file {self.test_name}.json:") print(json.dumps(self.data, indent=2)) - print("----------------") sys.exit(1) # Dictionaries have same values? @@ -298,7 +296,6 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): rel_err = abs_err / np.abs(x) print("Relative error: {:.2e}".format(rel_err)) if checksums_differ: - print("\n----------------\nNew file for " + self.test_name + ":") + print(f"\nNew checksums file {self.test_name}.json:") print(json.dumps(self.data, indent=2)) - print("----------------") sys.exit(1) diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index d11db98276b..ab04f30ef18 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -161,6 +161,7 @@ private: * in z-direction for both 2D and 3D simulations in the Cartesian frame of reference. */ int m_moving_window_dir; + amrex::Real m_moving_window_beta; /** Number of back-transformed snapshots in the lab-frame requested by the user */ int m_num_snapshots_lab = std::numeric_limits::lowest(); diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 631de298861..312bbc7ec45 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -69,6 +69,7 @@ void BTDiagnostics::DerivedInitData () m_gamma_boost = WarpX::gamma_boost; m_beta_boost = std::sqrt( 1._rt - 1._rt/( m_gamma_boost * m_gamma_boost) ); m_moving_window_dir = WarpX::moving_window_dir; + m_moving_window_beta = WarpX::moving_window_v/PhysConst::c; // Currently, for BTD, all the data is averaged+coarsened to coarsest level // and then sliced+back-transformed+filled_to_buffer. // The number of levels to be output is nlev_output. 
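The remaining BTDiagnostics hunks below generalize the back-transformed diagnostics from a moving window travelling at exactly c to one travelling at v_mw <= c. With beta = v_boost/c, beta_mw = v_mw/c (the new m_moving_window_beta) and gamma the boost factor, the relations implemented below are, restating the formulas that appear in the code and comments that follow,

\[
\beta_{\mathrm{mw}}' = \frac{\beta_{\mathrm{mw}} - \beta}{1 - \beta\,\beta_{\mathrm{mw}}}, \qquad
t_0 = \frac{z_{\max}}{c}\,\frac{\beta}{1 - \beta\,\beta_{\mathrm{mw}}}, \qquad
L_z^{\mathrm{lab}} = \gamma\,\left(1 - \beta\,\beta_{\mathrm{mw}}\right) L_z^{\mathrm{boost}}, \qquad
j \ge i\,\gamma\,\left(1 - \beta\,\beta_{\mathrm{mw}}\right) \frac{\Delta t_{\mathrm{snapshot}}}{\Delta t_{\mathrm{boost}}},
\]

where beta_mw' is the window velocity seen from the boosted frame, t_0 the lab time of the first snapshot, L_z^lab the lab-frame length covered by one snapshot, and j the boosted-frame step at which the i-th (final) snapshot starts filling. For beta_mw = 1 these reduce to the previous expressions, since gamma*(1 - beta) = 1/(gamma*(1 + beta)).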
@@ -138,7 +139,7 @@ void BTDiagnostics::DerivedInitData () const int lev = 0; const amrex::Real dt_boosted_frame = warpx.getdt(lev); const int moving_dir = WarpX::moving_window_dir; - const amrex::Real Lz_lab = warpx.Geom(lev).ProbLength(moving_dir) / WarpX::gamma_boost / (1._rt+WarpX::beta_boost); + const amrex::Real Lz_lab = warpx.Geom(lev).ProbLength(moving_dir) * WarpX::gamma_boost * (1._rt - WarpX::beta_boost*m_moving_window_beta); const int ref_ratio = 1; const amrex::Real dz_snapshot_grid = dz_lab(dt_boosted_frame, ref_ratio); // Need enough buffers so the snapshot length is longer than the lab frame length @@ -149,22 +150,21 @@ void BTDiagnostics::DerivedInitData () // the final snapshot starts filling when the // right edge of the moving window intersects the final snapshot // time of final snapshot : t_sn = t0 + i*dt_snapshot - // where t0 is the time of first BTD snapshot, t0 = zmax / c * beta / (1-beta) + // where t0 is the time of first BTD snapshot, t0 = zmax / c * beta / (1-beta*beta_mw) // // the right edge of the moving window at the time of the final snapshot // has space time coordinates - // time t_intersect = t_sn, position z_intersect=zmax + c*t_sn + // time t_intersect = t_sn, position z_intersect=zmax + v_mw*t_sn // the boosted time of this space time pair is // t_intersect_boost = gamma * (t_intersect - beta * z_intersect_boost/c) - // = gamma * (t_sn * (1 - beta) - beta * zmax / c) - // = gamma * (zmax*beta/c + i*dt_snapshot*(1-beta) - beta*zmax/c) - // = gamma * i * dt_snapshot * (1-beta) - // = i * dt_snapshot / gamma / (1+beta) + // = gamma * (t_sn * (1 - beta*beta_mw) - beta * zmax / c) + // = gamma * (zmax*beta/c + i*dt_snapshot*(1-beta*beta_mw) - beta*zmax/c) + // = gamma * (1-beta*beta_mw) * i * dt_snapshot // // if j = final snapshot starting step, then we want to solve - // j dt_boosted_frame >= t_intersect_boost = i * dt_snapshot / gamma / (1+beta) - // j >= i / gamma / (1+beta) * dt_snapshot / dt_boosted_frame - const int final_snapshot_starting_step = static_cast(std::ceil(final_snapshot_iteration / WarpX::gamma_boost / (1._rt+WarpX::beta_boost) * m_dt_snapshots_lab / dt_boosted_frame)); + // j dt_boosted_frame >= t_intersect_boost = i * gamma * (1-beta*beta_mw) * dt_snapshot + // j >= i * gamma * (1-beta*beta_mw) * dt_snapshot / dt_boosted_frame + const int final_snapshot_starting_step = static_cast(std::ceil(final_snapshot_iteration * WarpX::gamma_boost * (1._rt - WarpX::beta_boost*m_moving_window_beta) * m_dt_snapshots_lab / dt_boosted_frame)); const int final_snapshot_fill_iteration = final_snapshot_starting_step + num_buffers * m_buffer_size - 1; const amrex::Real final_snapshot_fill_time = final_snapshot_fill_iteration * dt_boosted_frame; if (WarpX::compute_max_step_from_btd) { @@ -256,7 +256,7 @@ BTDiagnostics::ReadParameters () bool snapshot_interval_is_specified = utils::parser::queryWithParser( pp_diag_name, "dt_snapshots_lab", m_dt_snapshots_lab); if ( utils::parser::queryWithParser(pp_diag_name, "dz_snapshots_lab", m_dz_snapshots_lab) ) { - m_dt_snapshots_lab = m_dz_snapshots_lab/PhysConst::c; + m_dt_snapshots_lab = m_dz_snapshots_lab/WarpX::moving_window_v; snapshot_interval_is_specified = true; } WARPX_ALWAYS_ASSERT_WITH_MESSAGE(snapshot_interval_is_specified, @@ -338,13 +338,15 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev, bool restart) // When restarting boosted simulations, the code below needs to take // into account the fact that the position of the box at the beginning // of the simulation, is not the one that we 
had at t=0 (because of the moving window) - const amrex::Real boosted_moving_window_v = (WarpX::moving_window_v - m_beta_boost*PhysConst::c) - / (1._rt - m_beta_boost * WarpX::moving_window_v/PhysConst::c); + const amrex::Real boosted_moving_window_v = (m_moving_window_beta - m_beta_boost) + / (1._rt - m_beta_boost*m_moving_window_beta); // Lab-frame time for the i^th snapshot if (!restart) { - const amrex::Real zmax_0 = warpx.Geom(lev).ProbHi(m_moving_window_dir); + const amrex::Real zmax_boost = warpx.Geom(lev).ProbHi(m_moving_window_dir); m_t_lab.at(i_buffer) = m_intervals.GetBTDIteration(i_buffer) * m_dt_snapshots_lab - + m_gamma_boost*m_beta_boost*zmax_0/PhysConst::c; + + m_gamma_boost*m_beta_boost*zmax_boost/PhysConst::c; + // Note: gamma_boost*beta_boost*zmax_boost is equal to + // beta_boost*zmax_lab/(1-beta_boost*beta_moving_window) } // Define buffer domain in boosted frame at level, lev, with user-defined lo and hi @@ -403,9 +405,9 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev, bool restart) // Define buffer_domain in lab-frame for the i^th snapshot. // Replace z-dimension with lab-frame co-ordinates. const amrex::Real zmin_buffer_lab = ( diag_dom.lo(m_moving_window_dir) - boosted_moving_window_v * warpx.gett_new(0) ) - / ( (1.0_rt + m_beta_boost) * m_gamma_boost); + * (1.0_rt - m_beta_boost*m_moving_window_beta) * m_gamma_boost; const amrex::Real zmax_buffer_lab = ( diag_dom.hi(m_moving_window_dir) - boosted_moving_window_v * warpx.gett_new(0) ) - / ( (1.0_rt + m_beta_boost) * m_gamma_boost); + * (1.0_rt - m_beta_boost*m_moving_window_beta) * m_gamma_boost; // Initialize buffer counter and z-positions of the i^th snapshot in // boosted-frame and lab-frame @@ -999,12 +1001,15 @@ BTDiagnostics::GetZSliceInDomainFlag (const int i_buffer, const int lev) { auto & warpx = WarpX::GetInstance(); const amrex::RealBox& boost_domain = warpx.Geom(lev).ProbDomain(); + const amrex::Real boost_cellsize = warpx.Geom(lev).CellSize(m_moving_window_dir); const amrex::Real buffer_zmin_lab = m_snapshot_domain_lab[i_buffer].lo( m_moving_window_dir ); const amrex::Real buffer_zmax_lab = m_snapshot_domain_lab[i_buffer].hi( m_moving_window_dir ); + // Exclude 0.5*boost_cellsize from the edge, to avoid that the interpolation to + // cell centers uses data from the guard cells. const bool slice_not_in_domain = - ( m_current_z_boost[i_buffer] <= boost_domain.lo(m_moving_window_dir) ) || - ( m_current_z_boost[i_buffer] >= boost_domain.hi(m_moving_window_dir) ) || + ( m_current_z_boost[i_buffer] <= boost_domain.lo(m_moving_window_dir) + 0.5_rt*boost_cellsize) || + ( m_current_z_boost[i_buffer] >= boost_domain.hi(m_moving_window_dir) - 0.5_rt*boost_cellsize) || ( m_current_z_lab[i_buffer] <= buffer_zmin_lab ) || ( m_current_z_lab[i_buffer] >= buffer_zmax_lab ); diff --git a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp index 59a32cf0545..ef5e0da6014 100644 --- a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp +++ b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp @@ -132,9 +132,8 @@ void DifferentialLuminosity::ComputeDiags (int step) // Since this diagnostic *accumulates* the luminosity in the // array d_data, we add contributions at *each timestep*, but // we only write the data to file at intervals specified by the user. 
- - const Real c2_over_qe = PhysConst::c*PhysConst::c/PhysConst::q_e; - const Real inv_c2 = 1._rt/(PhysConst::c*PhysConst::c); + const Real c_sq = PhysConst::c*PhysConst::c; + const Real c_over_qe = PhysConst::c/PhysConst::q_e; // get a reference to WarpX instance auto& warpx = WarpX::GetInstance(); @@ -187,6 +186,7 @@ void DifferentialLuminosity::ComputeDiags (int step) amrex::ParticleReal * const AMREX_RESTRICT u1x = soa_1.m_rdata[PIdx::ux]; amrex::ParticleReal * const AMREX_RESTRICT u1y = soa_1.m_rdata[PIdx::uy]; // v*gamma=p/m amrex::ParticleReal * const AMREX_RESTRICT u1z = soa_1.m_rdata[PIdx::uz]; + bool const species1_is_photon = species_1.AmIA(); const auto soa_2 = ptile_2.getParticleTileData(); index_type* AMREX_RESTRICT indices_2 = bins_2.permutationPtr(); @@ -196,6 +196,7 @@ void DifferentialLuminosity::ComputeDiags (int step) amrex::ParticleReal * const AMREX_RESTRICT u2x = soa_2.m_rdata[PIdx::ux]; amrex::ParticleReal * const AMREX_RESTRICT u2y = soa_2.m_rdata[PIdx::uy]; amrex::ParticleReal * const AMREX_RESTRICT u2z = soa_2.m_rdata[PIdx::uz]; + bool const species2_is_photon = species_2.AmIA(); // Extract low-level data auto const n_cells = static_cast(bins_1.numBins()); @@ -218,34 +219,59 @@ void DifferentialLuminosity::ComputeDiags (int step) index_type const j_1 = indices_1[i_1]; index_type const j_2 = indices_2[i_2]; - Real const u1_square = u1x[j_1]*u1x[j_1] + u1y[j_1]*u1y[j_1] + u1z[j_1]*u1z[j_1]; - Real const gamma1 = std::sqrt(1._rt + u1_square*inv_c2); - Real const u2_square = u2x[j_2]*u2x[j_2] + u2y[j_2]*u2y[j_2] + u2z[j_2]*u2z[j_2]; - Real const gamma2 = std::sqrt(1._rt + u2_square*inv_c2); - Real const u1_dot_u2 = u1x[j_1]*u2x[j_2] + u1y[j_1]*u2y[j_2] + u1z[j_1]*u2z[j_2]; + Real p1t=0, p1x=0, p1y=0, p1z=0; // components of 4-momentum of particle 1 + Real const u1_sq = u1x[j_1]*u1x[j_1] + u1y[j_1]*u1y[j_1] + u1z[j_1]*u1z[j_1]; + if (species1_is_photon) { + // photon case (momentum is normalized by m_e in WarpX) + p1t = PhysConst::m_e*std::sqrt( u1_sq ); + p1x = PhysConst::m_e*u1x[j_1]; + p1y = PhysConst::m_e*u1y[j_1]; + p1z = PhysConst::m_e*u1z[j_1]; + } else { + p1t = m1*std::sqrt( c_sq + u1_sq ); + p1x = m1*u1x[j_1]; + p1y = m1*u1y[j_1]; + p1z = m1*u1z[j_1]; + } + + Real p2t=0, p2x=0, p2y=0, p2z=0; // components of 4-momentum of particle 2 + Real const u2_sq = u2x[j_2]*u2x[j_2] + u2y[j_2]*u2y[j_2] + u2z[j_2]*u2z[j_2]; + if (species2_is_photon) { + // photon case (momentum is normalized by m_e in WarpX) + p2t = PhysConst::m_e*std::sqrt(u2_sq); + p2x = PhysConst::m_e*u2x[j_2]; + p2y = PhysConst::m_e*u2y[j_2]; + p2z = PhysConst::m_e*u2z[j_2]; + } else { + p2t = m2*std::sqrt( c_sq + u2_sq ); + p2x = m2*u2x[j_2]; + p2y = m2*u2y[j_2]; + p2z = m2*u2z[j_2]; + } // center of mass energy in eV - Real const E_com = c2_over_qe * std::sqrt(m1*m1 + m2*m2 + 2*m1*m2* (gamma1*gamma2 - u1_dot_u2*inv_c2)); + Real const E_com = c_over_qe * std::sqrt(m1*m1*c_sq + m2*m2*c_sq + 2*(p1t*p2t - p1x*p2x - p1y*p2y - p1z*p2z)); // determine particle bin int const bin = int(Math::floor((E_com-bin_min)/bin_size)); if ( bin<0 || bin>=num_bins ) { continue; } // discard if out-of-range - Real const v1_minus_v2_x = u1x[j_1]/gamma1 - u2x[j_2]/gamma2; - Real const v1_minus_v2_y = u1y[j_1]/gamma1 - u2y[j_2]/gamma2; - Real const v1_minus_v2_z = u1z[j_1]/gamma1 - u2z[j_2]/gamma2; - Real const v1_minus_v2_square = v1_minus_v2_x*v1_minus_v2_x + v1_minus_v2_y*v1_minus_v2_y + v1_minus_v2_z*v1_minus_v2_z; + Real const inv_p1t = 1.0_rt/p1t; + Real const inv_p2t = 1.0_rt/p2t; - Real const u1_cross_u2_x = 
u1y[j_1]*u2z[j_2] - u1z[j_1]*u2y[j_2]; - Real const u1_cross_u2_y = u1z[j_1]*u2x[j_2] - u1x[j_1]*u2z[j_2]; - Real const u1_cross_u2_z = u1x[j_1]*u2y[j_2] - u1y[j_1]*u2x[j_2]; + Real const beta1_sq = (p1x*p1x + p1y*p1y + p1z*p1z) * inv_p1t*inv_p1t; + Real const beta2_sq = (p2x*p2x + p2y*p2y + p2z*p2z) * inv_p2t*inv_p2t; + Real const beta1_dot_beta2 = (p1x*p2x + p1y*p2y + p1z*p2z) * inv_p1t*inv_p2t; - Real const v1_cross_v2_square = (u1_cross_u2_x*u1_cross_u2_x + u1_cross_u2_y*u1_cross_u2_y + u1_cross_u2_z*u1_cross_u2_z) / (gamma1*gamma1*gamma2*gamma2); + // Here we use the fact that: + // (v1 - v2)^2 = v1^2 + v2^2 - 2 v1.v2 + // and (v1 x v2)^2 = v1^2 v2^2 - (v1.v2)^2 + // we also use beta=v/c instead of v - Real const radicand = v1_minus_v2_square - v1_cross_v2_square * inv_c2; + Real const radicand = beta1_sq + beta2_sq - 2*beta1_dot_beta2 - beta1_sq*beta2_sq + beta1_dot_beta2*beta1_dot_beta2; - Real const dL_dEcom = std::sqrt( radicand ) * w1[j_1] * w2[j_2] / dV / bin_size * dt; // m^-2 eV^-1 + Real const dL_dEcom = PhysConst::c * std::sqrt( radicand ) * w1[j_1] * w2[j_2] / dV / bin_size * dt; // m^-2 eV^-1 amrex::HostDevice::Atomic::Add(&dptr_data[bin], dL_dEcom); diff --git a/Source/EmbeddedBoundary/DistanceToEB.H b/Source/EmbeddedBoundary/DistanceToEB.H index 0c13724380c..0b27fd054cd 100644 --- a/Source/EmbeddedBoundary/DistanceToEB.H +++ b/Source/EmbeddedBoundary/DistanceToEB.H @@ -121,7 +121,13 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP #else amrex::ignore_unused(i, j, k, ic, jc, kc, W, Wc, phi, dxi); amrex::RealVect normal(0.0); - WARPX_ABORT_WITH_MESSAGE("Error: interp_distance not yet implemented in 1D"); + + AMREX_IF_ON_DEVICE(( + AMREX_DEVICE_ASSERT(0); + )) + AMREX_IF_ON_HOST(( + WARPX_ABORT_WITH_MESSAGE("Error: interp_normal not yet implemented in 1D"); + )) #endif return normal; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index ec4a53b2edd..7e8dd260a6e 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -53,13 +53,7 @@ public: * external current multifab. Note the external current can be a function * of time and therefore this should be re-evaluated at every step. */ - void GetCurrentExternal ( - ablastr::fields::MultiLevelVectorField const& edge_lengths - ); - void GetCurrentExternal ( - ablastr::fields::VectorField const& edge_lengths, - int lev - ); + void GetCurrentExternal (); /** * \brief diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index d7d6a43b4d5..424f194ff37 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -221,167 +221,32 @@ void HybridPICModel::InitData () // if the current is time dependent which is what needs to be done to // write time independent fields on the first step. 
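Returning to the DifferentialLuminosity hunk above: for massive (non-photon) species the new four-momentum formulation must reproduce the removed gamma-based expression, since beta_i = p_i/p_i^t = u_i/(gamma_i c) and the factor of c has simply been moved outside the square root. A standalone numerical cross-check of that equivalence (illustrative script, not part of this diff; it assumes only NumPy):

import numpy as np

c = 299792458.0
rng = np.random.default_rng(0)
u1, u2 = rng.normal(scale=2 * c, size=(2, 3))  # u = gamma*v, as stored by WarpX

g1 = np.sqrt(1.0 + u1 @ u1 / c**2)
g2 = np.sqrt(1.0 + u2 @ u2 / c**2)
v1, v2 = u1 / g1, u2 / g2

# removed expression: sqrt(|v1 - v2|^2 - |v1 x v2|^2 / c^2)
old = np.sqrt(np.sum((v1 - v2) ** 2) - np.sum(np.cross(v1, v2) ** 2) / c**2)

# new expression: c * sqrt(b1^2 + b2^2 - 2 b1.b2 - b1^2 b2^2 + (b1.b2)^2), with b = v/c
b1, b2 = v1 / c, v2 / c
new = c * np.sqrt(
    b1 @ b1 + b2 @ b2 - 2 * b1 @ b2 - (b1 @ b1) * (b2 @ b2) + (b1 @ b2) ** 2
)

assert np.isclose(old, new, rtol=1e-8)

The photon branches are the genuinely new part (see the added test_3d_diff_lumi_diag_photons benchmark earlier in this diff); they have no counterpart in the removed gamma-based code.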
for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - auto edge_lengths = std::array, 3>(); -#ifdef AMREX_USE_EB - if (EB::enabled()) { - using ablastr::fields::Direction; - auto const & edge_lengths_x = *warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev); - auto const & edge_lengths_y = *warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev); - auto const & edge_lengths_z = *warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev); - - edge_lengths = std::array< std::unique_ptr, 3 >{ - std::make_unique( - edge_lengths_x, amrex::make_alias, 0, edge_lengths_x.nComp()), - std::make_unique( - edge_lengths_y, amrex::make_alias, 0, edge_lengths_y.nComp()), - std::make_unique( - edge_lengths_z, amrex::make_alias, 0, edge_lengths_z.nComp()) - }; - } -#endif - GetCurrentExternal(ablastr::fields::a2m(edge_lengths), lev); + warpx.ComputeExternalFieldOnGridUsingParser( + FieldType::hybrid_current_fp_external, + m_J_external[0], + m_J_external[1], + m_J_external[2], + lev, PatchType::fine, 'e', + warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), + warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); } } -void HybridPICModel::GetCurrentExternal ( - ablastr::fields::MultiLevelVectorField const& edge_lengths) +void HybridPICModel::GetCurrentExternal () { if (!m_external_field_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - GetCurrentExternal(edge_lengths[lev], lev); - } -} - - -void HybridPICModel::GetCurrentExternal ( - ablastr::fields::VectorField const& edge_lengths, - int lev) -{ - // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser - // except that the parsers include time dependence. - auto & warpx = WarpX::GetInstance(); - - auto t = warpx.gett_new(lev); - - auto dx_lev = warpx.Geom(lev).CellSizeArray(); - const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - - using ablastr::fields::Direction; - amrex::MultiFab * mfx = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{0}, lev); - amrex::MultiFab * mfy = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{1}, lev); - amrex::MultiFab * mfz = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{2}, lev); - - const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); - const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); - const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - - // avoid implicit lambda capture - auto Jx_external = m_J_external[0]; - auto Jy_external = m_J_external[1]; - auto Jz_external = m_J_external[2]; - - for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - - auto const& mfxfab = mfx->array(mfi); - auto const& mfyfab = mfy->array(mfi); - auto const& mfzfab = mfz->array(mfi); - - amrex::Array4 lx, ly, lz; - if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - } - - amrex::ParallelFor (tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - if (lx && lx(i, j, k) <= 0) { return; } - - // Shift required in the x-, y-, or z- position - // depending on the index type of the multifab -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0._rt; - 
const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - x_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real z = j*dx_lev[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1._rt - x_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - x_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; -#else - const amrex::Real fac_x = (1._rt - x_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - x_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - x_nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; -#endif - // Initialize the x-component of the field. - mfxfab(i,j,k) = Jx_external(x,y,z,t); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - if (ly && ly(i, j, k) <= 0) { return; } - -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0._rt; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - y_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real z = j*dx_lev[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1._rt - y_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - y_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; -#elif defined(WARPX_DIM_3D) - const amrex::Real fac_x = (1._rt - y_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - y_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - y_nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; -#endif - // Initialize the y-component of the field. - mfyfab(i,j,k) = Jy_external(x,y,z,t); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - if (lz && lz(i, j, k) <= 0) { return; } - -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0._rt; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - z_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real z = j*dx_lev[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1._rt - z_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - z_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; -#elif defined(WARPX_DIM_3D) - const amrex::Real fac_x = (1._rt - z_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - z_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - z_nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; -#endif - // Initialize the z-component of the field. 
- mfzfab(i,j,k) = Jz_external(x,y,z,t); - } - ); + warpx.ComputeExternalFieldOnGridUsingParser( + FieldType::hybrid_current_fp_external, + m_J_external[0], + m_J_external[1], + m_J_external[2], + lev, PatchType::fine, 'e', + warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), + warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); } } diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index be2d40459ac..5220419f822 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -68,8 +68,7 @@ void WarpX::HybridPICEvolveFields () const int sub_steps = m_hybrid_pic_model->m_substeps; // Get the external current - m_hybrid_pic_model->GetCurrentExternal( - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); + m_hybrid_pic_model->GetCurrentExternal(); // Reference hybrid-PIC multifabs ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp index 326ce30c844..0a0ca4b8818 100644 --- a/Source/Fluids/WarpXFluidContainer.cpp +++ b/Source/Fluids/WarpXFluidContainer.cpp @@ -1010,24 +1010,23 @@ void WarpXFluidContainer::GatherAndPush ( // External field parsers external_e_fields = (m_E_ext_s == "parse_e_ext_function"); external_b_fields = (m_B_ext_s == "parse_b_ext_function"); + amrex::ParserExecutor<4> Exfield_parser; amrex::ParserExecutor<4> Eyfield_parser; amrex::ParserExecutor<4> Ezfield_parser; amrex::ParserExecutor<4> Bxfield_parser; amrex::ParserExecutor<4> Byfield_parser; amrex::ParserExecutor<4> Bzfield_parser; + if (external_e_fields){ - constexpr int num_arguments = 4; //x,y,z,t - Exfield_parser = m_Ex_parser->compile(); - Eyfield_parser = m_Ey_parser->compile(); - Ezfield_parser = m_Ez_parser->compile(); + Exfield_parser = m_Ex_parser->compile<4>(); + Eyfield_parser = m_Ey_parser->compile<4>(); + Ezfield_parser = m_Ez_parser->compile<4>(); } - if (external_b_fields){ - constexpr int num_arguments = 4; //x,y,z,t - Bxfield_parser = m_Bx_parser->compile(); - Byfield_parser = m_By_parser->compile(); - Bzfield_parser = m_Bz_parser->compile(); + Bxfield_parser = m_Bx_parser->compile<4>(); + Byfield_parser = m_By_parser->compile<4>(); + Bzfield_parser = m_Bz_parser->compile<4>(); } diff --git a/Source/Initialization/ExternalField.cpp b/Source/Initialization/ExternalField.cpp index d86c0a484bf..504fb1ce7a5 100644 --- a/Source/Initialization/ExternalField.cpp +++ b/Source/Initialization/ExternalField.cpp @@ -127,11 +127,11 @@ ExternalFieldParams::ExternalFieldParams(const amrex::ParmParse& pp_warpx) str_Bz_ext_grid_function); Bxfield_parser = std::make_unique( - utils::parser::makeParser(str_Bx_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Bx_ext_grid_function,{"x","y","z","t"})); Byfield_parser = std::make_unique( - utils::parser::makeParser(str_By_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_By_ext_grid_function,{"x","y","z","t"})); Bzfield_parser = std::make_unique( - utils::parser::makeParser(str_Bz_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Bz_ext_grid_function,{"x","y","z","t"})); } //___________________________________________________________________________ @@ -163,11 +163,11 @@ ExternalFieldParams::ExternalFieldParams(const amrex::ParmParse& pp_warpx) str_Ez_ext_grid_function); Exfield_parser = std::make_unique( - 
utils::parser::makeParser(str_Ex_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ex_ext_grid_function,{"x","y","z","t"})); Eyfield_parser = std::make_unique( - utils::parser::makeParser(str_Ey_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ey_ext_grid_function,{"x","y","z","t"})); Ezfield_parser = std::make_unique( - utils::parser::makeParser(str_Ez_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ez_ext_grid_function,{"x","y","z","t"})); } //___________________________________________________________________________ diff --git a/Source/Initialization/PlasmaInjector.H b/Source/Initialization/PlasmaInjector.H index b9fe2323290..f14720d271c 100644 --- a/Source/Initialization/PlasmaInjector.H +++ b/Source/Initialization/PlasmaInjector.H @@ -131,6 +131,8 @@ public: int flux_normal_axis; int flux_direction; // -1 for left, +1 for right + bool m_inject_from_eb = false; // whether to inject from the embedded boundary + bool radially_weighted = true; std::string str_flux_function; diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index 3d846375a99..76bb7a5be42 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -9,6 +9,7 @@ */ #include "PlasmaInjector.H" +#include "EmbeddedBoundary/Enabled.H" #include "Initialization/GetTemperature.H" #include "Initialization/GetVelocity.H" #include "Initialization/InjectorDensity.H" @@ -303,50 +304,65 @@ void PlasmaInjector::setupNFluxPerCell (amrex::ParmParse const& pp_species) "(Please visit PR#765 for more information.)"); } #endif - utils::parser::getWithParser(pp_species, source_name, "surface_flux_pos", surface_flux_pos); - utils::parser::queryWithParser(pp_species, source_name, "flux_tmin", flux_tmin); - utils::parser::queryWithParser(pp_species, source_name, "flux_tmax", flux_tmax); - std::string flux_normal_axis_string; - utils::parser::get(pp_species, source_name, "flux_normal_axis", flux_normal_axis_string); - flux_normal_axis = -1; + + // Check whether injection from the embedded boundary is requested + utils::parser::queryWithParser(pp_species, source_name, "inject_from_embedded_boundary", m_inject_from_eb); + if (m_inject_from_eb) { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( EB::enabled(), + "Error: Embedded boundary injection is only available when " + "embedded boundaries are enabled."); + flux_normal_axis = 2; // Interpret z as the normal direction to the EB + flux_direction = 1; + } else { + // Injection is through a plane in this case. + // Parse the parameters of the plane (position, normal direction, etc.) 
+ + utils::parser::getWithParser(pp_species, source_name, "surface_flux_pos", surface_flux_pos); + utils::parser::queryWithParser(pp_species, source_name, "flux_tmin", flux_tmin); + utils::parser::queryWithParser(pp_species, source_name, "flux_tmax", flux_tmax); + std::string flux_normal_axis_string; + utils::parser::get(pp_species, source_name, "flux_normal_axis", flux_normal_axis_string); + flux_normal_axis = -1; #ifdef WARPX_DIM_RZ - if (flux_normal_axis_string == "r" || flux_normal_axis_string == "R") { - flux_normal_axis = 0; - } - if (flux_normal_axis_string == "t" || flux_normal_axis_string == "T") { - flux_normal_axis = 1; - } + if (flux_normal_axis_string == "r" || flux_normal_axis_string == "R") { + flux_normal_axis = 0; + } + if (flux_normal_axis_string == "t" || flux_normal_axis_string == "T") { + flux_normal_axis = 1; + } #else # ifndef WARPX_DIM_1D_Z - if (flux_normal_axis_string == "x" || flux_normal_axis_string == "X") { - flux_normal_axis = 0; - } + if (flux_normal_axis_string == "x" || flux_normal_axis_string == "X") { + flux_normal_axis = 0; + } # endif #endif #ifdef WARPX_DIM_3D - if (flux_normal_axis_string == "y" || flux_normal_axis_string == "Y") { - flux_normal_axis = 1; - } + if (flux_normal_axis_string == "y" || flux_normal_axis_string == "Y") { + flux_normal_axis = 1; + } #endif - if (flux_normal_axis_string == "z" || flux_normal_axis_string == "Z") { - flux_normal_axis = 2; - } + if (flux_normal_axis_string == "z" || flux_normal_axis_string == "Z") { + flux_normal_axis = 2; + } #ifdef WARPX_DIM_3D - const std::string flux_normal_axis_help = "'x', 'y', or 'z'."; + const std::string flux_normal_axis_help = "'x', 'y', or 'z'."; #else # ifdef WARPX_DIM_RZ - const std::string flux_normal_axis_help = "'r' or 'z'."; + const std::string flux_normal_axis_help = "'r' or 'z'."; # elif WARPX_DIM_XZ - const std::string flux_normal_axis_help = "'x' or 'z'."; + const std::string flux_normal_axis_help = "'x' or 'z'."; # else - const std::string flux_normal_axis_help = "'z'."; + const std::string flux_normal_axis_help = "'z'."; # endif -#endif - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_normal_axis >= 0, - "Error: Invalid value for flux_normal_axis. It must be " + flux_normal_axis_help); - utils::parser::getWithParser(pp_species, source_name, "flux_direction", flux_direction); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_direction == +1 || flux_direction == -1, - "Error: flux_direction must be -1 or +1."); + #endif + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_normal_axis >= 0, + "Error: Invalid value for flux_normal_axis. It must be " + flux_normal_axis_help); + utils::parser::getWithParser(pp_species, source_name, "flux_direction", flux_direction); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_direction == +1 || flux_direction == -1, + "Error: flux_direction must be -1 or +1."); + } + // Construct InjectorPosition with InjectorPositionRandom. 
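With the parsing above, flux injection from the embedded boundary becomes a single-flag switch: when inject_from_embedded_boundary is set, the plane parameters (surface_flux_pos, flux_normal_axis, flux_direction) are not read at all. A purely illustrative inputs-file excerpt; only the inject_from_embedded_boundary flag is taken from this diff, while the species name and the injection_style line are assumed boilerplate:

# hypothetical excerpt from a WarpX inputs file
electrons.injection_style = NFluxPerCell       # flux injection, as before
electrons.inject_from_embedded_boundary = 1    # new flag parsed above
# flux magnitude/profile and momentum distribution are set as usual;
# surface_flux_pos, flux_normal_axis and flux_direction are not needed in this mode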
h_flux_pos = std::make_unique( (InjectorPositionRandomPlane*)nullptr, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 70bf20d0905..ce9c3d50a1e 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -972,30 +972,23 @@ WarpX::InitLevelData (int lev, Real /*time*/) // The default maxlevel_extEMfield_init value is the total number of levels in the simulation if ((m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) && (lev > 0) && (lev <= maxlevel_extEMfield_init)) { - - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), - m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), - m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), - m_p_ext_field_params->Bxfield_parser->compile<3>(), - m_p_ext_field_params->Byfield_parser->compile<3>(), - m_p_ext_field_params->Bzfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Bfield_aux, + m_p_ext_field_params->Bxfield_parser->compile<4>(), + m_p_ext_field_params->Byfield_parser->compile<4>(), + m_p_ext_field_params->Bzfield_parser->compile<4>(), + lev, PatchType::fine, 'f', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'B', - lev, PatchType::fine); - - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), - m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), - m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), - m_p_ext_field_params->Bxfield_parser->compile<3>(), - m_p_ext_field_params->Byfield_parser->compile<3>(), - m_p_ext_field_params->Bzfield_parser->compile<3>(), + m_fields.get_alldirs(FieldType::face_areas, lev)); + + ComputeExternalFieldOnGridUsingParser( + FieldType::Bfield_cp, + m_p_ext_field_params->Bxfield_parser->compile<4>(), + m_p_ext_field_params->Byfield_parser->compile<4>(), + m_p_ext_field_params->Bzfield_parser->compile<4>(), + lev, PatchType::coarse, 'f', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], - 'B', - lev, PatchType::coarse); + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev]); } // if the input string for the E-field is "parse_e_ext_grid_function", @@ -1021,29 +1014,23 @@ WarpX::InitLevelData (int lev, Real /*time*/) #endif if (lev > 0) { - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Efield_aux, Direction{0}, lev), - m_fields.get(FieldType::Efield_aux, Direction{1}, lev), - m_fields.get(FieldType::Efield_aux, Direction{2}, lev), - m_p_ext_field_params->Exfield_parser->compile<3>(), - m_p_ext_field_params->Eyfield_parser->compile<3>(), - m_p_ext_field_params->Ezfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Efield_aux, + m_p_ext_field_params->Exfield_parser->compile<4>(), + m_p_ext_field_params->Eyfield_parser->compile<4>(), + m_p_ext_field_params->Ezfield_parser->compile<4>(), + lev, PatchType::fine, 'e', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'E', - lev, PatchType::fine); - - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Efield_cp, Direction{0}, lev), - m_fields.get(FieldType::Efield_cp, Direction{1}, lev), - m_fields.get(FieldType::Efield_cp, Direction{2}, lev), - m_p_ext_field_params->Exfield_parser->compile<3>(), - 
m_p_ext_field_params->Eyfield_parser->compile<3>(), - m_p_ext_field_params->Ezfield_parser->compile<3>(), + m_fields.get_alldirs(FieldType::face_areas, lev)); + + ComputeExternalFieldOnGridUsingParser( + FieldType::Efield_cp, + m_p_ext_field_params->Exfield_parser->compile<4>(), + m_p_ext_field_params->Eyfield_parser->compile<4>(), + m_p_ext_field_params->Ezfield_parser->compile<4>(), + lev, PatchType::coarse, 'e', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'E', - lev, PatchType::coarse); + m_fields.get_alldirs(FieldType::face_areas, lev)); #ifdef AMREX_USE_EB if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { @@ -1072,48 +1059,61 @@ WarpX::InitLevelData (int lev, Real /*time*/) } } -void -WarpX::InitializeExternalFieldsOnGridUsingParser ( - MultiFab *mfx, MultiFab *mfy, MultiFab *mfz, - ParserExecutor<3> const& xfield_parser, ParserExecutor<3> const& yfield_parser, - ParserExecutor<3> const& zfield_parser, - ablastr::fields::VectorField const& edge_lengths, - ablastr::fields::VectorField const& face_areas, - [[maybe_unused]] const char field, - const int lev, PatchType patch_type) +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, [[maybe_unused]] const char topology, + std::optional const& edge_lengths, + std::optional const& face_areas) { + auto t = gett_new(lev); auto dx_lev = geom[lev].CellSizeArray(); - amrex::IntVect refratio = (lev > 0 ) ? WarpX::RefRatio(lev-1) : amrex::IntVect(1); + const RealBox& real_box = geom[lev].ProbDomain(); + + amrex::IntVect refratio = (lev > 0 ) ? 
RefRatio(lev-1) : amrex::IntVect(1); if (patch_type == PatchType::coarse) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { dx_lev[idim] = dx_lev[idim] * refratio[idim]; } } - const RealBox& real_box = geom[lev].ProbDomain(); + + using ablastr::fields::Direction; + amrex::MultiFab* mfx = m_fields.get(field, Direction{0}, lev); + amrex::MultiFab* mfy = m_fields.get(field, Direction{1}, lev); + amrex::MultiFab* mfz = m_fields.get(field, Direction{2}, lev); + const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - bool const eb_enabled = EB::enabled(); + const bool eb_enabled = EB::enabled(); for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - const amrex::Box &tbx = mfi.tilebox(x_nodal_flag, mfx->nGrowVect()); - const amrex::Box &tby = mfi.tilebox(y_nodal_flag, mfy->nGrowVect()); - const amrex::Box &tbz = mfi.tilebox(z_nodal_flag, mfz->nGrowVect()); + const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); + const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); + const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - auto const &mfxfab = mfx->array(mfi); - auto const &mfyfab = mfy->array(mfi); - auto const &mfzfab = mfz->array(mfi); + auto const& mfxfab = mfx->array(mfi); + auto const& mfyfab = mfy->array(mfi); + auto const& mfzfab = mfz->array(mfi); amrex::Array4 lx, ly, lz, Sx, Sy, Sz; if (eb_enabled) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - Sx = face_areas[0]->array(mfi); - Sy = face_areas[1]->array(mfi); - Sz = face_areas[2]->array(mfi); + if (edge_lengths.has_value()) { + const auto& edge_lengths_array = edge_lengths.value(); + lx = edge_lengths_array[0]->array(mfi); + ly = edge_lengths_array[1]->array(mfi); + lz = edge_lengths_array[2]->array(mfi); + } + if (face_areas.has_value()) { + const auto& face_areas_array = face_areas.value(); + Sx = face_areas_array[0]->array(mfi); + Sy = face_areas_array[1]->array(mfi); + Sz = face_areas_array[2]->array(mfi); + } } #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) @@ -1132,10 +1132,10 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if(lx && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and Sx(i, j, k)<=0))) { return; } + if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and Sx(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if(lx && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and lz(i, j, k)<=0))) { return; } + if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and lz(i, j, k)<=0))) { return; } #endif #endif // Shift required in the x-, y-, or z- position @@ -1160,20 +1160,20 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the x-component of the field. 
- mfxfab(i,j,k) = xfield_parser(x,y,z); + mfxfab(i,j,k) = fx_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if(ly && ((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0))) { return; } + if(ly && ((topology=='e' and ly(i, j, k)<=0) or (topology=='f' and Sy(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered if(lx && - ((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + ((topology=='e' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (field=='B' and Sy(i,j,k)<=0))) { return; } + (topology=='f' and Sy(i,j,k)<=0))) { return; } #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1196,15 +1196,15 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the y-component of the field. - mfyfab(i,j,k) = yfield_parser(x,y,z); + mfyfab(i,j,k) = fy_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if(lz && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and Sz(i, j, k)<=0))) { return; } + if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and Sz(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if(lz && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and lx(i, j, k)<=0))) { return; } + if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and lx(i, j, k)<=0))) { return; } #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1227,7 +1227,7 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the z-component of the field. 
- mfzfab(i,j,k) = zfield_parser(x,y,z); + mfzfab(i,j,k) = fz_parser(x,y,z,t); } ); } @@ -1386,17 +1386,14 @@ WarpX::LoadExternalFields (int const lev) // External grid fields if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Bfield_fp_external with external function - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), - m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), - m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), - m_p_ext_field_params->Bxfield_parser->compile<3>(), - m_p_ext_field_params->Byfield_parser->compile<3>(), - m_p_ext_field_params->Bzfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Bfield_fp_external, + m_p_ext_field_params->Bxfield_parser->compile<4>(), + m_p_ext_field_params->Byfield_parser->compile<4>(), + m_p_ext_field_params->Bzfield_parser->compile<4>(), + lev, PatchType::fine, 'f', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'B', - lev, PatchType::fine); + m_fields.get_alldirs(FieldType::face_areas, lev)); } else if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) @@ -1414,17 +1411,14 @@ WarpX::LoadExternalFields (int const lev) if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Efield_fp_external with external function - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), - m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), - m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), - m_p_ext_field_params->Exfield_parser->compile<3>(), - m_p_ext_field_params->Eyfield_parser->compile<3>(), - m_p_ext_field_params->Ezfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Efield_fp_external, + m_p_ext_field_params->Exfield_parser->compile<4>(), + m_p_ext_field_params->Eyfield_parser->compile<4>(), + m_p_ext_field_params->Ezfield_parser->compile<4>(), + lev, PatchType::fine, 'e', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'E', - lev, PatchType::fine); + m_fields.get_alldirs(FieldType::face_areas, lev)); } else if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) diff --git a/Source/Particles/AddPlasmaUtilities.H b/Source/Particles/AddPlasmaUtilities.H index 8f0489e3921..bb05d7be3c8 100644 --- a/Source/Particles/AddPlasmaUtilities.H +++ b/Source/Particles/AddPlasmaUtilities.H @@ -22,6 +22,29 @@ #include #include +struct PDim3 { + amrex::ParticleReal x, y, z; + + AMREX_GPU_HOST_DEVICE + PDim3(const amrex::XDim3& a): + x{static_cast<amrex::ParticleReal>(a.x)}, + y{static_cast<amrex::ParticleReal>(a.y)}, + z{static_cast<amrex::ParticleReal>(a.z)} + {} + + AMREX_GPU_HOST_DEVICE + ~PDim3() = default; + + AMREX_GPU_HOST_DEVICE + PDim3(PDim3 const &) = default; + AMREX_GPU_HOST_DEVICE + PDim3& operator=(PDim3 const &) = default; + AMREX_GPU_HOST_DEVICE + PDim3(PDim3&&) = default; + AMREX_GPU_HOST_DEVICE + PDim3& operator=(PDim3&&) = default; +}; + /* Finds the overlap region between the given tile_realbox and part_realbox, returning true if an overlap exists and false if otherwise.
This also sets the parameters overlap_realbox, @@ -71,12 +94,124 @@ int compute_area_weights (const amrex::IntVect& iv, const int normal_axis) { return r; } + +#ifdef AMREX_USE_EB +/* + * \brief This computes the scale_fac (used for setting the particle weights) on a on area basis + * (used for flux injection from the embedded boundary). + * + * \param[in] dx: cell size in each direction + * \param[in] num_ppc_real: number of particles per cell + * \param[in] eb_bnd_normal_arr: array containing the normal to the embedded boundary + * \param[in] i, j, k: indices of the cell + * + * \return scale_fac: the scaling factor to be applied to the weight of the particles + */ +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +amrex::Real compute_scale_fac_area_eb ( + const amrex::GpuArray& dx, + const amrex::Real num_ppc_real, + amrex::Array4 const& eb_bnd_normal_arr, + int i, int j, int k ) { + using namespace amrex::literals; + // Scale particle weight by the area of the emitting surface, within one cell + // By definition, eb_bnd_area_arr is normalized (unitless). + // Here we undo the normalization (i.e. multiply by the surface used for normalization in amrex: + // see https://amrex-codes.github.io/amrex/docs_html/EB.html#embedded-boundary-data-structures) +#if defined(WARPX_DIM_3D) + const amrex::Real nx = eb_bnd_normal_arr(i,j,k,0); + const amrex::Real ny = eb_bnd_normal_arr(i,j,k,1); + const amrex::Real nz = eb_bnd_normal_arr(i,j,k,2); + amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(nx*dx[1]*dx[2]) + + amrex::Math::powi<2>(ny*dx[0]*dx[2]) + + amrex::Math::powi<2>(nz*dx[0]*dx[1])); + +#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ) + const amrex::Real nx = eb_bnd_normal_arr(i,j,k,0); + const amrex::Real nz = eb_bnd_normal_arr(i,j,k,1); + amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(nx*dx[1]) + + amrex::Math::powi<2>(nz*dx[0])); +#else + amrex::ignore_unused(dx, eb_bnd_normal_arr, i, j, k); + amrex::Real scale_fac = 0.0_rt; +#endif + // Do not multiply by eb_bnd_area_arr(i,j,k) here because this + // already taken into account by emitting a number of macroparticles + // that is proportional to the area of eb_bnd_area_arr(i,j,k). + scale_fac /= num_ppc_real; + return scale_fac; +} + +/* \brief Rotate the momentum of the particle so that the z direction + * transforms to the normal of the embedded boundary. + * + * More specifically, before calling this function, `pu.z` has the + * momentum distribution that is meant for the direction normal to the + * embedded boundary, and `pu.x`/`pu.y` have the momentum distribution that + * is meant for the tangentional direction. After calling this function, + * `pu.x`, `pu.y`, `pu.z` will have the correct momentum distribution, + * consistent with the local normal to the embedded boundary. + * + * \param[inout] pu momentum of the particle + * \param[in] eb_bnd_normal_arr: array containing the normal to the embedded boundary + * \param[in] i, j, k: indices of the cell + * */ +AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE +void rotate_momentum_eb ( + PDim3 & pu, + amrex::Array4 const& eb_bnd_normal_arr, + int i, int j, int k ) +{ + using namespace amrex::literals; + +#if defined(WARPX_DIM_3D) + // The minus sign below takes into account the fact that eb_bnd_normal_arr + // points towards the covered region, while particles are to be emitted + // *away* from the covered region. 
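// The rotation applied below is R = Rz(phi)*Ry(theta), with cos(theta) = nz,
// sin(theta) = sqrt(nx*nx + ny*ny) and (cos(phi), sin(phi)) = (nx, ny)/sqrt(nx*nx + ny*ny),
// so that the unit vector along z is mapped onto the outward normal (nx, ny, nz).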
+ amrex::Real const nx = -eb_bnd_normal_arr(i,j,k,0); + amrex::Real const ny = -eb_bnd_normal_arr(i,j,k,1); + amrex::Real const nz = -eb_bnd_normal_arr(i,j,k,2); + + // Rotate the momentum in theta and phi + amrex::Real const cos_theta = nz; + amrex::Real const sin_theta = std::sqrt(1._rt-nz*nz); + amrex::Real const nperp = std::sqrt(nx*nx + ny*ny); + amrex::Real cos_phi = 1; + amrex::Real sin_phi = 0; + if ( nperp > 0.0 ) { + cos_phi = nx/nperp; + sin_phi = ny/nperp; + } + // Apply rotation matrix + amrex::Real const ux = pu.x*cos_theta*cos_phi - pu.y*sin_phi + pu.z*sin_theta*cos_phi; + amrex::Real const uy = pu.x*cos_theta*sin_phi + pu.y*cos_phi + pu.z*sin_theta*sin_phi; + amrex::Real const uz = -pu.x*sin_theta + pu.z*cos_theta; + pu.x = ux; + pu.y = uy; + pu.z = uz; + +#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ) + // The minus sign below takes into account the fact that eb_bnd_normal_arr + // points towards the covered region, while particles are to be emitted + // *away* from the covered region. + amrex::Real const sin_theta = -eb_bnd_normal_arr(i,j,k,0); + amrex::Real const cos_theta = -eb_bnd_normal_arr(i,j,k,1); + amrex::Real const uz = pu.z*cos_theta - pu.x*sin_theta; + amrex::Real const ux = pu.x*cos_theta + pu.z*sin_theta; + pu.x = ux; + pu.z = uz; +#else + amrex::ignore_unused(pu, eb_bnd_normal_arr, i, j, k); +#endif +} +#endif //AMREX_USE_EB + /* This computes the scale_fac (used for setting the particle weights) on a on area basis - (used for flux injection). + (used for flux injection from a plane). */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE -amrex::Real compute_scale_fac_area (const amrex::GpuArray& dx, +amrex::Real compute_scale_fac_area_plane (const amrex::GpuArray& dx, const amrex::Real num_ppc_real, const int flux_normal_axis) { using namespace amrex::literals; amrex::Real scale_fac = AMREX_D_TERM(dx[0],*dx[1],*dx[2])/num_ppc_real; diff --git a/Source/Particles/Gather/GetExternalFields.cpp b/Source/Particles/Gather/GetExternalFields.cpp index bb55f79f394..207ef4a5a8b 100644 --- a/Source/Particles/Gather/GetExternalFields.cpp +++ b/Source/Particles/Gather/GetExternalFields.cpp @@ -50,19 +50,17 @@ GetExternalEBField::GetExternalEBField (const WarpXParIter& a_pti, long a_offset if (mypc.m_E_ext_particle_s == "parse_e_ext_particle_function") { m_Etype = ExternalFieldInitType::Parser; - constexpr auto num_arguments = 4; //x,y,z,t - m_Exfield_partparser = mypc.m_Ex_particle_parser->compile(); - m_Eyfield_partparser = mypc.m_Ey_particle_parser->compile(); - m_Ezfield_partparser = mypc.m_Ez_particle_parser->compile(); + m_Exfield_partparser = mypc.m_Ex_particle_parser->compile<4>(); + m_Eyfield_partparser = mypc.m_Ey_particle_parser->compile<4>(); + m_Ezfield_partparser = mypc.m_Ez_particle_parser->compile<4>(); } if (mypc.m_B_ext_particle_s == "parse_b_ext_particle_function") { m_Btype = ExternalFieldInitType::Parser; - constexpr auto num_arguments = 4; //x,y,z,t - m_Bxfield_partparser = mypc.m_Bx_particle_parser->compile(); - m_Byfield_partparser = mypc.m_By_particle_parser->compile(); - m_Bzfield_partparser = mypc.m_Bz_particle_parser->compile(); + m_Bxfield_partparser = mypc.m_Bx_particle_parser->compile<4>(); + m_Byfield_partparser = mypc.m_By_particle_parser->compile<4>(); + m_Bzfield_partparser = mypc.m_Bz_particle_parser->compile<4>(); } if (mypc.m_E_ext_particle_s == "repeated_plasma_lens" || diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 26f9fee38d3..7c70c9a35c4 100644 --- 
a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -147,29 +147,6 @@ namespace return z0; } - struct PDim3 { - ParticleReal x, y, z; - - AMREX_GPU_HOST_DEVICE - PDim3(const amrex::XDim3& a): - x{static_cast(a.x)}, - y{static_cast(a.y)}, - z{static_cast(a.z)} - {} - - AMREX_GPU_HOST_DEVICE - ~PDim3() = default; - - AMREX_GPU_HOST_DEVICE - PDim3(PDim3 const &) = default; - AMREX_GPU_HOST_DEVICE - PDim3& operator=(PDim3 const &) = default; - AMREX_GPU_HOST_DEVICE - PDim3(PDim3&&) = default; - AMREX_GPU_HOST_DEVICE - PDim3& operator=(PDim3&&) = default; - }; - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE XDim3 getCellCoords (const GpuArray& lo_corner, const GpuArray& dx, @@ -1371,6 +1348,22 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const auto dx = geom.CellSizeArray(); const auto problo = geom.ProbLoArray(); +#ifdef AMREX_USE_EB + bool const inject_from_eb = plasma_injector.m_inject_from_eb; // whether to inject from EB or from a plane + // Extract data structures for embedded boundaries + amrex::FabArray const* eb_flag = nullptr; + amrex::MultiCutFab const* eb_bnd_area = nullptr; + amrex::MultiCutFab const* eb_bnd_normal = nullptr; + amrex::MultiCutFab const* eb_bnd_cent = nullptr; + if (inject_from_eb) { + amrex::EBFArrayBoxFactory const& eb_box_factory = WarpX::GetInstance().fieldEBFactory(0); + eb_flag = &eb_box_factory.getMultiEBCellFlagFab(); + eb_bnd_area = &eb_box_factory.getBndryArea(); + eb_bnd_normal = &eb_box_factory.getBndryNormal(); + eb_bnd_cent = &eb_box_factory.getBndryCent(); + } +#endif + amrex::LayoutData* cost = WarpX::getCosts(0); // Create temporary particle container to which particles will be added; @@ -1428,9 +1421,20 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, RealBox overlap_realbox; Box overlap_box; IntVect shifted; - const bool no_overlap = find_overlap_flux(tile_realbox, part_realbox, dx, problo, plasma_injector, overlap_realbox, overlap_box, shifted); - if (no_overlap) { - continue; // Go to the next tile +#ifdef AMREX_USE_EB + if (inject_from_eb) { + // Injection from EB + const amrex::FabType fab_type = (*eb_flag)[mfi].getType(tile_box); + if (fab_type == amrex::FabType::regular) { continue; } // Go to the next tile + if (fab_type == amrex::FabType::covered) { continue; } // Go to the next tile + overlap_box = tile_box; + overlap_realbox = part_realbox; + } else +#endif + { + // Injection from a plane + const bool no_overlap = find_overlap_flux(tile_realbox, part_realbox, dx, problo, plasma_injector, overlap_realbox, overlap_box, shifted); + if (no_overlap) { continue; } // Go to the next tile } const int grid_id = mfi.index(); @@ -1450,24 +1454,57 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, if (refine_injection) { fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); } + +#ifdef AMREX_USE_EB + // Extract data structures for embedded boundaries + amrex::Array4::value_type> eb_flag_arr; + amrex::Array4 eb_bnd_area_arr; + amrex::Array4 eb_bnd_normal_arr; + amrex::Array4 eb_bnd_cent_arr; + if (inject_from_eb) { + eb_flag_arr = eb_flag->array(mfi); + eb_bnd_area_arr = eb_bnd_area->array(mfi); + eb_bnd_normal_arr = eb_bnd_normal->array(mfi); + eb_bnd_cent_arr = eb_bnd_cent->array(mfi); + } +#endif + amrex::ParallelForRNG(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv(AMREX_D_DECL(i, j, k)); 
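// The expected number of macroparticles in a cell is num_ppc_real, scaled by the
// normalized EB surface area when injecting from the embedded boundary and by the
// area weight r inside the refined-injection region; adding a uniform random number
// in [0, 1) before truncating to an integer keeps the average count equal to that
// real-valued expectation.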
amrex::ignore_unused(j,k); - auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); - auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); - - if (flux_pos->overlapsWith(lo, hi)) + // Determine the number of macroparticles to inject in this cell (num_ppc_int) +#ifdef AMREX_USE_EB + amrex::Real num_ppc_real_in_this_cell = num_ppc_real; // user input: number of macroparticles per cell + if (inject_from_eb) { + // Injection from EB + // Skip cells that are not partially covered by the EB + if (eb_flag_arr(i,j,k).isRegular() || eb_flag_arr(i,j,k).isCovered()) { return; } + // Scale by the (normalized) area of the EB surface in this cell + num_ppc_real_in_this_cell *= eb_bnd_area_arr(i,j,k); + } else +#else + amrex::Real const num_ppc_real_in_this_cell = num_ppc_real; // user input: number of macroparticles per cell +#endif { - auto index = overlap_box.index(iv); - int r = 1; - if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = compute_area_weights(rrfac, flux_normal_axis); - } - const int num_ppc_int = static_cast(num_ppc_real*r + amrex::Random(engine)); - pcounts[index] = num_ppc_int; + // Injection from a plane + auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); + auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); + // Skip cells that do not overlap with the plane + if (!flux_pos->overlapsWith(lo, hi)) { return; } } + + auto index = overlap_box.index(iv); + // Take into account refined injection region + int r = 1; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + r = compute_area_weights(rrfac, flux_normal_axis); + } + const int num_ppc_int = static_cast(num_ppc_real_in_this_cell*r + amrex::Random(engine)); + pcounts[index] = num_ppc_int; + + amrex::ignore_unused(j,k); }); // Max number of new particles. All of them are created, @@ -1537,7 +1574,15 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, amrex::ignore_unused(j,k); const auto index = overlap_box.index(iv); - Real scale_fac = compute_scale_fac_area(dx, num_ppc_real, flux_normal_axis); + Real scale_fac; +#ifdef AMREX_USE_EB + if (inject_from_eb) { + scale_fac = compute_scale_fac_area_eb(dx, num_ppc_real, eb_bnd_normal_arr, i, j, k ); + } else +#endif + { + scale_fac = compute_scale_fac_area_plane(dx, num_ppc_real, flux_normal_axis); + } if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { scale_fac /= compute_area_weights(rrfac, flux_normal_axis); @@ -1548,13 +1593,32 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const long ip = poffset[index] + i_part; pa_idcpu[ip] = amrex::SetParticleIDandCPU(pid+ip, cpuid); - // This assumes the flux_pos is of type InjectorPositionRandomPlane - const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? 
- // In the refined injection region: use refinement ratio `rrfac` - flux_pos->getPositionUnitBox(i_part, rrfac, engine) : - // Otherwise: use 1 as the refinement ratio - flux_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine); - auto pos = getCellCoords(overlap_corner, dx, r, iv); + // Determine the position of the particle within the cell + XDim3 pos; + XDim3 r; +#ifdef AMREX_USE_EB + if (inject_from_eb) { +#if defined(WARPX_DIM_3D) + pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0]; + pos.y = overlap_corner[1] + (iv[1] + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1]; + pos.z = overlap_corner[2] + (iv[2] + 0.5_rt + eb_bnd_cent_arr(i,j,k,2))*dx[2]; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0]; + pos.y = 0.0_rt; + pos.z = overlap_corner[1] + (iv[1] + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1]; +#endif + } else +#endif + { + // Injection from a plane + // This assumes the flux_pos is of type InjectorPositionRandomPlane + r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? + // In the refined injection region: use refinement ratio `rrfac` + flux_pos->getPositionUnitBox(i_part, rrfac, engine) : + // Otherwise: use 1 as the refinement ratio + flux_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine); + pos = getCellCoords(overlap_corner, dx, r, iv); + } auto ppos = PDim3(pos); // inj_mom would typically be InjectorMomentumGaussianFlux @@ -1595,6 +1659,15 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, continue; } +#ifdef AMREX_USE_EB + if (inject_from_eb) { + // Injection from EB: rotate momentum according to the normal of the EB surface + // (The above code initialized the momentum by assuming that z is the direction + // normal to the EB surface. Thus we need to rotate from z to the normal.) + rotate_momentum_eb(pu, eb_bnd_normal_arr, i, j , k); + } +#endif + #ifdef WARPX_DIM_RZ // Conversion from cylindrical to Cartesian coordinates // Replace the x and y, setting an angle theta. @@ -1610,7 +1683,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const amrex::Real radial_position = ppos.x; ppos.x = radial_position*cos_theta; ppos.y = radial_position*sin_theta; - if (loc_flux_normal_axis != 2) { + if ((loc_flux_normal_axis != 2) +#ifdef AMREX_USE_EB + || (inject_from_eb) +#endif + ) { // Rotate the momentum // This because, when the flux direction is e.g. 
"r" // the `inj_mom` objects generates a v*Gaussian distribution diff --git a/Source/Python/Particles/MultiParticleContainer.cpp b/Source/Python/Particles/MultiParticleContainer.cpp index e709f0950b4..7b3b114b080 100644 --- a/Source/Python/Particles/MultiParticleContainer.cpp +++ b/Source/Python/Particles/MultiParticleContainer.cpp @@ -42,5 +42,12 @@ i_lens: int strength_E, strength_B: floats The electric and magnetic focusing strength of the lens)pbdoc" ) + + .def("get_charge_density", + [](MultiParticleContainer& mpc, int lev, bool local) { + return mpc.GetChargeDensity(lev, local); + }, + py::arg("lev"), py::arg("local") + ) ; } diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 0aab95f78f8..0b1ae49dfbc 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -112,13 +112,6 @@ void init_WarpX (py::module& m) //py::overload_cast< int >(&WarpX::boxArray, py::const_), py::arg("lev") ) - .def("field", - [](WarpX const & wx) { - return wx.multifab_map; - }, - py::return_value_policy::reference_internal, - R"doc(Registry to all WarpX MultiFab (fields).)doc" - ) .def("multifab", [](WarpX & wx, std::string internal_name) { if (wx.m_fields.internal_has(internal_name)) { @@ -253,6 +246,10 @@ The physical fields in WarpX have the following naming: [] () { WarpX::ProjectionCleanDivB(); }, "Executes projection based divergence cleaner on loaded Bfield_fp_external." ) + .def("synchronize", + [] (WarpX& wx) { wx.Synchronize(); }, + "Synchronize particle velocities and positions." + ) ; py::class_(m, "Config") diff --git a/Source/WarpX.H b/Source/WarpX.H index 83b1880f2b1..bad63cd44d9 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -403,31 +403,6 @@ public: // Global rho nodal flag to know about rho index type when rho MultiFab is not allocated amrex::IntVect m_rho_nodal_flag; - /** - * \brief - * Allocate and optionally initialize the MultiFab. This also adds the MultiFab - * to the map of MultiFabs (used to ease the access to MultiFabs from the Python - * interface - * - * \param[out] mf The MultiFab unique pointer to be allocated - * \param[in] ba The BoxArray describing the MultiFab - * \param[in] dm The DistributionMapping describing the MultiFab - * \param[in] ncomp The number of components in the MultiFab - * \param[in] ngrow The number of guard cells in the MultiFab - * \param[in] level The refinement level - * \param[in] name The name of the MultiFab to use in the map - * \param[in] initial_value The optional initial value - */ - static void AllocInitMultiFab ( - std::unique_ptr& mf, - const amrex::BoxArray& ba, - const amrex::DistributionMapping& dm, - int ncomp, - const amrex::IntVect& ngrow, - int level, - const std::string& name, - std::optional initial_value = {}); - /** * \brief * Allocate and optionally initialize the iMultiFab. 
This also adds the iMultiFab @@ -453,30 +428,9 @@ public: const std::string& name, std::optional initial_value = {}); - /** - * \brief - * Create an alias of a MultiFab, adding the alias to the MultiFab map - * \param[out] mf The MultiFab to create - * \param[in] mf_to_alias The MultiFab to alias - * \param[in] scomp The starting component to be aliased - * \param[in] ncomp The number of components to alias - * \param[in] level The refinement level - * \param[in] name The name of the MultiFab to use in the map - * \param[in] initial_value optional initial value for MultiFab - */ - static void AliasInitMultiFab ( - std::unique_ptr& mf, - const amrex::MultiFab& mf_to_alias, - int scomp, - int ncomp, - int level, - const std::string& name, - std::optional initial_value); - - // Maps of all of the MultiFabs and iMultiFabs used (this can include MFs from other classes) - // This is a convenience for the Python interface, allowing all MultiFabs + // Maps of all of the iMultiFabs used (this can include MFs from other classes) + // This is a convenience for the Python interface, allowing all iMultiFabs // to be easily referenced from Python. - static std::map multifab_map; static std::map imultifab_map; /** @@ -735,7 +689,7 @@ public: void DampJPML (int lev, PatchType patch_type); void CopyJPML (); - bool isAnyBoundaryPML(); + /** True if any of the particle boundary condition type is Thermal */ static bool isAnyParticleBoundaryThermal(); @@ -950,34 +904,31 @@ public: /** * \brief - * This function initializes the E and B fields on each level + * This function computes the E, B, and J fields on each level * using the parser and the user-defined function for the external fields. * The subroutine will parse the x_/y_z_external_grid_function and * then, the field multifab is initialized based on the (x,y,z) position * on the staggered yee-grid or cell-centered grid, in the interior cells * and guard cells. 
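 * The field parsers are compiled with four arguments (x, y, z, t), where t is
 * the current simulation time on the given refinement level.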
* - * \param[in] mfx x-component of the field to be initialized - * \param[in] mfy y-component of the field to be initialized - * \param[in] mfz z-component of the field to be initialized - * \param[in] xfield_parser parser function to initialize x-field - * \param[in] yfield_parser parser function to initialize y-field - * \param[in] zfield_parser parser function to initialize z-field + * \param[in] field FieldType + * \param[in] fx_parser parser function to initialize x-field + * \param[in] fy_parser parser function to initialize y-field + * \param[in] fz_parser parser function to initialize z-field * \param[in] edge_lengths edge lengths information * \param[in] face_areas face areas information - * \param[in] field flag indicating which field is being initialized ('E' for electric, 'B' for magnetic) + * \param[in] topology flag indicating if field is edge-based or face-based * \param[in] lev level of the Multifabs that is initialized * \param[in] patch_type PatchType on which the field is initialized (fine or coarse) */ - void InitializeExternalFieldsOnGridUsingParser ( - amrex::MultiFab *mfx, amrex::MultiFab *mfy, amrex::MultiFab *mfz, - amrex::ParserExecutor<3> const& xfield_parser, - amrex::ParserExecutor<3> const& yfield_parser, - amrex::ParserExecutor<3> const& zfield_parser, - ablastr::fields::VectorField const& edge_lengths, - ablastr::fields::VectorField const& face_areas, - [[maybe_unused]] char field, - int lev, PatchType patch_type); + void ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, [[maybe_unused]] char topology, + std::optional const& edge_lengths = std::nullopt, + std::optional const& face_areas = std::nullopt); /** * \brief Load field values from a user-specified openPMD file, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 89254e05c98..d1e3108e32a 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -184,7 +184,6 @@ bool WarpX::safe_guard_cells = false; utils::parser::IntervalsParser WarpX::dt_update_interval; -std::map WarpX::multifab_map; std::map WarpX::imultifab_map; IntVect WarpX::filter_npass_each_dir(1); @@ -196,6 +195,22 @@ amrex::IntVect m_rho_nodal_flag; WarpX* WarpX::m_instance = nullptr; +namespace +{ + + [[nodiscard]] bool + isAnyBoundaryPML( + const amrex::Array& field_boundary_lo, + const amrex::Array& field_boundary_hi) + { + constexpr auto is_pml = [](const FieldBoundaryType fbt) {return (fbt == FieldBoundaryType::PML);}; + const auto is_any_pml = + std::any_of(field_boundary_lo.begin(), field_boundary_lo.end(), is_pml) || + std::any_of(field_boundary_hi.begin(), field_boundary_hi.end(), is_pml); + return is_any_pml; + } +} + void WarpX::MakeWarpX () { ParseGeometryInput(); @@ -879,7 +894,7 @@ WarpX::ReadParameters () } #ifdef WARPX_DIM_RZ - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( isAnyBoundaryPML() == false || electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ::isAnyBoundaryPML(field_boundary_lo, field_boundary_hi) == false || electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD, "PML are not implemented in RZ geometry with FDTD; please set a different boundary condition using boundary.field_lo and boundary.field_hi."); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( field_boundary_lo[1] != FieldBoundaryType::PML && field_boundary_hi[1] != FieldBoundaryType::PML, "PML are not implemented in RZ geometry along z; 
please set a different boundary condition using boundary.field_lo and boundary.field_hi."); @@ -2015,7 +2030,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d safe_guard_cells, WarpX::do_multi_J, WarpX::fft_do_time_averaging, - WarpX::isAnyBoundaryPML(), + ::isAnyBoundaryPML(field_boundary_lo, field_boundary_hi), WarpX::do_pml_in_domain, WarpX::pml_ncell, this->refRatio(), @@ -2743,7 +2758,7 @@ void WarpX::AllocLevelSpectralSolverRZ (amrex::Vector& mf, - const amrex::BoxArray& ba, - const amrex::DistributionMapping& dm, - const int ncomp, - const amrex::IntVect& ngrow, - const int level, - const std::string& name, - std::optional initial_value) -{ - const auto name_with_suffix = TagWithLevelSuffix(name, level); - const auto tag = amrex::MFInfo().SetTag(name_with_suffix); - mf = std::make_unique(ba, dm, ncomp, ngrow, tag); - if (initial_value) { - mf->setVal(*initial_value); - } - multifab_map[name_with_suffix] = mf.get(); -} - void WarpX::AllocInitMultiFab ( std::unique_ptr& mf, @@ -3324,24 +3309,6 @@ WarpX::AllocInitMultiFab ( imultifab_map[name_with_suffix] = mf.get(); } -void -WarpX::AliasInitMultiFab ( - std::unique_ptr& mf, - const amrex::MultiFab& mf_to_alias, - const int scomp, - const int ncomp, - const int level, - const std::string& name, - std::optional initial_value) -{ - const auto name_with_suffix = TagWithLevelSuffix(name, level); - mf = std::make_unique(mf_to_alias, amrex::make_alias, scomp, ncomp); - if (initial_value) { - mf->setVal(*initial_value); - } - multifab_map[name_with_suffix] = mf.get(); -} - amrex::DistributionMapping WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba) { diff --git a/Source/ablastr/Make.package b/Source/ablastr/Make.package index b9ff3c72560..edbf43b7802 100644 --- a/Source/ablastr/Make.package +++ b/Source/ablastr/Make.package @@ -1,4 +1,3 @@ -#CEXE_sources += ParticleBoundaries.cpp include $(WARPX_HOME)/Source/ablastr/coarsen/Make.package include $(WARPX_HOME)/Source/ablastr/math/Make.package diff --git a/Source/ablastr/fields/Make.package b/Source/ablastr/fields/Make.package index 727a17b6de8..7441a6a1238 100644 --- a/Source/ablastr/fields/Make.package +++ b/Source/ablastr/fields/Make.package @@ -1,5 +1,7 @@ + +CEXE_sources += MultiFabRegister.cpp + ifeq ($(USE_FFT),TRUE) - CEXE_sources += MultiFabRegister.cpp ifeq ($(DIM),3) CEXE_sources += IntegratedGreenFunctionSolver.cpp endif diff --git a/Tools/DevUtils/update_benchmarks_from_azure_output.py b/Tools/DevUtils/update_benchmarks_from_azure_output.py index b2be4d17a7b..bcff995b21a 100644 --- a/Tools/DevUtils/update_benchmarks_from_azure_output.py +++ b/Tools/DevUtils/update_benchmarks_from_azure_output.py @@ -1,4 +1,4 @@ -# Copyright 2023 Neil Zaim +# Copyright 2023 Neil Zaim, Edoardo Zoni # # This file is part of WarpX. # @@ -9,56 +9,45 @@ import sys """ -This Python script updates the Azure benchmarks automatically using a raw Azure output textfile -that is given as the first and only argument of the script. - -In the Azure output, we read the lines contained between -"New file for Test_Name:" -and the next occurrence of -"'----------------'" -And use these lines to update the benchmarks +This Python script updates the Azure benchmarks automatically using a raw +Azure output text file that is passed as command line argument of the script. 
""" -azure_output_filename = sys.argv[1] +# read path to Azure output text file +azure_output = sys.argv[1] -pattern_test_name = "New file for (?P[\w\-]*)" -closing_string = "----------------" -benchmark_path = "../../Regression/Checksum/benchmarks_json/" -benchmark_suffix = ".json" +# string to identify failing tests that require a checksums reset +new_checksums = "New checksums" +failing_test = "" -first_line_read = False -current_test = "" +# path of all checksums benchmark files +benchmark_path = "../../Regression/Checksum/benchmarks_json/" -with open(azure_output_filename, "r") as f: +with open(azure_output, "r") as f: + # find length of Azure prefix to be removed from each line, + # first line of Azure output starts with "##[section]Starting:" + first_line = f.readline() + prefix_length = first_line.find("#") + # loop over lines for line in f: - if current_test == "": - # Here we search lines that read, for example, - # "New file for LaserAcceleration_BTD" - # and we set current_test = "LaserAcceleration_BTD" - match_test_name = re.search(pattern_test_name, line) - if match_test_name: - current_test = match_test_name.group("testname") - new_file_string = "" - + # remove Azure prefix from line + line = line[prefix_length:] + if failing_test == "": + # no failing test found yet + if re.search(new_checksums, line): + # failing test found, set failing test name + failing_test = line[line.find("test_") : line.find(".json")] + json_file_string = "" else: - # We add each line to the new file string until we find the line containing - # "----------------" - # which indicates that we have read the new file entirely - - if closing_string not in line: - if not first_line_read: - # Raw Azure output comes with a prefix at the beginning of each line that we do - # not need here. The first line that we will read is the prefix followed by the - # "{" character, so we determine how long the prefix is by finding the last - # occurrence of the "{" character in this line. - azure_indent = line.rfind("{") - first_line_read = True - new_file_string += line[azure_indent:] - - else: - # We have read the new file entirely. Dump it in the json file. 
- new_file_json = json.loads(new_file_string) - json_filepath = benchmark_path + current_test + benchmark_suffix - with open(json_filepath, "w") as f_json: - json.dump(new_file_json, f_json, indent=2) - current_test = "" + # extract and dump new checksums of failing test + json_file_string += line + if line.startswith("}"): # end of new checksums + json_file = json.loads(json_file_string) + json_filename = failing_test + ".json" + json_filepath = benchmark_path + json_filename + print(f"\nDumping new checksums file {json_filename}:") + print(json_file_string) + with open(json_filepath, "w") as json_f: + json.dump(json_file, json_f, indent=2) + # reset to empty string to continue search of failing tests + failing_test = "" diff --git a/Tools/Release/newVersion.sh b/Tools/Release/newVersion.sh index b1d2a6aad27..9491401b120 100755 --- a/Tools/Release/newVersion.sh +++ b/Tools/Release/newVersion.sh @@ -104,25 +104,25 @@ sed -i -E "s/"\ # setup.py: version = '21.02', sed -i -E "s/"\ -"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*')(.*)('.+)/"\ +"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*\")(.*)(\".+)/"\ "\1${VERSION_STR}\3/g" \ ${REPO_DIR}/setup.py # Python/setup.py: version = '21.02', sed -i -E "s/"\ -"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*')(.*)('.+)/"\ +"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*\")(.*)(\".+)/"\ "\1${VERSION_STR}\3/g" \ ${REPO_DIR}/Python/setup.py # sphinx / RTD # docs/source/conf.py sed -i "s/"\ -"[[:blank:]]*version[[:blank:]]*=[[:blank:]]*u.*/"\ -"version = u'${VERSION_STR_NOSUFFIX}'/g" \ +"[[:blank:]]*version[[:blank:]]*=[[:blank:]]*.*/"\ +"version = \"${VERSION_STR_NOSUFFIX}\"/g" \ ${REPO_DIR}/Docs/source/conf.py sed -i "s/"\ -"[[:blank:]]*release[[:blank:]]*=[[:blank:]]*u.*/"\ -"release = u'${VERSION_STR}'/g" \ +"[[:blank:]]*release[[:blank:]]*=[[:blank:]]*.*/"\ +"release = \"${VERSION_STR}\"/g" \ ${REPO_DIR}/Docs/source/conf.py diff --git a/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example b/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example index 8db2b44b8a7..62f80433233 100644 --- a/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example +++ b/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example @@ -2,19 +2,16 @@ #export proj="" # change me, e.g., ac_blast # required dependencies -module load cmake/3.24.1 -module load cuda/11.4 -module load gcc/7.4.0 -module load openmpi/4.0.1-gcc +module load cmake/3.27.7 +module load gcc/11.4.0 +module load cuda/12.2.1 +module load openmpi/4.1.6 # optional: for QED support with detailed tables -module load boost/1.70.0-gcc +module load boost/1.83.0 # optional: for openPMD and PSATD+RZ support -module load hdf5/1.10.5-gcc-p -module load lapack/3.8.0-gcc -# CPU only: -#module load fftw/3.3.8-gcc +module load hdf5/1.14.3 export CMAKE_PREFIX_PATH=$HOME/sw/v100/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/v100/adios2-2.8.3:$CMAKE_PREFIX_PATH @@ -27,7 +24,7 @@ export PATH=$HOME/sw/v100/adios2-2.8.3/bin:$PATH #module load ccache # missing # optional: for Python bindings or libEnsemble -module load python/3.8.8 +module load python/3.11.6-gcc-11.4.0 if [ -d "$HOME/sw/v100/venvs/warpx" ] then diff --git a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example index d5cb1ec7a07..e1bd4e0fdd3 100644 --- a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example +++ b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example @@ -20,7 +20,7 @@ module load cmake/3.27.7 module load boost 
# optional: for openPMD and PSATD+RZ support -module load cray-hdf5-parallel/1.12.2.9 +module load hdf5/1.14.3 export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH @@ -53,7 +53,7 @@ export CXXFLAGS="-march=znver3" export CFLAGS="-march=znver3" # compiler environment hints -export CC=$(which gcc) -export CXX=$(which g++) +export CC=$(which gcc-12) +export CXX=$(which g++-12) export CUDACXX=$(which nvcc) export CUDAHOSTCXX=${CXX} diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 7524d919c61..6513841f327 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -92,6 +92,8 @@ macro(find_amrex) set(AMReX_PARTICLES ON CACHE INTERNAL "") set(AMReX_PROBINIT OFF CACHE INTERNAL "") set(AMReX_TINY_PROFILE ON CACHE BOOL "") + set(AMReX_LINEAR_SOLVERS_EM ON CACHE INTERNAL "") + set(AMReX_LINEAR_SOLVER_INCFLO OFF CACHE INTERNAL "") if(WarpX_ASCENT OR WarpX_SENSEI) set(AMReX_GPU_RDC ON CACHE BOOL "") @@ -200,6 +202,8 @@ macro(find_amrex) mark_as_advanced(AMReX_HYPRE) mark_as_advanced(AMReX_IPO) mark_as_advanced(AMReX_LINEAR_SOLVERS) + mark_as_advanced(AMReX_LINEAR_SOLVERS_INCFLO) + mark_as_advanced(AMReX_LINEAR_SOLVERS_EM) mark_as_advanced(AMReX_MEM_PROFILE) mark_as_advanced(AMReX_MPI) mark_as_advanced(AMReX_MPI_THREAD_MULTIPLE) @@ -256,7 +260,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.09 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 24.10 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -279,7 +283,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770" +set(WarpX_amrex_branch "e1222803739ed2342b9ff6fc2d57316ff0d6cb0c" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/FFT.cmake b/cmake/dependencies/FFT.cmake index 571006e8530..df0ef11ae53 100644 --- a/cmake/dependencies/FFT.cmake +++ b/cmake/dependencies/FFT.cmake @@ -48,14 +48,20 @@ if(ABLASTR_FFT) # # cuFFT (CUDA) - # TODO: check if `find_package` search works + if(WarpX_COMPUTE STREQUAL CUDA) + # nothing to do (cuFFT is part of the CUDA SDK) + # TODO: check if `find_package` search works for cuFFT # rocFFT (HIP) - if(WarpX_COMPUTE STREQUAL HIP) + elseif(WarpX_COMPUTE STREQUAL HIP) find_package(rocfft REQUIRED) - # FFTW (NOACC, OMP, SYCL) - elseif(NOT WarpX_COMPUTE STREQUAL CUDA) + elseif(WarpX_COMPUTE STREQUAL SYCL) + # nothing to do (oneMKL is part of oneAPI) + # TODO: check if `find_package` search works for oneMKL + + # FFTW (NOACC, OMP) + else() # On Windows, try searching for FFTW3(f)Config.cmake files first # Installed .pc files wrongly and unconditionally add -lm # https://github.com/FFTW/fftw3/issues/236 @@ -106,6 +112,8 @@ if(ABLASTR_FFT) 
warpx_make_third_party_includes_system(cufft FFT) elseif(WarpX_COMPUTE STREQUAL HIP) warpx_make_third_party_includes_system(roc::rocfft FFT) + elseif(WarpX_COMPUTE STREQUAL SYCL) + warpx_make_third_party_includes_system(AMReX::SYCL FFT) else() if(WarpX_FFTW_SEARCH STREQUAL CMAKE) warpx_make_third_party_includes_system(FFTW3::fftw3${HFFTWp} FFT) diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 69711866f74..9543dac2ee2 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.09 CONFIG REQUIRED) + find_package(pyAMReX 24.10 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "1c66690f83244196c5655293f1381303a7d1589d" +set(WarpX_pyamrex_branch "3699781e4284921f9ccdbbbbc57169ff79c0de20" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/setup.py b/setup.py index efc18d900cf..9683c8ab675 100644 --- a/setup.py +++ b/setup.py @@ -274,7 +274,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="24.08", + version="24.10", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.",
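To illustrate the compile<3>() to compile<4>() change that runs through this patch (time added as a fourth parser argument), here is a minimal sketch against the amrex::Parser API as it is used in the code above; the function name, expression, and constants are hypothetical and are not part of the patch.

#include <AMReX_Parser.H>
#include <AMReX_REAL.H>

// Minimal sketch: evaluate a (hypothetical) time-dependent external-field
// expression at one point; in a real run this executes after amrex::Initialize().
amrex::Real eval_external_field_at (amrex::Real x, amrex::Real y, amrex::Real z, amrex::Real t)
{
    amrex::Parser parser("E0*sin(omega*t)*exp(-(x*x + y*y)/(w*w))");
    parser.setConstant("E0", 1.0e9);
    parser.setConstant("omega", 2.0e9);
    parser.setConstant("w", 1.0e-5);
    parser.registerVariables({"x", "y", "z", "t"});

    // compile<4>() returns a ParserExecutor<4>; arguments are passed in the order
    // the variables were registered, as in fx_parser(x, y, z, t) above.
    auto const exec = parser.compile<4>();
    return static_cast<amrex::Real>(exec(x, y, z, t));
}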