diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..b20527d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,24 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.10-bookworm", + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + "runArgs": ["--gpus", "all"], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "sudo apt update -y && sudo apt upgrade -y && sudo apt install gfortran -y && pip3 install --user -e .[dev,test,datagen310]" + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..63fa9ed --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,12 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for more information: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates +# https://containers.dev/guide/dependabot + +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: weekly diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml new file mode 100644 index 0000000..d0707c1 --- /dev/null +++ b/.github/workflows/precommit.yml @@ -0,0 +1,14 @@ +name: pre-commit + +on: + pull_request: + push: + branches: [main] + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - uses: pre-commit/action@v3.0.1 diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml new file mode 100644 index 0000000..9c714d1 --- /dev/null +++ b/.github/workflows/python-package.yml @@ -0,0 +1,33 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python + +name: Python package + +on: + push: + branches: ["main"] + pull_request: + branches: ["main"] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10"] + + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --user -e .[test] + - name: Lint and test with nox + run: | + # stop the build if there are Python syntax errors or undefined names + nox -s pylint tests diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 0000000..10b7eff --- /dev/null +++ 
b/.github/workflows/python-publish.yml @@ -0,0 +1,38 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.x" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - name: Build package + run: python -m build + - name: Publish package + uses: pypa/gh-action-pypi-publish@ec4db0b4ddc65acdf4bff5fa45ac92d78b56bdf0 + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..defc249 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,88 @@ +ci: + autoupdate_commit_msg: "chore: update pre-commit hooks" + autofix_commit_msg: "style: pre-commit fixes" + +repos: + - repo: https://github.com/adamchainz/blacken-docs + rev: "1.16.0" + hooks: + - id: blacken-docs + additional_dependencies: [black==23.*] + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: "v4.5.0" + hooks: + - id: check-added-large-files + - id: check-case-conflict + - id: check-merge-conflict + - id: check-symlinks + - id: check-yaml + - id: debug-statements + - id: end-of-file-fixer + - id: mixed-line-ending + - id: name-tests-test + args: ["--pytest-test-first"] + - id: requirements-txt-fixer + - id: trailing-whitespace + + - repo: https://github.com/pre-commit/pygrep-hooks + rev: "v1.10.0" + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: rst-inline-touching-normal + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: "v3.1.0" + hooks: + - id: prettier + types_or: [yaml, markdown, html, css, scss, javascript, json] + args: [--prose-wrap=always] + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: "v0.1.14" + hooks: + - id: ruff + args: ["--fix", "--show-fixes"] + - id: ruff-format + + - repo: https://github.com/pre-commit/mirrors-mypy + rev: "v1.8.0" + hooks: + - id: mypy + files: pdebench|tests + args: [] + additional_dependencies: + - pytest + + - repo: https://github.com/codespell-project/codespell + rev: "v2.2.6" + hooks: + - id: codespell + exclude_types: [jupyter] + + - repo: https://github.com/shellcheck-py/shellcheck-py + rev: "v0.9.0.6" + hooks: + - id: shellcheck + + - repo: local + hooks: + - id: disallow-caps + name: Disallow improper capitalization + language: pygrep + entry: PyBind|Numpy|Cmake|CCache|Github|PyTest + exclude: .pre-commit-config.yaml + + - repo: https://github.com/abravalheri/validate-pyproject + rev: "v0.16" + hooks: + - id: validate-pyproject + additional_dependencies: ["validate-pyproject-schema-store[all]"] + + - repo: https://github.com/python-jsonschema/check-jsonschema + rev: "0.27.3" + hooks: + - id: check-dependabot + - id: check-github-workflows + - id: check-readthedocs diff --git a/LICENSE.txt b/LICENSE.txt index b36c10f..49e3e47 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -6,4 +6,4 @@ Except where otherwise stated this code is released 
under the MIT license. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index 1af9ea9..ea37558 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,31 @@ # PDEBench -The code repository for the NeurIPS 2022 paper -[PDEBench: An Extensive Benchmark for Scientific Machine Learning](https://arxiv.org/abs/2210.07182) - -:tada: [**SimTech Best Paper Award 2023**](https://www.simtech.uni-stuttgart.de/press/SimTech-Best-Paper-Award-2023-Benchmark-for-ML-for-scientific-simulations) :confetti_ball: - -PDEBench provides a diverse and comprehensive set of benchmarks for scientific machine learning, including challenging and realistic physical problems. This repository consists of the code used to generate the datasets, to upload and download the datasets from the data repository, as well as to train and evaluate different machine learning models as baselines. PDEBench features a much wider range of PDEs than existing benchmarks and includes realistic and difficult problems (both forward and inverse), larger ready-to-use datasets comprising various initial and boundary conditions, and PDE parameters. Moreover, PDEBench was created to make the source code extensible and we invite active participation from the SciML community to improve and extend the benchmark. +The code repository for the NeurIPS 2022 paper +[PDEBench: An Extensive Benchmark for Scientific Machine Learning](https://arxiv.org/abs/2210.07182) + +:tada: +[**SimTech Best Paper Award 2023**](https://www.simtech.uni-stuttgart.de/press/SimTech-Best-Paper-Award-2023-Benchmark-for-ML-for-scientific-simulations) +:confetti_ball: + +PDEBench provides a diverse and comprehensive set of benchmarks for scientific +machine learning, including challenging and realistic physical problems. This +repository consists of the code used to generate the datasets, to upload and +download the datasets from the data repository, as well as to train and evaluate +different machine learning models as baselines. PDEBench features a much wider +range of PDEs than existing benchmarks and includes realistic and difficult +problems (both forward and inverse), larger ready-to-use datasets comprising +various initial and boundary conditions, and PDE parameters. Moreover, PDEBench +was created to make the source code extensible and we invite active +participation from the SciML community to improve and extend the benchmark. 
 ![Visualizations of some PDE problems covered by the benchmark.](https://github.com/pdebench/PDEBench/blob/main/pdebench_examples.PNG)

-Created and maintained by Makoto Takamoto ``, Timothy Praditia ``, Raphael Leiteritz, Dan MacKinlay, Francesco Alesiani, Dirk Pflüger, and Mathias Niepert.
+Created and maintained by Makoto Takamoto
+``, Timothy Praditia
+``, Raphael Leiteritz, Dan MacKinlay,
+Francesco Alesiani, Dirk Pflüger, and Mathias Niepert.

----------------
+---

 ## Datasets and Pretrained Models
@@ -23,8 +37,7 @@ https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-298

 PDEBench Pre-Trained Models:
 https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2987

-
-DOIs
+DOIs

 [![DOI:10.18419/darus-2986](https://img.shields.io/badge/DOI-doi%3A10.18419%2Fdarus--2986-red)](https://doi.org/10.18419/darus-2986)
 [![DOI:10.18419/darus-2987](https://img.shields.io/badge/DOI-doi%3A10.18419%2Fdarus--2987-red)](https://doi.org/10.18419/darus-2987)
@@ -34,22 +47,27 @@ DOIs

 ### Using pip

 Locally:
+
 ```bash
 pip install --upgrade pip wheel
 pip install .
 ```

 From PyPI:
+
 ```bash
 pip install pdebench
 ```

 To include dependencies for data generation:
+
 ```bash
 pip install "pdebench[datagen310]"
 pip install ".[datagen310]" # locally
 ```
+
 or
+
 ```bash
 pip install "pdebench[datagen39]"
 pip install ".[datagen39]" # locally
@@ -59,14 +77,19 @@ pip install ".[datagen39]" # locally

 For GPU support there are additional platform-specific instructions:

-For PyTorch, the latest version we support is v1.13.1 [see previous-versions/#linux - CUDA 11.7](https://pytorch.org/get-started/previous-versions/#linux-and-windows-2).
-
-For JAX, which is approximately 6 times faster for simulations than PyTorch in our tests, [see jax#pip-installation-gpu-cuda-installed-via-pip](https://github.com/google/jax#pip-installation-gpu-cuda-installed-via-pip-easier)
+For PyTorch, the latest version we support is v1.13.1
+[see previous-versions/#linux - CUDA 11.7](https://pytorch.org/get-started/previous-versions/#linux-and-windows-2).
+For JAX, which is approximately 6 times faster for simulations than PyTorch in
+our tests,
+[see jax#pip-installation-gpu-cuda-installed-via-pip](https://github.com/google/jax#pip-installation-gpu-cuda-installed-via-pip-easier)

 ## Installation using conda:

-If you like you can also install dependencies using anaconda, we suggest to use [mambaforge](https://github.com/conda-forge/miniforge#mambaforge) as a distribution. Otherwise you may have to __enable the conda-forge__ channel for the following commands.
+If you like, you can also install dependencies using Anaconda; we suggest using
+[mambaforge](https://github.com/conda-forge/miniforge#mambaforge) as a
+distribution. Otherwise you may have to **enable the conda-forge** channel for
+the following commands.

 Starting from a fresh environment:

@@ -76,81 +99,112 @@ conda activate myenv
 ```

 Install dependencies for model training:
+
 ```
 conda install deepxde hydra-core h5py -c conda-forge
 ```

-According to your hardware availability, either install PyTorch with CUDA support:
+According to your hardware availability, either install PyTorch with CUDA
+support:

- - [see previous-versions/#linux - CUDA 11.7](https://pytorch.org/get-started/previous-versions/#linux-and-windows-2).
+- [see previous-versions/#linux - CUDA 11.7](https://pytorch.org/get-started/previous-versions/#linux-and-windows-2).
 ```
 conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.7 -c pytorch -c nvidia
 ```

- - [or CPU only binaries](https://pytorch.org/get-started/previous-versions/#linux-and-windows-2).
+- [or CPU only binaries](https://pytorch.org/get-started/previous-versions/#linux-and-windows-2).

 ```
 conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 cpuonly -c pytorch
 ```

-
 Optional dependencies for data generation:
+
 ```
 conda install clawpack jax jaxlib python-dotenv
 ```

 ## Configuring DeepXDE

-In our tests we used PyTorch as backend for DeepXDE. Please [follow the documentation](https://deepxde.readthedocs.io/en/latest/user/installation.html#working-with-different-backends) to enable this.
+In our tests we used PyTorch as the backend for DeepXDE. Please
+[follow the documentation](https://deepxde.readthedocs.io/en/latest/user/installation.html#working-with-different-backends)
+to enable this.

 ## Data Generation
+
 The data generation codes are contained in [data_gen](./pdebench/data_gen):
+
 - `gen_diff_react.py` to generate the 2D diffusion-reaction data.
 - `gen_diff_sorp.py` to generate the 1D diffusion-sorption data.
 - `gen_radial_dam_break.py` to generate the 2D shallow-water data.
-- `gen_ns_incomp.py` to generate the 2D incompressible inhomogenous Navier-Stokes data.
+- `gen_ns_incomp.py` to generate the 2D incompressible inhomogeneous
+  Navier-Stokes data.
 - `plot.py` to plot the generated data.
 - `uploader.py` to upload the generated data to the data repository.
-- `.env` is the environment data to store Dataverse URL and API token to upload the generated data. Note that the filename should be strictly `.env` (i.e. remove the `example` from the filename)
-- `configs` directory contains the yaml files storing the configuration for the simulation. Arguments for the simulation are problem-specific and detailed explanation can be found in the simulation scripts.
-- `src` directory contains the simulation scripts for different problems: `sim_diff_react-py` for 2D diffusion-reaction, `sim_diff_sorp.py` for 1D diffusion-sorption, and `swe` for the shallow-water equation.
+- `.env` is the environment file storing the Dataverse URL and API token used
+  to upload the generated data. Note that the filename must be strictly `.env`
+  (i.e. remove the `example` from the filename).
+- `configs` directory contains the yaml files storing the configuration for the
+  simulation. Arguments for the simulation are problem-specific, and a detailed
+  explanation can be found in the simulation scripts.
+- `src` directory contains the simulation scripts for different problems:
+  `sim_diff_react.py` for 2D diffusion-reaction, `sim_diff_sorp.py` for 1D
+  diffusion-sorption, and `swe` for the shallow-water equation.

 ### Data Generation for 1D Advection/Burgers/Reaction-Diffusion/2D DarcyFlow/Compressible Navier-Stokes Equations

-The data generation codes are contained in [data_gen_NLE](./pdebench/data_gen/data_gen_NLE/):
-- `utils.py` util file for data generation, mainly boundary conditions and initial conditions.
-- `AdvectionEq` directory with the source codes to generate 1D Advection equation training samples
-- `BurgersEq` directory with the source codes to generate 1D Burgers equation training samples
-- `CompressibleFluid` directory with the source codes to generate compressible Navier-Stokes equations training samples
- - `ReactionDiffusionEq` directory with the source codes to generate 1D Reaction-Diffusion equation training samples (**Note: [DarcyFlow data can be generated by run_DarcyFlow2D.sh](pdebench/data_gen/data_gen_NLE/README.md) in this folder.**)
+
+The data generation codes are contained in
+[data_gen_NLE](./pdebench/data_gen/data_gen_NLE/):
+
+- `utils.py` util file for data generation, mainly boundary conditions and
+  initial conditions.
+- `AdvectionEq` directory with the source codes to generate 1D Advection
+  equation training samples
+- `BurgersEq` directory with the source codes to generate 1D Burgers equation
+  training samples
+- `CompressibleFluid` directory with the source codes to generate compressible
+  Navier-Stokes equations training samples
+
+  - `ReactionDiffusionEq` directory with the source codes to generate 1D
+    Reaction-Diffusion equation training samples (**Note:
+    [DarcyFlow data can be generated by run_DarcyFlow2D.sh](pdebench/data_gen/data_gen_NLE/README.md)
+    in this folder.**)
 - `save` directory saving the generated training samples

-A typical example to generate training samples (1D Advection Equation):
-(in `data_gen/data_gen_NLE/AdvectionEq/`)
+A typical example to generate training samples (1D Advection Equation): (in
+`data_gen/data_gen_NLE/AdvectionEq/`)
+
 ```bash
 python3 advection_multi_solution_Hydra.py +multi=beta1e0.yaml
 ```
+
 which is assumed to be performed in each directory.

-Examples for generating other PDEs are provided in `run_trainset.sh` in each PDE's directories.
-The config files for Hydra are stored in `config` directory in each PDE's directory.
+Examples for generating other PDEs are provided in `run_trainset.sh` in each
+PDE's directory. The config files for Hydra are stored in the `config` directory
+in each PDE's directory.

 #### Data Transformation and Merge into HDF5 format
-1D Advection/Burgers/Reaction-Diffusion/2D DarcyFlow/Compressible Navier-Stokes Equations save data as a numpy array.
-So, to read those data via our dataloaders, the data transformation/merge should be performed.
-This can be done using `data_gen_NLE/Data_Merge.py` whose config file is located at: `data_gen/data_gen_NLE/config/config.yaml`.
-After properly setting the parameters in the config file (type: name of PDEs, dim: number of spatial-dimension, bd: boundary condition),
-the corresponding HDF5 file could be obtained as:
+
+1D Advection/Burgers/Reaction-Diffusion/2D DarcyFlow/Compressible Navier-Stokes
+Equations save data as numpy arrays, so to read those data via our dataloaders,
+a transformation/merge step has to be performed first. This can be done using
+`data_gen_NLE/Data_Merge.py`, whose config file is located at
+`data_gen/data_gen_NLE/config/config.yaml`.
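+
+For illustration, a minimal sketch of that config might look as follows (the
+values here are hypothetical; the template shipped with the repository is
+authoritative):
+
+```yaml
+# hypothetical example values -- see data_gen/data_gen_NLE/config/config.yaml
+type: advection # name of the PDE
+dim: 1 # number of spatial dimensions
+bd: periodic # boundary condition
+```
+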
+After properly setting the parameters in the config file (type: name of the
+PDE, dim: number of spatial dimensions, bd: boundary condition), the
+corresponding HDF5 file can be obtained by running:

 ```bash
 python3 Data_Merge.py
 ```

-
 ## Configuration

-You can set the default values for data locations for this project by putting config vars like this in the `.env` file:
+You can set the default values for data locations for this project by putting
+config vars like this in the `.env` file:

 ```
 WORKING_DIR=~/Data/Working
@@ -159,45 +213,82 @@ ARCHIVE_DATA_DIR=~/Data/Archive

 There is an example in `example.env`.

-
 ## Data Download

-The download scripts are provided in [data_download](./pdebench/data_download). There are two options to download data.
-1) Using `download_direct.py` (**recommended**)
-   - Retrieves data shards directly using URLs. Sample command for each PDE is given in the README file in the [data_download](./pdebench/data_download) directory.
-2) Using `download_easydataverse.py` (might be slow and you could encounter errors/issues; hence, not recommended!)
-   - Use the config files from the `config` directory that contains the yaml files storing the configuration. Any files in the dataset matching `args.filename` will be downloaded into `args.data_folder`.
+The download scripts are provided in [data_download](./pdebench/data_download).
+There are two options to download data.
+
+1. Using `download_direct.py` (**recommended**)
+   - Retrieves data shards directly using URLs. A sample command for each PDE is
+     given in the README file in the [data_download](./pdebench/data_download)
+     directory.
+2. Using `download_easydataverse.py` (might be slow and you could encounter
+   errors/issues; hence, not recommended!)
+   - Uses the yaml config files from the `config` directory. Any files in the
+     dataset matching `args.filename` will be downloaded into
+     `args.data_folder`.
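+
+For example, using the commands listed in the
+[data_download](./pdebench/data_download) README, a direct download of the 1D
+advection shards looks like this (here `$proj_home` stands for your project
+root):
+
+```bash
+python download_direct.py --root_folder $proj_home/data --pde_name advection
+```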

 ## Baseline Models

-In this work, we provide three different ML models to be trained and evaluated against the benchmark datasets, namely [FNO](https://arxiv.org/pdf/2010.08895.pdf), [U-Net](https://www.sciencedirect.com/science/article/abs/pii/S0010482519301520?via%3Dihub), and [PINN](https://www.sciencedirect.com/science/article/pii/S0021999118307125).
-The codes for the baseline model implementations are contained in [models](./pdebench/models):
-
-- `train_models_forward.py` is the main script to train and evaluate the model. It will call on model-specific script based on the input argument.
-- `train_models_inverse.py` is the main script to train and evaluate the model for inverse problems. It will call on model-specific script based on the input argument.
-- `metrics.py` is the script to evaluate the trained models based on various evaluation metrics described in our paper. Additionally, it also plots the prediction and target data.
-- `analyse_result_forward.py` is the script to convert the saved pickle file from the metrics calculation script into pandas dataframe format and save it as a CSV file. Additionally it also plots a bar chart to compare the results between different models.
-- `analyse_result_inverse.py` is the script to convert the saved pickle file from the metrics calculation script into pandas dataframe format and save it as a CSV file. This script is used for the inverse problems. Additionally it also plots a bar chart to compare the results between different models.
-- `fno` contains the scripts of FNO implementation. These are partly adapted from the [FNO repository](https://github.com/zongyi-li/fourier_neural_operator).
-- `unet` contains the scripts of U-Net implementation. These are partly adapted from the [U-Net repository](https://github.com/mateuszbuda/brain-segmentation-pytorch).
-- `pinn` contains the scripts of PINN implementation. These utilize the [DeepXDE library](https://github.com/lululxvi/deepxde).
-- `inverse` contains the model for inverse model based on gradient.
-- `config` contains the yaml files for the model training input. The default templates for different equations are provided in the [args](./pdebench/models/config/args) directory. User just needs to copy and paste them to the args keyword in the [config.yaml](./pdebench/models/config/config.yaml) file.
-An example to run the forward model training can be found in [run_forward_1D.sh](./pdebench/models/run_forward_1D.sh), and an example to run the inverse model training can be found in [run_inverse.sh](./pdebench/models/run_inverse.sh).
+In this work, we provide three different ML models to be trained and evaluated
+against the benchmark datasets, namely
+[FNO](https://arxiv.org/pdf/2010.08895.pdf),
+[U-Net](https://www.sciencedirect.com/science/article/abs/pii/S0010482519301520?via%3Dihub),
+and [PINN](https://www.sciencedirect.com/science/article/pii/S0021999118307125).
+The codes for the baseline model implementations are contained in
+[models](./pdebench/models):
+
+- `train_models_forward.py` is the main script to train and evaluate the model.
+  It calls the model-specific script based on the input argument.
+- `train_models_inverse.py` is the main script to train and evaluate the model
+  for inverse problems. It calls the model-specific script based on the input
+  argument.
+- `metrics.py` is the script to evaluate the trained models based on various
+  evaluation metrics described in our paper. Additionally, it plots the
+  prediction and target data.
+- `analyse_result_forward.py` is the script to convert the saved pickle file
+  from the metrics calculation script into pandas dataframe format and save it
+  as a CSV file. Additionally, it plots a bar chart to compare the results
+  between different models.
+- `analyse_result_inverse.py` is the script to convert the saved pickle file
+  from the metrics calculation script into pandas dataframe format and save it
+  as a CSV file. This script is used for the inverse problems. Additionally, it
+  plots a bar chart to compare the results between different models.
+- `fno` contains the scripts of the FNO implementation. These are partly adapted
+  from the
+  [FNO repository](https://github.com/zongyi-li/fourier_neural_operator).
+- `unet` contains the scripts of the U-Net implementation. These are partly
+  adapted from the
+  [U-Net repository](https://github.com/mateuszbuda/brain-segmentation-pytorch).
+- `pinn` contains the scripts of the PINN implementation. These utilize the
+  [DeepXDE library](https://github.com/lululxvi/deepxde).
+- `inverse` contains the gradient-based inverse model.
+- `config` contains the yaml files for the model training input. The default
+  templates for different equations are provided in the
+  [args](./pdebench/models/config/args) directory. Users just need to copy and
+  paste them into the `args` keyword in the
+  [config.yaml](./pdebench/models/config/config.yaml) file.
+
+An example to run the forward model training can be found in +[run_forward_1D.sh](./pdebench/models/run_forward_1D.sh), and an example to run +the inverse model training can be found in +[run_inverse.sh](./pdebench/models/run_inverse.sh). ### Short explanations on the config args -- model_name: string, containing the baseline model name, either 'FNO', 'Unet', or 'PINN'. + +- model_name: string, containing the baseline model name, either 'FNO', 'Unet', + or 'PINN'. - if_training: bool, set True for training, or False for evaluation. -- continue_training: bool, set True to continute training from a checkpoint. +- continue_training: bool, set True to continue training from a checkpoint. - num_workers: int, number of workers for the PyTorch dataloader. - batch_size: int, training batch size. - initial_step: int, number of time steps used as input for FNO and U-Net. -- t_train: int, number of the last time step used for training (for extrapolation testing, set this to be < Nt). +- t_train: int, number of the last time step used for training (for + extrapolation testing, set this to be < Nt). - model_update: int, number of epochs to save model. - filename: str, has to match the dataset filename. -- single_file: bool, set False for 2D diffusion-reaction, 1D diffusion-sorption, and the radial dam break scenarios, and set True otherwise. +- single_file: bool, set False for 2D diffusion-reaction, 1D diffusion-sorption, + and the radial dam break scenarios, and set True otherwise. - reduced_resolution: int, factor to downsample spatial resolution. - reduced_resolution_t: int, factor to downsample temporal resolution. - reduced_batch: int, factor to downsample sample size used for training. @@ -207,31 +298,38 @@ An example to run the forward model training can be found in [run_forward_1D.sh] - scheduler_gamma: float, decay rate of the learning rate. #### U-Net specific args: + - in_channels: int, number of input channels - out_channels: int, number of output channels - ar_mode: bool, set True for fully autoregressive or pushforward training. -- pushforward: bool, set True for pushforward training, False otherwise (ar_mode also has to be set True). -- unroll_step: int, number of time steps to backpropagate in the pushforward training. +- pushforward: bool, set True for pushforward training, False otherwise (ar_mode + also has to be set True). +- unroll_step: int, number of time steps to backpropagate in the pushforward + training. #### FNO specific args: + - num_channels: int, number of channels (variables). - modes: int, number of Fourier modes to multiply. - width: int, number of channels for the Fourier layer. 
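+
+As a purely illustrative sketch (the values below are hypothetical; the
+authoritative templates live in the [args](./pdebench/models/config/args)
+directory), an `args` block combining the general and FNO-specific options
+above might look like:
+
+```yaml
+args:
+  model_name: "FNO"
+  if_training: true
+  continue_training: false
+  num_workers: 2
+  batch_size: 50
+  initial_step: 10
+  t_train: 201
+  model_update: 10
+  filename: "1D_Advection_Sols_beta0.4.hdf5"
+  single_file: true
+  reduced_resolution: 4
+  reduced_resolution_t: 5
+  reduced_batch: 1
+  # FNO-specific
+  num_channels: 1
+  modes: 12
+  width: 20
+```
+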
 #### INVERSE specific args:
- - base_path: string, location of the data directory
- - training_type: string, type of training, autoregressive, single
- - mcmc_num_samples: int, number of generated samples
- - mcmc_warmup_steps: 10
- - mcmc_num_chains: 1
- - num_samples_max: 1000
- - in_channels_hid: 64
- - inverse_model_type: string, type of inverse inference model, ProbRasterLatent, InitialConditionInterp
- - inverse_epochs: int, number of epochs for the gradient based method
- - inverse_learning_rate: float, learning rate for the gradient based method
- - inverse_verbose_flag: bool, some printing
+
+- base_path: string, location of the data directory
+- training_type: string, type of training, either `autoregressive` or `single`
+- mcmc_num_samples: int, number of generated samples
+- mcmc_warmup_steps: int, number of MCMC warmup steps (default: 10)
+- mcmc_num_chains: int, number of MCMC chains (default: 1)
+- num_samples_max: int, maximum number of samples (default: 1000)
+- in_channels_hid: int, number of hidden channels (default: 64)
+- inverse_model_type: string, type of inverse inference model, either
+  ProbRasterLatent or InitialConditionInterp
+- inverse_epochs: int, number of epochs for the gradient-based method
+- inverse_learning_rate: float, learning rate for the gradient-based method
+- inverse_verbose_flag: bool, enables verbose printing

 #### Plotting specific args:
+
 - plot: bool, set True to activate plotting.
 - channel_plot: int, determines which channel/variable to plot.
 - x_min: float, left spatial domain.
@@ -242,11 +340,17 @@ An example to run the forward model training can be found in [run_forward_1D.sh]
 - t_max: float, end of temporal domain.

 ## Datasets and pretrained models
-We provide the benchmark datasets we used in the paper through our [DaRUS data repository](https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2986).
-The data generation configuration can be found in the paper.
-Additionally, the pretrained models are also available to be downloaded from [PDEBench Pretrained Models](https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2987) DaRus repository. To use the pretrained models, users can specify the argument `continue_training: True` in the [config file](./pdebench/models/config/config.yaml).
--------
+
+We provide the benchmark datasets we used in the paper through our
+[DaRUS data repository](https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2986).
+The data generation configuration can be found in the paper. Additionally, the
+pretrained models are also available for download from the
+[PDEBench Pretrained Models](https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2987)
+DaRUS repository. To use the pretrained models, users can specify the argument
+`continue_training: True` in the
+[config file](./pdebench/models/config/config.yaml).
+
+---

 ## Directory Tour

@@ -255,8 +359,8 @@ Below is an illustration of the directory structure of PDEBench.

 ```
 📂 pdebench
 |_📁 models
-    |_📁 pinn    # Model: Physics-Informed Neural Network
-    |_📄 train.py
+    |_📁 pinn # Model: Physics-Informed Neural Network
+    |_📄 train.py
     |_📄 utils.py
     |_📄 pde_definitions.py
     |_📁 fno # Model: Fourier Neural Operator
@@ -301,12 +405,12 @@ Below is an illustration of the directory structure of PDEBench.
 |_📄 __init__.py
 ```

-
------
+---

 ## Publications & Citations

-Please cite the following papers if you use PDEBench datasets and/or source code in your research.
+Please cite the following papers if you use PDEBench datasets and/or source code
+in your research.
@@ -323,8 +427,8 @@ booktitle = {36th Conference on Neural Information Processing Systems (NeurIPS 2 url = {https://arxiv.org/abs/2210.07182} } ``` -
+
@@ -342,6 +446,7 @@ doi = {10.18419/darus-2986}, url = {https://doi.org/10.18419/darus-2986} } ``` +
@@ -350,21 +455,22 @@ url = {https://doi.org/10.18419/darus-2986}
- ``` - @article{cape-takamoto:2023, - author = {Makoto Takamoto and - Francesco Alesiani and - Mathias Niepert}, - title = {Learning Neural {PDE} Solvers with Parameter-Guided Channel Attention}, - journal = {CoRR}, - volume = {abs/2304.14118}, - year = {2023}, - url = {https://doi.org/10.48550/arXiv.2304.14118}, - doi = {10.48550/arXiv.2304.14118}, - eprinttype = {arXiv}, - eprint = {2304.14118}, - } - ``` +``` +@article{cape-takamoto:2023, + author = {Makoto Takamoto and + Francesco Alesiani and + Mathias Niepert}, + title = {Learning Neural {PDE} Solvers with Parameter-Guided Channel Attention}, + journal = {CoRR}, + volume = {abs/2304.14118}, + year = {2023}, + url = {https://doi.org/10.48550/arXiv.2304.14118}, + doi = {10.48550/arXiv.2304.14118}, + eprinttype = {arXiv}, + eprint = {2304.14118}, + } +``` +
@@ -373,14 +479,15 @@ url = {https://doi.org/10.18419/darus-2986}
- ``` +``` @inproceedings{vcnef-vectorized-conditional-neural-fields-hagnberger:2024, author = {Hagnberger, Jan and Kalimuthu, Marimuthu and Musekamp, Daniel and Niepert, Mathias}, title = {{Vectorized Conditional Neural Fields: A Framework for Solving Time-dependent Parametric Partial Differential Equations}}, year = {2024}, booktitle = {Proceedings of the 41st International Conference on Machine Learning (ICML 2024)} } - ``` +``` +
@@ -389,44 +496,53 @@ booktitle = {Proceedings of the 41st International Conference on Machine Learnin
- ``` +``` @article{active-learn-neuralpde-benchmark-musekamp:2024, - author = {Daniel Musekamp and - Marimuthu Kalimuthu and - David Holzm{\"{u}}ller and - Makoto Takamoto and - Mathias Niepert}, - title = {Active Learning for Neural {PDE} Solvers}, - journal = {CoRR}, - volume = {abs/2408.01536}, - year = {2024}, - url = {https://doi.org/10.48550/arXiv.2408.01536}, - doi = {10.48550/ARXIV.2408.01536}, - eprinttype = {arXiv}, - eprint = {2408.01536}, + author = {Daniel Musekamp and + Marimuthu Kalimuthu and + David Holzm{\"{u}}ller and + Makoto Takamoto and + Mathias Niepert}, + title = {Active Learning for Neural {PDE} Solvers}, + journal = {CoRR}, + volume = {abs/2408.01536}, + year = {2024}, + url = {https://doi.org/10.48550/arXiv.2408.01536}, + doi = {10.48550/ARXIV.2408.01536}, + eprinttype = {arXiv}, + eprint = {2408.01536}, } - ``` +``` +
------
+---

 ## Code contributors

-
-* [Makato Takamoto](https://github.com/mtakamoto-D) ([NEC laboratories Europe](https://www.neclab.eu/))
-* [Timothy Praditia](https://github.com/timothypraditia) ([Stuttgart Center for Simulation Science | University of Stuttgart](https://www.simtech.uni-stuttgart.de/))
-* [Raphael Leiteritz](https://github.com/leiterrl) ([Stuttgart Center for Simulation Science | University of Stuttgart](https://www.simtech.uni-stuttgart.de/))
-* [Francesco Alesiani](https://github.com/falesiani) ([NEC laboratories Europe](https://www.neclab.eu/))
-* [Dan MacKinlay](https://danmackinlay.name/) ([CSIRO’s Data61](https://data61.csiro.au/))
-* [Marimuthu Kalimuthu](https://github.com/kmario23) ([Stuttgart Center for Simulation Science | University of Stuttgart](https://www.simtech.uni-stuttgart.de/))
-* [John Kim](https://github.com/johnmjkim) ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
-* [Gefei Shan](https://github.com/davecatmeow) ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
-* [Yizhou Yang](https://github.com/verdantwynnd) ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
-* [Ran Zhang](https://github.com/maphyca) ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
-* [Simon Brown](https://github.com/SimonSyBrown) ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
-
+- [Makoto Takamoto](https://github.com/mtakamoto-D)
+  ([NEC laboratories Europe](https://www.neclab.eu/))
+- [Timothy Praditia](https://github.com/timothypraditia)
+  ([Stuttgart Center for Simulation Science | University of Stuttgart](https://www.simtech.uni-stuttgart.de/))
+- [Raphael Leiteritz](https://github.com/leiterrl)
+  ([Stuttgart Center for Simulation Science | University of Stuttgart](https://www.simtech.uni-stuttgart.de/))
+- [Francesco Alesiani](https://github.com/falesiani)
+  ([NEC laboratories Europe](https://www.neclab.eu/))
+- [Dan MacKinlay](https://danmackinlay.name/)
+  ([CSIRO’s Data61](https://data61.csiro.au/))
+- [Marimuthu Kalimuthu](https://github.com/kmario23)
+  ([Stuttgart Center for Simulation Science | University of Stuttgart](https://www.simtech.uni-stuttgart.de/))
+- [John Kim](https://github.com/johnmjkim)
+  ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
+- [Gefei Shan](https://github.com/davecatmeow)
+  ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
+- [Yizhou Yang](https://github.com/verdantwynnd)
+  ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
+- [Ran Zhang](https://github.com/maphyca)
+  ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))
+- [Simon Brown](https://github.com/SimonSyBrown)
+  ([ANU TechLauncher](https://comp.anu.edu.au/TechLauncher/)/[CSIRO’s Data61](https://data61.csiro.au/))

 ## License

-MIT licensed, except where otherwise stated.
-See `LICENSE.txt` file.
+MIT licensed, except where otherwise stated. See `LICENSE.txt` file.
diff --git a/pdebench/__init__.py b/pdebench/__init__.py
index 010919c..4b4fd31 100644
--- a/pdebench/__init__.py
+++ b/pdebench/__init__.py
@@ -19,7 +19,8 @@
 """
+from __future__ import annotations

 __version__ = "0.0.1"
-__author__ = 'Makoto Takamoto, Timothy Praditia, Raphael Leiteritz, Dan MacKinlay, Francesco Alesiani, Dirk Pflüger, Mathias Niepert'
-__credits__ = 'NEC labs Europe, University of Stuttgart, CSIRO''s Data61'
+__author__ = "Makoto Takamoto, Timothy Praditia, Raphael Leiteritz, Dan MacKinlay, Francesco Alesiani, Dirk Pflüger, Mathias Niepert"
+__credits__ = "NEC labs Europe, University of Stuttgart, CSIRO's Data61"
diff --git a/pdebench/_version.pyi b/pdebench/_version.pyi
new file mode 100644
index 0000000..91744f9
--- /dev/null
+++ b/pdebench/_version.pyi
@@ -0,0 +1,4 @@
+from __future__ import annotations
+
+version: str
+version_tuple: tuple[int, int, int] | tuple[int, int, int, str, str]
diff --git a/pdebench/data_download/README.md b/pdebench/data_download/README.md
index d86d5f6..8e619db 100644
--- a/pdebench/data_download/README.md
+++ b/pdebench/data_download/README.md
@@ -1,27 +1,30 @@
-
 # Downloading PDEBench Datasets :earth_asia:

-Here we enumerate the list of all available PDEs in PDEBench and the commands to download them.
-
-| PDEs | Dataset Download | Dataset Size |
-| ----------- | :----------------------------------------------------------- | ------------ |
-| advection | ```python download_direct.py --root_folder $proj_home/data --pde_name advection``` | 47 GB |
-| burgers | ```python download_direct.py --root_folder $proj_home/data --pde_name burgers``` | 93 GB |
-| 1d_cfd | ```python download_direct.py --root_folder $proj_home/data --pde_name 1d_cfd``` | 88 GB |
-| diff_sorp | ```python download_direct.py --root_folder $proj_home/data --pde_name diff_sorp``` | 4 GB |
-| 1d_reacdiff | ```python download_direct.py --root_folder $proj_home/data --pde_name 1d_reacdiff``` | 62 GB |
-| 2d_reacdiff | ```python download_direct.py --root_folder $proj_home/data --pde_name 2d_reacdiff``` | 13 GB |
-| 2d_cfd | ```python download_direct.py --root_folder $proj_home/data --pde_name 2d_cfd``` | 551 GB |
-| 3d_cfd | ```python download_direct.py --root_folder $proj_home/data --pde_name 3d_cfd``` | 285 GB |
-| darcy | ```python download_direct.py --root_folder $proj_home/data --pde_name darcy``` | 6.2 GB |
-| ns_incom | ```python download_direct.py --root_folder $proj_home/data --pde_name ns_incom``` | 2.3 TB |
-| swe | ```python download_direct.py --root_folder $proj_home/data --pde_name swe``` | 6.2 GB |
-
--------
+Here we enumerate all available PDEs in PDEBench and the commands to download
+them.
+
+| PDEs        | Dataset Download                                                                  | Dataset Size |
+| ----------- | :-------------------------------------------------------------------------------- | ------------ |
+| advection   | `python download_direct.py --root_folder $proj_home/data --pde_name advection`    | 47 GB        |
+| burgers     | `python download_direct.py --root_folder $proj_home/data --pde_name burgers`      | 93 GB        |
+| 1d_cfd      | `python download_direct.py --root_folder $proj_home/data --pde_name 1d_cfd`       | 88 GB        |
+| diff_sorp   | `python download_direct.py --root_folder $proj_home/data --pde_name diff_sorp`    | 4 GB         |
+| 1d_reacdiff | `python download_direct.py --root_folder $proj_home/data --pde_name 1d_reacdiff`  | 62 GB        |
+| 2d_reacdiff | `python download_direct.py --root_folder $proj_home/data --pde_name 2d_reacdiff`  | 13 GB        |
+| 2d_cfd      | `python download_direct.py --root_folder $proj_home/data --pde_name 2d_cfd`       | 551 GB       |
+| 3d_cfd      | `python download_direct.py --root_folder $proj_home/data --pde_name 3d_cfd`       | 285 GB       |
+| darcy       | `python download_direct.py --root_folder $proj_home/data --pde_name darcy`        | 6.2 GB       |
+| ns_incom    | `python download_direct.py --root_folder $proj_home/data --pde_name ns_incom`     | 2.3 TB       |
+| swe         | `python download_direct.py --root_folder $proj_home/data --pde_name swe`          | 6.2 GB       |
+
+---

 # Visualizing PDEs :ocean:

-Below are some illustrations for how to visualize a certain PDE. It is assumed that you first download the data shard you'd like to visualize for a desired PDE. Then you can use the `visualize_pde.py` script to generate an animation (i.e., `.gif`).
+Below are some illustrations of how to visualize a certain PDE. It is assumed
+that you have first downloaded the data shard you'd like to visualize for the
+desired PDE. Then you can use the `visualize_pdes.py` script to generate an
+animation (i.e., `.gif`).

 ###### 1D Diffusion Sorption Eqn

@@ -33,7 +36,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/133020
 python visualize_pdes.py --pde_name "diff_sorp" --data_path "./"
 ```

----------
+---

 ###### 1D Diffusion Reaction Eqn

@@ -45,7 +48,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/133181
 python visualize_pdes.py --pde_name "1d_reacdiff"
 ```

----------
+---

 ###### 1D Advection Eqn

@@ -57,7 +60,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/133110
 python visualize_pdes.py --pde_name "advection"
 ```

-----------
+---

 ###### 1D Burgers Eqn

@@ -69,7 +72,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/133136
 python visualize_pdes.py --pde_name "burgers"
 ```

--------------------
+---

 ###### 1D CFD Eqn

@@ -81,7 +84,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/135485
 python visualize_pdes.py --pde_name "1d_cfd"
 ```

-------------
+---

 ###### 2D Diffusion Reaction Eqn

@@ -93,7 +96,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/133017
 python visualize_pdes.py --pde_name "2d_reacdiff"
 ```

-------------
+---

 ###### 2D Darcy Flow Eqn

@@ -105,7 +108,7 @@ https://darus.uni-stuttgart.de/api/access/datafile/133219
 python visualize_pdes.py --pde_name "darcy"
 ```

------------------
+---

 ###### 2D Shallow Water Eqn

@@ -116,4 +119,3 @@ https://darus.uni-stuttgart.de/api/access/datafile/133021
 # visualize
 python visualize_pdes.py --pde_name "swe" --data_path "./"
 ```
-
diff --git a/pdebench/data_download/config/config.yaml b/pdebench/data_download/config/config.yaml
index 7cd39eb..13d7b8b 100644
--- a/pdebench/data_download/config/config.yaml
+++ b/pdebench/data_download/config/config.yaml
@@ -7,9 +7,9 @@ hydra:
   output_subdir: null
   run:
     dir: .
- + args: - filename: 'Advection_beta' - dataverse_url: 'https://darus.uni-stuttgart.de' - dataset_id: 'doi:10.18419/darus-2986' - data_folder: 'data' + filename: "Advection_beta" + dataverse_url: "https://darus.uni-stuttgart.de" + dataset_id: "doi:10.18419/darus-2986" + data_folder: "data" diff --git a/pdebench/data_download/download_direct.py b/pdebench/data_download/download_direct.py index 35ee0e0..0dc4b39 100644 --- a/pdebench/data_download/download_direct.py +++ b/pdebench/data_download/download_direct.py @@ -1,9 +1,11 @@ -import os +from __future__ import annotations + import argparse +import os -from tqdm import tqdm import pandas as pd from torchvision.datasets.utils import download_url +from tqdm import tqdm def parse_metadata(pde_names): diff --git a/pdebench/data_download/download_easydataverse.py b/pdebench/data_download/download_easydataverse.py index 5e3976f..cc1042c 100644 --- a/pdebench/data_download/download_easydataverse.py +++ b/pdebench/data_download/download_easydataverse.py @@ -1,15 +1,17 @@ +from __future__ import annotations + +import logging import os import hydra +from easyDataverse import Dataset from hydra.utils import get_original_cwd from omegaconf import DictConfig -import logging - from pyDataverse.api import NativeApi -from easyDataverse import Dataset log = logging.getLogger(__name__) + @hydra.main(config_path="config/", config_name="config") def main(config: DictConfig): """ @@ -26,7 +28,7 @@ def main(config: DictConfig): # Extract dataset from the given DOI dataset = Dataset() - setattr(dataset, "p_id", config.args.dataset_id) + dataset.p_id = config.args.dataset_id # Extract file list contained in the dataset api = NativeApi(config.args.dataverse_url) @@ -40,7 +42,7 @@ def main(config: DictConfig): files.append(file["dataFile"]["filename"]) # Download the files - + dataset = Dataset.from_dataverse_doi( doi=config.args.dataset_id, dataverse_url=config.args.dataverse_url, @@ -49,6 +51,5 @@ def main(config: DictConfig): ) - if __name__ == "__main__": main() diff --git a/pdebench/data_download/pdebench_data_urls.csv b/pdebench/data_download/pdebench_data_urls.csv index ba845ea..3332b67 100644 --- a/pdebench/data_download/pdebench_data_urls.csv +++ b/pdebench/data_download/pdebench_data_urls.csv @@ -373,4 +373,4 @@ SWE,2D_rdb_NA_NA.h5,https://darus.uni-stuttgart.de/api/access/datafile/133021,2D 3D_CFD,Turb_M05.hdf5,https://darus.uni-stuttgart.de/api/access/datafile/133225,3D/Test/Turbulence/,f9407dff1a75a1d14d93e7dd570af728 3D_CFD,Turb_M1.hdf5,https://darus.uni-stuttgart.de/api/access/datafile/135833,3D/Test/Turbulence/,3758f23f71684ac666e0b1e91da0a1c4 3D_CFD,Turb_M2.hdf5,https://darus.uni-stuttgart.de/api/access/datafile/133227,3D/Test/Turbulence/,12e528dc8ab800f69474600ec58b24d3 -3D_CFD,Turb_M4.hdf5,https://darus.uni-stuttgart.de/api/access/datafile/133228,3D/Test/Turbulence/,8db384feba75903a8c5b21ebeba40083 \ No newline at end of file +3D_CFD,Turb_M4.hdf5,https://darus.uni-stuttgart.de/api/access/datafile/133228,3D/Test/Turbulence/,8db384feba75903a8c5b21ebeba40083 diff --git a/pdebench/data_download/visualize_pdes.py b/pdebench/data_download/visualize_pdes.py index 1b7f397..a117bcf 100644 --- a/pdebench/data_download/visualize_pdes.py +++ b/pdebench/data_download/visualize_pdes.py @@ -1,12 +1,13 @@ -import os +from __future__ import annotations + import argparse +import os -from tqdm import tqdm import h5py -import numpy as np import matplotlib.pyplot as plt -import matplotlib.animation as animation - +import numpy as np +from matplotlib import animation 
+from tqdm import tqdm pdes = ( "advection", @@ -35,10 +36,10 @@ def visualize_diff_sorp(path, seed=None): # Read the h5 file and store the data h5_file = h5py.File(os.path.join(path, "1D_diff-sorp_NA_NA.h5"), "r") num_samples = len(h5_file.keys()) - - # randomly choose a seed for picking a sample that will subsequently be visualized + + # randomly choose a seed for picking a sample that will subsequently be visualized if not seed: - seed = np.random.randint(0, num_samples) + seed = np.random.randint(0, num_samples) # Ensure the seed number is defined assert seed < num_samples, "Seed number too high!" @@ -55,9 +56,11 @@ def visualize_diff_sorp(path, seed=None): ims = [] for i in tqdm(range(data.shape[0])): if i == 0: - im = ax.plot(data[0].squeeze(), animated=True, color="blue") # show an initial one first + im = ax.plot( + data[0].squeeze(), animated=True, color="blue" + ) # show an initial one first else: - im = ax.plot(data[i].squeeze(), animated=True, color="blue") + im = ax.plot(data[i].squeeze(), animated=True, color="blue") ax.plot ims.append([im[0]]) @@ -82,15 +85,16 @@ def visualize_2d_reacdiff(path, seed=None): h5_file = h5py.File(os.path.join(path, "2D_diff-react_NA_NA.h5"), "r") num_samples = len(h5_file.keys()) - # randomly choose a seed for picking a sample that will subsequently be visualized + # randomly choose a seed for picking a sample that will subsequently be visualized if not seed: - seed = np.random.randint(0, num_samples) + seed = np.random.randint(0, num_samples) # Ensure the seed number is defined assert seed < num_samples, "Seed number too high!" seed = str(seed).zfill(4) - data = np.array(h5_file[f"{seed}/data"], dtype="f") # dim = [101, 128, 128, 2] + # dim = [101, 128, 128, 2] + data = np.array(h5_file[f"{seed}/data"], dtype="f") h5_file.close() @@ -103,8 +107,10 @@ def visualize_2d_reacdiff(path, seed=None): im1 = ax[0].imshow(data[i, ..., 0].squeeze(), animated=True) im2 = ax[1].imshow(data[i, ..., 1].squeeze(), animated=True) if i == 0: - ax[0].imshow(data[0, ..., 0].squeeze()) # show an initial one first - ax[1].imshow(data[0, ..., 1].squeeze()) # show an initial one first + # show an initial one first + ax[0].imshow(data[0, ..., 0].squeeze()) + # show an initial one first + ax[1].imshow(data[0, ..., 1].squeeze()) ims.append([im1, im2]) # Animate the plot @@ -127,16 +133,17 @@ def visualize_swe(path, seed=None): # Read the h5 file and store the data h5_file = h5py.File(os.path.join(path, "2D_rdb_NA_NA.h5"), "r") num_samples = len(h5_file.keys()) - - # randomly choose a seed for picking a sample that will subsequently be visualized + + # randomly choose a seed for picking a sample that will subsequently be visualized if not seed: - seed = np.random.randint(0, num_samples) + seed = np.random.randint(0, num_samples) # Ensure the seed number is defined assert seed < num_samples, "Seed number too high!" seed = str(seed).zfill(4) - data = np.array(h5_file[f"{seed}/data"], dtype="f") # dim = [101, 128, 128, 1] + # dim = [101, 128, 128, 1] + data = np.array(h5_file[f"{seed}/data"], dtype="f") h5_file.close() @@ -171,14 +178,16 @@ def visualize_burgers(path, param=None): # Read the h5 file and store the data if param is not None: flnm = "1D_Burgers_Sols_Nu" + str(param) + ".hdf5" - assert os.path.isfile(path + flnm), 'no such file! '+path + flnm + assert os.path.isfile(path + flnm), "no such file! 
" + path + flnm else: flnm = "1D_Burgers_Sols_Nu0.01.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: xcrd = np.array(h5_file["x-coordinate"], dtype=np.float32) - data = np.array(h5_file["tensor"], dtype=np.float32)[nb] # (batch, t, x, channel) --> (t, x, channel) + data = np.array(h5_file["tensor"], dtype=np.float32)[ + nb + ] # (batch, t, x, channel) --> (t, x, channel) # Initialize plot fig, ax = plt.subplots() @@ -190,7 +199,9 @@ def visualize_burgers(path, param=None): if i == 0: im = ax.plot(xcrd, data[i].squeeze(), animated=True, color="blue") else: - im = ax.plot(xcrd, data[i].squeeze(), animated=True, color="blue") # show an initial one first + im = ax.plot( + xcrd, data[i].squeeze(), animated=True, color="blue" + ) # show an initial one first ax.plot ims.append([im[0]]) @@ -214,14 +225,16 @@ def visualize_advection(path, param=None): # Read the h5 file and store the data if param is not None: flnm = "1D_Advection_Sols_beta" + str(param) + ".hdf5" - assert os.path.isfile(path + flnm), 'no such file! '+ path + flnm + assert os.path.isfile(path + flnm), "no such file! " + path + flnm else: flnm = "1D_Advection_Sols_beta0.4.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: xcrd = np.array(h5_file["x-coordinate"], dtype=np.float32) - data = np.array(h5_file["tensor"], dtype=np.float32)[nb] # (batch, t, x, channel) --> (t, x, channel) + data = np.array(h5_file["tensor"], dtype=np.float32)[ + nb + ] # (batch, t, x, channel) --> (t, x, channel) # Initialize plot fig, ax = plt.subplots() @@ -254,23 +267,35 @@ def visualize_1d_cfd(path, param=None): # Read the h5 file and store the data if param is not None: - assert len(param) == 4, 'param should include type,eta,zeta,boundary as list' - flnm = "1D_CFD_" + str(param[0]) + "_Eta" + str(param[1]) + '_Zeta' + str(param[2]) +"_" + str(param[3]) + "_Train.hdf5" - assert os.path.isfile(path + flnm), 'no such file! '+ path + flnm + assert len(param) == 4, "param should include type,eta,zeta,boundary as list" + flnm = ( + "1D_CFD_" + + str(param[0]) + + "_Eta" + + str(param[1]) + + "_Zeta" + + str(param[2]) + + "_" + + str(param[3]) + + "_Train.hdf5" + ) + assert os.path.isfile(path + flnm), "no such file! " + path + flnm else: flnm = "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: xcrd = np.array(h5_file["x-coordinate"], dtype=np.float32) - dd = np.array(h5_file["density"], dtype=np.float32)[nb] # (batch, t, x, channel) --> (t, x, channel) + dd = np.array(h5_file["density"], dtype=np.float32)[ + nb + ] # (batch, t, x, channel) --> (t, x, channel) # Initialize plot fig, ax = plt.subplots() # Store the plot handle at each time step in the 'ims' list ims = [] - ax.set_title('density') + ax.set_title("density") for i in tqdm(range(dd.shape[0])): im = ax.plot(xcrd, dd[i].squeeze(), animated=True) if i == 0: @@ -297,22 +322,40 @@ def visualize_2d_cfd(path, param=None): # Read the h5 file and store the data if param is not None: - assert len(param) == 6, 'param should include type,M,eta,zeta,boundary, resolution as list' - flnm = "2D_CFD_" + str(param[0]) + "_M" + str(param[1]) + "_Eta" + str(param[2]) + '_Zeta' + str(param[3]) + "_" + str(param[4]) + "_" + str(param[5]) + "_Train.hdf5" - assert os.path.isfile(path + flnm), 'no such file! 
'+ path + flnm + assert ( + len(param) == 6 + ), "param should include type,M,eta,zeta,boundary, resolution as list" + flnm = ( + "2D_CFD_" + + str(param[0]) + + "_M" + + str(param[1]) + + "_Eta" + + str(param[2]) + + "_Zeta" + + str(param[3]) + + "_" + + str(param[4]) + + "_" + + str(param[5]) + + "_Train.hdf5" + ) + assert os.path.isfile(path + flnm), "no such file! " + path + flnm else: flnm = "2D_CFD_Rand_M0.1_Eta1e-8_Zeta1e-8_periodic_512_Train.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: - dd = np.array(h5_file["density"], dtype=np.float32)[nb] # (batch, t, x, y, channel) --> (t, x, y, channel) + dd = np.array(h5_file["density"], dtype=np.float32)[ + nb + ] # (batch, t, x, y, channel) --> (t, x, y, channel) # Initialize plot fig, ax = plt.subplots() # Store the plot handle at each time step in the 'ims' list ims = [] - ax.set_title('density') + ax.set_title("density") for i in range(dd.shape[0]): im = ax.imshow(dd[i].squeeze(), animated=True) ims.append([im]) @@ -328,22 +371,36 @@ def visualize_2d_cfd(path, param=None): def visualize_3d_cfd(path, param=None): # Read the h5 file and store the data if param is not None: - assert len(param) == 5, 'param should include type,M,eta,zeta,boundary as list' - flnm = "3D_CFD_" + str(param[0]) + "_M" + str(param[1]) + "_Eta" + str(param[2]) + '_Zeta' + str(param[3]) + "_" + str(param[4]) + "_Train.hdf5" - assert os.path.isfile(path + flnm), 'no such file! '+ path + flnm + assert len(param) == 5, "param should include type,M,eta,zeta,boundary as list" + flnm = ( + "3D_CFD_" + + str(param[0]) + + "_M" + + str(param[1]) + + "_Eta" + + str(param[2]) + + "_Zeta" + + str(param[3]) + + "_" + + str(param[4]) + + "_Train.hdf5" + ) + assert os.path.isfile(path + flnm), "no such file! " + path + flnm else: flnm = "3D_CFD_Rand_M1.0_Eta1e-8_Zeta1e-8_periodic_Train.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: - dd = np.array(h5_file["density"], dtype=np.float32)[nb] # (batch, t, x, y, channel) --> (t, x, y, channel) + dd = np.array(h5_file["density"], dtype=np.float32)[ + nb + ] # (batch, t, x, y, channel) --> (t, x, y, channel) # Initialize plot fig, ax = plt.subplots() # Store the plot handle at each time step in the 'ims' list ims = [] - ax.set_title('density') + ax.set_title("density") for i in range(dd.shape[0]): im = ax.imshow(dd[i, :, :, 32].squeeze(), animated=True) ims.append([im]) @@ -356,7 +413,7 @@ def visualize_3d_cfd(path, param=None): print("saved") -def visualize_ns_incom(): +def visualize_ns_incom() -> None: pass @@ -372,23 +429,27 @@ def visualize_darcy(path, param=None): # Read the h5 file and store the data if param is not None: flnm = "2D_DarcyFlow_beta" + str(param) + "_Train.hdf5" - assert os.path.isfile(path + flnm), 'no such file! '+ path + flnm + assert os.path.isfile(path + flnm), "no such file! 
" + path + flnm else: flnm = "2D_DarcyFlow_beta1.0_Train.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: - data = np.array(h5_file["tensor"], dtype=np.float32)[nb] # (batch, t, x, y, channel) --> (t, x, y, channel) - nu = np.array(h5_file["nu"], dtype=np.float32)[nb] # (batch, t, x, y, channel) --> (t, x, y, channel) + data = np.array(h5_file["tensor"], dtype=np.float32)[ + nb + ] # (batch, t, x, y, channel) --> (t, x, y, channel) + nu = np.array(h5_file["nu"], dtype=np.float32)[ + nb + ] # (batch, t, x, y, channel) --> (t, x, y, channel) # Initialize plot fig, ax = plt.subplots(1, 2, figsize=(16, 8)) ax[0].imshow(data.squeeze()) ax[1].imshow(nu.squeeze()) - ax[0].set_title('Data u') - ax[1].set_title('diffusion coefficient nu') - plt.savefig('2D_DarcyFlow.pdf') + ax[0].set_title("Data u") + ax[1].set_title("diffusion coefficient nu") + plt.savefig("2D_DarcyFlow.pdf") print("plot saved") @@ -403,16 +464,18 @@ def visualize_1d_reacdiff(path, param=None): # Read the h5 file and store the data if param is not None: - assert len(param) == 2, 'param should include Nu and Rho as list' - flnm = "ReacDiff_Nu" + str(param[0]) + '_Rho' + str(param[1]) +".hdf5" - assert os.path.isfile(path + flnm), 'no such file! '+ path + flnm + assert len(param) == 2, "param should include Nu and Rho as list" + flnm = "ReacDiff_Nu" + str(param[0]) + "_Rho" + str(param[1]) + ".hdf5" + assert os.path.isfile(path + flnm), "no such file! " + path + flnm else: flnm = "ReacDiff_Nu1.0_Rho1.0.hdf5" nb = 0 with h5py.File(os.path.join(path, flnm), "r") as h5_file: xcrd = np.array(h5_file["x-coordinate"], dtype=np.float32) - data = np.array(h5_file["tensor"], dtype=np.float32)[nb] # (batch, t, x, channel) --> (t, x, channel) + data = np.array(h5_file["tensor"], dtype=np.float32)[ + nb + ] # (batch, t, x, channel) --> (t, x, channel) # Initialize plot fig, ax = plt.subplots() @@ -464,7 +527,7 @@ def visualize_1d_reacdiff(path, param=None): ) arg_parser.add_argument( "--params", - nargs='+', + nargs="+", default=None, help="PDE parameters to be plotted", ) @@ -493,4 +556,3 @@ def visualize_1d_reacdiff(path, param=None): visualize_1d_reacdiff(args.data_path, args.params) else: raise ValueError("PDE name not recognized!") - diff --git a/pdebench/data_gen/configs/diff-react.yaml b/pdebench/data_gen/configs/diff-react.yaml index 39372cc..92ab753 100644 --- a/pdebench/data_gen/configs/diff-react.yaml +++ b/pdebench/data_gen/configs/diff-react.yaml @@ -34,13 +34,12 @@ sim: y_top: 1.0 ydim: 128 n: 1 - seed: '???' - -plot: - t_idx: 1.0 # Fraction of the final time step idx to be plotted - dim: 2 # Spatial dimension - channel_idx: 0 # Index of the variable to be plotted + seed: "???" +plot: + t_idx: 1.0 # Fraction of the final time step idx to be plotted + dim: 2 # Spatial dimension + channel_idx: 0 # Index of the variable to be plotted dataverse: lib_name: pyDaRUS @@ -53,8 +52,9 @@ dataverse: identifier_scheme: ORCID identifier: 0000-0003-3619-9122 description: - - text: 2D diffusion-reaction dataset generated for the PDE benchmark paper - date: '2022' + - text: + 2D diffusion-reaction dataset generated for the PDE benchmark paper + date: "2022" contact: - name: Timothy Praditia affiliation: Universität Stuttgart @@ -66,10 +66,18 @@ dataverse: process: processing_methods: - name: FVM - description: Finite Volume Method is a spatial discretization method to calculate spatial derivative in a Partial Differential Equation. It integrates the fluxes at all discrete cell boundaries so that it ensures conservation. 
+ description: + Finite Volume Method is a spatial discretization method to calculate + spatial derivative in a Partial Differential Equation. It integrates + the fluxes at all discrete cell boundaries so that it ensures + conservation. parameters: cell length, cell width - name: RK45 - description: Explicit Runge-Kutta method of order 5(4) is a time integration method to solve the temporal derivative in a Partial Differential Equation. It is an adaptive time integration scheme to ensure better accuracy and computation efficiency. + description: + Explicit Runge-Kutta method of order 5(4) is a time integration + method to solve the temporal derivative in a Partial Differential + Equation. It is an adaptive time integration scheme to ensure better + accuracy and computation efficiency. parameters: time step size, total time, error tolerance method_parameters: - name: cell length diff --git a/pdebench/data_gen/configs/diff-sorp.yaml b/pdebench/data_gen/configs/diff-sorp.yaml index e45a24e..2ad7e1f 100644 --- a/pdebench/data_gen/configs/diff-sorp.yaml +++ b/pdebench/data_gen/configs/diff-sorp.yaml @@ -34,13 +34,12 @@ sim: x_right: 1.0 xdim: 1024 n: 1 - seed: '???' - + seed: "???" + plot: - t_idx: 1.0 # Fraction of the final time step idx to be plotted - dim: 1 # Spatial dimension - channel_idx: 0 # Index of the variable to be plotted - + t_idx: 1.0 # Fraction of the final time step idx to be plotted + dim: 1 # Spatial dimension + channel_idx: 0 # Index of the variable to be plotted dataverse: lib_name: pyDaRUS @@ -53,8 +52,9 @@ dataverse: identifier_scheme: ORCID identifier: 0000-0003-3619-9122 description: - - text: 1D diffusion-sorption dataset generated for the PDE benchmark paper - date: '2022' + - text: + 1D diffusion-sorption dataset generated for the PDE benchmark paper + date: "2022" contact: - name: Timothy Praditia affiliation: Universität Stuttgart @@ -66,10 +66,18 @@ dataverse: process: processing_methods: - name: FVM - description: Finite Volume Method is a spatial discretization method to calculate spatial derivative in a Partial Differential Equation. It integrates the fluxes at all discrete cell boundaries so that it ensures conservation. + description: + Finite Volume Method is a spatial discretization method to calculate + spatial derivative in a Partial Differential Equation. It integrates + the fluxes at all discrete cell boundaries so that it ensures + conservation. parameters: cell length - name: RK45 - description: Explicit Runge-Kutta method of order 5(4) is a time integration method to solve the temporal derivative in a Partial Differential Equation. It is an adaptive time integration scheme to ensure better accuracy and computation efficiency. + description: + Explicit Runge-Kutta method of order 5(4) is a time integration + method to solve the temporal derivative in a Partial Differential + Equation. It is an adaptive time integration scheme to ensure better + accuracy and computation efficiency. 
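The RK45 descriptions above refer to the adaptive Runge-Kutta 5(4) pair, which SciPy exposes as solve_ivp(..., method="RK45"). A minimal standalone sketch of that integrator on a stand-in right-hand side (the actual diffusion-reaction operator lives in the simulator and is not shown here):

import numpy as np
from scipy.integrate import solve_ivp

def rhs(t, u):
    # stand-in RHS: 1D diffusion stencil on a periodic grid, unit coefficients
    return np.roll(u, 1) - 2.0 * u + np.roll(u, -1)

u0 = np.sin(2.0 * np.pi * np.linspace(0.0, 1.0, 128, endpoint=False))
sol = solve_ivp(rhs, (0.0, 1.0), u0, method="RK45", rtol=1e-6, atol=1e-9)
print(sol.t.size, sol.y.shape)  # the step count is chosen by the error controller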
parameters: time step size, total time, error tolerance method_parameters: - name: cell length diff --git a/pdebench/data_gen/configs/mode/debug.yaml b/pdebench/data_gen/configs/mode/debug.yaml index e17c0d7..2225b85 100644 --- a/pdebench/data_gen/configs/mode/debug.yaml +++ b/pdebench/data_gen/configs/mode/debug.yaml @@ -14,4 +14,3 @@ hydra: subdir: ${hydra.job.num} launcher: n_jobs: 1 - diff --git a/pdebench/data_gen/configs/mode/default.yaml b/pdebench/data_gen/configs/mode/default.yaml index ecb1b8f..bd34f5a 100644 --- a/pdebench/data_gen/configs/mode/default.yaml +++ b/pdebench/data_gen/configs/mode/default.yaml @@ -8,4 +8,3 @@ hydra: sweep: dir: ${oc.env:WORKING_DIR,multirun}/${hydra.job.name}/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} subdir: ${hydra.job.num} - diff --git a/pdebench/data_gen/configs/mode/slurm.yaml b/pdebench/data_gen/configs/mode/slurm.yaml index c3c619e..eaff80a 100644 --- a/pdebench/data_gen/configs/mode/slurm.yaml +++ b/pdebench/data_gen/configs/mode/slurm.yaml @@ -15,4 +15,3 @@ hydra: tasks_per_node: 1 mem_gb: 16 timeout_min: 719 # just under 12 hours is good for many clusters - diff --git a/pdebench/data_gen/configs/ns_incomp.yaml b/pdebench/data_gen/configs/ns_incomp.yaml index ec76433..086b771 100644 --- a/pdebench/data_gen/configs/ns_incomp.yaml +++ b/pdebench/data_gen/configs/ns_incomp.yaml @@ -7,25 +7,25 @@ artefact_dir: ${oc.env:ARTEFACT_DIR,artefacts} dataverse: dataset_id: doi:10.18419/darus-2984 -sim_name: 'ns_sim_2d' +sim_name: "ns_sim_2d" label: null -# Solver Parameters +# Solver Parameters domain_size: [1, 1] -grid_size: [256,256] +grid_size: [256, 256] #['scalar_grid', extrapolation_x:(type or bound), extrapolation_y:(type or bound)] -particle_extrapolation: 'BOUNDARY' - +particle_extrapolation: "BOUNDARY" + #['staggered_grid', extrapolation_x:(type or bound), extrapolation_y:(type or bound)] -velocity_extrapolation: 'ZERO' +velocity_extrapolation: "ZERO" # Fluid characteristics NU: 0.01 #(kinematic viscosity) # External force # enable_gravity: false -force_extrapolation: 'ZERO' +force_extrapolation: "ZERO" # Fluctuation Generator Parameters (Noise) seed: 1 @@ -36,7 +36,7 @@ force_scale: 0.15 #params for IncompressibleFlow(Physics) n_steps: 100000 -DT : 0.00005 +DT: 0.00005 frame_int: 100 n_batch: 1 @@ -45,10 +45,9 @@ n_batch: 1 # save_images: false # save_gif: false save_h5: true -profile: false # Run performance profiling -upload: false # upload to DARUS - requires key +profile: false # Run performance profiling +upload: false # upload to DARUS - requires key -backend: 'jax' -device: 'GPU' +backend: "jax" +device: "GPU" jit: true - diff --git a/pdebench/data_gen/configs/radial_dam_break.yaml b/pdebench/data_gen/configs/radial_dam_break.yaml index 3071940..fb5e13c 100644 --- a/pdebench/data_gen/configs/radial_dam_break.yaml +++ b/pdebench/data_gen/configs/radial_dam_break.yaml @@ -33,13 +33,12 @@ sim: x_right: 2.5 y_bottom: -2.5 y_top: 2.5 - seed: '???' + seed: "???" 
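One thing worth confirming about the seed: "???" requoting just above (and the identical changes in diff-react.yaml and diff-sorp.yaml): in OmegaConf the mandatory-value marker ??? is simply the three-character string, so Prettier wrapping it in double quotes should not change behavior, and the field still has to be supplied at run time. A quick standalone check, assuming only omegaconf is installed:

from omegaconf import OmegaConf

cfg = OmegaConf.create('seed: "???"')
print(OmegaConf.is_missing(cfg, "seed"))  # expected True: quoting keeps the marker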
- plot: - t_idx: 1.0 # Fraction of the final time step idx to be plotted - dim: 2 # Spatial dimension - channel_idx: 0 # Index of the variable to be plotted + t_idx: 1.0 # Fraction of the final time step idx to be plotted + dim: 2 # Spatial dimension + channel_idx: 0 # Index of the variable to be plotted dataverse: lib_name: pyDaRUS @@ -52,8 +51,10 @@ dataverse: identifier_scheme: ORCID identifier: 0000-0001-8070-2384 description: - - text: 2D shallow-water equation dataset generated for the PDE benchmark paper - date: '2022' + - text: + 2D shallow-water equation dataset generated for the PDE benchmark + paper + date: "2022" contact: - name: Raphael Leiteritz affiliation: Universität Stuttgart @@ -65,7 +66,11 @@ dataverse: process: processing_methods: - name: FVM - description: Finite Volume Method is a spatial discretization method to calculate spatial derivative in a Partial Differential Equation. It integrates the fluxes at all discrete cell boundaries so that it ensures conservation. + description: + Finite Volume Method is a spatial discretization method to calculate + spatial derivative in a Partial Differential Equation. It integrates + the fluxes at all discrete cell boundaries so that it ensures + conservation. parameters: cell length, cell width method_parameters: - name: cell length @@ -85,4 +90,4 @@ dataverse: unit: s value: 1 engMeta: {} - codeMeta: {} \ No newline at end of file + codeMeta: {} diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_exact_Hydra.py b/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_exact_Hydra.py index 1938425..e1e8eb9 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_exact_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_exact_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,24 +144,24 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations import time -import sys from math import ceil -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax import jax.numpy as jnp from jax import device_put +# Hydra +from omegaconf import DictConfig + # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: - print('advection velocity: {}'.format(cfg.args.beta)) + print(f"advection velocity: {cfg.args.beta}") # cell edge coordinate xe = jnp.linspace(cfg.args.xL, cfg.args.xR, cfg.args.nx + 1) @@ -177,37 +176,37 @@ def evolve(u): i_save = 0 tm_ini = time.time() - it_tot = ceil((cfg.args.fin_time - cfg.args.ini_time)/cfg.args.dt_save) + 1 + it_tot = ceil((cfg.args.fin_time - cfg.args.ini_time) / cfg.args.dt_save) + 1 uu = jnp.zeros([it_tot, u.shape[0]]) uu = uu.at[0].set(u) while t < cfg.args.fin_time: - print('save data at t = {0:.3f}'.format(t)) + print(f"save data at t = {t:.3f}") u = set_function(xc, t, cfg.args.beta) uu = uu.at[i_save].set(u) t += cfg.args.dt_save i_save += 1 tm_fin = time.time() - print('total elapsed time is {} sec'.format(tm_fin - tm_ini)) + print(f"total elapsed time is {tm_fin - tm_ini} sec") uu = uu.at[-1].set(u) return uu, t @jax.jit def set_function(x, t, beta): - return jnp.sin(2.*jnp.pi*(x - beta*t)) + return jnp.sin(2.0 * jnp.pi * (x - beta * t)) u = set_function(xc, t=0, beta=cfg.args.beta) u = device_put(u) # putting variables in GPU (not necessary??) 
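# Note on set_function above: u(x, t) = sin(2 * pi * (x - beta * t)) is the exact
# solution of the linear advection equation u_t + beta * u_x = 0, so evolve()
# samples the analytic solution at each save time instead of integrating. A
# standalone numpy sketch of the residual check (hypothetical, not part of this
# script):
#
#     import numpy as np
#     beta, t, eps = 2.0, 0.7, 1e-5
#     u = lambda x, t: np.sin(2.0 * np.pi * (x - beta * t))
#     x = np.linspace(0.0, 1.0, 257)
#     u_t = (u(x, t + eps) - u(x, t - eps)) / (2.0 * eps)
#     u_x = (u(x + eps, t) - u(x - eps, t)) / (2.0 * eps)
#     print(np.abs(u_t + beta * u_x).max())  # ~1e-7, zero up to truncation error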
uu, t = evolve(u) - print('final time is: {0:.3f}'.format(t)) + print(f"final time is: {t:.3f}") + + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" + jnp.save(cwd + cfg.args.save + "/Advection_beta" + str(cfg.args.beta), uu) + jnp.save(cwd + cfg.args.save + "/x_coordinate", xe) + jnp.save(cwd + cfg.args.save + "/t_coordinate", tc) - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' - jnp.save(cwd + cfg.args.save + '/Advection_beta' + str(cfg.args.beta), uu) - jnp.save(cwd + cfg.args.save + '/x_coordinate', xe) - jnp.save(cwd + cfg.args.save + '/t_coordinate', tc) -if __name__=='__main__': +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_multi_solution_Hydra.py b/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_multi_solution_Hydra.py index 19f6a12..ea951f9 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_multi_solution_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/advection_multi_solution_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,33 +144,35 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ -import sys -from math import ceil, log, exp +from __future__ import annotations + +import sys +from math import ceil, exp, log import random from pathlib import Path -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax -from jax import vmap import jax.numpy as jnp from jax import device_put, lax -sys.path.append('..') -from utils import init_multi, Courant, save_data, bc, limiting +# Hydra +from omegaconf import DictConfig + +sys.path.append("..") +from utils import Courant, bc, init_multi, limiting def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: # basic parameters dx = (cfg.multi.xR - cfg.multi.xL) / cfg.multi.nx - dx_inv = 1. / dx + dx_inv = 1.0 / dx # cell edge coordinate xe = jnp.linspace(cfg.multi.xL, cfg.multi.xR, cfg.multi.nx + 1) @@ -187,11 +186,13 @@ def main(cfg: DictConfig) -> None: dt_save = cfg.multi.dt_save CFL = cfg.multi.CFL if cfg.multi.if_rand_param: - beta = exp(random.uniform(log(0.01), log(100))) # uniform number between 0.01 to 100 + beta = exp( + random.uniform(log(0.01), log(100)) + ) # log-uniform sample between 0.01 and 100 else: beta = cfg.multi.beta - print('beta: ', beta) + print("beta: ", beta) @jax.jit def evolve(u): @@ -199,7 +200,7 @@ def evolve(u): tsave = t steps = 0 i_save = 0 - dt = 0.
+ dt = 0.0 uu = jnp.zeros([it_tot, u.shape[0]]) uu = uu.at[0].set(u) @@ -244,7 +245,7 @@ def _update(carry): return u, dt carry = u, dt - u, dt = lax.cond(dt > 1.e-8, _update, _pass, carry) + u, dt = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 @@ -253,37 +254,44 @@ def _update(carry): @jax.jit def update(u, u_tmp, dt): f = flux(u_tmp) - u -= dt * dx_inv * (f[1:cfg.multi.nx + 1] - f[0:cfg.multi.nx]) + u -= dt * dx_inv * (f[1 : cfg.multi.nx + 1] - f[0 : cfg.multi.nx]) return u def flux(u): - _u = bc(u, dx, Ncell=cfg.multi.nx) # index 2 for _U is equivalent with index 0 for u + _u = bc( + u, dx, Ncell=cfg.multi.nx + ) # index 2 for _U is equivalent with index 0 for u uL, uR = limiting(_u, cfg.multi.nx, if_second_order=cfg.multi.if_second_order) fL = uL * beta fR = uR * beta # upwind advection scheme - f_upwd = 0.5 * (fR[1:cfg.multi.nx+2] + fL[2:cfg.multi.nx+3] - - jnp.abs(beta)*(uL[2:cfg.multi.nx+3] - uR[1:cfg.multi.nx+2])) + f_upwd = 0.5 * ( + fR[1 : cfg.multi.nx + 2] + + fL[2 : cfg.multi.nx + 3] + - jnp.abs(beta) * (uL[2 : cfg.multi.nx + 3] - uR[1 : cfg.multi.nx + 2]) + ) return f_upwd u = init_multi(xc, numbers=cfg.multi.numbers, k_tot=4, init_key=cfg.multi.init_key) u = device_put(u) # putting variables in GPU (not necessary??) - #vm_evolve = vmap(evolve, 0, 0) - #uu = vm_evolve(u) - vm_evolve = jax.pmap(jax.vmap(evolve, axis_name='j'), axis_name='i') + # vm_evolve = vmap(evolve, 0, 0) + # uu = vm_evolve(u) + vm_evolve = jax.pmap(jax.vmap(evolve, axis_name="j"), axis_name="i") local_devices = jax.local_device_count() - uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers//local_devices, -1])) + + uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers // local_devices, -1])) # reshape before saving uu = uu.reshape((-1, *uu.shape[2:])) - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" Path(cwd + cfg.multi.save).mkdir(parents=True, exist_ok=True) - jnp.save(cwd+cfg.multi.save+'1D_Advection_Sols_beta'+str(beta)[:5], uu) - jnp.save(cwd + cfg.multi.save + '/x_coordinate', xc) - jnp.save(cwd + cfg.multi.save + '/t_coordinate', tc) + jnp.save(cwd + cfg.multi.save + "1D_Advection_Sols_beta" + str(beta)[:5], uu) + jnp.save(cwd + cfg.multi.save + "/x_coordinate", xc) + jnp.save(cwd + cfg.multi.save + "/t_coordinate", tc) + -if __name__=='__main__': +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e-1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e-1.yaml index 481e65f..8c484b7 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e-1.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 1.e-1 +beta: 1.e-1 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e0.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e0.yaml index c1a1492..bebc582 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e0.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. 
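Back in the flux() hunk of advection_multi_solution_Hydra.py above, the reformatted expression is still the plain upwind flux for linear advection: averaging fL and fR and subtracting the |beta| penalty picks the upstream state exactly. A standalone check with scalar edge states (plain numpy, names loosely mirror the hunk):

import numpy as np

beta = 1.5
uR = 0.8  # right-biased state of the left cell at the interface
uL = 0.3  # left-biased state of the right cell at the interface
f_upwd = 0.5 * (beta * uR + beta * uL - abs(beta) * (uL - uR))
print(np.isclose(f_upwd, beta * uR))  # True: beta > 0 selects the upstream value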
-beta : 1.e0 +beta: 1.e0 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e1.yaml index a7164b1..db14c50 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta1e1.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 1.e1 +beta: 1.e1 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e-1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e-1.yaml index 3d324c5..aa5539e 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e-1.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 2.e-1 +beta: 2.e-1 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e0.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e0.yaml index fd69b3c..73832a2 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta2e0.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 2.e0 +beta: 2.e0 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e-1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e-1.yaml index 91a0ad9..d8114c8 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e-1.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 4.e-1 +beta: 4.e-1 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e0.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e0.yaml index 3788878..30ab03a 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/beta4e0.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 4.e0 +beta: 4.e0 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/config.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/config.yaml index c1a1492..bebc582 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/args/config.yaml @@ -1,10 +1,10 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. 
-beta : 1.e0 +beta: 1.e0 if_show: 1 -init_mode: 'sin' +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e-1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e-1.yaml index 569c31a..0bdcd23 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e-1.yaml @@ -1,15 +1,15 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 1.e-1 +beta: 1.e-1 if_show: 1 numbers: 10000 CFL: 4.e-1 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e0.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e0.yaml index 4d7d9bd..5d8f5fd 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta1e0.yaml @@ -1,11 +1,11 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 1.e0 +beta: 1.e0 if_show: 1 numbers: 10000 CFL: 4.e-1 diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e-1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e-1.yaml index 25edbee..cb46b32 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e-1.yaml @@ -1,15 +1,15 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 2.e-1 +beta: 2.e-1 if_show: 1 numbers: 10000 CFL: 4.e-1 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e0.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e0.yaml index 5171dc6..3185fc7 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta2e0.yaml @@ -1,15 +1,15 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 2.e0 +beta: 2.e0 if_show: 1 numbers: 10000 CFL: 4.e-1 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e-1.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e-1.yaml index 2b5385d..0125f48 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e-1.yaml @@ -1,15 +1,15 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 4.e-1 +beta: 4.e-1 if_show: 1 numbers: 10000 CFL: 4.e-1 if_second_order: 1. 
show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e0.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e0.yaml index a9b2961..d6d8772 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/beta4e0.yaml @@ -1,15 +1,15 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 4.e0 +beta: 4.e0 if_show: 1 numbers: 10000 CFL: 4.e-1 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config.yaml index 6e60508..ce8be8b 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config.yaml @@ -1,11 +1,11 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. nx: 1024 xL: 0. xR: 1. -beta : 1.e0 +beta: 1.e0 if_show: 1 numbers: 100 CFL: 3.e-1 diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config2D.yaml b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config2D.yaml index 06c0f34..21ce8e9 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config2D.yaml +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/config/multi/config2D.yaml @@ -1,4 +1,4 @@ -save: '../save/advection/' +save: "../save/advection/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -8,8 +8,8 @@ xL: 0. xR: 1. yL: 0. yR: 1. -betaX : 1.e0 -betaY : 1.e0 +betaX: 1.e0 +betaY: 1.e0 if_show: 1 numbers: 4 CFL: 2.5e-1 diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_testset.sh b/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_testset.sh index 384989a..08e2ebd 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_testset.sh +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_testset.sh @@ -1,7 +1,8 @@ +#! /bin/bash CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta1e0.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta1e1.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta1e-1.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta2e0.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta2e-1.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta4e0.yaml -CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta4e-1.yaml \ No newline at end of file +CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta4e-1.yaml diff --git a/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_trainset.sh b/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_trainset.sh index 0d92fee..48f5d93 100644 --- a/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_trainset.sh +++ b/pdebench/data_gen/data_gen_NLE/AdvectionEq/run_trainset.sh @@ -1,3 +1,4 @@ +#! 
/bin/bash CUDA_VISIBLE_DEVICES='2,3' python3 advection_multi_solution_Hydra.py +multi=beta1e0.yaml CUDA_VISIBLE_DEVICES='2,3' python3 advection_multi_solution_Hydra.py +multi=beta1e-1.yaml CUDA_VISIBLE_DEVICES='2,3' python3 advection_multi_solution_Hydra.py +multi=beta2e0.yaml diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_Hydra.py b/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_Hydra.py index 4adc063..f8eab18 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,32 +144,35 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations -import time import sys +import time from math import ceil -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax import jax.numpy as jnp from jax import device_put, lax -sys.path.append('..') -from utils import init, Courant, Courant_diff, save_data, bc, limiting +# Hydra +from omegaconf import DictConfig + +sys.path.append("..") +from utils import Courant, Courant_diff, bc, init, limiting + def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: # basic parameters - pi_inv = 1. / jnp.pi + pi_inv = 1.0 / jnp.pi dx = (cfg.args.xR - cfg.args.xL) / cfg.args.nx - dx_inv = 1. / dx + dx_inv = 1.0 / dx # cell edge coordinate xe = jnp.linspace(cfg.args.xL, cfg.args.xR, cfg.args.nx + 1) @@ -193,7 +195,7 @@ def evolve(u): tsave = t steps = 0 i_save = 0 - dt = 0. + dt = 0.0 uu = jnp.zeros([it_tot, u.shape[0]]) uu = uu.at[0].set(u) @@ -225,14 +227,14 @@ def _save(_carry): uu = uu.at[-1].set(u) tm_fin = time.time() - print('total elapsed time is {} sec'.format(tm_fin - tm_ini)) + print(f"total elapsed time is {tm_fin - tm_ini} sec") return uu, t @jax.jit def simulation_fn(i, carry): u, t, dt, steps, tsave = carry dt_adv = Courant(u, dx) * CFL - dt_dif = Courant_diff(dx, cfg.args.epsilon*pi_inv) * CFL + dt_dif = Courant_diff(dx, cfg.args.epsilon * pi_inv) * CFL dt = jnp.min(jnp.array([dt_adv, dt_dif, fin_time - t, tsave - t])) def _update(carry): @@ -244,7 +246,7 @@ def _update(carry): return u, dt carry = u, dt - u, dt = lax.cond(dt > 1.e-8, _update, _pass, carry) + u, dt = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 @@ -253,36 +255,69 @@ def _update(carry): @jax.jit def update(u, u_tmp, dt): f = flux(u_tmp) - u -= dt * dx_inv * (f[1:cfg.args.nx + 1] - f[0:cfg.args.nx]) + u -= dt * dx_inv * (f[1 : cfg.args.nx + 1] - f[0 : cfg.args.nx]) return u def flux(u): - _u = bc(u, dx, Ncell=cfg.args.nx) # index 2 for _U is equivalent with index 0 for u - uL, uR = limiting(_u, cfg.args.nx, if_second_order=1.) 
- fL = 0.5*uL**2 - fR = 0.5*uR**2 + _u = bc( + u, dx, Ncell=cfg.args.nx + ) # index 2 for _U is equivalent with index 0 for u + uL, uR = limiting(_u, cfg.args.nx, if_second_order=1.0) + fL = 0.5 * uL**2 + fR = 0.5 * uR**2 # upwind advection scheme - f_upwd = 0.5 * (fR[1:cfg.args.nx+2] + fL[2:cfg.args.nx+3] - - 0.5*jnp.abs(uL[2:cfg.args.nx+3] + uR[1:cfg.args.nx+2])*(uL[2:cfg.args.nx+3] - uR[1:cfg.args.nx+2])) + f_upwd = 0.5 * ( + fR[1 : cfg.args.nx + 2] + + fL[2 : cfg.args.nx + 3] + - 0.5 + * jnp.abs(uL[2 : cfg.args.nx + 3] + uR[1 : cfg.args.nx + 2]) + * (uL[2 : cfg.args.nx + 3] - uR[1 : cfg.args.nx + 2]) + ) # source term - f_upwd += - cfg.args.epsilon*pi_inv*(_u[2:cfg.args.nx+3] - _u[1:cfg.args.nx+2])*dx_inv + f_upwd += ( + -cfg.args.epsilon + * pi_inv + * (_u[2 : cfg.args.nx + 3] - _u[1 : cfg.args.nx + 2]) + * dx_inv + ) return f_upwd u = init(xc=xc, mode=cfg.args.init_mode, u0=cfg.args.u0, du=cfg.args.du) u = device_put(u) # putting variables in GPU (not necessary??) uu, t = evolve(u) - print('final time is: {0:.3f}'.format(t)) - - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' - if cfg.args.init_mode=='sinsin': - jnp.save(cwd + cfg.args.save + '/Burgers_' + cfg.args.init_mode + '_u' + str(cfg.args.u0) + '_du' + str( - cfg.args.du) + '_Nu' + str(cfg.args.epsilon), uu) + print(f"final time is: {t:.3f}") + + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" + if cfg.args.init_mode == "sinsin": + jnp.save( + cwd + + cfg.args.save + + "/Burgers_" + + cfg.args.init_mode + + "_u" + + str(cfg.args.u0) + + "_du" + + str(cfg.args.du) + + "_Nu" + + str(cfg.args.epsilon), + uu, + ) else: - jnp.save(cwd + cfg.args.save + '/Burgers_' + cfg.args.init_mode + '_u' + str(cfg.args.u0) + '_Nu' + str( - cfg.args.epsilon), uu) - jnp.save(cwd + cfg.args.save+'/x_coordinate', xc) - jnp.save(cwd + cfg.args.save+'/t_coordinate', tc) - -if __name__=='__main__': + jnp.save( + cwd + + cfg.args.save + + "/Burgers_" + + cfg.args.init_mode + + "_u" + + str(cfg.args.u0) + + "_Nu" + + str(cfg.args.epsilon), + uu, + ) + jnp.save(cwd + cfg.args.save + "/x_coordinate", xc) + jnp.save(cwd + cfg.args.save + "/t_coordinate", tc) + + +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_multi_solution_Hydra.py b/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_multi_solution_Hydra.py index da17c10..5e07805 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_multi_solution_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/burgers_multi_solution_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,34 +144,36 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ -import sys +from __future__ import annotations + import random +import sys from math import ceil, exp, log from pathlib import Path -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax -from jax import vmap import jax.numpy as jnp from jax import device_put, lax -sys.path.append('..') -from utils import init_multi, Courant, Courant_diff, save_data, bc, limiting +# Hydra +from omegaconf import DictConfig + +sys.path.append("..") +from utils import Courant, Courant_diff, bc, init_multi, limiting def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: # basic parameters - pi_inv = 1. / jnp.pi + pi_inv = 1.0 / jnp.pi dx = (cfg.multi.xR - cfg.multi.xL) / cfg.multi.nx - dx_inv = 1. 
/ dx + dx_inv = 1.0 / dx # cell edge coordinate xe = jnp.linspace(cfg.multi.xL, cfg.multi.xR, cfg.multi.nx + 1) @@ -185,10 +186,12 @@ def main(cfg: DictConfig) -> None: dt_save = cfg.multi.dt_save CFL = cfg.multi.CFL if cfg.multi.if_rand_param: - epsilon = exp(random.uniform(log(0.001), log(10))) # uniform number between 0.01 to 100 + epsilon = exp( + random.uniform(log(0.001), log(10)) + ) # log-uniform sample between 0.001 and 10 else: epsilon = cfg.multi.epsilon - print('epsilon: ', epsilon) + print("epsilon: ", epsilon) # t-coordinate it_tot = ceil((fin_time - ini_time) / dt_save) + 1 tc = jnp.arange(it_tot + 1) * dt_save @@ -199,7 +202,7 @@ def evolve(u): tsave = t steps = 0 i_save = 0 - dt = 0. + dt = 0.0 uu = jnp.zeros([it_tot, u.shape[0]]) uu = uu.at[0].set(u) @@ -233,7 +236,7 @@ def _show(_carry): def simulation_fn(i, carry): u, t, dt, steps, tsave = carry dt_adv = Courant(u, dx) * CFL - dt_dif = Courant_diff(dx, epsilon*pi_inv) * CFL + dt_dif = Courant_diff(dx, epsilon * pi_inv) * CFL dt = jnp.min(jnp.array([dt_adv, dt_dif, fin_time - t, tsave - t])) def _update(carry): @@ -245,7 +248,7 @@ def _update(carry): return u, dt carry = u, dt - u, dt = lax.cond(dt > 1.e-8, _update, _pass, carry) + u, dt = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 @@ -254,39 +257,52 @@ def _update(carry): @jax.jit def update(u, u_tmp, dt): f = flux(u_tmp) - u -= dt * dx_inv * (f[1:cfg.multi.nx + 1] - f[0:cfg.multi.nx]) + u -= dt * dx_inv * (f[1 : cfg.multi.nx + 1] - f[0 : cfg.multi.nx]) return u def flux(u): - _u = bc(u, dx, Ncell=cfg.multi.nx) # index 2 for _U is equivalent with index 0 for u - uL, uR = limiting(_u, cfg.multi.nx, if_second_order=1.) - fL = 0.5*uL**2 - fR = 0.5*uR**2 + _u = bc( + u, dx, Ncell=cfg.multi.nx + ) # index 2 for _U is equivalent with index 0 for u + uL, uR = limiting(_u, cfg.multi.nx, if_second_order=1.0) + fL = 0.5 * uL**2 + fR = 0.5 * uR**2 # upwind advection scheme - f_upwd = 0.5 * (fR[1:cfg.multi.nx+2] + fL[2:cfg.multi.nx+3] - - 0.5*jnp.abs(uL[2:cfg.multi.nx+3] + uR[1:cfg.multi.nx+2])*(uL[2:cfg.multi.nx+3] - uR[1:cfg.multi.nx+2])) + f_upwd = 0.5 * ( + fR[1 : cfg.multi.nx + 2] + + fL[2 : cfg.multi.nx + 3] + - 0.5 + * jnp.abs(uL[2 : cfg.multi.nx + 3] + uR[1 : cfg.multi.nx + 2]) + * (uL[2 : cfg.multi.nx + 3] - uR[1 : cfg.multi.nx + 2]) + ) # source term - f_upwd += - epsilon*pi_inv*(_u[2:cfg.multi.nx+3] - _u[1:cfg.multi.nx+2])*dx_inv + f_upwd += ( + -epsilon + * pi_inv + * (_u[2 : cfg.multi.nx + 3] - _u[1 : cfg.multi.nx + 2]) + * dx_inv + ) return f_upwd u = init_multi(xc, numbers=cfg.multi.numbers, k_tot=4, init_key=cfg.multi.init_key) u = device_put(u) # putting variables in GPU (not necessary??)
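# Note on flux() above: with the local speed a = 0.5 * |uL + uR|, the averaged
# flux is the upwind flux of f(u) = 0.5 * u**2 whenever uL + uR > 0 (a local
# Lax-Friedrichs / Rusanov-type choice). Standalone scalar sketch (hypothetical
# names, not part of this script):
#
#     uR_, uL_ = 0.9, 0.4                   # edge states at one interface
#     a = 0.5 * abs(uL_ + uR_)              # local advection speed estimate
#     f = 0.5 * (0.5 * uR_**2 + 0.5 * uL_**2 - a * (uL_ - uR_))
#     assert abs(f - 0.5 * uR_**2) < 1e-12  # positive speed -> upstream state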
- #vm_evolve = vmap(evolve, 0, 0) - #uu = vm_evolve(u) - vm_evolve = jax.pmap(jax.vmap(evolve, axis_name='j'), axis_name='i') + # vm_evolve = vmap(evolve, 0, 0) + # uu = vm_evolve(u) + vm_evolve = jax.pmap(jax.vmap(evolve, axis_name="j"), axis_name="i") local_devices = jax.local_device_count() - uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers//local_devices, -1])) + uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers // local_devices, -1])) # reshape before saving uu = uu.reshape((-1, *uu.shape[2:])) - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" Path(cwd + cfg.multi.save).mkdir(parents=True, exist_ok=True) - jnp.save(cwd+cfg.multi.save+'1D_Burgers_Sols_Nu'+str(epsilon)[:5], uu) - jnp.save(cwd + cfg.multi.save + '/x_coordinate', xc) - jnp.save(cwd + cfg.multi.save + '/t_coordinate', tc) + jnp.save(cwd + cfg.multi.save + "1D_Burgers_Sols_Nu" + str(epsilon)[:5], uu) + jnp.save(cwd + cfg.multi.save + "/x_coordinate", xc) + jnp.save(cwd + cfg.multi.save + "/t_coordinate", tc) + -if __name__=='__main__': +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/config.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/config.yaml index 1fa8247..bdb4851 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/config.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,12 +6,12 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' +init_mode: "sin" init_key: 2022 -if_rand_param: None \ No newline at end of file +if_rand_param: None diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-1.yaml index 62def9e..2e2faaa 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-1 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'possin' \ No newline at end of file +init_mode: "possin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-2.yaml index 1f56fe3..a069740 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1.
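On the pmap/vmap hunk in burgers_multi_solution_Hydra.py above: the batch of initial conditions is reshaped to [local_devices, numbers // local_devices, nx] so jax.pmap shards whole trajectories across devices while the inner jax.vmap vectorizes within each shard. A toy standalone version of the pattern (assumes the batch divides evenly across devices, as the script does):

import jax
import jax.numpy as jnp

def step(u):
    return u * 2.0  # stand-in for the evolve() closure

n_dev = jax.local_device_count()
batch = jnp.arange(8.0 * 16.0).reshape(8, 16)  # 8 trajectories, nx = 16
out = jax.pmap(jax.vmap(step))(batch.reshape(n_dev, 8 // n_dev, 16))
print(out.reshape(-1, 16).shape)  # (8, 16) again after un-sharding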
if_show: 1 show_steps: 100 -init_mode: 'possin' \ No newline at end of file +init_mode: "possin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-3.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-3.yaml index f2ea891..88907d2 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-3.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e-3.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-3 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'possin' \ No newline at end of file +init_mode: "possin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e0.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e0.yaml index 88f0d44..0563653 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e0 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'possin' \ No newline at end of file +init_mode: "possin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e1.yaml index 2acd61c..214752e 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e1 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'possin' \ No newline at end of file +init_mode: "possin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e2.yaml index fb53b28..6161d32 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/possin_eps1e2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e2 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'possin' \ No newline at end of file +init_mode: "possin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-1.yaml index 7c5b1f2..1a6d55a 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-1 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. 
if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2.yaml index 942c834..137e61c 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-1.yaml index 99edd16..67f0974 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1.e-1 -du : 0.1 +u0: 1.e-1 +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-2.yaml index 9309d59..9078bcd 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e-2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1.e-2 -du : 0.1 +u0: 1.e-2 +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e1.yaml index 814a1b8..fef36a8 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1.e1 -du : 0.1 +u0: 1.e1 +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e2.yaml index bf4118d..efe85dd 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-2_u01e2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1.e2 -du : 0.1 +u0: 1.e2 +du: 0.1 CFL: 4.e-1 if_second_order: 1. 
if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-3.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-3.yaml index a190a58..59dbfe7 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-3.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e-3.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-3 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e0.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e0.yaml index b1e460d..4aaea8b 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e0 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e1.yaml index 48cf919..4e03682 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e1 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e2.yaml index 0515b8c..8526ee6 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sin_eps1e2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e2 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sin' \ No newline at end of file +init_mode: "sin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du01.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du01.yaml index d1a644e..1bb49e3 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du01.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du01.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 0.1 +u0: 1. +du: 0.1 CFL: 4.e-1 if_second_order: 1. 
if_show: 1 show_steps: 100 -init_mode: 'sinsin' \ No newline at end of file +init_mode: "sinsin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du025.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du025.yaml index 2376eb6..051f70a 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du025.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du025.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 0.25 +u0: 1. +du: 0.25 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sinsin' \ No newline at end of file +init_mode: "sinsin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du05.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du05.yaml index 9274eef..fa55f69 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du05.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du05.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 0.5 +u0: 1. +du: 0.5 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sinsin' \ No newline at end of file +init_mode: "sinsin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du1.yaml index a561103..d1d512a 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 1 +u0: 1. +du: 1 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sinsin' \ No newline at end of file +init_mode: "sinsin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du2.yaml index 9137a1a..e0d48c8 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 2 +u0: 1. +du: 2 CFL: 4.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'sinsin' \ No newline at end of file +init_mode: "sinsin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du5.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du5.yaml index cd789a5..6f0ea11 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du5.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/args/sinsin_eps1e-2_du5.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -6,10 +6,10 @@ nx: 1024 xL: -1. xR: 1. epsilon: 1.e-2 -u0 : 1. -du : 5. +u0: 1. +du: 5. CFL: 4.e-1 if_second_order: 1. 
if_show: 1 show_steps: 100 -init_mode: 'sinsin' \ No newline at end of file +init_mode: "sinsin" diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-1.yaml index db263b5..76d5873 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-2.yaml index 315f7f7..f8fccd3 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-3.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-3.yaml index 9a981a8..798bc67 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-3.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e-3.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e0.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e0.yaml index bf78a94..6562ba4 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-1.yaml index 3f4d4d0..93d252b 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-2.yaml index 13c9935..638746a 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. 
numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-3.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-3.yaml index 52ad377..afe96db 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-3.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e-3.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e0.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e0.yaml index fc30c15..a867e22 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/2e0.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-1.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-1.yaml index eaf7974..9b2f9e6 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-2.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-2.yaml index 07ba0c2..c3b6706 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-2.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-2.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-3.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-3.yaml index 6ff09f2..15c940f 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-3.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e-3.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e0.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e0.yaml index 0e8a8d2..59c9465 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/4e0.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,4 +11,4 @@ if_second_order: 1. 
numbers: 10000 show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/config.yaml b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/config.yaml index 39fb552..3bf3e39 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/config/multi/config.yaml @@ -1,4 +1,4 @@ -save: '../save/burgers/' +save: "../save/burgers/" dt_save: 0.05 ini_time: 0. fin_time: 2. diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/run_testset.sh b/pdebench/data_gen/data_gen_NLE/BurgersEq/run_testset.sh index 82b0310..95db658 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/run_testset.sh +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/run_testset.sh @@ -1,3 +1,4 @@ +#!/bin/bash CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=possin_eps1e0.yaml CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=possin_eps1e1.yaml CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=possin_eps1e2.yaml @@ -22,4 +23,4 @@ CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=sinsin_eps1e-2_du01.yaml CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=sinsin_eps1e-2_du2.yaml CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=sinsin_eps1e-2_du5.yaml CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=sinsin_eps1e-2_du05.yaml -CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=sinsin_eps1e-2_du025.yaml \ No newline at end of file +CUDA_VISIBLE_DEVICES='3' python3 burgers_Hydra.py +args=sinsin_eps1e-2_du025.yaml diff --git a/pdebench/data_gen/data_gen_NLE/BurgersEq/run_trainset.sh b/pdebench/data_gen/data_gen_NLE/BurgersEq/run_trainset.sh index 5899961..25994e9 100644 --- a/pdebench/data_gen/data_gen_NLE/BurgersEq/run_trainset.sh +++ b/pdebench/data_gen/data_gen_NLE/BurgersEq/run_trainset.sh @@ -1,3 +1,4 @@ +#!/bin/sh CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=1e0.yaml CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=1e-1.yaml CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=1e-2.yaml @@ -9,4 +10,4 @@ CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=2e-3.y CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=4e0.yaml CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=4e-1.yaml CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=4e-2.yaml -CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=4e-3.yaml \ No newline at end of file +CUDA_VISIBLE_DEVICES='0,2' python3 burgers_multi_solution_Hydra.py +multi=4e-3.yaml diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_Hydra.py b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_Hydra.py index 3ac7d98..ad2366b 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,57 +144,58 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
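Both run scripts gained shebang lines (`#!/bin/bash` and `#!/bin/sh`), which shellcheck needs in order to identify the shell dialect before linting. The `+args=...` overrides they pass select a file from the `config/args` group consumed by `@hydra.main(config_path="config", config_name="config")`. A hedged in-process equivalent using Hydra's compose API (hydra-core 1.2+ assumed; some Hydra versions expect the bare option name without the `.yaml` suffix):

```python
# Sketch: compose in-process the config the scripts select on the command line.
from hydra import compose, initialize

with initialize(config_path="config", version_base=None):
    cfg = compose(config_name="config",
                  overrides=["+args=sinsin_eps1e-2_du025.yaml"])
print(cfg.args.epsilon, cfg.args.du)
```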
""" +from __future__ import annotations -import time import sys -from math import ceil +import time from functools import partial +from math import ceil -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - -from jax import jit import jax.numpy as jnp -from jax import device_put, lax +from jax import device_put, jit, lax + +# Hydra +from omegaconf import DictConfig # if double precision -#from jax.config import config -#config.update("jax_enable_x64", True) +# from jax.config import config +# config.update("jax_enable_x64", True) -sys.path.append('..') -from utils import init_HD, Courant_HD, Courant_vis_HD, save_data_HD, bc_HD, limiting_HD, bc_HD_vis +sys.path.append("..") +from utils import Courant_HD, Courant_vis_HD, bc_HD, init_HD, limiting_HD, save_data_HD def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config", config_name="config") def main(cfg: DictConfig) -> None: # physical constants gamma = cfg.args.gamma # 3D non-relativistic gas - gammi1 = gamma - 1. - gamminv1 = 1. / gammi1 + gammi1 = gamma - 1.0 + gamminv1 = 1.0 / gammi1 gamgamm1inv = gamma * gamminv1 - gammi1 = gamma - 1. - gampl1 = gamma + 1. - gammi3 = gamma - 3. - gampl3 = gamma + 3. + gammi1 = gamma - 1.0 + gampl1 = gamma + 1.0 + gammi3 = gamma - 3.0 + gampl3 = gamma + 3.0 - visc = cfg.args.zeta + cfg.args.eta / 3. + visc = cfg.args.zeta + cfg.args.eta / 3.0 - BCs = ['trans', 'periodic', 'KHI'] # reflect + BCs = ["trans", "periodic", "KHI"] # reflect assert cfg.args.bc in BCs, "bc should be in 'trans, reflect, periodic'" dx = (cfg.args.xR - cfg.args.xL) / cfg.args.nx - dx_inv = 1. / dx + dx_inv = 1.0 / dx # dy = (cfg.args.yR - cfg.args.yL) / cfg.args.ny - dy_inv = 1. / dy + dy_inv = 1.0 / dy # dz = (cfg.args.zR - cfg.args.zL) / cfg.args.nz - dz_inv = 1. / dz + dz_inv = 1.0 / dz # cell edge coordinate xe = jnp.linspace(cfg.args.xL, cfg.args.xR, cfg.args.nx + 1) @@ -216,56 +216,77 @@ def evolve(Q): steps = 0 i_save = 0 tm_ini = time.time() - dt = 0. 
+ dt = 0.0 while t < cfg.args.fin_time: if t >= tsave: - print('save data at t = {0:.3f}'.format(t)) - save_data_HD(Q[:,2:-2,2:-2,2:-2], xc, yc, zc, i_save, cfg.args.save) + print(f"save data at t = {t:.3f}") + save_data_HD(Q[:, 2:-2, 2:-2, 2:-2], xc, yc, zc, i_save, cfg.args.save) tsave += cfg.args.dt_save i_save += 1 - if steps%cfg.args.show_steps==0 and cfg.args.if_show: - print('now {0:d}-steps, t = {1:.3f}, dt = {2:.3f}'.format(steps, t, dt)) + if steps % cfg.args.show_steps == 0 and cfg.args.if_show: + print(f"now {steps:d}-steps, t = {t:.3f}, dt = {dt:.3f}") carry = (Q, t, dt, steps, tsave) - Q, t, dt, steps, tsave = lax.fori_loop(0, cfg.args.show_steps, simulation_fn, carry) + Q, t, dt, steps, tsave = lax.fori_loop( + 0, cfg.args.show_steps, simulation_fn, carry + ) tm_fin = time.time() - print('total elapsed time is {} sec'.format(tm_fin - tm_ini)) - save_data_HD(Q[:,2:-2,2:-2,2:-2], xc, yc, zc, - i_save, cfg.args.save, cfg.args.dt_save, if_final=True) + print(f"total elapsed time is {tm_fin - tm_ini} sec") + save_data_HD( + Q[:, 2:-2, 2:-2, 2:-2], + xc, + yc, + zc, + i_save, + cfg.args.save, + cfg.args.dt_save, + if_final=True, + ) return t @jit def simulation_fn(i, carry): Q, t, dt, steps, tsave = carry - dt = Courant_HD(Q[:,2:-2,2:-2,2:-2], dx, dy, dz, cfg.args.gamma) * cfg.args.CFL + dt = ( + Courant_HD(Q[:, 2:-2, 2:-2, 2:-2], dx, dy, dz, cfg.args.gamma) + * cfg.args.CFL + ) dt = jnp.min(jnp.array([dt, cfg.args.fin_time - t, tsave - t])) def _update(carry): Q, dt = carry # preditor step for calculating t+dt/2-th time step - Q_tmp = bc_HD(Q, mode=cfg.args.bc) # index 2 for _U is equivalent with index 0 for u + Q_tmp = bc_HD( + Q, mode=cfg.args.bc + ) # index 2 for _U is equivalent with index 0 for u Q_tmp = update(Q, Q_tmp, dt * 0.5) # update using flux at t+dt/2-th time step - Q_tmp = bc_HD(Q_tmp, mode=cfg.args.bc) # index 2 for _U is equivalent with index 0 for u + Q_tmp = bc_HD( + Q_tmp, mode=cfg.args.bc + ) # index 2 for _U is equivalent with index 0 for u Q = update(Q, Q_tmp, dt) # update via viscosity - #d_min = jnp.min(Q[0]) - #dt_vis = Courant_vis_HD(dx, dy, dz, eta/d_min, zeta/d_min) * cfg.args.CFL # for realistic viscosity - dt_vis = Courant_vis_HD(dx, dy, dz, cfg.args.eta, cfg.args.zeta) * cfg.args.CFL + # d_min = jnp.min(Q[0]) + # dt_vis = Courant_vis_HD(dx, dy, dz, eta/d_min, zeta/d_min) * cfg.args.CFL # for realistic viscosity + dt_vis = ( + Courant_vis_HD(dx, dy, dz, cfg.args.eta, cfg.args.zeta) * cfg.args.CFL + ) dt_vis = jnp.min(jnp.array([dt_vis, dt])) - t_vis = 0. 
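`simulation_fn` advances one step at a time: the step size is the Courant limit times `CFL`, clipped by `fin_time - t` and `tsave - t` so the run lands exactly on output times; the hyperbolic update is a two-stage midpoint (predictor-corrector) scheme; viscosity is then operator-split and sub-cycled in a `lax.while_loop` until it catches up with `dt`; and `lax.cond` skips everything once `dt` falls below 1e-8. A toy 1D advection sketch of the same predictor-corrector shape in JAX (the upwind flux and all parameters are stand-ins, not the repo's solver):

```python
# Sketch: CFL-limited midpoint (predictor-corrector) stepping under jax.lax.
import jax.numpy as jnp
from jax import jit, lax

nx, dx, cfl, a = 256, 1.0 / 256, 0.4, 1.0  # toy parameters (assumptions)

def rhs(u):
    # first-order upwind derivative for advection speed a > 0
    return -a * (u - jnp.roll(u, 1)) / dx

@jit
def step(carry, _):
    u, t = carry
    dt = cfl * dx / a                # Courant-limited time step
    u_half = u + 0.5 * dt * rhs(u)   # predictor: state at t + dt/2
    u = u + dt * rhs(u_half)         # corrector: flux evaluated at the midpoint
    return (u, t + dt), None

u0 = jnp.sin(2 * jnp.pi * jnp.arange(nx) * dx)
(u, t), _ = lax.scan(step, (u0, 0.0), None, length=100)
```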
+ t_vis = 0.0 carry = Q, dt, dt_vis, t_vis - Q, dt, dt_vis, t_vis = lax.while_loop(lambda x: x[1] - x[3] > 1.e-8, update_vis, carry) + Q, dt, dt_vis, t_vis = lax.while_loop( + lambda x: x[1] - x[3] > 1.0e-8, update_vis, carry + ) return Q, dt carry = Q, dt - Q, dt = lax.cond(dt > 1.e-8, _update, _pass, carry) + Q, dt = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 @@ -275,10 +296,10 @@ def _update(carry): def update(Q, Q_tmp, dt): # calculate conservative variables D0 = Q[0] - Mx = Q[1]*Q[0] - My = Q[2]*Q[0] - Mz = Q[3]*Q[0] - E0 = Q[4] * gamminv1 + 0.5*(Mx*Q[1] + My*Q[2] + Mz*Q[3]) + Mx = Q[1] * Q[0] + My = Q[2] * Q[0] + Mz = Q[3] * Q[0] + E0 = Q[4] * gamminv1 + 0.5 * (Mx * Q[1] + My * Q[2] + Mz * Q[3]) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -292,34 +313,46 @@ def update(Q, Q_tmp, dt): fz = flux_z(Q_tmp) # update conservative variables - dtdx, dtdy, dtdz = dt*dx_inv, dt*dy_inv, dt*dz_inv - D0 -= dtdx * (fx[0, 1:, 2:-2, 2:-2] - fx[0, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[0, 2:-2, 1:, 2:-2] - fy[0, 2:-2, :-1, 2:-2])\ + dtdx, dtdy, dtdz = dt * dx_inv, dt * dy_inv, dt * dz_inv + D0 -= ( + dtdx * (fx[0, 1:, 2:-2, 2:-2] - fx[0, :-1, 2:-2, 2:-2]) + + dtdy * (fy[0, 2:-2, 1:, 2:-2] - fy[0, 2:-2, :-1, 2:-2]) + dtdz * (fz[0, 2:-2, 2:-2, 1:] - fz[0, 2:-2, 2:-2, :-1]) + ) - Mx -= dtdx * (fx[1, 1:, 2:-2, 2:-2] - fx[1, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[1, 2:-2, 1:, 2:-2] - fy[1, 2:-2, :-1, 2:-2])\ + Mx -= ( + dtdx * (fx[1, 1:, 2:-2, 2:-2] - fx[1, :-1, 2:-2, 2:-2]) + + dtdy * (fy[1, 2:-2, 1:, 2:-2] - fy[1, 2:-2, :-1, 2:-2]) + dtdz * (fz[1, 2:-2, 2:-2, 1:] - fz[1, 2:-2, 2:-2, :-1]) + ) - My -= dtdx * (fx[2, 1:, 2:-2, 2:-2] - fx[2, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[2, 2:-2, 1:, 2:-2] - fy[2, 2:-2, :-1, 2:-2])\ + My -= ( + dtdx * (fx[2, 1:, 2:-2, 2:-2] - fx[2, :-1, 2:-2, 2:-2]) + + dtdy * (fy[2, 2:-2, 1:, 2:-2] - fy[2, 2:-2, :-1, 2:-2]) + dtdz * (fz[2, 2:-2, 2:-2, 1:] - fz[2, 2:-2, 2:-2, :-1]) + ) - Mz -= dtdx * (fx[3, 1:, 2:-2, 2:-2] - fx[3, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[3, 2:-2, 1:, 2:-2] - fy[3, 2:-2, :-1, 2:-2])\ + Mz -= ( + dtdx * (fx[3, 1:, 2:-2, 2:-2] - fx[3, :-1, 2:-2, 2:-2]) + + dtdy * (fy[3, 2:-2, 1:, 2:-2] - fy[3, 2:-2, :-1, 2:-2]) + dtdz * (fz[3, 2:-2, 2:-2, 1:] - fz[3, 2:-2, 2:-2, :-1]) + ) - E0 -= dtdx * (fx[4, 1:, 2:-2, 2:-2] - fx[4, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[4, 2:-2, 1:, 2:-2] - fy[4, 2:-2, :-1, 2:-2])\ + E0 -= ( + dtdx * (fx[4, 1:, 2:-2, 2:-2] - fx[4, :-1, 2:-2, 2:-2]) + + dtdy * (fy[4, 2:-2, 1:, 2:-2] - fy[4, 2:-2, :-1, 2:-2]) + dtdz * (fz[4, 2:-2, 2:-2, 1:] - fz[4, 2:-2, 2:-2, :-1]) + ) # reverse primitive variables - Q = Q.at[0, 2:-2, 2:-2, 2:-2].set(D0) # d - Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx/D0) # vx - Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My/D0) # vy - Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz/D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5*(Mx**2 + My**2 + Mz**2)/D0)) # p - Q = Q.at[4].set(jnp.where(Q[4] > 1.e-8, Q[4], cfg.args.p_floor)) + Q = Q.at[0, 2:-2, 2:-2, 2:-2].set(D0) # d + Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx + Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy + Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p + Q = Q.at[4].set(jnp.where(Q[4] > 1.0e-8, Q[4], cfg.args.p_floor)) return Q @@ -339,17 +372,20 @@ def _update_vis_x(carry): # here the viscosity is eta*D0, so that dv/dt = eta*d^2v/dx^2 (not realistic viscosity but fast to calculate) Dm = 0.5 * (D0[2:-1, 2:-2, 2:-2] + D0[1:-2, 2:-2, 2:-2]) - fMx = (eta + visc) * 
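`update` converts primitives (rho, v, p) to conserved variables, applies dimension-by-dimension flux differences, and recovers primitives afterwards. In standard finite-volume notation, the block above computes:

```latex
U_{ijk}^{n+1} = U_{ijk}^{n}
  - \frac{\Delta t}{\Delta x}\left(F^{x}_{i+\frac12} - F^{x}_{i-\frac12}\right)
  - \frac{\Delta t}{\Delta y}\left(F^{y}_{j+\frac12} - F^{y}_{j-\frac12}\right)
  - \frac{\Delta t}{\Delta z}\left(F^{z}_{k+\frac12} - F^{z}_{k-\frac12}\right),
\qquad
U = \left(\rho,\ \rho v_x,\ \rho v_y,\ \rho v_z,\ E\right)^{\mathsf T}
```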
Dm * dx_inv * (\ - Q[1, 2:-1, 2:-2, 2:-2] - Q[1, 1:-2, 2:-2, 2:-2]) - fMy = eta * Dm * dx_inv * (\ - Q[2, 2:-1, 2:-2, 2:-2] - Q[2, 1:-2, 2:-2, 2:-2]) - fMz = eta * Dm * dx_inv * (\ - Q[3, 2:-1, 2:-2, 2:-2] - Q[3, 1:-2, 2:-2, 2:-2]) - fE = 0.5 * (eta + visc) * Dm * dx_inv * (\ - Q[1, 2:-1, 2:-2, 2:-2] ** 2 - Q[1, 1:-2, 2:-2, 2:-2] ** 2)\ - + 0.5 * eta * Dm * dx_inv * (\ - (Q[2, 2:-1, 2:-2, 2:-2] ** 2 - Q[2, 1:-2, 2:-2, 2:-2] ** 2)\ - + (Q[3, 2:-1, 2:-2, 2:-2] ** 2 - Q[3, 1:-2, 2:-2, 2:-2] ** 2)) + fMx = ( + (eta + visc) + * Dm + * dx_inv + * (Q[1, 2:-1, 2:-2, 2:-2] - Q[1, 1:-2, 2:-2, 2:-2]) + ) + fMy = eta * Dm * dx_inv * (Q[2, 2:-1, 2:-2, 2:-2] - Q[2, 1:-2, 2:-2, 2:-2]) + fMz = eta * Dm * dx_inv * (Q[3, 2:-1, 2:-2, 2:-2] - Q[3, 1:-2, 2:-2, 2:-2]) + fE = 0.5 * (eta + visc) * Dm * dx_inv * ( + Q[1, 2:-1, 2:-2, 2:-2] ** 2 - Q[1, 1:-2, 2:-2, 2:-2] ** 2 + ) + 0.5 * eta * Dm * dx_inv * ( + (Q[2, 2:-1, 2:-2, 2:-2] ** 2 - Q[2, 1:-2, 2:-2, 2:-2] ** 2) + + (Q[3, 2:-1, 2:-2, 2:-2] ** 2 - Q[3, 1:-2, 2:-2, 2:-2] ** 2) + ) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -366,7 +402,9 @@ def _update_vis_x(carry): Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5 * (Mx ** 2 + My ** 2 + Mz ** 2) / D0)) # p + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p return Q, dt @@ -384,17 +422,20 @@ def _update_vis_y(carry): # here the viscosity is eta*D0, so that dv/dt = eta*d^2v/dx^2 (not realistic viscosity but fast to calculate) Dm = 0.5 * (D0[2:-2, 2:-1, 2:-2] + D0[2:-2, 1:-2, 2:-2]) - fMx = eta * Dm * dy_inv * (\ - Q[1, 2:-2, 2:-1, 2:-2] - Q[1, 2:-2, 1:-2, 2:-2]) - fMy = (eta + visc) * Dm * dy_inv * (\ - Q[2, 2:-2, 2:-1, 2:-2] - Q[2, 2:-2, 1:-2, 2:-2]) - fMz = eta * Dm * dy_inv * (\ - Q[3, 2:-2, 2:-1, 2:-2] - Q[3, 2:-2, 1:-2, 2:-2]) - fE = 0.5 * (eta + visc) * Dm * dy_inv * (\ - Q[2, 2:-2, 2:-1, 2:-2] ** 2 - Q[2, 2:-2, 1:-2, 2:-2] ** 2)\ - + 0.5 * eta * Dm * dy_inv * ( \ - (Q[3, 2:-2, 2:-1, 2:-2] ** 2 - Q[3, 2:-2, 1:-2, 2:-2] ** 2) \ - + (Q[1, 2:-2, 2:-1, 2:-2] ** 2 - Q[1, 2:-2, 1:-2, 2:-2] ** 2)) + fMx = eta * Dm * dy_inv * (Q[1, 2:-2, 2:-1, 2:-2] - Q[1, 2:-2, 1:-2, 2:-2]) + fMy = ( + (eta + visc) + * Dm + * dy_inv + * (Q[2, 2:-2, 2:-1, 2:-2] - Q[2, 2:-2, 1:-2, 2:-2]) + ) + fMz = eta * Dm * dy_inv * (Q[3, 2:-2, 2:-1, 2:-2] - Q[3, 2:-2, 1:-2, 2:-2]) + fE = 0.5 * (eta + visc) * Dm * dy_inv * ( + Q[2, 2:-2, 2:-1, 2:-2] ** 2 - Q[2, 2:-2, 1:-2, 2:-2] ** 2 + ) + 0.5 * eta * Dm * dy_inv * ( + (Q[3, 2:-2, 2:-1, 2:-2] ** 2 - Q[3, 2:-2, 1:-2, 2:-2] ** 2) + + (Q[1, 2:-2, 2:-1, 2:-2] ** 2 - Q[1, 2:-2, 1:-2, 2:-2] ** 2) + ) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -411,7 +452,9 @@ def _update_vis_y(carry): Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5 * (Mx ** 2 + My ** 2 + Mz ** 2) / D0)) # p + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p return Q, dt @@ -429,17 +472,20 @@ def _update_vis_z(carry): # here the viscosity is eta*D0, so that dv/dt = eta*d^2v/dx^2 (not realistic viscosity but fast to calculate) Dm = 0.5 * (D0[2:-2, 2:-2, 2:-1] + D0[2:-2, 2:-2, 1:-2]) - fMx = eta * Dm * dz_inv * (\ - Q[1, 2:-2, 2:-2, 2:-1] - Q[1, 2:-2, 2:-2, 1:-2]) - fMy = eta * Dm * dz_inv * (\ - Q[2, 2:-2, 2:-2, 2:-1] - Q[2, 
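The viscous sweeps all share one stencil: a face-averaged density `Dm` times a velocity difference, realizing the simplified model `dv/dt = eta d2v/dx2` that the source comment flags as not realistic but fast. The component normal to the sweep uses `eta + visc` with `visc = zeta + eta/3`, i.e. the usual compressible normal coefficient `4*eta/3 + zeta`, while transverse components use `eta` alone. Reduced to 1D, the momentum update looks like this (a sketch, not the repo code):

```python
# Sketch: face-centered viscous flux f_{i+1/2} = eta * rho_face * dv/dx,
# applied as a conservative difference; this gives dv/dt ~ eta * d2v/dx2.
import jax.numpy as jnp

def viscous_update_1d(rho, v, eta, dx, dt):
    rho_face = 0.5 * (rho[1:] + rho[:-1])           # density averaged to faces
    f = eta * rho_face * (v[1:] - v[:-1]) / dx      # face momentum flux
    m = rho * v                                     # momentum
    m = m.at[1:-1].add(dt / dx * (f[1:] - f[:-1]))  # interior conservative update
    return m / rho                                  # back to velocity

v = viscous_update_1d(jnp.ones(64), jnp.sin(jnp.linspace(0.0, 6.28, 64)),
                      eta=1e-2, dx=1.0 / 64, dt=1e-3)
```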
2:-2, 2:-2, 1:-2]) - fMz = (eta + visc) * Dm * dz_inv * (\ - Q[3, 2:-2, 2:-2, 2:-1] - Q[3, 2:-2, 2:-2, 1:-2]) - fE = 0.5 * (eta + visc) * Dm * dz_inv * (\ - Q[3, 2:-2, 2:-2, 2:-1] ** 2 - Q[3, 2:-2, 2:-2, 1:-2] ** 2)\ - + 0.5 * eta * Dm * dz_inv * ( \ - (Q[1, 2:-2, 2:-2, 2:-1] ** 2 - Q[1, 2:-2, 2:-2, 1:-2] ** 2) \ - + (Q[2, 2:-2, 2:-2, 2:-1] ** 2 - Q[2, 2:-2, 2:-2, 1:-2] ** 2)) + fMx = eta * Dm * dz_inv * (Q[1, 2:-2, 2:-2, 2:-1] - Q[1, 2:-2, 2:-2, 1:-2]) + fMy = eta * Dm * dz_inv * (Q[2, 2:-2, 2:-2, 2:-1] - Q[2, 2:-2, 2:-2, 1:-2]) + fMz = ( + (eta + visc) + * Dm + * dz_inv + * (Q[3, 2:-2, 2:-2, 2:-1] - Q[3, 2:-2, 2:-2, 1:-2]) + ) + fE = 0.5 * (eta + visc) * Dm * dz_inv * ( + Q[3, 2:-2, 2:-2, 2:-1] ** 2 - Q[3, 2:-2, 2:-2, 1:-2] ** 2 + ) + 0.5 * eta * Dm * dz_inv * ( + (Q[1, 2:-2, 2:-2, 2:-1] ** 2 - Q[1, 2:-2, 2:-2, 1:-2] ** 2) + + (Q[2, 2:-2, 2:-2, 2:-1] ** 2 - Q[2, 2:-2, 2:-2, 1:-2] ** 2) + ) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -456,12 +502,16 @@ def _update_vis_z(carry): Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5 * (Mx ** 2 + My ** 2 + Mz ** 2) / D0)) # p + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p return Q, dt Q, dt, dt_vis, t_vis = carry - Q = bc_HD(Q, mode=cfg.args.bc) # index 2 for _U is equivalent with index 0 for u + Q = bc_HD( + Q, mode=cfg.args.bc + ) # index 2 for _U is equivalent with index 0 for u dt_ev = jnp.min(jnp.array([dt, dt_vis, dt - t_vis])) carry = Q, dt_ev @@ -477,7 +527,7 @@ def _update_vis_z(carry): @jit def flux_x(Q): QL, QR = limiting_HD(Q, if_second_order=cfg.args.if_second_order) - #f_Riemann = HLL(QL, QR, direc=0) + # f_Riemann = HLL(QL, QR, direc=0) f_Riemann = HLLC(QL, QR, direc=0) return f_Riemann @@ -485,15 +535,17 @@ def flux_x(Q): def flux_y(Q): _Q = jnp.transpose(Q, (0, 2, 3, 1)) # (y, z, x) QL, QR = limiting_HD(_Q, if_second_order=cfg.args.if_second_order) - #f_Riemann = jnp.transpose(HLL(QL, QR, direc=1), (0, 3, 1, 2)) # (x,y,z) = (Z,X,Y) - f_Riemann = jnp.transpose(HLLC(QL, QR, direc=1), (0, 3, 1, 2)) # (x,y,z) = (Z,X,Y) + # f_Riemann = jnp.transpose(HLL(QL, QR, direc=1), (0, 3, 1, 2)) # (x,y,z) = (Z,X,Y) + f_Riemann = jnp.transpose( + HLLC(QL, QR, direc=1), (0, 3, 1, 2) + ) # (x,y,z) = (Z,X,Y) return f_Riemann @jit def flux_z(Q): _Q = jnp.transpose(Q, (0, 3, 1, 2)) # (z, x, y) QL, QR = limiting_HD(_Q, if_second_order=cfg.args.if_second_order) - #f_Riemann = jnp.transpose(HLL(QL, QR, direc=2), (0, 2, 3, 1)) + # f_Riemann = jnp.transpose(HLL(QL, QR, direc=2), (0, 2, 3, 1)) f_Riemann = jnp.transpose(HLLC(QL, QR, direc=2), (0, 2, 3, 1)) return f_Riemann @@ -503,21 +555,31 @@ def HLL(QL, QR, direc): iX, iY, iZ = direc + 1, (direc + 1) % 3 + 1, (direc + 2) % 3 + 1 cfL = jnp.sqrt(gamma * QL[4] / QL[0]) cfR = jnp.sqrt(gamma * QR[4] / QR[0]) - Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum(cfL[2:-1], cfR[1:-2]) # left-going wave - Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum(cfL[2:-1], cfR[1:-2]) # right-going wave - dcfi = 1. 
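`flux_y` and `flux_z` reuse the x-sweep machinery (`limiting_HD` plus the Riemann solver) by transposing the swept axis into the leading spatial slot and transposing the result back, so the reconstruction stays one-dimensional. The pattern in isolation, with a placeholder sweep standing in for the limiter and solver:

```python
# Sketch: dimensional sweeping by transposition, as in flux_y/flux_z above.
import jax.numpy as jnp

def sweep_x(q):                        # q: (ncomp, nx, ny, nz); acts along nx
    return q - jnp.roll(q, 1, axis=1)  # placeholder difference along the sweep axis

def sweep_y(q):
    qt = jnp.transpose(q, (0, 2, 3, 1))              # y into the sweep slot: (c, y, z, x)
    return jnp.transpose(sweep_x(qt), (0, 3, 1, 2))  # back to (c, x, y, z)

def sweep_z(q):
    qt = jnp.transpose(q, (0, 3, 1, 2))              # (c, z, x, y)
    return jnp.transpose(sweep_x(qt), (0, 2, 3, 1))

q = jnp.ones((5, 8, 8, 8))
assert sweep_y(q).shape == q.shape and sweep_z(q).shape == q.shape
```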
/ (Sfr - Sfl + 1.e-8) + Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # left-going wave + Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # right-going wave + dcfi = 1.0 / (Sfr - Sfl + 1.0e-8) UL, UR = jnp.zeros_like(QL), jnp.zeros_like(QR) UL = UL.at[0].set(QL[0]) UL = UL.at[iX].set(QL[0] * QL[iX]) UL = UL.at[iY].set(QL[0] * QL[iY]) UL = UL.at[iZ].set(QL[0] * QL[iZ]) - UL = UL.at[4].set(gamminv1 * QL[4] + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ])) + UL = UL.at[4].set( + gamminv1 * QL[4] + + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ]) + ) UR = UR.at[0].set(QR[0]) UR = UR.at[iX].set(QR[0] * QR[iX]) UR = UR.at[iY].set(QR[0] * QR[iY]) UR = UR.at[iZ].set(QR[0] * QR[iZ]) - UR = UR.at[4].set(gamminv1 * QR[4] + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ])) + UR = UR.at[4].set( + gamminv1 * QR[4] + + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ]) + ) fL, fR = jnp.zeros_like(QL), jnp.zeros_like(QR) fL = fL.at[0].set(UL[iX]) @@ -531,38 +593,56 @@ def HLL(QL, QR, direc): fR = fR.at[iZ].set(UR[iX] * QR[iZ]) fR = fR.at[4].set((UR[4] + QR[4]) * QR[iX]) # upwind advection scheme - fHLL = dcfi * (Sfr * fR[:, 1:-2] - Sfl * fL[:, 2:-1] - + Sfl * Sfr * (UL[:, 2:-1] - UR[:, 1:-2])) + fHLL = dcfi * ( + Sfr * fR[:, 1:-2] + - Sfl * fL[:, 2:-1] + + Sfl * Sfr * (UL[:, 2:-1] - UR[:, 1:-2]) + ) # L: left of cell = right-going, R: right of cell: left-going - f_Riemann = jnp.where(Sfl > 0., fR[:, 1:-2], fHLL) - f_Riemann = jnp.where(Sfr < 0., fL[:, 2:-1], f_Riemann) + f_Riemann = jnp.where(Sfl > 0.0, fR[:, 1:-2], fHLL) + f_Riemann = jnp.where(Sfr < 0.0, fL[:, 2:-1], f_Riemann) return f_Riemann @partial(jit, static_argnums=(2,)) def HLLC(QL, QR, direc): - """ full-Godunov method -- exact shock solution""" + """full-Godunov method -- exact shock solution""" iX, iY, iZ = direc + 1, (direc + 1) % 3 + 1, (direc + 2) % 3 + 1 cfL = jnp.sqrt(gamma * QL[4] / QL[0]) cfR = jnp.sqrt(gamma * QR[4] / QR[0]) - Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum(cfL[2:-1], cfR[1:-2]) # left-going wave - Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum(cfL[2:-1], cfR[1:-2]) # right-going wave + Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # left-going wave + Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # right-going wave UL, UR = jnp.zeros_like(QL), jnp.zeros_like(QR) UL = UL.at[0].set(QL[0]) UL = UL.at[iX].set(QL[0] * QL[iX]) UL = UL.at[iY].set(QL[0] * QL[iY]) UL = UL.at[iZ].set(QL[0] * QL[iZ]) - UL = UL.at[4].set(gamminv1 * QL[4] + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ])) + UL = UL.at[4].set( + gamminv1 * QL[4] + + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ]) + ) UR = UR.at[0].set(QR[0]) UR = UR.at[iX].set(QR[0] * QR[iX]) UR = UR.at[iY].set(QR[0] * QR[iY]) UR = UR.at[iZ].set(QR[0] * QR[iZ]) - UR = UR.at[4].set(gamminv1 * QR[4] + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ])) - - Va = (Sfr - QL[iX, 2:-1]) * UL[iX, 2:-1] - (Sfl - QR[iX, 1:-2]) * UR[iX, 1:-2]- QL[4, 2:-1] + QR[4, 1:-2] + UR = UR.at[4].set( + gamminv1 * QR[4] + + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ]) + ) + + Va = ( + (Sfr - QL[iX, 2:-1]) * UL[iX, 2:-1] + - (Sfl - QR[iX, 1:-2]) * UR[iX, 1:-2] + - QL[4, 2:-1] + + QR[4, 1:-2] + ) Va /= (Sfr - QL[iX, 2:-1]) * QL[0, 2:-1] - (Sfl - QR[iX, 1:-2]) * QR[0, 1:-2] Pa = QR[4, 1:-2] + QR[0, 1:-2] * (Sfl - QR[iX, 1:-2]) 
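`HLL` is the standard two-wave approximate Riemann solver with Davis-type speed estimates, and `dcfi` carries a 1e-8 guard against `Sfr == Sfl`. Beware the naming: `QL`/`QR` label the reconstruction bias per cell, so the state left of an interface lives in `fR[:, 1:-2]` and the state right of it in `fL[:, 2:-1]` (the source comment "L: left of cell = right-going" records this). In interface notation the code computes:

```latex
S_L = \min(u_L, u_R) - \max(c_L, c_R), \qquad
S_R = \max(u_L, u_R) + \max(c_L, c_R),
```

```latex
F^{\mathrm{HLL}} =
\begin{cases}
F_L, & S_L > 0,\\[4pt]
\dfrac{S_R F_L - S_L F_R + S_L S_R\,(U_R - U_L)}{S_R - S_L}, & S_L \le 0 \le S_R,\\[8pt]
F_R, & S_R < 0.
\end{cases}
```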
* (Va - QR[iX, 1:-2]) @@ -587,26 +667,55 @@ def HLLC(QL, QR, direc): far = far.at[iX].set(Dar * Va**2 + Pa) far = far.at[iY].set(Dar * Va * QL[iY, 2:-1]) far = far.at[iZ].set(Dar * Va * QL[iZ, 2:-1]) - far = far.at[4].set( (gamgamm1inv * Pa + 0.5 * Dar * (Va**2 + QL[iY, 2:-1]**2 + QL[iZ, 2:-1]**2)) * Va) + far = far.at[4].set( + ( + gamgamm1inv * Pa + + 0.5 * Dar * (Va**2 + QL[iY, 2:-1] ** 2 + QL[iZ, 2:-1] ** 2) + ) + * Va + ) fal = fal.at[0].set(Dal * Va) fal = fal.at[iX].set(Dal * Va**2 + Pa) fal = fal.at[iY].set(Dal * Va * QR[iY, 1:-2]) fal = fal.at[iZ].set(Dal * Va * QR[iZ, 1:-2]) - fal = fal.at[4].set( (gamgamm1inv * Pa + 0.5 * Dal * (Va**2 + QR[iY, 1:-2]**2 + QR[iZ, 1:-2]**2)) * Va) - - f_Riemann = jnp.where(Sfl > 0., fR[:, 1:-2], fL[:, 2:-1]) # Sf2 > 0 : supersonic - f_Riemann = jnp.where(Sfl*Va < 0., fal, f_Riemann) # SL < 0 and Va > 0 : sub-sonic - f_Riemann = jnp.where(Sfr*Va < 0., far, f_Riemann) # Va < 0 and SR > 0 : sub-sonic - #f_Riemann = jnp.where(Sfr < 0., fL[:, 2:-1], f_Riemann) # SR < 0 : supersonic + fal = fal.at[4].set( + ( + gamgamm1inv * Pa + + 0.5 * Dal * (Va**2 + QR[iY, 1:-2] ** 2 + QR[iZ, 1:-2] ** 2) + ) + * Va + ) + + f_Riemann = jnp.where( + Sfl > 0.0, fR[:, 1:-2], fL[:, 2:-1] + ) # Sf2 > 0 : supersonic + f_Riemann = jnp.where( + Sfl * Va < 0.0, fal, f_Riemann + ) # SL < 0 and Va > 0 : sub-sonic + f_Riemann = jnp.where( + Sfr * Va < 0.0, far, f_Riemann + ) # Va < 0 and SR > 0 : sub-sonic + # f_Riemann = jnp.where(Sfr < 0., fL[:, 2:-1], f_Riemann) # SR < 0 : supersonic return f_Riemann Q = jnp.zeros([5, cfg.args.nx + 4, cfg.args.ny + 4, cfg.args.nz + 4]) - Q = init_HD(Q, xc, yc, zc, mode=cfg.args.init_mode, direc='x', init_key=cfg.args.init_key, - M0=cfg.args.M0, dk=cfg.args.dk, gamma=cfg.args.gamma) + Q = init_HD( + Q, + xc, + yc, + zc, + mode=cfg.args.init_mode, + direc="x", + init_key=cfg.args.init_key, + M0=cfg.args.M0, + dk=cfg.args.dk, + gamma=cfg.args.gamma, + ) Q = device_put(Q) # putting variables in GPU (not necessary??) t = evolve(Q) - print('final time is: {0:.3f}'.format(t)) + print(f"final time is: {t:.3f}") + -if __name__=='__main__': +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_multi_Hydra.py b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_multi_Hydra.py index a734e54..23cd971 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_multi_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/CFD_multi_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,61 +144,74 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
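`Va` and `Pa` in `HLLC` are the contact-wave speed and star-region pressure of the standard HLLC construction (Toro's S-star and p-star), with the textbook left/right interface states again stored in the `QR[:, 1:-2]` / `QL[:, 2:-1]` slots. In the usual notation:

```latex
S_{*} = \frac{p_R - p_L + \rho_L u_L\,(S_L - u_L) - \rho_R u_R\,(S_R - u_R)}
             {\rho_L\,(S_L - u_L) - \rho_R\,(S_R - u_R)},
\qquad
p_{*} = p_L + \rho_L\,(S_L - u_L)\,(S_{*} - u_L)
```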
""" +from __future__ import annotations -import time +import os import random -import sys, os +import sys +import time from functools import partial from math import ceil, exp, log -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax -from jax import jit, vmap import jax.numpy as jnp -from jax import device_put, lax - -os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' -os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '.9' +from jax import device_put, jit, lax -sys.path.append('..') -from utils import Courant_HD, Courant_vis_HD, bc_HD, limiting_HD, bc_HD_vis -from utils import init_multi_HD, init_multi_HD_shock, init_multi_HD_KH -from utils import init_multi_HD_2DTurb, init_multi_HD_2DRand, init_multi_HD_3DTurb, init_multi_HD_3DRand +# Hydra +from omegaconf import DictConfig + +os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" +os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = ".9" + +sys.path.append("..") +from utils import ( + Courant_HD, + Courant_vis_HD, + bc_HD, + init_multi_HD, + init_multi_HD_2DRand, + init_multi_HD_2DTurb, + init_multi_HD_3DRand, + init_multi_HD_3DTurb, + init_multi_HD_KH, + init_multi_HD_shock, + limiting_HD, +) # if double precision -#from jax.config import config -#config.update("jax_enable_x64", True) +# from jax.config import config +# config.update("jax_enable_x64", True) + def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config", config_name="config") def main(cfg: DictConfig) -> None: # physical constants gamma = cfg.args.gamma # 3D non-relativistic gas - gammi1 = gamma - 1. - gamminv1 = 1. / gammi1 + gammi1 = gamma - 1.0 + gamminv1 = 1.0 / gammi1 gamgamm1inv = gamma * gamminv1 - gammi1 = gamma - 1. - gampl1 = gamma + 1. - gammi3 = gamma - 3. - gampl3 = gamma + 3. + gammi1 = gamma - 1.0 + gampl1 = gamma + 1.0 + gammi3 = gamma - 3.0 + gampl3 = gamma + 3.0 - BCs = ['trans', 'periodic', 'KHI'] # reflect + BCs = ["trans", "periodic", "KHI"] # reflect assert cfg.args.bc in BCs, "bc should be in 'trans, reflect, periodic'" dx = (cfg.args.xR - cfg.args.xL) / cfg.args.nx - dx_inv = 1. / dx + dx_inv = 1.0 / dx # dy = (cfg.args.yR - cfg.args.yL) / cfg.args.ny - dy_inv = 1. / dy + dy_inv = 1.0 / dy # dz = (cfg.args.zR - cfg.args.zL) / cfg.args.nz - dz_inv = 1. / dz + dz_inv = 1.0 / dz # cell edge coordinate xe = jnp.linspace(cfg.args.xL, cfg.args.xR, cfg.args.nx + 1) @@ -221,20 +233,24 @@ def main(cfg: DictConfig) -> None: # set viscosity if cfg.args.if_rand_param: - zeta = exp(random.uniform(log(0.001), log(10))) # uniform number between 0.01 to 100 - eta = exp(random.uniform(log(0.001), log(10))) # uniform number between 0.01 to 100 + zeta = exp( + random.uniform(log(0.001), log(10)) + ) # uniform number between 0.01 to 100 + eta = exp( + random.uniform(log(0.001), log(10)) + ) # uniform number between 0.01 to 100 else: zeta = cfg.args.zeta eta = cfg.args.eta - print('zeta: {0:>5f}, eta: {1:>5f}'.format(zeta, eta)) - visc = zeta + eta / 3. + print(f"zeta: {zeta:>5f}, eta: {eta:>5f}") + visc = zeta + eta / 3.0 def evolve(Q): t = ini_time tsave = t steps = 0 i_save = 0 - dt = 0. 
+ dt = 0.0 tm_ini = time.time() @@ -244,11 +260,11 @@ def evolve(Q): VVz = jnp.zeros([it_tot, cfg.args.nx, cfg.args.ny, cfg.args.nz]) PPP = jnp.zeros([it_tot, cfg.args.nx, cfg.args.ny, cfg.args.nz]) # initial time-step - DDD = DDD.at[0].set(Q[0,2:-2,2:-2,2:-2]) - VVx = VVx.at[0].set(Q[1,2:-2,2:-2,2:-2]) - VVy = VVy.at[0].set(Q[2,2:-2,2:-2,2:-2]) - VVz = VVz.at[0].set(Q[3,2:-2,2:-2,2:-2]) - PPP = PPP.at[0].set(Q[4,2:-2,2:-2,2:-2]) + DDD = DDD.at[0].set(Q[0, 2:-2, 2:-2, 2:-2]) + VVx = VVx.at[0].set(Q[1, 2:-2, 2:-2, 2:-2]) + VVy = VVy.at[0].set(Q[2, 2:-2, 2:-2, 2:-2]) + VVz = VVz.at[0].set(Q[3, 2:-2, 2:-2, 2:-2]) + PPP = PPP.at[0].set(Q[4, 2:-2, 2:-2, 2:-2]) cond_fun = lambda x: x[0] < fin_time @@ -256,11 +272,11 @@ def _body_fun(carry): def _save(_carry): Q, tsave, i_save, DDD, VVx, VVy, VVz, PPP = _carry - DDD = DDD.at[i_save].set(Q[0,2:-2,2:-2,2:-2]) - VVx = VVx.at[i_save].set(Q[1,2:-2,2:-2,2:-2]) - VVy = VVy.at[i_save].set(Q[2,2:-2,2:-2,2:-2]) - VVz = VVz.at[i_save].set(Q[3,2:-2,2:-2,2:-2]) - PPP = PPP.at[i_save].set(Q[4,2:-2,2:-2,2:-2]) + DDD = DDD.at[i_save].set(Q[0, 2:-2, 2:-2, 2:-2]) + VVx = VVx.at[i_save].set(Q[1, 2:-2, 2:-2, 2:-2]) + VVy = VVy.at[i_save].set(Q[2, 2:-2, 2:-2, 2:-2]) + VVz = VVz.at[i_save].set(Q[3, 2:-2, 2:-2, 2:-2]) + PPP = PPP.at[i_save].set(Q[4, 2:-2, 2:-2, 2:-2]) tsave += dt_save i_save += 1 @@ -270,7 +286,9 @@ def _save(_carry): # if save data carry = (Q, tsave, i_save, DDD, VVx, VVy, VVz, PPP) - Q, tsave, i_save, DDD, VVx, VVy, VVz, PPP = lax.cond(t >= tsave, _save, _pass, carry) + Q, tsave, i_save, DDD, VVx, VVy, VVz, PPP = lax.cond( + t >= tsave, _save, _pass, carry + ) carry = (Q, t, dt, steps, tsave) Q, t, dt, steps, tsave = lax.fori_loop(0, show_steps, simulation_fn, carry) @@ -278,10 +296,12 @@ def _save(_carry): return (t, tsave, steps, i_save, dt, Q, DDD, VVx, VVy, VVz, PPP) carry = t, tsave, steps, i_save, dt, Q, DDD, VVx, VVy, VVz, PPP - t, tsave, steps, i_save, dt, Q, DDD, VVx, VVy, VVz, PPP = lax.while_loop(cond_fun, _body_fun, carry) + t, tsave, steps, i_save, dt, Q, DDD, VVx, VVy, VVz, PPP = lax.while_loop( + cond_fun, _body_fun, carry + ) tm_fin = time.time() - print('total elapsed time is {} sec'.format(tm_fin - tm_ini)) + print(f"total elapsed time is {tm_fin - tm_ini} sec") DDD = DDD.at[-1].set(Q[0, 2:-2, 2:-2, 2:-2]) VVx = VVx.at[-1].set(Q[1, 2:-2, 2:-2, 2:-2]) VVy = VVy.at[-1].set(Q[2, 2:-2, 2:-2, 2:-2]) @@ -292,33 +312,42 @@ def _save(_carry): @jit def simulation_fn(i, carry): Q, t, dt, steps, tsave = carry - dt = Courant_HD(Q[:,2:-2,2:-2,2:-2], dx, dy, dz, cfg.args.gamma) * cfg.args.CFL + dt = ( + Courant_HD(Q[:, 2:-2, 2:-2, 2:-2], dx, dy, dz, cfg.args.gamma) + * cfg.args.CFL + ) dt = jnp.min(jnp.array([dt, cfg.args.fin_time - t, tsave - t])) def _update(carry): Q, dt = carry # preditor step for calculating t+dt/2-th time step - Q_tmp = bc_HD(Q, mode=cfg.args.bc) # index 2 for _U is equivalent with index 0 for u + Q_tmp = bc_HD( + Q, mode=cfg.args.bc + ) # index 2 for _U is equivalent with index 0 for u Q_tmp = update(Q, Q_tmp, dt * 0.5) # update using flux at t+dt/2-th time step - Q_tmp = bc_HD(Q_tmp, mode=cfg.args.bc) # index 2 for _U is equivalent with index 0 for u + Q_tmp = bc_HD( + Q_tmp, mode=cfg.args.bc + ) # index 2 for _U is equivalent with index 0 for u Q = update(Q, Q_tmp, dt) # update via viscosity - #d_min = jnp.min(Q[0]) - #dt_vis = Courant_vis_HD(dx, dy, dz, eta/d_min, zeta/d_min) * cfg.args.CFL # for realistic viscosity + # d_min = jnp.min(Q[0]) + # dt_vis = Courant_vis_HD(dx, dy, dz, eta/d_min, zeta/d_min) * cfg.args.CFL # 
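`evolve` in this file returns whole trajectories: per-field buffers of shape `(it_tot, nx, ny, nz)` are preallocated and written with functional `.at[i].set` updates, and saving is gated by `lax.cond(t >= tsave, _save, _pass, carry)` inside `lax.while_loop`, which keeps the whole loop jit-compilable. The control pattern on a toy scalar state (a sketch; names and sizes are illustrative):

```python
# Sketch: functional snapshot writes gated by lax.cond inside lax.while_loop.
import jax.numpy as jnp
from jax import lax

n_save, dt, dt_save, t_end = 11, 0.01, 0.1, 1.0

def body(carry):
    u, t, i_save, tsave, buf = carry

    def _save(c):
        u, t, i_save, tsave, buf = c
        buf = buf.at[i_save].set(u)  # functional in-place write
        return u, t, i_save + 1, tsave + dt_save, buf

    u, t, i_save, tsave, buf = lax.cond(
        t >= tsave, _save, lambda c: c, (u, t, i_save, tsave, buf)
    )
    return u - dt * u, t + dt, i_save, tsave, buf  # toy decay step

init = (1.0, 0.0, 0, 0.0, jnp.zeros(n_save))
u, t, i_save, tsave, buf = lax.while_loop(lambda c: c[1] < t_end, body, init)
```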
for realistic viscosity dt_vis = Courant_vis_HD(dx, dy, dz, eta, zeta) * cfg.args.CFL dt_vis = jnp.min(jnp.array([dt_vis, dt])) - t_vis = 0. + t_vis = 0.0 carry = Q, dt, dt_vis, t_vis - Q, dt, dt_vis, t_vis = lax.while_loop(lambda x: x[1] - x[3] > 1.e-8, update_vis, carry) + Q, dt, dt_vis, t_vis = lax.while_loop( + lambda x: x[1] - x[3] > 1.0e-8, update_vis, carry + ) return Q, dt carry = Q, dt - Q, dt = lax.cond(dt > 1.e-8, _update, _pass, carry) + Q, dt = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 @@ -328,10 +357,10 @@ def _update(carry): def update(Q, Q_tmp, dt): # calculate conservative variables D0 = Q[0] - Mx = Q[1]*Q[0] - My = Q[2]*Q[0] - Mz = Q[3]*Q[0] - E0 = Q[4] * gamminv1 + 0.5*(Mx*Q[1] + My*Q[2] + Mz*Q[3]) + Mx = Q[1] * Q[0] + My = Q[2] * Q[0] + Mz = Q[3] * Q[0] + E0 = Q[4] * gamminv1 + 0.5 * (Mx * Q[1] + My * Q[2] + Mz * Q[3]) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -345,34 +374,46 @@ def update(Q, Q_tmp, dt): fz = flux_z(Q_tmp) # update conservative variables - dtdx, dtdy, dtdz = dt*dx_inv, dt*dy_inv, dt*dz_inv - D0 -= dtdx * (fx[0, 1:, 2:-2, 2:-2] - fx[0, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[0, 2:-2, 1:, 2:-2] - fy[0, 2:-2, :-1, 2:-2])\ + dtdx, dtdy, dtdz = dt * dx_inv, dt * dy_inv, dt * dz_inv + D0 -= ( + dtdx * (fx[0, 1:, 2:-2, 2:-2] - fx[0, :-1, 2:-2, 2:-2]) + + dtdy * (fy[0, 2:-2, 1:, 2:-2] - fy[0, 2:-2, :-1, 2:-2]) + dtdz * (fz[0, 2:-2, 2:-2, 1:] - fz[0, 2:-2, 2:-2, :-1]) + ) - Mx -= dtdx * (fx[1, 1:, 2:-2, 2:-2] - fx[1, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[1, 2:-2, 1:, 2:-2] - fy[1, 2:-2, :-1, 2:-2])\ + Mx -= ( + dtdx * (fx[1, 1:, 2:-2, 2:-2] - fx[1, :-1, 2:-2, 2:-2]) + + dtdy * (fy[1, 2:-2, 1:, 2:-2] - fy[1, 2:-2, :-1, 2:-2]) + dtdz * (fz[1, 2:-2, 2:-2, 1:] - fz[1, 2:-2, 2:-2, :-1]) + ) - My -= dtdx * (fx[2, 1:, 2:-2, 2:-2] - fx[2, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[2, 2:-2, 1:, 2:-2] - fy[2, 2:-2, :-1, 2:-2])\ + My -= ( + dtdx * (fx[2, 1:, 2:-2, 2:-2] - fx[2, :-1, 2:-2, 2:-2]) + + dtdy * (fy[2, 2:-2, 1:, 2:-2] - fy[2, 2:-2, :-1, 2:-2]) + dtdz * (fz[2, 2:-2, 2:-2, 1:] - fz[2, 2:-2, 2:-2, :-1]) + ) - Mz -= dtdx * (fx[3, 1:, 2:-2, 2:-2] - fx[3, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[3, 2:-2, 1:, 2:-2] - fy[3, 2:-2, :-1, 2:-2])\ + Mz -= ( + dtdx * (fx[3, 1:, 2:-2, 2:-2] - fx[3, :-1, 2:-2, 2:-2]) + + dtdy * (fy[3, 2:-2, 1:, 2:-2] - fy[3, 2:-2, :-1, 2:-2]) + dtdz * (fz[3, 2:-2, 2:-2, 1:] - fz[3, 2:-2, 2:-2, :-1]) + ) - E0 -= dtdx * (fx[4, 1:, 2:-2, 2:-2] - fx[4, :-1, 2:-2, 2:-2])\ - + dtdy * (fy[4, 2:-2, 1:, 2:-2] - fy[4, 2:-2, :-1, 2:-2])\ + E0 -= ( + dtdx * (fx[4, 1:, 2:-2, 2:-2] - fx[4, :-1, 2:-2, 2:-2]) + + dtdy * (fy[4, 2:-2, 1:, 2:-2] - fy[4, 2:-2, :-1, 2:-2]) + dtdz * (fz[4, 2:-2, 2:-2, 1:] - fz[4, 2:-2, 2:-2, :-1]) + ) # reverse primitive variables - Q = Q.at[0, 2:-2, 2:-2, 2:-2].set(D0) # d - Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx/D0) # vx - Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My/D0) # vy - Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz/D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5*(Mx**2 + My**2 + Mz**2)/D0)) # p - Q = Q.at[4].set(jnp.where(Q[4] > 1.e-8, Q[4], cfg.args.p_floor)) + Q = Q.at[0, 2:-2, 2:-2, 2:-2].set(D0) # d + Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx + Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy + Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p + Q = Q.at[4].set(jnp.where(Q[4] > 1.0e-8, Q[4], cfg.args.p_floor)) return Q @@ -392,17 +433,20 @@ def _update_vis_x(carry): # here the viscosity is eta*D0, so that dv/dt = 
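The tail of `update` inverts the conserved variables back to primitives; the final `jnp.where` on `Q[4]` is a pressure floor that keeps the equation of state usable after strong shocks or rarefactions:

```latex
\mathbf v = \frac{\mathbf M}{\rho}, \qquad
p = (\gamma - 1)\left(E - \frac{\lvert \mathbf M \rvert^{2}}{2\rho}\right), \qquad
p \leftarrow \begin{cases} p, & p > 10^{-8},\\ p_{\text{floor}}, & \text{otherwise.} \end{cases}
```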
eta*d^2v/dx^2 (not realistic viscosity but fast to calculate) Dm = 0.5 * (D0[2:-1, 2:-2, 2:-2] + D0[1:-2, 2:-2, 2:-2]) - fMx = (eta + visc) * Dm * dx_inv * (\ - Q[1, 2:-1, 2:-2, 2:-2] - Q[1, 1:-2, 2:-2, 2:-2]) - fMy = eta * Dm * dx_inv * (\ - Q[2, 2:-1, 2:-2, 2:-2] - Q[2, 1:-2, 2:-2, 2:-2]) - fMz = eta * Dm * dx_inv * (\ - Q[3, 2:-1, 2:-2, 2:-2] - Q[3, 1:-2, 2:-2, 2:-2]) - fE = 0.5 * (eta + visc) * Dm * dx_inv * (\ - Q[1, 2:-1, 2:-2, 2:-2] ** 2 - Q[1, 1:-2, 2:-2, 2:-2] ** 2)\ - + 0.5 * eta * Dm * dx_inv * (\ - (Q[2, 2:-1, 2:-2, 2:-2] ** 2 - Q[2, 1:-2, 2:-2, 2:-2] ** 2)\ - + (Q[3, 2:-1, 2:-2, 2:-2] ** 2 - Q[3, 1:-2, 2:-2, 2:-2] ** 2)) + fMx = ( + (eta + visc) + * Dm + * dx_inv + * (Q[1, 2:-1, 2:-2, 2:-2] - Q[1, 1:-2, 2:-2, 2:-2]) + ) + fMy = eta * Dm * dx_inv * (Q[2, 2:-1, 2:-2, 2:-2] - Q[2, 1:-2, 2:-2, 2:-2]) + fMz = eta * Dm * dx_inv * (Q[3, 2:-1, 2:-2, 2:-2] - Q[3, 1:-2, 2:-2, 2:-2]) + fE = 0.5 * (eta + visc) * Dm * dx_inv * ( + Q[1, 2:-1, 2:-2, 2:-2] ** 2 - Q[1, 1:-2, 2:-2, 2:-2] ** 2 + ) + 0.5 * eta * Dm * dx_inv * ( + (Q[2, 2:-1, 2:-2, 2:-2] ** 2 - Q[2, 1:-2, 2:-2, 2:-2] ** 2) + + (Q[3, 2:-1, 2:-2, 2:-2] ** 2 - Q[3, 1:-2, 2:-2, 2:-2] ** 2) + ) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -419,7 +463,9 @@ def _update_vis_x(carry): Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5 * (Mx ** 2 + My ** 2 + Mz ** 2) / D0)) # p + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p return Q, dt @@ -437,17 +483,20 @@ def _update_vis_y(carry): # here the viscosity is eta*D0, so that dv/dt = eta*d^2v/dx^2 (not realistic viscosity but fast to calculate) Dm = 0.5 * (D0[2:-2, 2:-1, 2:-2] + D0[2:-2, 1:-2, 2:-2]) - fMx = eta * Dm * dy_inv * (\ - Q[1, 2:-2, 2:-1, 2:-2] - Q[1, 2:-2, 1:-2, 2:-2]) - fMy = (eta + visc) * Dm * dy_inv * (\ - Q[2, 2:-2, 2:-1, 2:-2] - Q[2, 2:-2, 1:-2, 2:-2]) - fMz = eta * Dm * dy_inv * (\ - Q[3, 2:-2, 2:-1, 2:-2] - Q[3, 2:-2, 1:-2, 2:-2]) - fE = 0.5 * (eta + visc) * Dm * dy_inv * (\ - Q[2, 2:-2, 2:-1, 2:-2] ** 2 - Q[2, 2:-2, 1:-2, 2:-2] ** 2)\ - + 0.5 * eta * Dm * dy_inv * ( \ - (Q[3, 2:-2, 2:-1, 2:-2] ** 2 - Q[3, 2:-2, 1:-2, 2:-2] ** 2) \ - + (Q[1, 2:-2, 2:-1, 2:-2] ** 2 - Q[1, 2:-2, 1:-2, 2:-2] ** 2)) + fMx = eta * Dm * dy_inv * (Q[1, 2:-2, 2:-1, 2:-2] - Q[1, 2:-2, 1:-2, 2:-2]) + fMy = ( + (eta + visc) + * Dm + * dy_inv + * (Q[2, 2:-2, 2:-1, 2:-2] - Q[2, 2:-2, 1:-2, 2:-2]) + ) + fMz = eta * Dm * dy_inv * (Q[3, 2:-2, 2:-1, 2:-2] - Q[3, 2:-2, 1:-2, 2:-2]) + fE = 0.5 * (eta + visc) * Dm * dy_inv * ( + Q[2, 2:-2, 2:-1, 2:-2] ** 2 - Q[2, 2:-2, 1:-2, 2:-2] ** 2 + ) + 0.5 * eta * Dm * dy_inv * ( + (Q[3, 2:-2, 2:-1, 2:-2] ** 2 - Q[3, 2:-2, 1:-2, 2:-2] ** 2) + + (Q[1, 2:-2, 2:-1, 2:-2] ** 2 - Q[1, 2:-2, 1:-2, 2:-2] ** 2) + ) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -464,7 +513,9 @@ def _update_vis_y(carry): Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5 * (Mx ** 2 + My ** 2 + Mz ** 2) / D0)) # p + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p return Q, dt @@ -482,17 +533,20 @@ def _update_vis_z(carry): # here the viscosity is eta*D0, so that dv/dt = eta*d^2v/dx^2 (not realistic viscosity but fast to calculate) Dm = 0.5 * (D0[2:-2, 2:-2, 2:-1] + D0[2:-2, 2:-2, 1:-2]) - fMx 
= eta * Dm * dz_inv * (\ - Q[1, 2:-2, 2:-2, 2:-1] - Q[1, 2:-2, 2:-2, 1:-2]) - fMy = eta * Dm * dz_inv * (\ - Q[2, 2:-2, 2:-2, 2:-1] - Q[2, 2:-2, 2:-2, 1:-2]) - fMz = (eta + visc) * Dm * dz_inv * (\ - Q[3, 2:-2, 2:-2, 2:-1] - Q[3, 2:-2, 2:-2, 1:-2]) - fE = 0.5 * (eta + visc) * Dm * dz_inv * (\ - Q[3, 2:-2, 2:-2, 2:-1] ** 2 - Q[3, 2:-2, 2:-2, 1:-2] ** 2)\ - + 0.5 * eta * Dm * dz_inv * ( \ - (Q[1, 2:-2, 2:-2, 2:-1] ** 2 - Q[1, 2:-2, 2:-2, 1:-2] ** 2) \ - + (Q[2, 2:-2, 2:-2, 2:-1] ** 2 - Q[2, 2:-2, 2:-2, 1:-2] ** 2)) + fMx = eta * Dm * dz_inv * (Q[1, 2:-2, 2:-2, 2:-1] - Q[1, 2:-2, 2:-2, 1:-2]) + fMy = eta * Dm * dz_inv * (Q[2, 2:-2, 2:-2, 2:-1] - Q[2, 2:-2, 2:-2, 1:-2]) + fMz = ( + (eta + visc) + * Dm + * dz_inv + * (Q[3, 2:-2, 2:-2, 2:-1] - Q[3, 2:-2, 2:-2, 1:-2]) + ) + fE = 0.5 * (eta + visc) * Dm * dz_inv * ( + Q[3, 2:-2, 2:-2, 2:-1] ** 2 - Q[3, 2:-2, 2:-2, 1:-2] ** 2 + ) + 0.5 * eta * Dm * dz_inv * ( + (Q[1, 2:-2, 2:-2, 2:-1] ** 2 - Q[1, 2:-2, 2:-2, 1:-2] ** 2) + + (Q[2, 2:-2, 2:-2, 2:-1] ** 2 - Q[2, 2:-2, 2:-2, 1:-2] ** 2) + ) D0 = D0[2:-2, 2:-2, 2:-2] Mx = Mx[2:-2, 2:-2, 2:-2] @@ -509,12 +563,16 @@ def _update_vis_z(carry): Q = Q.at[1, 2:-2, 2:-2, 2:-2].set(Mx / D0) # vx Q = Q.at[2, 2:-2, 2:-2, 2:-2].set(My / D0) # vy Q = Q.at[3, 2:-2, 2:-2, 2:-2].set(Mz / D0) # vz - Q = Q.at[4, 2:-2, 2:-2, 2:-2].set(gammi1 * (E0 - 0.5 * (Mx ** 2 + My ** 2 + Mz ** 2) / D0)) # p + Q = Q.at[4, 2:-2, 2:-2, 2:-2].set( + gammi1 * (E0 - 0.5 * (Mx**2 + My**2 + Mz**2) / D0) + ) # p return Q, dt Q, dt, dt_vis, t_vis = carry - Q = bc_HD(Q, mode=cfg.args.bc) # index 2 for _U is equivalent with index 0 for u + Q = bc_HD( + Q, mode=cfg.args.bc + ) # index 2 for _U is equivalent with index 0 for u dt_ev = jnp.min(jnp.array([dt, dt_vis, dt - t_vis])) carry = Q, dt_ev @@ -530,7 +588,7 @@ def _update_vis_z(carry): @jit def flux_x(Q): QL, QR = limiting_HD(Q, if_second_order=cfg.args.if_second_order) - #f_Riemann = HLL(QL, QR, direc=0) + # f_Riemann = HLL(QL, QR, direc=0) f_Riemann = HLLC(QL, QR, direc=0) return f_Riemann @@ -538,15 +596,17 @@ def flux_x(Q): def flux_y(Q): _Q = jnp.transpose(Q, (0, 2, 3, 1)) # (y, z, x) QL, QR = limiting_HD(_Q, if_second_order=cfg.args.if_second_order) - #f_Riemann = jnp.transpose(HLL(QL, QR, direc=1), (0, 3, 1, 2)) # (x,y,z) = (Z,X,Y) - f_Riemann = jnp.transpose(HLLC(QL, QR, direc=1), (0, 3, 1, 2)) # (x,y,z) = (Z,X,Y) + # f_Riemann = jnp.transpose(HLL(QL, QR, direc=1), (0, 3, 1, 2)) # (x,y,z) = (Z,X,Y) + f_Riemann = jnp.transpose( + HLLC(QL, QR, direc=1), (0, 3, 1, 2) + ) # (x,y,z) = (Z,X,Y) return f_Riemann @jit def flux_z(Q): _Q = jnp.transpose(Q, (0, 3, 1, 2)) # (z, x, y) QL, QR = limiting_HD(_Q, if_second_order=cfg.args.if_second_order) - #f_Riemann = jnp.transpose(HLL(QL, QR, direc=2), (0, 2, 3, 1)) + # f_Riemann = jnp.transpose(HLL(QL, QR, direc=2), (0, 2, 3, 1)) f_Riemann = jnp.transpose(HLLC(QL, QR, direc=2), (0, 2, 3, 1)) return f_Riemann @@ -556,21 +616,31 @@ def HLL(QL, QR, direc): iX, iY, iZ = direc + 1, (direc + 1) % 3 + 1, (direc + 2) % 3 + 1 cfL = jnp.sqrt(gamma * QL[4] / QL[0]) cfR = jnp.sqrt(gamma * QR[4] / QR[0]) - Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum(cfL[2:-1], cfR[1:-2]) # left-going wave - Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum(cfL[2:-1], cfR[1:-2]) # right-going wave - dcfi = 1. 
/ (Sfr - Sfl + 1.e-8) + Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # left-going wave + Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # right-going wave + dcfi = 1.0 / (Sfr - Sfl + 1.0e-8) UL, UR = jnp.zeros_like(QL), jnp.zeros_like(QR) UL = UL.at[0].set(QL[0]) UL = UL.at[iX].set(QL[0] * QL[iX]) UL = UL.at[iY].set(QL[0] * QL[iY]) UL = UL.at[iZ].set(QL[0] * QL[iZ]) - UL = UL.at[4].set(gamminv1 * QL[4] + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ])) + UL = UL.at[4].set( + gamminv1 * QL[4] + + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ]) + ) UR = UR.at[0].set(QR[0]) UR = UR.at[iX].set(QR[0] * QR[iX]) UR = UR.at[iY].set(QR[0] * QR[iY]) UR = UR.at[iZ].set(QR[0] * QR[iZ]) - UR = UR.at[4].set(gamminv1 * QR[4] + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ])) + UR = UR.at[4].set( + gamminv1 * QR[4] + + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ]) + ) fL, fR = jnp.zeros_like(QL), jnp.zeros_like(QR) fL = fL.at[0].set(UL[iX]) @@ -584,38 +654,56 @@ def HLL(QL, QR, direc): fR = fR.at[iZ].set(UR[iX] * QR[iZ]) fR = fR.at[4].set((UR[4] + QR[4]) * QR[iX]) # upwind advection scheme - fHLL = dcfi * (Sfr * fR[:, 1:-2] - Sfl * fL[:, 2:-1] - + Sfl * Sfr * (UL[:, 2:-1] - UR[:, 1:-2])) + fHLL = dcfi * ( + Sfr * fR[:, 1:-2] + - Sfl * fL[:, 2:-1] + + Sfl * Sfr * (UL[:, 2:-1] - UR[:, 1:-2]) + ) # L: left of cell = right-going, R: right of cell: left-going - f_Riemann = jnp.where(Sfl > 0., fR[:, 1:-2], fHLL) - f_Riemann = jnp.where(Sfr < 0., fL[:, 2:-1], f_Riemann) + f_Riemann = jnp.where(Sfl > 0.0, fR[:, 1:-2], fHLL) + f_Riemann = jnp.where(Sfr < 0.0, fL[:, 2:-1], f_Riemann) return f_Riemann @partial(jit, static_argnums=(2,)) def HLLC(QL, QR, direc): - """ full-Godunov method -- exact shock solution""" + """full-Godunov method -- exact shock solution""" iX, iY, iZ = direc + 1, (direc + 1) % 3 + 1, (direc + 2) % 3 + 1 cfL = jnp.sqrt(gamma * QL[4] / QL[0]) cfR = jnp.sqrt(gamma * QR[4] / QR[0]) - Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum(cfL[2:-1], cfR[1:-2]) # left-going wave - Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum(cfL[2:-1], cfR[1:-2]) # right-going wave + Sfl = jnp.minimum(QL[iX, 2:-1], QR[iX, 1:-2]) - jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # left-going wave + Sfr = jnp.maximum(QL[iX, 2:-1], QR[iX, 1:-2]) + jnp.maximum( + cfL[2:-1], cfR[1:-2] + ) # right-going wave UL, UR = jnp.zeros_like(QL), jnp.zeros_like(QR) UL = UL.at[0].set(QL[0]) UL = UL.at[iX].set(QL[0] * QL[iX]) UL = UL.at[iY].set(QL[0] * QL[iY]) UL = UL.at[iZ].set(QL[0] * QL[iZ]) - UL = UL.at[4].set(gamminv1 * QL[4] + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ])) + UL = UL.at[4].set( + gamminv1 * QL[4] + + 0.5 * (UL[iX] * QL[iX] + UL[iY] * QL[iY] + UL[iZ] * QL[iZ]) + ) UR = UR.at[0].set(QR[0]) UR = UR.at[iX].set(QR[0] * QR[iX]) UR = UR.at[iY].set(QR[0] * QR[iY]) UR = UR.at[iZ].set(QR[0] * QR[iZ]) - UR = UR.at[4].set(gamminv1 * QR[4] + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ])) - - Va = (Sfr - QL[iX, 2:-1]) * UL[iX, 2:-1] - (Sfl - QR[iX, 1:-2]) * UR[iX, 1:-2]- QL[4, 2:-1] + QR[4, 1:-2] + UR = UR.at[4].set( + gamminv1 * QR[4] + + 0.5 * (UR[iX] * QR[iX] + UR[iY] * QR[iY] + UR[iZ] * QR[iZ]) + ) + + Va = ( + (Sfr - QL[iX, 2:-1]) * UL[iX, 2:-1] + - (Sfl - QR[iX, 1:-2]) * UR[iX, 1:-2] + - QL[4, 2:-1] + + QR[4, 1:-2] + ) Va /= (Sfr - QL[iX, 2:-1]) * QL[0, 2:-1] - (Sfl - QR[iX, 1:-2]) * QR[0, 1:-2] Pa = QR[4, 1:-2] + QR[0, 1:-2] * (Sfl - QR[iX, 1:-2]) 
* (Va - QR[iX, 1:-2]) @@ -640,82 +728,202 @@ def HLLC(QL, QR, direc): far = far.at[iX].set(Dar * Va**2 + Pa) far = far.at[iY].set(Dar * Va * QL[iY, 2:-1]) far = far.at[iZ].set(Dar * Va * QL[iZ, 2:-1]) - far = far.at[4].set( (gamgamm1inv * Pa + 0.5 * Dar * (Va**2 + QL[iY, 2:-1]**2 + QL[iZ, 2:-1]**2)) * Va) + far = far.at[4].set( + ( + gamgamm1inv * Pa + + 0.5 * Dar * (Va**2 + QL[iY, 2:-1] ** 2 + QL[iZ, 2:-1] ** 2) + ) + * Va + ) fal = fal.at[0].set(Dal * Va) fal = fal.at[iX].set(Dal * Va**2 + Pa) fal = fal.at[iY].set(Dal * Va * QR[iY, 1:-2]) fal = fal.at[iZ].set(Dal * Va * QR[iZ, 1:-2]) - fal = fal.at[4].set( (gamgamm1inv * Pa + 0.5 * Dal * (Va**2 + QR[iY, 1:-2]**2 + QR[iZ, 1:-2]**2)) * Va) - - f_Riemann = jnp.where(Sfl > 0., fR[:, 1:-2], fL[:, 2:-1]) # Sf2 > 0 : supersonic - f_Riemann = jnp.where(Sfl*Va < 0., fal, f_Riemann) # SL < 0 and Va > 0 : sub-sonic - f_Riemann = jnp.where(Sfr*Va < 0., far, f_Riemann) # Va < 0 and SR > 0 : sub-sonic - #f_Riemann = jnp.where(Sfr < 0., fL[:, 2:-1], f_Riemann) # SR < 0 : supersonic + fal = fal.at[4].set( + ( + gamgamm1inv * Pa + + 0.5 * Dal * (Va**2 + QR[iY, 1:-2] ** 2 + QR[iZ, 1:-2] ** 2) + ) + * Va + ) + + f_Riemann = jnp.where( + Sfl > 0.0, fR[:, 1:-2], fL[:, 2:-1] + ) # Sf2 > 0 : supersonic + f_Riemann = jnp.where( + Sfl * Va < 0.0, fal, f_Riemann + ) # SL < 0 and Va > 0 : sub-sonic + f_Riemann = jnp.where( + Sfr * Va < 0.0, far, f_Riemann + ) # Va < 0 and SR > 0 : sub-sonic + # f_Riemann = jnp.where(Sfr < 0., fL[:, 2:-1], f_Riemann) # SR < 0 : supersonic return f_Riemann - Q = jnp.zeros([cfg.args.numbers, 5, cfg.args.nx + 4, cfg.args.ny + 4, cfg.args.nz + 4]) - if cfg.args.init_mode_Multi == '1D_rand': - Q = Q.at[:, 0, 2:-2, 2:-2, 2:-2].set(init_multi_HD(xc, yc, zc, numbers=cfg.args.numbers, - k_tot=3, init_key=cfg.args.init_key, - num_choise_k=2, - umin=1.e0, umax=1.e1, if_renorm=True)) - Q = Q.at[:, 4, 2:-2, 2:-2, 2:-2].set(init_multi_HD(xc, yc, zc, numbers=cfg.args.numbers, - k_tot=3, init_key=cfg.args.init_key+1, - num_choise_k=2, - umin=1.e1, umax=1.e2, if_renorm=True)) - Q = Q.at[:, 1, 2:-2, 2:-2, 2:-2].set(init_multi_HD(xc, yc, zc, numbers=cfg.args.numbers, - k_tot=3, init_key=cfg.args.init_key+2, - num_choise_k=2, - if_renorm=False)) - elif cfg.args.init_mode_Multi == '1D_shocks': - Q = Q.at[:,0,2:-2,2:-2,2:-2].set(init_multi_HD_shock( - xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key, - umin=1.e0, umax=1.e1)) - Q = Q.at[:,4,2:-2,2:-2,2:-2].set(init_multi_HD_shock( - xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key+1, - umin=1.e1, umax=1.e2)) - Q = Q.at[:,1,2:-2,2:-2,2:-2].set(init_multi_HD_shock( - xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key+2, - umin=-0.5e0, umax=0.5e0)) - elif cfg.args.init_mode_Multi == 'KHs': - assert 2. * yc[0] - (yc[1] - yc[0]) == 0., 'yL is assumed 0!' - print('now we are coming into KHs...') - Q = init_multi_HD_KH(Q, xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key, - M0=cfg.args.M0, dkMx=cfg.args.dkMx, gamma = cfg.args.gamma) - elif cfg.args.init_mode_Multi == '2D_Turbs': - print('now we are coming into 2DTurbs......') - Q = init_multi_HD_2DTurb(Q, xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key, - M0=cfg.args.M0, k_tot=cfg.args.k_tot, gamma=cfg.args.gamma) - elif cfg.args.init_mode_Multi == '2D_rand': - assert xe[0] == 0. and ye[0] == 0. and xe[-1] == 1. and ye[-1] == 1., 'xc, yc should be between 0 and 1!' 
- print('now we are coming into 2Drand......') - Q = init_multi_HD_2DRand(Q, xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key, - M0=cfg.args.M0, k_tot=cfg.args.k_tot, gamma=cfg.args.gamma) - elif cfg.args.init_mode_Multi == '3D_Turbs': - print('now we are coming into 3DTurbs......') - Q = init_multi_HD_3DTurb(Q, xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key, - M0=cfg.args.M0, k_tot=cfg.args.k_tot, gamma=cfg.args.gamma) - elif cfg.args.init_mode_Multi == '3D_rand': - print('now we are coming into 3Drand......') - Q = init_multi_HD_3DRand(Q, xc, yc, zc, numbers=cfg.args.numbers, - init_key=cfg.args.init_key, - M0=cfg.args.M0, k_tot=cfg.args.k_tot, gamma=cfg.args.gamma) - print('initial conditions were prepared!!') + Q = jnp.zeros( + [cfg.args.numbers, 5, cfg.args.nx + 4, cfg.args.ny + 4, cfg.args.nz + 4] + ) + if cfg.args.init_mode_Multi == "1D_rand": + Q = Q.at[:, 0, 2:-2, 2:-2, 2:-2].set( + init_multi_HD( + xc, + yc, + zc, + numbers=cfg.args.numbers, + k_tot=3, + init_key=cfg.args.init_key, + num_choise_k=2, + umin=1.0e0, + umax=1.0e1, + if_renorm=True, + ) + ) + Q = Q.at[:, 4, 2:-2, 2:-2, 2:-2].set( + init_multi_HD( + xc, + yc, + zc, + numbers=cfg.args.numbers, + k_tot=3, + init_key=cfg.args.init_key + 1, + num_choise_k=2, + umin=1.0e1, + umax=1.0e2, + if_renorm=True, + ) + ) + Q = Q.at[:, 1, 2:-2, 2:-2, 2:-2].set( + init_multi_HD( + xc, + yc, + zc, + numbers=cfg.args.numbers, + k_tot=3, + init_key=cfg.args.init_key + 2, + num_choise_k=2, + if_renorm=False, + ) + ) + elif cfg.args.init_mode_Multi == "1D_shocks": + Q = Q.at[:, 0, 2:-2, 2:-2, 2:-2].set( + init_multi_HD_shock( + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key, + umin=1.0e0, + umax=1.0e1, + ) + ) + Q = Q.at[:, 4, 2:-2, 2:-2, 2:-2].set( + init_multi_HD_shock( + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key + 1, + umin=1.0e1, + umax=1.0e2, + ) + ) + Q = Q.at[:, 1, 2:-2, 2:-2, 2:-2].set( + init_multi_HD_shock( + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key + 2, + umin=-0.5e0, + umax=0.5e0, + ) + ) + elif cfg.args.init_mode_Multi == "KHs": + assert 2.0 * yc[0] - (yc[1] - yc[0]) == 0.0, "yL is assumed 0!" + print("now we are coming into KHs...") + Q = init_multi_HD_KH( + Q, + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key, + M0=cfg.args.M0, + dkMx=cfg.args.dkMx, + gamma=cfg.args.gamma, + ) + elif cfg.args.init_mode_Multi == "2D_Turbs": + print("now we are coming into 2DTurbs......") + Q = init_multi_HD_2DTurb( + Q, + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key, + M0=cfg.args.M0, + k_tot=cfg.args.k_tot, + gamma=cfg.args.gamma, + ) + elif cfg.args.init_mode_Multi == "2D_rand": + assert ( + xe[0] == 0.0 and ye[0] == 0.0 and xe[-1] == 1.0 and ye[-1] == 1.0 + ), "xc, yc should be between 0 and 1!" 
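`init_mode_Multi` dispatches among 1D random fields, shock ensembles, Kelvin-Helmholtz, and 2D/3D turbulent or random initializations; for the 1D case, density is drawn in [1, 10] and pressure in [10, 100] while velocity is left unnormalized. The real initializers live in `../utils.py`; the following is a hypothetical stand-in (function name and body are ours) showing only the shape and range contract implied by the call sites:

```python
# Hypothetical stand-in for init_multi_HD; the real implementation is in utils.
import jax.numpy as jnp
from jax import random

def init_multi_hd_sketch(xc, numbers, k_tot, init_key, umin, umax):
    key = random.PRNGKey(init_key)
    k_amp, k_phs = random.split(key)
    amp = random.uniform(k_amp, (numbers, k_tot, 1))           # mode amplitudes
    phs = 2 * jnp.pi * random.uniform(k_phs, (numbers, k_tot, 1))
    k = jnp.arange(1, k_tot + 1)[None, :, None]                # wavenumbers 1..k_tot
    u = (amp * jnp.sin(k * xc[None, None, :] + phs)).sum(axis=1)
    u = (u - u.min()) / (u.max() - u.min())                    # renormalize to [0, 1]
    return umin + (umax - umin) * u                            # then to [umin, umax]

u0 = init_multi_hd_sketch(jnp.linspace(0, 2 * jnp.pi, 128), numbers=4,
                          k_tot=3, init_key=2022, umin=1.0, umax=10.0)
```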
+ print("now we are coming into 2Drand......") + Q = init_multi_HD_2DRand( + Q, + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key, + M0=cfg.args.M0, + k_tot=cfg.args.k_tot, + gamma=cfg.args.gamma, + ) + elif cfg.args.init_mode_Multi == "3D_Turbs": + print("now we are coming into 3DTurbs......") + Q = init_multi_HD_3DTurb( + Q, + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key, + M0=cfg.args.M0, + k_tot=cfg.args.k_tot, + gamma=cfg.args.gamma, + ) + elif cfg.args.init_mode_Multi == "3D_rand": + print("now we are coming into 3Drand......") + Q = init_multi_HD_3DRand( + Q, + xc, + yc, + zc, + numbers=cfg.args.numbers, + init_key=cfg.args.init_key, + M0=cfg.args.M0, + k_tot=cfg.args.k_tot, + gamma=cfg.args.gamma, + ) + print("initial conditions were prepared!!") Q = device_put(Q) # putting variables in GPU (not necessary??) local_device_count = jax.local_device_count() - pm_evolve = jax.pmap(jax.vmap(evolve, axis_name='j'), axis_name='i') - t, DDD, VVx, VVy, VVz, PPP = pm_evolve(Q.reshape([local_device_count, - cfg.args.numbers//local_device_count, - 5, cfg.args.nx+4, cfg.args.ny+4, cfg.args.nz+4])) + pm_evolve = jax.pmap(jax.vmap(evolve, axis_name="j"), axis_name="i") + t, DDD, VVx, VVy, VVz, PPP = pm_evolve( + Q.reshape( + [ + local_device_count, + cfg.args.numbers // local_device_count, + 5, + cfg.args.nx + 4, + cfg.args.ny + 4, + cfg.args.nz + 4, + ] + ) + ) itot = DDD.shape[2] DDD = DDD.reshape(cfg.args.numbers, itot, cfg.args.nx, cfg.args.ny, cfg.args.nz) @@ -723,16 +931,87 @@ def HLLC(QL, QR, direc): VVy = VVy.reshape(cfg.args.numbers, itot, cfg.args.nx, cfg.args.ny, cfg.args.nz) VVz = VVz.reshape(cfg.args.numbers, itot, cfg.args.nx, cfg.args.ny, cfg.args.nz) PPP = PPP.reshape(cfg.args.numbers, itot, cfg.args.nx, cfg.args.ny, cfg.args.nz) - print('now data saving...') - jnp.save(cfg.args.save+'HD_Sols_'+cfg.args.init_mode_Multi+'_Eta'+str(eta)[:5]+'_Zeta'+str(zeta)[:5]+'_M'+str(cfg.args.M0)+'_key'+str(cfg.args.init_key)+'_D', DDD) - jnp.save(cfg.args.save+'HD_Sols_'+cfg.args.init_mode_Multi+'_Eta'+str(eta)[:5]+'_Zeta'+str(zeta)[:5]+'_M'+str(cfg.args.M0)+'_key'+str(cfg.args.init_key)+'_Vx', VVx) - jnp.save(cfg.args.save+'HD_Sols_'+cfg.args.init_mode_Multi+'_Eta'+str(eta)[:5]+'_Zeta'+str(zeta)[:5]+'_M'+str(cfg.args.M0)+'_key'+str(cfg.args.init_key)+'_Vy', VVy) - jnp.save(cfg.args.save+'HD_Sols_'+cfg.args.init_mode_Multi+'_Eta'+str(eta)[:5]+'_Zeta'+str(zeta)[:5]+'_M'+str(cfg.args.M0)+'_key'+str(cfg.args.init_key)+'_Vz', VVz) - jnp.save(cfg.args.save+'HD_Sols_'+cfg.args.init_mode_Multi+'_Eta'+str(eta)[:5]+'_Zeta'+str(zeta)[:5]+'_M'+str(cfg.args.M0)+'_key'+str(cfg.args.init_key)+'_P', PPP) - jnp.save(cfg.args.save + '/x_coordinate', xc) - jnp.save(cfg.args.save + '/y_coordinate', yc) - jnp.save(cfg.args.save + '/z_coordinate', zc) - jnp.save(cfg.args.save + '/t_coordinate', tc) - -if __name__=='__main__': + print("now data saving...") + jnp.save( + cfg.args.save + + "HD_Sols_" + + cfg.args.init_mode_Multi + + "_Eta" + + str(eta)[:5] + + "_Zeta" + + str(zeta)[:5] + + "_M" + + str(cfg.args.M0) + + "_key" + + str(cfg.args.init_key) + + "_D", + DDD, + ) + jnp.save( + cfg.args.save + + "HD_Sols_" + + cfg.args.init_mode_Multi + + "_Eta" + + str(eta)[:5] + + "_Zeta" + + str(zeta)[:5] + + "_M" + + str(cfg.args.M0) + + "_key" + + str(cfg.args.init_key) + + "_Vx", + VVx, + ) + jnp.save( + cfg.args.save + + "HD_Sols_" + + cfg.args.init_mode_Multi + + "_Eta" + + str(eta)[:5] + + "_Zeta" + + str(zeta)[:5] + + "_M" + + str(cfg.args.M0) + + "_key" + + 
str(cfg.args.init_key) + + "_Vy", + VVy, + ) + jnp.save( + cfg.args.save + + "HD_Sols_" + + cfg.args.init_mode_Multi + + "_Eta" + + str(eta)[:5] + + "_Zeta" + + str(zeta)[:5] + + "_M" + + str(cfg.args.M0) + + "_key" + + str(cfg.args.init_key) + + "_Vz", + VVz, + ) + jnp.save( + cfg.args.save + + "HD_Sols_" + + cfg.args.init_mode_Multi + + "_Eta" + + str(eta)[:5] + + "_Zeta" + + str(zeta)[:5] + + "_M" + + str(cfg.args.M0) + + "_key" + + str(cfg.args.init_key) + + "_P", + PPP, + ) + jnp.save(cfg.args.save + "/x_coordinate", xc) + jnp.save(cfg.args.save + "/y_coordinate", yc) + jnp.save(cfg.args.save + "/z_coordinate", zc) + jnp.save(cfg.args.save + "/t_coordinate", tc) + + +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi.yaml index 1e5092f..b291196 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-2 zeta: 1.e-2 @@ -21,7 +21,7 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 1000 -init_mode_Multi: '1D_rand' +init_mode_Multi: "1D_rand" init_key: 2022 if_rand_param: False -M0 : 0. \ No newline at end of file +M0: 0. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_shock.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_shock.yaml index 958b043..8140b8f 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_shock.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_shock.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.005 ini_time: 0. fin_time: 0.4 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,7 +21,7 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 1000 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2020 if_rand_param: False -M0 : 0 \ No newline at end of file +M0: 0 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_trans.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_trans.yaml index b16c66d..6c172a3 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_trans.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_Multi_trans.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. 
-bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,7 +21,7 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 1000 -init_mode_Multi: '1D_rand' +init_mode_Multi: "1D_rand" init_key: 2022 if_rand_param: False -M0 : 0 \ No newline at end of file +M0: 0 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube.yaml index 993ba0c..3c8a1f5 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 0.4 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube1' +init_mode: "shocktube1" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube2.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube2.yaml index e3c8126..9bd4d9e 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube2.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube2.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 0.15 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube2' +init_mode: "shocktube2" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube3.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube3.yaml index 9d634cb..7f07b3e 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube3.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube3.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.001 ini_time: 0. fin_time: 0.012 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube3' +init_mode: "shocktube3" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube4.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube4.yaml index f474760..469a755 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube4.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube4.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.001 ini_time: 0. fin_time: 0.035 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. 
if_show: 1 show_steps: 100 -init_mode: 'shocktube4' +init_mode: "shocktube4" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube5.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube5.yaml index 9ae2430..23569c7 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube5.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube5.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.001 ini_time: 0. fin_time: 0.012 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube5' +init_mode: "shocktube5" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube6.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube6.yaml index 692bd6f..c5e3579 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube6.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube6.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube6' +init_mode: "shocktube6" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube7.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube7.yaml index fc59bbf..682e7fb 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube7.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/1D_ShockTube7.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 2. @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,10 +19,10 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube7' +init_mode: "shocktube7" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' +init_mode_Multi: "1D_shocks" init_key: 2022 M0: 1. dk: 1. diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk1.yaml index 19e03c0..07b2df6 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/KH/KH_M01_dk1_Re1e3/' +save: "../save/CFD/KH/KH_M01_dk1_Re1e3/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' +init_mode: "KHI" M0: 0.1 dk: 1. dkMx: 1. 
numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk10.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk10.yaml index a4718c1..6520922 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk10.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk10.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' -M0 : 0.1 +init_mode: "KHI" +M0: 0.1 dk: 10. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk2.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk2.yaml index 91b9315..f5fcd7c 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk2.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk2.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/KH/KH_M01_dk2_Re1e3/' +save: "../save/CFD/KH/KH_M01_dk2_Re1e3/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' +init_mode: "KHI" M0: 0.1 dk: 2. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk5.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk5.yaml index 4f9164b..0893cfc 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk5.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M01_dk5.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' +init_mode: "KHI" M0: 0.1 dk: 5. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M02_dk1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M02_dk1.yaml index 1c90c61..a81f895 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M02_dk1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M02_dk1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' +init_mode: "KHI" M0: 0.2 dk: 1. dkMx: 1. 
numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M04_dk1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M04_dk1.yaml index dfe76cd..4c46603 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M04_dk1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M04_dk1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' +init_mode: "KHI" M0: 0.4 dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M1_dk1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M1_dk1.yaml index 1ca5586..9d36c74 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M1_dk1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M1_dk1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/KH/KH_M1_dk1_Re1e3/' +save: "../save/CFD/KH/KH_M1_dk1_Re1e3/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' +init_mode: "KHI" M0: 1. dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M2_dk1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M2_dk1.yaml index 50d9364..2c176a1 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M2_dk1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_KH_M2_dk1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.1 ini_time: 0. fin_time: 5. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-3 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: 'KHI' -M0 : 2. -dk : 1 +init_mode: "KHI" +M0: 2. +dk: 1 dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_KH.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_KH.yaml index 85ea371..4d80a12 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_KH.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_KH.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.1 ini_time: 0. fin_time: 2. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'KHI' +bc: "KHI" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,7 +21,7 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 4 -init_mode_Multi: 'KHs' -M0 : 0.1 +init_mode_Multi: "KHs" +M0: 0.1 dkMx: 2. 
-init_key: 2022 \ No newline at end of file +init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand.yaml index 3faa563..a0a24e4 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.05 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,8 +21,8 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 100 -init_mode_Multi: '2D_rand' -M0 : 0.1 +init_mode_Multi: "2D_rand" +M0: 0.1 k_tot: 4 init_key: 2020 if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand_HR.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand_HR.yaml index 0b1ecd0..42055c1 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand_HR.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Rand_HR.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.05 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,8 +21,8 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 20 -init_mode_Multi: '2D_rand' -M0 : 0.5 +init_mode_Multi: "2D_rand" +M0: 0.5 k_tot: 4 init_key: 2020 if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Turb.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Turb.yaml index c73faee..acaf6a8 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Turb.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_Multi_Turb.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.05 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,8 +21,8 @@ if_show: 1 show_steps: 100 p_floor: 1.e-4 numbers: 20 -init_mode_Multi: '2DTurbs' -M0 : 0.1 +init_mode_Multi: "2DTurbs" +M0: 0.1 k_tot: 4 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_ShockTube.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_ShockTube.yaml index 783f4ef..257764c 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_ShockTube.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_ShockTube.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 4000 p_floor: 1.e-4 -init_mode: '2D-shock' +init_mode: "2D-shock" M0: 1. dk: 1. dkMx: 1. 
numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_TOV.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_TOV.yaml index 2774a16..4fc92bc 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_TOV.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/2D_TOV.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'OTVortex' +init_mode: "OTVortex" M0: 1. dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_BlastWave.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_BlastWave.yaml index b0bfaa9..7d753a6 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_BlastWave.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_BlastWave.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.025 ini_time: 0. fin_time: 0.5 @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'BlastWave' +init_mode: "BlastWave" M0: 1. dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_Rand.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_Rand.yaml index e7a4edf..f36ce4b 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_Rand.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_Rand.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.05 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,8 +21,8 @@ if_show: 1 show_steps: 400 p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '3D_rand' -M0 : 1. +init_mode_Multi: "3D_rand" +M0: 1. k_tot: 4 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_TurbM1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_TurbM1.yaml index a284aac..75bb721 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_TurbM1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_Multi_TurbM1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.05 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -21,8 +21,8 @@ if_show: 1 show_steps: 400 p_floor: 1.e-4 numbers: 20 -init_mode_Multi: '3D_Turbs' -M0 : 1. +init_mode_Multi: "3D_Turbs" +M0: 1. 
k_tot: 4 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM01.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM01.yaml index cca4a94..3978a73 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM01.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM01.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.025 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'turbulence' +init_mode: "turbulence" M0: 1.e-1 dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM05.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM05.yaml index 65ab227..59d5cc5 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM05.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM05.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.025 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'turbulence' +init_mode: "turbulence" M0: 5.e-1 dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM1.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM1.yaml index cca4a94..3978a73 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM1.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM1.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.025 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'turbulence' +init_mode: "turbulence" M0: 1.e-1 dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM2.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM2.yaml index 50d8f32..a47492e 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM2.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM2.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.025 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'turbulence' +init_mode: "turbulence" M0: 2. dk: 1. dkMx: 1. 
numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM4.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM4.yaml index b7058e6..fbe6aa0 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM4.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/3D_TurbM4.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.025 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: 0. yR: 1. zL: 0. zR: 1. -bc: 'periodic' +bc: "periodic" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -20,10 +20,10 @@ if_second_order: 1. if_show: 1 show_steps: 400 p_floor: 1.e-4 -init_mode: 'turbulence' +init_mode: "turbulence" M0: 4. dk: 1. dkMx: 1. numbers: 4 -init_mode_Multi: 'KHs' +init_mode_Multi: "KHs" init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/default.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/default.yaml index 437e632..185a013 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/default.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/args/default.yaml @@ -1,4 +1,4 @@ -save: '../save/CFD/' +save: "../save/CFD/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -11,7 +11,7 @@ yL: -1. yR: 1. zL: -1. zR: 1. -bc: 'trans' +bc: "trans" gamma: 1.6666666666666667 eta: 1.e-8 zeta: 1.e-8 @@ -19,8 +19,8 @@ CFL: 3.e-1 if_second_order: 1. if_show: 1 show_steps: 100 -init_mode: 'shocktube1' +init_mode: "shocktube1" p_floor: 1.e-4 numbers: 10 -init_mode_Multi: '1D_shocks' -init_key: 2022 \ No newline at end of file +init_mode_Multi: "1D_shocks" +init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/config.yaml b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/config.yaml index 3a4b989..b3b9da6 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/config/config.yaml @@ -6,4 +6,4 @@ defaults: hydra: output_subdir: null run: - dir: . \ No newline at end of file + dir: . 
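The run scripts that follow select one of the args YAML files above through Hydra's override syntax (`+args=<file>.yaml`) and override individual fields with `++args.init_key=...`. A minimal sketch of the consuming side (the script name `show_config.py` is hypothetical; the decorator pattern mirrors the repository's own Hydra entry points, e.g. `Data_Merge.py` further below):

```python
# show_config.py -- hypothetical helper, assumed to sit next to config/.
# Prints the composed configuration that CFD_Hydra.py / CFD_multi_Hydra.py
# would receive for a given "+args=<file>.yaml" override.
from __future__ import annotations

import hydra
from omegaconf import DictConfig


@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
    # All solver parameters live under cfg.args, e.g.:
    print(cfg.args.save, cfg.args.bc, cfg.args.init_key)


if __name__ == "__main__":
    main()
```

Invoked, mirroring the shell scripts below, as `python3 show_config.py +args=2D_KH_M01_dk1.yaml ++args.init_key=2031`.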
diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset.sh index 384989a..1b02061 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset.sh @@ -1,7 +1,9 @@ +#!/bin/bash + CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta1e0.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta1e1.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta1e-1.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta2e0.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta2e-1.yaml CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta4e0.yaml -CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta4e-1.yaml \ No newline at end of file +CUDA_VISIBLE_DEVICES='3' python3 advection_exact_Hydra.py +args=beta4e-1.yaml diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_3DTurb.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_3DTurb.sh index 770b5f7..b29f590 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_3DTurb.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_3DTurb.sh @@ -1,6 +1,6 @@ +#!/bin/bash CUDA_VISIBLE_DEVICES='0' python3 CFD_Hydra.py +args=3D_TurbM01.yaml CUDA_VISIBLE_DEVICES='0' python3 CFD_Hydra.py +args=3D_TurbM05.yaml CUDA_VISIBLE_DEVICES='0' python3 CFD_Hydra.py +args=3D_TurbM1.yaml CUDA_VISIBLE_DEVICES='0' python3 CFD_Hydra.py +args=3D_TurbM2.yaml CUDA_VISIBLE_DEVICES='0' python3 CFD_Hydra.py +args=3D_TurbM4.yaml - diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_KHI.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_KHI.sh index 1bd7d44..03a6151 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_KHI.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_testset_KHI.sh @@ -1,3 +1,4 @@ +#!/bin/bash CUDA_VISIBLE_DEVICES='1' python3 CFD_Hydra.py +args=2D_KH_M01_dk1.yaml CUDA_VISIBLE_DEVICES='1' python3 CFD_Hydra.py +args=2D_KH_M02_dk1.yaml CUDA_VISIBLE_DEVICES='1' python3 CFD_Hydra.py +args=2D_KH_M04_dk1.yaml diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D.sh index 777f36f..b1e6886 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D.sh @@ -1,9 +1,10 @@ +#!/bin/bash nn=1 key=2020 -while [ $nn -le 10 ]; do - CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=1D_Multi.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +while [ "$nn" -le 10 ]; do + CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=1D_Multi.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1DShock.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1DShock.sh index aa9676c..0324a72 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1DShock.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1DShock.sh @@ -1,9 +1,10 @@ +#!/bin/bash nn=1 key=2031 -while [ $nn -le 10 ]; do - CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=1D_Multi_shock.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1)
+while [ "$nn" -le 10 ]; do + CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=1D_Multi_shock.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D_trans.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D_trans.sh index 502b3d9..2ce7891 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D_trans.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_1D_trans.sh @@ -1,9 +1,10 @@ +#!/bin/bash nn=1 key=2020 -while [ $nn -le 10 ]; do - CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=1D_Multi_trans.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +while [ "$nn" -le 10 ]; do + CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=1D_Multi_trans.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2D.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2D.sh index a1c7c06..d7752c6 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2D.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2D.sh @@ -1,10 +1,11 @@ +#!/bin/bash nn=1 key=2031 -while [ $nn -le 100 ]; do - CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=2D_Multi_Rand.yaml ++args.init_key=$key - #CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=2D_Multi_Rand_HR.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +while [ "$nn" -le 100 ]; do + CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=2D_Multi_Rand.yaml ++args.init_key="$key" + #CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=2D_Multi_Rand_HR.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2DTurb.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2DTurb.sh index 9dd556c..bdd281e 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2DTurb.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_2DTurb.sh @@ -1,10 +1,11 @@ +#!/bin/bash nn=1 key=2031 -#while [ $nn -le 100 ]; do -while [ $nn -le 55 ]; do - CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=2D_Multi_Turb.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +#while [ "$nn" -le 100 ]; do +while [ "$nn" -le 55 ]; do + CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=2D_Multi_Turb.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3D.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3D.sh index e6de413..ed0028f 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3D.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3D.sh @@ -1,9 +1,10 @@ +#!/bin/bash nn=1 key=2031 -while [ $nn -le 10 ]; do - CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=3D_Multi_Rand.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +while [ "$nn" -le 10 ]; do + CUDA_VISIBLE_DEVICES='0,1' python3 CFD_multi_Hydra.py +args=3D_Multi_Rand.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done
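Each pass through the loops above writes one batch of `.npy` shards tagged with the current `init_key` (the `HD_Sols_*_key*_D.npy` and companion files saved by CFD_multi_Hydra.py earlier in this diff). A minimal sketch, with an assumed save directory, of collecting those shards the same way `_merge()` in the Data_Merge.py diff below does:

```python
# Sketch only: gather the density shards produced by repeated runs of the
# run_trainset_*.sh scripts; the path "../save/CFD" follows the configs' save field.
import glob

import numpy as np

files = sorted(glob.glob("../save/CFD/HD*D.npy"))  # one shard per init_key
shards = [np.load(f) for f in files]  # each shard: (numbers, nt, nx, ny, nz)
density = np.concatenate(shards, axis=0)
print(density.shape)
```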
diff --git a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3DTurb.sh b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3DTurb.sh index 154a0fd..9ce9b1f 100644 --- a/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3DTurb.sh +++ b/pdebench/data_gen/data_gen_NLE/CompressibleFluid/run_trainset_3DTurb.sh @@ -1,9 +1,10 @@ +#!/bin/bash nn=1 key=2031 -while [ $nn -le 6 ]; do - CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=3D_Multi_TurbM1.yaml ++args.init_key=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +while [ "$nn" -le 6 ]; do + CUDA_VISIBLE_DEVICES='0,1,2,3' python3 CFD_multi_Hydra.py +args=3D_Multi_TurbM1.yaml ++args.init_key="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" done diff --git a/pdebench/data_gen/data_gen_NLE/Data_Merge.py b/pdebench/data_gen/data_gen_NLE/Data_Merge.py index ddfa35a..d738cff 100644 --- a/pdebench/data_gen/data_gen_NLE/Data_Merge.py +++ b/pdebench/data_gen/data_gen_NLE/Data_Merge.py @@ -143,223 +143,284 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ -''' +""" Data_Merge.py This is a script creating HDF5 from the generated data (numpy array) by our data generation scripts. -A more detailed explanation how to use this script is provided in the README. -''' +A more detailed explanation of how to use this script is provided in the README. +""" -import numpy as np -import h5py +# Hydra + + +from __future__ import annotations + import glob -# Hydra +import h5py import hydra +import numpy as np from omegaconf import DictConfig + def _mergeRD(var, DataND, savedir): - _vars = ['2D', 'nu'] + _vars = ["2D", "nu"] if var not in _vars: - print(var+' is not defined!') + print(var + " is not defined!") return None idx = 0 - datas = glob.glob(savedir+'/' + var + '*key*.npy') - datas.sort() - for data in datas: + files = glob.glob(savedir + "/" + var + "*key*.npy") + files.sort() + for data in files: print(idx, data) test = np.load(data).squeeze() batch = min(test.shape[0], DataND.shape[0] - idx) - if var == '2D': - DataND[idx:idx + batch] = test[:batch, -2] + if var == "2D": + DataND[idx : idx + batch] = test[:batch, -2] else: - DataND[idx:idx + batch] = test[:batch] + DataND[idx : idx + batch] = test[:batch] idx += batch return DataND[:idx] + def _merge(var, DataND, dim, savedir): if dim == 1: - _vars = ['D', 'P', 'Vx'] + _vars = ["D", "P", "Vx"] elif dim == 2: - _vars = ['D', 'P', 'Vx', 'Vy'] + _vars = ["D", "P", "Vx", "Vy"] elif dim == 3: - _vars = ['D', 'P', 'Vx', 'Vy', 'Vz'] + _vars = ["D", "P", "Vx", "Vy", "Vz"] if var not in _vars: - print(var+' is not defined!') + print(var + " is not defined!") return None idx = 0 - datas = glob.glob(savedir+'/HD*' + var + '.npy') - datas.sort() - for data in datas: + files = glob.glob(savedir + "/HD*" + var + ".npy") + files.sort() + for data in files: print(idx, data) test = np.load(data).squeeze() batch = min(test.shape[0], DataND.shape[0] - idx) - DataND[idx:idx+batch] = test[:batch] + DataND[idx : idx + batch] = test[:batch] idx += batch return DataND[:idx] + def nan_check(data): - data = np.abs(data).reshape([data.shape[0], data.shape[1],-1]).sum(axis=-1) - return np.where(data[:,-2] < 1.e-6)[0], np.where(data[:,-2] > 1.e-6)[0] + data = np.abs(data).reshape([data.shape[0], data.shape[1], -1]).sum(axis=-1) + return np.where(data[:, -2] < 1.0e-6)[0], np.where(data[:, -2] > 1.0e-6)[0] + def merge(type, dim, bd, nbatch, savedir): - if type=='CFD': - datas = glob.glob(savedir+'/HD*D.npy') - datas.sort() - test = np.load(datas[0]) + if type == "CFD": + data
= glob.glob(savedir + "/HD*D.npy") + data.sort() + test = np.load(data[0]) __nbatch, nt, nx, ny, nz = test.shape - _nbatch = __nbatch * len(datas) - print('nb, nt, nx, ny, nz: ', _nbatch, nt, nx, ny, nz) - print('nbatch: {0}, _nbatch: {1}'.format(nbatch, _nbatch)) - assert nbatch <= _nbatch, 'nbatch should be equal or less than the number of generated samples' - assert 2*nbatch > _nbatch, '2*nbatch should be larger than the number of generated samples' + _nbatch = __nbatch * len(data) + print("nb, nt, nx, ny, nz: ", _nbatch, nt, nx, ny, nz) + print(f"nbatch: {nbatch}, _nbatch: {_nbatch}") + assert ( + nbatch <= _nbatch + ), "nbatch should be equal or less than the number of generated samples" + assert ( + 2 * nbatch > _nbatch + ), "2*nbatch should be larger than the number of generated samples" if dim == 1: - DataND = np.zeros([2*nbatch, nt, nx], dtype=np.float32) - vars = ['D', 'P', 'Vx'] + DataND = np.zeros([2 * nbatch, nt, nx], dtype=np.float32) + vars = ["D", "P", "Vx"] elif dim == 2: - DataND = np.zeros([2*nbatch, nt, nx, ny], dtype=np.float32) - vars = ['D', 'P', 'Vx', 'Vy'] + DataND = np.zeros([2 * nbatch, nt, nx, ny], dtype=np.float32) + vars = ["D", "P", "Vx", "Vy"] elif dim == 3: - DataND = np.zeros([2*nbatch, nt, nx, ny, nz], dtype=np.float32) - vars = ['D', 'P', 'Vx', 'Vy', 'Vz'] + DataND = np.zeros([2 * nbatch, nt, nx, ny, nz], dtype=np.float32) + vars = ["D", "P", "Vx", "Vy", "Vz"] - elif type=='ReacDiff': - datas = glob.glob(savedir+'/nu*.npy') - datas.sort() - test = np.load(datas[0]) + elif type == "ReacDiff": + data = glob.glob(savedir + "/nu*.npy") + data.sort() + test = np.load(data[0]) __nbatch, nx, ny = test.shape - _nbatch = __nbatch * len(datas) - print('nbatch: {0}, _nbatch: {1}'.format(nbatch, _nbatch)) - assert nbatch == _nbatch, 'nbatch should be equal or less than the number of generated samples' - print('nb, nx, ny: ', _nbatch, nx, ny) + _nbatch = __nbatch * len(data) + print(f"nbatch: {nbatch}, _nbatch: {_nbatch}") + assert ( + nbatch == _nbatch + ), "nbatch should be equal or less than the number of generated samples" + print("nb, nx, ny: ", _nbatch, nx, ny) DataND = np.zeros([nbatch, nx, ny], dtype=np.float32) - vars = ['2D', 'nu'] + vars = ["2D", "nu"] for var in vars: - if type=='CFD': + if type == "CFD": _DataND = _merge(var, DataND, dim, savedir) - if var=='D': + if var == "D": idx_neg, idx_pos = nan_check(_DataND) - print('idx_neg: {0}, idx_pos: {1}'.format(len(idx_neg), len(idx_pos))) + print(f"idx_neg: {len(idx_neg)}, idx_pos: {len(idx_pos)}") if len(idx_pos) < nbatch: - print('too many ill-defined data...') - print('nbatch: {0}, idx_pos: {1}'.format(nbatch, len(idx_pos))) + print("too many ill-defined data...") + print(f"nbatch: {nbatch}, idx_pos: {len(idx_pos)}") _DataND = _DataND[idx_pos] _DataND = _DataND[:nbatch] - np.save(savedir+'/' + var + '.npy', _DataND) - elif type == 'ReacDiff': + np.save(savedir + "/" + var + ".npy", _DataND) + elif type == "ReacDiff": DataND = _mergeRD(var, DataND, savedir) - np.save(savedir+'/' + var + '.npy', DataND) - - datas = glob.glob(savedir+'/*npy') - datas.sort() - - if type == 'CFD': - zcrd = np.load(datas[-1]) - del (datas[-1]) - ycrd = np.load(datas[-1]) - del (datas[-1]) - xcrd = np.load(datas[-1]) - del (datas[-1]) - tcrd = np.load(datas[-1]) - del (datas[-1]) - if type=='ReacDiff': - #datas = glob.glob('save/' + type + '/nu*key*npy') - datas = glob.glob(savedir+'/nu*key*npy') - datas.sort() - _beta = datas[0].split('/')[-1].split('_')[3] - flnm = savedir+'/2D_DecayFlow_' + _beta + '_Train.hdf5' - with 
h5py.File(flnm, 'w') as f: - f.create_dataset('tensor', data=np.load(savedir+'/2D.npy')[:, None, :, :]) - f.create_dataset('nu', data=np.load(savedir+'/nu.npy')) - f.create_dataset('x-coordinate', data=xcrd) - f.create_dataset('y-coordinate', data=ycrd) - f.attrs['beta'] = float(_beta[4:]) + np.save(savedir + "/" + var + ".npy", DataND) + + data = glob.glob(savedir + "/*npy") + data.sort() + + if type == "CFD": + zcrd = np.load(data[-1]) + del data[-1] + ycrd = np.load(data[-1]) + del data[-1] + xcrd = np.load(data[-1]) + del data[-1] + tcrd = np.load(data[-1]) + del data[-1] + if type == "ReacDiff": + # data = glob.glob('save/' + type + '/nu*key*npy') + data = glob.glob(savedir + "/nu*key*npy") + data.sort() + _beta = data[0].split("/")[-1].split("_")[3] + flnm = savedir + "/2D_DecayFlow_" + _beta + "_Train.hdf5" + with h5py.File(flnm, "w") as f: + f.create_dataset("tensor", data=np.load(savedir + "/2D.npy")[:, None, :, :]) + f.create_dataset("nu", data=np.load(savedir + "/nu.npy")) + f.create_dataset("x-coordinate", data=xcrd) + f.create_dataset("y-coordinate", data=ycrd) + f.attrs["beta"] = float(_beta[4:]) return 0 - mode = datas[1].split('/')[-1].split('_')[3] - _eta = datas[1].split('/')[-1].split('_')[4] - _zeta = datas[1].split('/')[-1].split('_')[5] - _M = datas[1].split('/')[-1].split('_')[6] + mode = data[1].split("/")[-1].split("_")[3] + _eta = data[1].split("/")[-1].split("_")[4] + _zeta = data[1].split("/")[-1].split("_")[5] + _M = data[1].split("/")[-1].split("_")[6] if dim == 1: - flnm = savedir+'/1D_CFD_' + mode + '_' + _eta + '_' + _zeta + '_' + bd + '_Train.hdf5' + flnm = ( savedir + "/1D_CFD_" + mode + "_" + _eta + "_" + _zeta + "_" + bd + "_Train.hdf5" ) elif dim == 2: - flnm = savedir+'/2D_CFD_' + mode + '_' + _eta + '_' + _zeta + '_' + _M + '_' + bd + '_Train.hdf5' + flnm = ( savedir + "/2D_CFD_" + mode + "_" + _eta + "_" + _zeta + "_" + _M + "_" + bd + "_Train.hdf5" ) elif dim == 3: - flnm = savedir+'/3D_CFD_' + mode + '_' + _eta + '_' + _zeta + '_' + _M + '_' + bd + '_Train.hdf5' + flnm = ( savedir + "/3D_CFD_" + mode + "_" + _eta + "_" + _zeta + "_" + _M + "_" + bd + "_Train.hdf5" ) print(flnm) - del(DataND) + del DataND - with h5py.File(flnm, 'w') as f: - f.create_dataset('density', data=np.load(savedir+'/D.npy')) - f.create_dataset('pressure', data=np.load(savedir+'/P.npy')) - f.create_dataset('Vx', data=np.load(savedir+'/Vx.npy')) + with h5py.File(flnm, "w") as f: + f.create_dataset("density", data=np.load(savedir + "/D.npy")) + f.create_dataset("pressure", data=np.load(savedir + "/P.npy")) + f.create_dataset("Vx", data=np.load(savedir + "/Vx.npy")) if dim > 1: - f.create_dataset('Vy', data=np.load(savedir+'/Vy.npy')) - f.create_dataset('y-coordinate', data=ycrd) + f.create_dataset("Vy", data=np.load(savedir + "/Vy.npy")) + f.create_dataset("y-coordinate", data=ycrd) if dim == 3: - f.create_dataset('Vz', data=np.load(savedir+'/Vz.npy')) - f.create_dataset('z-coordinate', data=zcrd) - f.create_dataset('x-coordinate', data = xcrd) - f.create_dataset('t-coordinate', data = tcrd) + f.create_dataset("Vz", data=np.load(savedir + "/Vz.npy")) + f.create_dataset("z-coordinate", data=zcrd) + f.create_dataset("x-coordinate", data=xcrd) + f.create_dataset("t-coordinate", data=tcrd) eta = float(_eta[3:]) zeta = float(_zeta[4:]) - print('(eta, zeta) = ', eta, zeta) + print("(eta, zeta) = ", eta, zeta) - f.attrs['eta'] = eta - f.attrs['zeta'] = zeta + f.attrs["eta"] = eta + f.attrs["zeta"] = zeta if dim > 1: M =
float(_M[1:]) - f.attrs['M'] = M - print('M: ', M) + f.attrs["M"] = M + print("M: ", M) + def transform(type, savedir): - datas = glob.glob(savedir+'/*npy') - datas.sort() - xcrd = np.load(datas[-1]) - del (datas[-1]) - tcrd = np.load(datas[-1]) - del (datas[-1]) - - flnm = datas[0] - with h5py.File(flnm[:-3]+'hdf5', 'w') as f: + data = glob.glob(savedir + "/*npy") + data.sort() + xcrd = np.load(data[-1]) + del data[-1] + tcrd = np.load(data[-1]) + del data[-1] + + flnm = data[0] + with h5py.File(flnm[:-3] + "hdf5", "w") as f: print(flnm) _data = np.load(flnm) - f.create_dataset('tensor', data = _data.astype(np.float32)) - f.create_dataset('x-coordinate', data = xcrd) - f.create_dataset('t-coordinate', data = tcrd) - if type=='advection': - beta = float(flnm.split('/')[-1].split('_')[3][4:-4]) # advection train + + f.create_dataset("tensor", data=_data.astype(np.float32)) + f.create_dataset("x-coordinate", data=xcrd) + f.create_dataset("t-coordinate", data=tcrd) + if type == "advection": + beta = float(flnm.split("/")[-1].split("_")[3][4:-4]) # advection train print(f"beta: {beta}") - f.attrs['beta'] = beta + f.attrs["beta"] = beta - elif type=='burgers': - Nu = float(flnm.split('/')[-1].split('_')[-1][2:-4]) # Burgers test/train + elif type == "burgers": + Nu = float(flnm.split("/")[-1].split("_")[-1][2:-4]) # Burgers test/train print(f"Nu: {Nu}") - f.attrs['Nu'] = Nu + f.attrs["Nu"] = Nu - elif type=='ReacDiff': - Rho = float(flnm.split('/')[-1].split('_')[-1][3:-4]) # reac-diff test - Nu = float(flnm.split('/')[-1].split('_')[-2][2:]) # reac-diff test + elif type == "ReacDiff": + Rho = float(flnm.split("/")[-1].split("_")[-1][3:-4]) # reac-diff test + Nu = float(flnm.split("/")[-1].split("_")[-2][2:]) # reac-diff test print(f"Nu, rho: {Nu, Rho}") - f.attrs['Nu'] = Nu - f.attrs['rho'] = Rho + f.attrs["Nu"] = Nu + f.attrs["rho"] = Rho + # Init arguments with Hydra @hydra.main(config_path="config", config_name="config") def main(cfg: DictConfig) -> None: - pde1ds = ['advection', 'burgers', 'ReacDiff'] - if cfg.args.type in pde1ds and cfg.args.dim==1: + pde1ds = ["advection", "burgers", "ReacDiff"] + if cfg.args.type in pde1ds and cfg.args.dim == 1: transform(type=cfg.args.type, savedir=cfg.args.savedir) else: - bds = ['periodic', 'trans'] - assert cfg.args.bd in bds, 'bd should be either periodic or trans' - merge(type=cfg.args.type, dim=cfg.args.dim, bd=cfg.args.bd, nbatch=cfg.args.nbatch, savedir=cfg.args.savedir) - -if __name__=='__main__': + bds = ["periodic", "trans"] + assert cfg.args.bd in bds, "bd should be either periodic or trans" + merge( + type=cfg.args.type, + dim=cfg.args.dim, + bd=cfg.args.bd, + nbatch=cfg.args.nbatch, + savedir=cfg.args.savedir, + ) + + +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/README.md b/pdebench/data_gen/data_gen_NLE/README.md index eabacd8..695c5cb 100644 --- a/pdebench/data_gen/data_gen_NLE/README.md +++ b/pdebench/data_gen/data_gen_NLE/README.md @@ -8,12 +8,13 @@ bash data_gen/data_gen_NLE/ReactionDiffusionEq/run_DarcyFlow2D.sh ``` -which will in turn run the python script `data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_2D_multi_soluion_Hydra.py` +which will in turn run the python script +`data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_2D_multi_soluion_Hydra.py` - Update `data_gen/data_gen_NLE/config/config.yaml` to: ```yaml -type: 'ReacDiff' # 'advection'/'ReacDiff'/'burgers'/'CFD' +type: "ReacDiff" # 'advection'/'ReacDiff'/'burgers'/'CFD' dim: 2 ``` @@ -23,7 +24,7 @@ dim: 2 python 
data_gen/data_gen_NLE/Data_Merge.py ``` ----------------------------- +--- #### Data generation for 1D Advection Equation: @@ -41,9 +42,9 @@ bash run_trainset.sh - Update `data_gen/data_gen_NLE/config/config.yaml` to: ```yaml -type: 'advection' # 'advection'/'ReacDiff'/'burgers'/'CFD' +type: "advection" # 'advection'/'ReacDiff'/'burgers'/'CFD' dim: 1 -savedir: './save/advection' +savedir: "./save/advection" ``` ``` @@ -52,7 +53,7 @@ cd .. python Data_Merge.py ``` --------------- +--- #### Data generation for 1D Burgers' Equation: @@ -70,19 +71,18 @@ bash run_trainset.sh - Update `data_gen/data_gen_NLE/config/config.yaml` to: ```yaml -type: 'burgers' # 'advection'/'ReacDiff'/'burgers'/'CFD' +type: "burgers" # 'advection'/'ReacDiff'/'burgers'/'CFD' dim: 1 -savedir: './save/burgers' +savedir: "./save/burgers" ``` - ``` # serialize to hdf5 by transforming npy file cd .. python Data_Merge.py ``` ---------------- +--- #### Data generation for 1D Reaction Diffusion Equation: @@ -100,9 +100,9 @@ bash run_trainset.sh - Update `data_gen/data_gen_NLE/config/config.yaml` to: ```yaml -type: 'ReacDiff' # 'advection'/'ReacDiff'/'burgers'/'CFD' +type: "ReacDiff" # 'advection'/'ReacDiff'/'burgers'/'CFD' dim: 1 -savedir: './save/ReacDiff' +savedir: "./save/ReacDiff" ``` ``` diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu1e0.yaml index 30f3641..82892c5 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu1e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 1.e0 +nu: 1.e0 rho: 1.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu2e0.yaml index f551aa0..b3a41a4 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu2e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 2.e0 +nu: 2.e0 rho: 1.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e-1.yaml index cdd76db..3c4721f 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e-1.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. 
xR: 6.28318530718 -nu : 5.e-1 +nu: 5.e-1 rho: 1.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e0.yaml index bef66db..986fcf1 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e0_Nu5e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 5.e0 +nu: 5.e0 rho: 1.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu1e0.yaml index 1a996f7..21aef17 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu1e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 1.e0 +nu: 1.e0 rho: 1.e1 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu2e0.yaml index 42228e0..2df6cd9 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu2e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 2.e0 +nu: 2.e0 rho: 1.e1 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e-1.yaml index d8f4513..114a987 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e-1.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 5.e-1 +nu: 5.e-1 rho: 1.e1 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e0.yaml index fe56c62..a2c6bea 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho1e1_Nu5e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. 
xR: 6.28318530718 -nu : 5.e0 +nu: 5.e0 rho: 1.e1 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu1e0.yaml index 9804ca1..087cd63 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu1e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 1.e0 +nu: 1.e0 rho: 2.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu2e0.yaml index ce67bba..4121f05 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu2e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 2.e0 +nu: 2.e0 rho: 2.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e-1.yaml index 04abd46..c90ac52 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e-1.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 5.e-1 +nu: 5.e-1 rho: 2.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e0.yaml index 755beaf..5861512 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho2e0_Nu5e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 5.e0 +nu: 5.e0 rho: 2.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu1e0.yaml index 33d3e25..eea3bbe 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu1e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. 
xR: 6.28318530718 -nu : 1.e0 +nu: 1.e0 rho: 5.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu2e0.yaml index d9b17ad..97781ac 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu2e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 2.e0 +nu: 2.e0 rho: 5.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e-1.yaml index f1a7705..f584839 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e-1.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 5.e-1 +nu: 5.e-1 rho: 5.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e0.yaml index 6039f11..d2ce48e 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/Rho5e0_Nu5e0.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 5.e0 +nu: 5.e0 rho: 5.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/config.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/config.yaml index 30f3641..82892c5 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/args/config.yaml @@ -1,13 +1,13 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. nx: 1024 xL: 0. xR: 6.28318530718 -nu : 1.e0 +nu: 1.e0 rho: 1.e0 CFL: 4.e-1 if_show: 1 show_steps: 100 -init_mode: 'react' +init_mode: "react" diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e0.yaml index 7eb228f..180ed0a 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. 
diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e1.yaml index f661d34..f313da7 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu1e1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu2e0.yaml index 12479a0..3aaa868 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu2e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e-1.yaml index e7c4bcd..63445a0 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e0.yaml index cad5d73..d483cf8 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e0_Nu5e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e0.yaml index c9de644..f12dea6 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. 
show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e1.yaml index cccd76a..da255bc 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu1e1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu2e0.yaml index cff657e..3b12386 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu2e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e-1.yaml index 512fcd1..17d81ce 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e0.yaml index 7965ac3..1400217 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho1e1_Nu5e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e0.yaml index e98c943..35594b5 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. 
show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e1.yaml index 3b71699..b12a5af 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu1e1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu2e0.yaml index 2e2fc56..f3d9c47 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu2e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e-1.yaml index e201d68..86db6f7 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e0.yaml index 17996c7..b8ed164 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho2e0_Nu5e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e0.yaml index 7cddd71..ff4cc51 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. 
show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e1.yaml index e2025e3..472c8fb 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu1e1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu2e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu2e0.yaml index 8580063..999ef74 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu2e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu2e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e-1.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e-1.yaml index aae3611..5092fab 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e-1.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e-1.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e0.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e0.yaml index 47f91c8..261a646 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e0.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/Rho5e0_Nu5e0.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1. @@ -13,4 +13,4 @@ numbers: 10000 if_second_order: 1. show_steps: 100 init_key: 2022 -if_rand_param: False \ No newline at end of file +if_rand_param: False diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config.yaml index 60e27b4..c26af21 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff/' +save: "../save/ReacDiff/" dt_save: 0.01 ini_time: 0. fin_time: 1.
diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config_2D.yaml b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config_2D.yaml index 00169c2..f37cfd1 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config_2D.yaml +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/config/multi/config_2D.yaml @@ -1,4 +1,4 @@ -save: '../save/ReacDiff//' +save: "../save/ReacDiff//" dt_save: 0.25 ini_time: 0. fin_time: 2. @@ -14,4 +14,4 @@ if_show: 1 numbers: 200 if_second_order: 1. show_steps: 100 -init_key: 2022 \ No newline at end of file +init_key: 2022 diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_2D_multi_solution_Hydra.py b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_2D_multi_solution_Hydra.py index b3b6fca..203dab9 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_2D_multi_solution_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_2D_multi_solution_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,35 +144,35 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations import sys -import random -from math import ceil, exp, log +from math import ceil -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax -from jax import vmap import jax.numpy as jnp -from jax import device_put, lax +from jax import device_put, lax, vmap + +# Hydra +from omegaconf import DictConfig -sys.path.append('..') +sys.path.append("..") from utils import Courant_diff_2D, bc_2D, init_multi_2DRand def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: # basic parameters dx = (cfg.multi.xR - cfg.multi.xL) / cfg.multi.nx - dx_inv = 1. / dx - dy = (cfg.multi.yR - cfg.multi.yL)/cfg.multi.ny - dy_inv = 1./dy + dx_inv = 1.0 / dx + dy = (cfg.multi.yR - cfg.multi.yL) / cfg.multi.ny + dy_inv = 1.0 / dy # cell edge coordinate xe = jnp.linspace(cfg.multi.xL, cfg.multi.xR, cfg.multi.nx + 1) @@ -199,7 +198,7 @@ def evolve(u, nu): tsave = t steps = 0 i_save = 0 - dt = 0. 
+ dt = 0.0 uu = jnp.zeros([it_tot, u.shape[0], u.shape[1]]) uu = uu.at[0].set(u) @@ -219,12 +218,16 @@ def _show(_carry): u, tsave, i_save, uu = lax.cond(t >= tsave, _show, _pass, carry) carry = (u, t, dt, steps, tsave, nu) - u, t, dt, steps, tsave, nu = lax.fori_loop(0, show_steps, simulation_fn, carry) + u, t, dt, steps, tsave, nu = lax.fori_loop( + 0, show_steps, simulation_fn, carry + ) return (t, tsave, steps, i_save, dt, u, uu, nu) carry = t, tsave, steps, i_save, dt, u, uu, nu - t, tsave, steps, i_save, dt, u, uu, nu = lax.while_loop(cond_fun, _body_fun, carry) + t, tsave, steps, i_save, dt, u, uu, nu = lax.while_loop( + cond_fun, _body_fun, carry + ) uu = uu.at[-1].set(u) return uu @@ -244,64 +247,108 @@ def _update(carry): return u, dt, nu carry = u, dt, nu - u, dt, nu = lax.cond(dt > 1.e-8, _update, _pass, carry) + u, dt, nu = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 return u, t, dt, steps, tsave, nu - @jax.jit def update(u, u_tmp, dt, nu): # boundary condition - _u = bc_2D(u_tmp, mode='Neumann') + _u = bc_2D(u_tmp, mode="Neumann") # diffusion dtdx = dt * dx_inv dtdy = dt * dy_inv - fx = - 0.5 * (nu[2:-1, 2:-2] + nu[1:-2, 2:-2]) * dx_inv * (_u[2:-1, 2:-2] - _u[1:-2, 2:-2]) - fy = - 0.5 * (nu[2:-2, 2:-1] + nu[2:-2, 1:-2]) * dy_inv * (_u[2:-2, 2:-1] - _u[2:-2, 1:-2]) - u -= dtdx * (fx[1:, :] - fx[:-1, :])\ - + dtdy * (fy[:, 1:] - fy[:, :-1]) + fx = ( + -0.5 + * (nu[2:-1, 2:-2] + nu[1:-2, 2:-2]) + * dx_inv + * (_u[2:-1, 2:-2] - _u[1:-2, 2:-2]) + ) + fy = ( + -0.5 + * (nu[2:-2, 2:-1] + nu[2:-2, 1:-2]) + * dy_inv + * (_u[2:-2, 2:-1] - _u[2:-2, 1:-2]) + ) + u -= dtdx * (fx[1:, :] - fx[:-1, :]) + dtdy * (fy[:, 1:] - fy[:, :-1]) # source term: f = 1 * beta u += dt * beta return u - u = init_multi_2DRand(xc, yc, numbers=cfg.multi.numbers, k_tot=4, init_key=cfg.multi.init_key) + u = init_multi_2DRand( + xc, yc, numbers=cfg.multi.numbers, k_tot=4, init_key=cfg.multi.init_key + ) u = device_put(u) # putting variables in GPU (not necessary??) 
# generate random diffusion coefficient key = jax.random.PRNGKey(cfg.multi.init_key) - xms = jax.random.uniform(key, shape=[cfg.multi.numbers, 5], minval=cfg.multi.xL, maxval=cfg.multi.xR) + xms = jax.random.uniform( + key, shape=[cfg.multi.numbers, 5], minval=cfg.multi.xL, maxval=cfg.multi.xR + ) key, subkey = jax.random.split(key) - yms = jax.random.uniform(key, shape=[cfg.multi.numbers, 5], minval=cfg.multi.yL, maxval=cfg.multi.yR) + yms = jax.random.uniform( + key, shape=[cfg.multi.numbers, 5], minval=cfg.multi.yL, maxval=cfg.multi.yR + ) key, subkey = jax.random.split(key) - stds = 0.5*(cfg.multi.xR - cfg.multi.xL) * jax.random.uniform(key, shape=[cfg.multi.numbers, 5]) + stds = ( + 0.5 + * (cfg.multi.xR - cfg.multi.xL) + * jax.random.uniform(key, shape=[cfg.multi.numbers, 5]) + ) nu = jnp.zeros_like(u) for i in range(5): - nu += jnp.exp(-((xc[None, :, None] - xms[:, None, None, i]) ** 2 - + (yc[None, None, :] - yms[:, None, None, i]) ** 2) / stds[:, None, None, i]) + nu += jnp.exp( + -( + (xc[None, :, None] - xms[:, None, None, i]) ** 2 + + (yc[None, None, :] - yms[:, None, None, i]) ** 2 + ) + / stds[:, None, None, i] + ) nu = jnp.where(nu > nu.mean(), 1, 0.1) - nu = vmap(bc_2D, axis_name='i')(nu) + nu = vmap(bc_2D, axis_name="i")(nu) local_devices = jax.local_device_count() if local_devices > 1: nb, nx, ny = u.shape - vm_evolve = jax.pmap(jax.vmap(evolve, axis_name='j'), axis_name='i') - uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers//local_devices, nx, ny]),\ - nu.reshape([local_devices, cfg.multi.numbers//local_devices, nx+4, ny+4])) + vm_evolve = jax.pmap(jax.vmap(evolve, axis_name="j"), axis_name="i") + uu = vm_evolve( + u.reshape([local_devices, cfg.multi.numbers // local_devices, nx, ny]), + nu.reshape( + [local_devices, cfg.multi.numbers // local_devices, nx + 4, ny + 4] + ), + ) uu = uu.reshape([nb, -1, nx, ny]) else: vm_evolve = vmap(evolve, 0, 0) uu = vm_evolve(u, nu) - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' - jnp.save(cwd + cfg.multi.save+'/2D_ReacDiff_Multi_beta'+str(beta)[:5]+'_key'+str(cfg.multi.init_key), uu) - jnp.save(cwd + cfg.multi.save+'/x_coordinate', xc) - jnp.save(cwd + cfg.multi.save+'/y_coordinate', yc) - jnp.save(cwd + cfg.multi.save+'/t_coordinate', tc) - jnp.save(cwd + cfg.multi.save+'/nu_diff_coef_beta'+str(beta)[:5]+'_key'+str(cfg.multi.init_key), nu[:,2:-2,2:-2]) - -if __name__=='__main__': + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" + jnp.save( + cwd + + cfg.multi.save + + "/2D_ReacDiff_Multi_beta" + + str(beta)[:5] + + "_key" + + str(cfg.multi.init_key), + uu, + ) + jnp.save(cwd + cfg.multi.save + "/x_coordinate", xc) + jnp.save(cwd + cfg.multi.save + "/y_coordinate", yc) + jnp.save(cwd + cfg.multi.save + "/t_coordinate", tc) + jnp.save( + cwd + + cfg.multi.save + + "/nu_diff_coef_beta" + + str(beta)[:5] + + "_key" + + str(cfg.multi.init_key), + nu[:, 2:-2, 2:-2], + ) + + +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_Hydra.py b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_Hydra.py index 638ffcc..2e6156c 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,36 +144,37 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
""" +from __future__ import annotations -import time import sys +import time from math import ceil -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax import jax.numpy as jnp from jax import device_put, lax -sys.path.append('..') -from utils import init, Courant_diff, save_data, bc +# Hydra +from omegaconf import DictConfig + +sys.path.append("..") +from utils import Courant_diff, bc, init # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: - print('nu: {0:.3f}, rho: {1:.3f}'.format(cfg.args.nu, cfg.args.rho)) + print(f"nu: {cfg.args.nu:.3f}, rho: {cfg.args.rho:.3f}") # basic parameters - dx = (cfg.args.xR - cfg.args.xL)/cfg.args.nx - dx_inv = 1./dx + dx = (cfg.args.xR - cfg.args.xL) / cfg.args.nx + dx_inv = 1.0 / dx # cell edge coordinate xe = jnp.linspace(cfg.args.xL, cfg.args.xR, cfg.args.nx + 1) # cell center coordinate - xc = xe[:-1] + 0.5*dx + xc = xe[:-1] + 0.5 * dx # t-coordinate it_tot = ceil((cfg.args.fin_time - cfg.args.ini_time) / cfg.args.dt_save) + 1 tc = jnp.arange(it_tot + 1) * cfg.args.dt_save @@ -185,7 +185,7 @@ def evolve(u): steps = 0 i_save = 0 tm_ini = time.time() - dt = 0. + dt = 0.0 uu = jnp.zeros([it_tot, u.shape[0]]) uu = uu.at[0].set(u) @@ -196,14 +196,16 @@ def evolve(u): tsave += cfg.args.dt_save i_save += 1 - if steps%cfg.args.show_steps==0 and cfg.args.if_show: - print('now {0:d}-steps, t = {1:.3f}, dt = {2:.3f}'.format(steps, t, dt)) + if steps % cfg.args.show_steps == 0 and cfg.args.if_show: + print(f"now {steps:d}-steps, t = {t:.3f}, dt = {dt:.3f}") carry = (u, t, dt, steps, tsave) - u, t, dt, steps, tsave = lax.fori_loop(0, cfg.args.show_steps, simulation_fn, carry) + u, t, dt, steps, tsave = lax.fori_loop( + 0, cfg.args.show_steps, simulation_fn, carry + ) tm_fin = time.time() - print('total elapsed time is {} sec'.format(tm_fin - tm_ini)) + print(f"total elapsed time is {tm_fin - tm_ini} sec") return uu, t @jax.jit @@ -219,11 +221,12 @@ def _update(carry): # update using flux at t+dt/2-th time step u = update(u, u_tmp, dt) return u, dt + def _pass(carry): return carry carry = u, dt - u, dt = lax.cond(t > 1.e-8, _update, _pass, carry) + u, dt = lax.cond(t > 1.0e-8, _update, _pass, carry) t += dt steps += 1 @@ -235,32 +238,45 @@ def update(u, u_tmp, dt): u = Piecewise_Exact_Solution(u, dt) # diffusion f = flux(u_tmp) - u -= dt * dx_inv * (f[1:cfg.args.nx + 1] - f[0:cfg.args.nx]) + u -= dt * dx_inv * (f[1 : cfg.args.nx + 1] - f[0 : cfg.args.nx]) return u @jax.jit def flux(u): - _u = bc(u, dx, Ncell=cfg.args.nx) # index 2 for _U is equivalent with index 0 for u + _u = bc( + u, dx, Ncell=cfg.args.nx + ) # index 2 for _U is equivalent with index 0 for u # source term - f = - cfg.args.nu*(_u[2:cfg.args.nx+3] - _u[1:cfg.args.nx+2])*dx_inv + f = -cfg.args.nu * (_u[2 : cfg.args.nx + 3] - _u[1 : cfg.args.nx + 2]) * dx_inv return f @jax.jit def Piecewise_Exact_Solution(u, dt): # Piecewise_Exact_Solution method # stiff equation - u = 1./(1. + jnp.exp(- cfg.args.rho*dt)*(1. - u)/u) + u = 1.0 / (1.0 + jnp.exp(-cfg.args.rho * dt) * (1.0 - u) / u) return u u = init(xc=xc, mode=cfg.args.init_mode) u = device_put(u) # putting variables in GPU (not necessary??) 
uu, t = evolve(u) - print('final time is: {0:.3f}'.format(t)) - - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' - jnp.save(cwd + cfg.args.save + '/ReacDiff_' + cfg.args.init_mode + '_Nu' + str(cfg.args.nu) + '_Rho' + str(cfg.args.rho), uu) - jnp.save(cwd + cfg.args.save + '/x_coordinate', xc) - jnp.save(cwd + cfg.args.save + '/t_coordinate', tc) - -if __name__=='__main__': + print(f"final time is: {t:.3f}") + + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" + jnp.save( + cwd + + cfg.args.save + + "/ReacDiff_" + + cfg.args.init_mode + + "_Nu" + + str(cfg.args.nu) + + "_Rho" + + str(cfg.args.rho), + uu, + ) + jnp.save(cwd + cfg.args.save + "/x_coordinate", xc) + jnp.save(cwd + cfg.args.save + "/t_coordinate", tc) + + +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_multi_solution_Hydra.py b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_multi_solution_Hydra.py index 1dcec4e..8b10ae2 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_multi_solution_Hydra.py +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/reaction_diffusion_multi_solution_Hydra.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ @@ -145,34 +144,35 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations -import sys import random +import sys from math import ceil, exp, log from pathlib import Path -# Hydra -from omegaconf import DictConfig, OmegaConf import hydra - import jax -from jax import vmap import jax.numpy as jnp from jax import device_put, lax -sys.path.append('..') -from utils import init_multi, Courant, Courant_diff, save_data, bc, limiting +# Hydra +from omegaconf import DictConfig + +sys.path.append("..") +from utils import Courant_diff, bc, init_multi def _pass(carry): return carry + # Init arguments with Hydra @hydra.main(config_path="config") def main(cfg: DictConfig) -> None: # basic parameters dx = (cfg.multi.xR - cfg.multi.xL) / cfg.multi.nx - dx_inv = 1. / dx + dx_inv = 1.0 / dx # cell edge coordinate xe = jnp.linspace(cfg.multi.xL, cfg.multi.xR, cfg.multi.nx + 1) @@ -185,12 +185,16 @@ def main(cfg: DictConfig) -> None: dt_save = cfg.multi.dt_save CFL = cfg.multi.CFL if cfg.multi.if_rand_param: - rho = exp(random.uniform(log(0.001), log(10))) # uniform number between 0.01 to 100 - nu = exp(random.uniform(log(0.001), log(10))) # uniform number between 0.01 to 100 + rho = exp( + random.uniform(log(0.001), log(10)) + ) # log-uniform between 0.001 and 10 + nu = exp( + random.uniform(log(0.001), log(10)) + ) # log-uniform between 0.001 and 10 else: rho = cfg.multi.rho nu = cfg.multi.nu - print('rho: {0:>5f}, nu: {1:>5f}'.format(rho, nu)) + print(f"rho: {rho:>5f}, nu: {nu:>5f}") # t-coordinate it_tot = ceil((fin_time - ini_time) / dt_save) + 1 @@ -202,7 +206,7 @@ def evolve(u): tsave = t steps = 0 i_save = 0 - dt = 0.
+ dt = 0.0 uu = jnp.zeros([it_tot, u.shape[0]]) uu = uu.at[0].set(u) @@ -247,53 +251,63 @@ def _update(carry): return u, dt carry = u, dt - u, dt = lax.cond(dt > 1.e-8, _update, _pass, carry) + u, dt = lax.cond(dt > 1.0e-8, _update, _pass, carry) t += dt steps += 1 return u, t, dt, steps, tsave - @jax.jit def update(u, u_tmp, dt): # stiff part u = Piecewise_Exact_Solution(u, dt) # diffusion f = flux(u_tmp) - u -= dt * dx_inv * (f[1:cfg.multi.nx + 1] - f[0:cfg.multi.nx]) + u -= dt * dx_inv * (f[1 : cfg.multi.nx + 1] - f[0 : cfg.multi.nx]) return u @jax.jit def flux(u): - _u = bc(u, dx, Ncell=cfg.multi.nx) # index 2 for _U is equivalent with index 0 for u + _u = bc( + u, dx, Ncell=cfg.multi.nx + ) # index 2 for _U is equivalent with index 0 for u # 2nd-order diffusion flux - f = - nu*(_u[2:cfg.multi.nx+3] - _u[1:cfg.multi.nx+2])*dx_inv + f = -nu * (_u[2 : cfg.multi.nx + 3] - _u[1 : cfg.multi.nx + 2]) * dx_inv return f @jax.jit def Piecewise_Exact_Solution(u, dt): # Piecewise_Exact_Solution method # stiff equation - u = 1./(1. + jnp.exp(- rho*dt)*(1. - u)/u) + u = 1.0 / (1.0 + jnp.exp(-rho * dt) * (1.0 - u) / u) return u - u = init_multi(xc, numbers=cfg.multi.numbers, k_tot=4, init_key=cfg.multi.init_key, if_norm=True) + u = init_multi( + xc, + numbers=cfg.multi.numbers, + k_tot=4, + init_key=cfg.multi.init_key, + if_norm=True, + ) u = device_put(u) # putting variables in GPU (not necessary??) - #vm_evolve = vmap(evolve, 0, 0) - #uu = vm_evolve(u) - vm_evolve = jax.pmap(jax.vmap(evolve, axis_name='j'), axis_name='i') + # vm_evolve = vmap(evolve, 0, 0) + # uu = vm_evolve(u) + vm_evolve = jax.pmap(jax.vmap(evolve, axis_name="j"), axis_name="i") local_devices = jax.local_device_count() - uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers//local_devices, -1])) + uu = vm_evolve(u.reshape([local_devices, cfg.multi.numbers // local_devices, -1])) # reshape based on device count uu = uu.reshape((-1, *uu.shape[2:])) - print('data saving...') - cwd = hydra.utils.get_original_cwd() + '/' + print("data saving...") + cwd = hydra.utils.get_original_cwd() + "/" Path(cwd + cfg.multi.save).mkdir(parents=True, exist_ok=True) - jnp.save(cwd + cfg.multi.save+'ReacDiff_Nu'+str(nu)[:5]+'_Rho'+str(rho)[:5], uu) - jnp.save(cwd + cfg.multi.save+'/x_coordinate', xc) - jnp.save(cwd + cfg.multi.save+'/t_coordinate', tc) + jnp.save( + cwd + cfg.multi.save + "ReacDiff_Nu" + str(nu)[:5] + "_Rho" + str(rho)[:5], uu + ) + jnp.save(cwd + cfg.multi.save + "/x_coordinate", xc) + jnp.save(cwd + cfg.multi.save + "/t_coordinate", tc) + -if __name__=='__main__': +if __name__ == "__main__": main() diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_DarcyFlow2D.sh b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_DarcyFlow2D.sh index 1ee0716..c948681 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_DarcyFlow2D.sh +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_DarcyFlow2D.sh @@ -1,10 +1,11 @@ +#!
/bin/bash nn=1 key=2020 -while [ $nn -le 50 ]; do +while [ "$nn" -le 50 ]; do CUDA_VISIBLE_DEVICES='0,1' python3 reaction_diffusion_2D_multi_solution_Hydra.py +multi=config_2D.yaml ++multi.init_k\ -ey=$key - nn=$(expr $nn + 1) - key=$(expr $key + 1) +ey="$key" + nn=$((nn + 1)) + key=$((key + 1)) echo "$nn" echo "$key" -done \ No newline at end of file +done diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_testset.sh b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_testset.sh index 7f6bd11..513aadc 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_testset.sh +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_testset.sh @@ -1,3 +1,4 @@ +#!/bin/bash #CUDA_VISIBLE_DEVICES='3' python3 reaction_diffusion_Hydra.py +args=Rho1e0_Nu1e0.yaml #CUDA_VISIBLE_DEVICES='3' python3 reaction_diffusion_Hydra.py +args=Rho1e0_Nu2e0.yaml #CUDA_VISIBLE_DEVICES='3' python3 reaction_diffusion_Hydra.py +args=Rho1e0_Nu5e0.yaml diff --git a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_trainset.sh b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_trainset.sh index bfc78db..21b8a51 100644 --- a/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_trainset.sh +++ b/pdebench/data_gen/data_gen_NLE/ReactionDiffusionEq/run_trainset.sh @@ -1,3 +1,4 @@ +#!/bin/bash CUDA_VISIBLE_DEVICES='0,1' python3 reaction_diffusion_multi_solution_Hydra.py +multi=Rho1e0_Nu1e0.yaml CUDA_VISIBLE_DEVICES='0,1' python3 reaction_diffusion_multi_solution_Hydra.py +multi=Rho1e0_Nu2e0.yaml CUDA_VISIBLE_DEVICES='0,1' python3 reaction_diffusion_multi_solution_Hydra.py +multi=Rho1e0_Nu5e0.yaml diff --git a/pdebench/data_gen/data_gen_NLE/config/config.yaml b/pdebench/data_gen/data_gen_NLE/config/config.yaml index b379b19..bc22dcf 100644 --- a/pdebench/data_gen/data_gen_NLE/config/config.yaml +++ b/pdebench/data_gen/data_gen_NLE/config/config.yaml @@ -11,6 +11,6 @@ hydra: args: type: 'ReacDiff' # 'advection'/'ReacDiff'/'burgers'/'CFD' dim: 1 - bd: 'periodic' + bd: "periodic" nbatch: 1000 -savedir: './save/ReacDiff/' +savedir: "./save/ReacDiff/" diff --git a/pdebench/data_gen/data_gen_NLE/utils.py b/pdebench/data_gen/data_gen_NLE/utils.py index d8ff81f..5e9cf87 100644 --- a/pdebench/data_gen/data_gen_NLE/utils.py +++ b/pdebench/data_gen/data_gen_NLE/utils.py @@ -1,42 +1,46 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- +from __future__ import annotations + import math as mt +from functools import partial + import jax -import numpy as np import jax.numpy as jnp -from jax import random, jit, nn, lax, vmap, scipy -from functools import partial +import numpy as np +from jax import jit, lax, nn, random, scipy, vmap # if double precision -#from jax.config import config -#config.update("jax_enable_x64", True) +# from jax.config import config +# config.update("jax_enable_x64", True) -def init(xc, mode='sin', u0=1., du=0.1): +def init(xc, mode="sin", u0=1.0, du=0.1): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - modes = ['sin', 'sinsin', 'Gaussian', 'react', 'possin'] - assert mode in modes, 'mode is not defined!!' + modes = ["sin", "sinsin", "Gaussian", "react", "possin"] + assert mode in modes, "mode is not defined!!"
+ if mode == "sin": # sinusoidal wave + u = u0 * jnp.sin((xc + 1.0) * jnp.pi) + elif mode == "sinsin": # sinusoidal wave + u = jnp.sin((xc + 1.0) * jnp.pi) + du * jnp.sin((xc + 1.0) * jnp.pi * 8.0) + elif mode == "Gaussian": # for diffusion check t0 = 0.01 - u = jnp.exp(-xc**2*jnp.pi/(4.*t0))/jnp.sqrt(2.*t0) - elif mode == 'react': # for reaction-diffusion eq. - logu = - 0.5*(xc - jnp.pi)**2/(0.25*jnp.pi)**2 + u = jnp.exp(-(xc**2) * jnp.pi / (4.0 * t0)) / jnp.sqrt(2.0 * t0) + elif mode == "react": # for reaction-diffusion eq. + logu = -0.5 * (xc - jnp.pi) ** 2 / (0.25 * jnp.pi) ** 2 u = jnp.exp(logu) - elif mode == 'possin': # sinusoidal wave - u = u0 * jnp.abs(jnp.sin((xc + 1.) * jnp.pi)) + elif mode == "possin": # sinusoidal wave + u = u0 * jnp.abs(jnp.sin((xc + 1.0) * jnp.pi)) return u @partial(jit, static_argnums=(1, 2, 3, 4)) -def init_multi(xc, numbers=10000, k_tot=8, init_key=2022, num_choise_k=2, if_norm=False): +def init_multi( + xc, numbers=10000, k_tot=8, init_key=2022, num_choise_k=2, if_norm=False +): """ :param xc: cell center coordinate :param mode: initial condition @@ -75,19 +79,21 @@ def _norm(carry): return u cond, u = carry - u = lax.cond(cond==True, _norm, _pass, u) + u = lax.cond(cond == True, _norm, _pass, u) return cond, u key = random.PRNGKey(init_key) - selected = random.randint(key, shape=[numbers, num_choise_k], minval=0, maxval=k_tot) + selected = random.randint( + key, shape=[numbers, num_choise_k], minval=0, maxval=k_tot + ) selected = nn.one_hot(selected, k_tot, dtype=int).sum(axis=1) - kk = jnp.pi * 2. * jnp.arange(1, k_tot + 1) * selected / (xc[-1] - xc[0]) + kk = jnp.pi * 2.0 * jnp.arange(1, k_tot + 1) * selected / (xc[-1] - xc[0]) amp = random.uniform(key, shape=[numbers, k_tot, 1]) key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[numbers, k_tot, 1]) + phs = 2.0 * jnp.pi * random.uniform(key, shape=[numbers, k_tot, 1]) _u = amp * jnp.sin(kk[:, :, jnp.newaxis] * xc[jnp.newaxis, jnp.newaxis, :] + phs) _u = jnp.sum(_u, axis=1) @@ -113,17 +119,20 @@ def _norm(carry): _u *= mask carry = if_norm, _u - _, _u = normalize(carry) # normalize value between [0, 1] for reaction-diffusion eq. + _, _u = normalize( + carry + ) # normalize value between [0, 1] for reaction-diffusion eq. return _u -def init_multi_2DRand(xc, yc, numbers=10000, init_key=2022, k_tot=4, duMx = 1.e1): + +def init_multi_2DRand(xc, yc, numbers=10000, init_key=2022, k_tot=4, duMx=1.0e1): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert numbers % jax.device_count() == 0, "numbers should be : GPUs x integer!!" def _pass(carry): return carry @@ -151,12 +160,12 @@ def __create_2DRand_init(u0, delu): qLx = dx * nx qLy = dy * ny - ## random field + # random field u = jnp.zeros([nx, ny]) key = random.PRNGKey(init_key) - kx0 = jnp.pi * 2. / qLx - ky0 = jnp.pi * 2. / qLy + kx0 = jnp.pi * 2.0 / qLx + ky0 = jnp.pi * 2.0 / qLy for j in range(-k_tot, k_tot + 1): ky = ky0 * j # from 1 to k_tot @@ -166,9 +175,9 @@ def __create_2DRand_init(u0, delu): continue # random phase key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[1]) # (vi, k) + phs = 2.0 * jnp.pi * random.uniform(key, shape=[1]) # (vi, k) - uk = 1. 
/ jnp.sqrt(jnp.sqrt(kx ** 2 + ky ** 2)) + uk = 1.0 / jnp.sqrt(jnp.sqrt(kx**2 + ky**2)) kdx = kx * xc[:, None] + ky * yc[None, :] u += uk * jnp.sin(kdx + phs) @@ -178,10 +187,10 @@ def __create_2DRand_init(u0, delu): return u key = random.PRNGKey(init_key) - u0 = random.uniform(key, shape=([numbers, 1]), minval=1.e-1, maxval=duMx) + u0 = random.uniform(key, shape=([numbers, 1]), minval=1.0e-1, maxval=duMx) key, subkey = random.split(key) - delu = random.uniform(key, shape=([numbers, 1]), minval=1.e-2, maxval=0.5) - u = jax.vmap(__create_2DRand_init, axis_name='i')(u0, delu) + delu = random.uniform(key, shape=([numbers, 1]), minval=1.0e-2, maxval=0.5) + u = jax.vmap(__create_2DRand_init, axis_name="i")(u0, delu) # perform window function key, subkey = random.split(key) @@ -199,123 +208,148 @@ def __create_2DRand_init(u0, delu): cond, mask, _xc, xL, xR, trns = vmap(select_W, 0, 0)(carry) u = u * mask - u = u + u0[:,:,None] * (1. - mask) + u = u + u0[:, :, None] * (1.0 - mask) return u -def init_HD(u, xc, yc, zc, mode='shocktube1', direc='x', init_key=2022, - M0=0.1, dk=1, gamma=.1666666667): + +def init_HD( + u, + xc, + yc, + zc, + mode="shocktube1", + direc="x", + init_key=2022, + M0=0.1, + dk=1, + gamma=0.1666666667, +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ print(mode) - modes = ['shocktube0','shocktube1','shocktube2','shocktube3','shocktube4','shocktube5','shocktube6','shocktube7', - '2D-shock', 'OTVortex', 'KHI', 'turbulence', 'sound_wave', 'c_discon', 'BlastWave'] - assert mode in modes, 'mode is not defined!!' + modes = [ + "shocktube0", + "shocktube1", + "shocktube2", + "shocktube3", + "shocktube4", + "shocktube5", + "shocktube6", + "shocktube7", + "2D-shock", + "OTVortex", + "KHI", + "turbulence", + "sound_wave", + "c_discon", + "BlastWave", + ] + assert mode in modes, "mode is not defined!!" _, nx, ny, nz = u.shape - if mode[:-1] == 'shocktube': # shock tube - - if direc == 'x': + if mode[:-1] == "shocktube": # shock tube + if direc == "x": iX, iY, iZ = 1, 2, 3 Ncell = nx _u = jnp.zeros_like(u) - elif direc == 'y': + elif direc == "y": iX, iY, iZ = 2, 3, 1 Ncell = ny _u = jnp.transpose(u, (0, 2, 3, 1)) - if direc == 'z': + if direc == "z": iX, iY, iZ = 3, 1, 2 Ncell = nz _u = jnp.transpose(u, (0, 3, 1, 2)) - if mode[-1] == '0': # test 0 for viscosity - nx0 = int(0.5*Ncell) - uL = [1., 0.75, 0.2, -0.3, 1.] - uR = [0.125, 0., 0.1, 0.9, 0.1] - elif mode[-1] == '1': # test 1 - nx0 = int(0.3*Ncell) - uL = [1., 0.75, 0., 0., 1.] 
- uR = [0.125, 0., 0., 0., 0.1] - elif mode[-1] == '2': # test 2 - nx0 = int(0.5*Ncell) - uL = [1., -2., 0., 0., 0.4] - uR = [1., 2., 0., 0., 0.4] - elif mode[-1] == '3': # test 3 - nx0 = int(0.5*Ncell) - uL = [1., 0., 0., 0., 1.e3] - uR = [1., 0., 0., 0., 0.01] - elif mode[-1] == '4': # test 4 - nx0 = int(0.4*Ncell) - uL = [5.99924, 19.5975, 0., 0., 460.894] - uR = [5.99242, -6.19633, 0., 0., 46.095] - elif mode[-1] == '5': # test 5 + if mode[-1] == "0": # test 0 for viscosity + nx0 = int(0.5 * Ncell) + uL = [1.0, 0.75, 0.2, -0.3, 1.0] + uR = [0.125, 0.0, 0.1, 0.9, 0.1] + elif mode[-1] == "1": # test 1 + nx0 = int(0.3 * Ncell) + uL = [1.0, 0.75, 0.0, 0.0, 1.0] + uR = [0.125, 0.0, 0.0, 0.0, 0.1] + elif mode[-1] == "2": # test 2 + nx0 = int(0.5 * Ncell) + uL = [1.0, -2.0, 0.0, 0.0, 0.4] + uR = [1.0, 2.0, 0.0, 0.0, 0.4] + elif mode[-1] == "3": # test 3 + nx0 = int(0.5 * Ncell) + uL = [1.0, 0.0, 0.0, 0.0, 1.0e3] + uR = [1.0, 0.0, 0.0, 0.0, 0.01] + elif mode[-1] == "4": # test 4 + nx0 = int(0.4 * Ncell) + uL = [5.99924, 19.5975, 0.0, 0.0, 460.894] + uR = [5.99242, -6.19633, 0.0, 0.0, 46.095] + elif mode[-1] == "5": # test 5 nx0 = int(0.8 * Ncell) - uL = [1., -19.59745, 0., 0., 1.e3] - uR = [1., -19.59745, 0., 0., 0.01] - elif mode[-1] == '6': # test 6 + uL = [1.0, -19.59745, 0.0, 0.0, 1.0e3] + uR = [1.0, -19.59745, 0.0, 0.0, 0.01] + elif mode[-1] == "6": # test 6 nx0 = int(0.5 * Ncell) - uL = [1.4, 0., 0., 0., 1.] - uR = [1., 0., 0., 0., 1.] - elif mode[-1] == '7': # test 7 + uL = [1.4, 0.0, 0.0, 0.0, 1.0] + uR = [1.0, 0.0, 0.0, 0.0, 1.0] + elif mode[-1] == "7": # test 7 nx0 = int(0.5 * Ncell) - uL = [1.4, 0.1, 0., 0., 1.] - uR = [1., 0.1, 0., 0., 1.] + uL = [1.4, 0.1, 0.0, 0.0, 1.0] + uR = [1.0, 0.1, 0.0, 0.0, 1.0] # left - _u = _u.at[0, :nx0].set(uL[0]) - _u = _u.at[iX, :nx0].set(uL[1]) - _u = _u.at[iY, :nx0].set(uL[2]) - _u = _u.at[iZ, :nx0].set(uL[3]) - _u = _u.at[4, :nx0].set(uL[4]) + _u = _u.at[0, :nx0].set(uL[0]) + _u = _u.at[iX, :nx0].set(uL[1]) + _u = _u.at[iY, :nx0].set(uL[2]) + _u = _u.at[iZ, :nx0].set(uL[3]) + _u = _u.at[4, :nx0].set(uL[4]) # right - _u = _u.at[0, nx0:].set(uR[0]) - _u = _u.at[iX, nx0:].set(uR[1]) - _u = _u.at[iY, nx0:].set(uR[2]) - _u = _u.at[iZ, nx0:].set(uR[3]) - _u = _u.at[4, nx0:].set(uR[4]) + _u = _u.at[0, nx0:].set(uR[0]) + _u = _u.at[iX, nx0:].set(uR[1]) + _u = _u.at[iY, nx0:].set(uR[2]) + _u = _u.at[iZ, nx0:].set(uR[3]) + _u = _u.at[4, nx0:].set(uR[4]) - if direc == 'x': + if direc == "x": u = _u - elif direc == 'y': + elif direc == "y": u = jnp.transpose(_u, (0, 3, 1, 2)) - elif direc == 'z': + elif direc == "z": u = jnp.transpose(_u, (0, 2, 3, 1)) - elif mode == '2D-shock': # shock tube - u1 = [0.5, 0., 0., 0., 0.1] - u2 = [0.1, 0., 1., 0., 1.] - u3 = [0.1, 1., 0., 0., 1.]
- u4 = [0.1, 0., 0., 0., 0.01] + elif mode == "2D-shock": # shock tube + u1 = [0.5, 0.0, 0.0, 0.0, 0.1] + u2 = [0.1, 0.0, 1.0, 0.0, 1.0] + u3 = [0.1, 1.0, 0.0, 0.0, 1.0] + u4 = [0.1, 0.0, 0.0, 0.0, 0.01] # left-bottom - u = u.at[0, :nx//2, :ny//2].set(u1[0]) - u = u.at[1, :nx//2, :ny//2].set(u1[1]) - u = u.at[2, :nx//2, :ny//2].set(u1[2]) - u = u.at[3, :nx//2, :ny//2].set(u1[3]) - u = u.at[4, :nx//2, :ny//2].set(u1[4]) + u = u.at[0, : nx // 2, : ny // 2].set(u1[0]) + u = u.at[1, : nx // 2, : ny // 2].set(u1[1]) + u = u.at[2, : nx // 2, : ny // 2].set(u1[2]) + u = u.at[3, : nx // 2, : ny // 2].set(u1[3]) + u = u.at[4, : nx // 2, : ny // 2].set(u1[4]) # right-bottom - u = u.at[0, nx//2:, :ny//2].set(u2[0]) - u = u.at[1, nx//2:, :ny//2].set(u2[1]) - u = u.at[2, nx//2:, :ny//2].set(u2[2]) - u = u.at[3, nx//2:, :ny//2].set(u2[3]) - u = u.at[4, nx//2:, :ny//2].set(u2[4]) + u = u.at[0, nx // 2 :, : ny // 2].set(u2[0]) + u = u.at[1, nx // 2 :, : ny // 2].set(u2[1]) + u = u.at[2, nx // 2 :, : ny // 2].set(u2[2]) + u = u.at[3, nx // 2 :, : ny // 2].set(u2[3]) + u = u.at[4, nx // 2 :, : ny // 2].set(u2[4]) # left-top - u = u.at[0, :nx//2, ny//2:].set(u3[0]) - u = u.at[1, :nx//2, ny//2:].set(u3[1]) - u = u.at[2, :nx//2, ny//2:].set(u3[2]) - u = u.at[3, :nx//2, ny//2:].set(u3[3]) - u = u.at[4, :nx//2, ny//2:].set(u3[4]) + u = u.at[0, : nx // 2, ny // 2 :].set(u3[0]) + u = u.at[1, : nx // 2, ny // 2 :].set(u3[1]) + u = u.at[2, : nx // 2, ny // 2 :].set(u3[2]) + u = u.at[3, : nx // 2, ny // 2 :].set(u3[3]) + u = u.at[4, : nx // 2, ny // 2 :].set(u3[4]) # right-top - u = u.at[0, nx//2:, ny//2:].set(u4[0]) - u = u.at[1, nx//2:, ny//2:].set(u4[1]) - u = u.at[2, nx//2:, ny//2:].set(u4[2]) - u = u.at[3, nx//2:, ny//2:].set(u4[3]) - u = u.at[4, nx//2:, ny//2:].set(u4[4]) + u = u.at[0, nx // 2 :, ny // 2 :].set(u4[0]) + u = u.at[1, nx // 2 :, ny // 2 :].set(u4[1]) + u = u.at[2, nx // 2 :, ny // 2 :].set(u4[2]) + u = u.at[3, nx // 2 :, ny // 2 :].set(u4[3]) + u = u.at[4, nx // 2 :, ny // 2 :].set(u4[4]) - elif mode == 'OTVortex': # shock tube + elif mode == "OTVortex": # shock tube dx = xc[1] - xc[0] dy = yc[1] - yc[0] qLx = dx * xc.shape[0] @@ -326,34 +360,34 @@ def init_HD(u, xc, yc, zc, mode='shocktube1', direc='x', init_key=2022, _yc = _yc.at[2:-2].set(yc) _xc = _xc.at[:2].set(jnp.array([-2 * dx, -dx])) _yc = _yc.at[:2].set(jnp.array([-2 * dy, -dy])) - _xc = _xc.at[-2:].set(jnp.array([xc[-1] + dx, xc[-1] + 2. * dx])) - _yc = _yc.at[-2:].set(jnp.array([yc[-1] + dy, yc[-1] + 2. * dy])) + _xc = _xc.at[-2:].set(jnp.array([xc[-1] + dx, xc[-1] + 2.0 * dx])) + _yc = _yc.at[-2:].set(jnp.array([yc[-1] + dy, yc[-1] + 2.0 * dy])) - u = u.at[0].add(gamma ** 2) - u = u.at[1].set(- jnp.sin(2. * jnp.pi * _yc[None, :, None] / qLy)) - u = u.at[2].set(jnp.sin(2. * jnp.pi * _xc[:, None, None] / qLx)) - u = u.at[3].add(0.) - u = u.at[4].add(gamma) + u = u.at[0].add(gamma**2) + u = u.at[1].set(-jnp.sin(2.0 * jnp.pi * _yc[None, :, None] / qLy)) + u = u.at[2].set(jnp.sin(2.0 * jnp.pi * _xc[:, None, None] / qLx)) + u = u.at[3].add(0.0) + u = u.at[4].add(gamma) - elif mode == 'KHI': # Kelvin-Helmholtz instability + elif mode == "KHI": # Kelvin-Helmholtz instability nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - #gamma = 1.666666666666667 - #k = 1. # moved to the external input - d0_u = 2./(dk + 1.) + # gamma = 1.666666666666667 + # k = 1. # moved to the external input + d0_u = 2.0 / (dk + 1.0) d0_d = dk * d0_u d0 = 0.5 * (d0_u + d0_d) - #M0 = 0.1 # Mach number # moved to external input - ux = 1.
- cs = ux/M0 - #ux = 0.1 * cs # << cs - p0 = cs**2 * d0/gamma + # M0 = 0.1 # Mach number # moved to external input + ux = 1.0 + cs = ux / M0 + # ux = 0.1 * cs # << cs + p0 = cs**2 * d0 / gamma dx = xc[1] - xc[0] dy = yc[1] - yc[0] qLx = dx * nx qLy = dy * ny - kk = 4. # wave number - kx = kk * 2. * jnp.pi / qLx - dl = 5.e-3 * qLy + kk = 4.0 # wave number + kx = kk * 2.0 * jnp.pi / qLx + dl = 5.0e-3 * qLy bound = 0.5 * qLy + dl * jnp.sin(kx * xc) # assuming yL = 0 @@ -365,19 +399,18 @@ def init_HD(u, xc, yc, zc, mode='shocktube1', direc='x', init_key=2022, vx = vx.at[i, :, :].set(_vx[:, None]) dd = dd.at[i, :, :].set(_dd[:, None]) - u = u.at[0, 2:-2, 2:-2, 2:-2].set(dd) - u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) - u = u.at[2].set(0.) - u = u.at[3].add(0.) - u = u.at[4].add(p0) - - elif mode == 'turbulence': # 3D decaying turbulence + u = u.at[0, 2:-2, 2:-2, 2:-2].set(dd) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) + u = u.at[2].set(0.0) + u = u.at[3].add(0.0) + u = u.at[4].add(p0) + elif mode == "turbulence": # 3D decaying turbulence nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - d0 = 1. - cs = 1./M0 - u0 = 1. # fixed - p0 = cs ** 2 * d0 / gamma + d0 = 1.0 + cs = 1.0 / M0 + u0 = 1.0 # fixed + p0 = cs**2 * d0 / gamma dx = xc[1] - xc[0] dy = yc[1] - yc[0] @@ -386,15 +419,19 @@ qLy = dy * ny qLz = dz * nz - ## random velocity field + # random velocity field k_tot = 3 - vx, vy, vz = np.zeros([nx, ny, nz]), np.zeros([nx, ny, nz]), np.zeros([nx, ny, nz]) + vx, vy, vz = ( + np.zeros([nx, ny, nz]), + np.zeros([nx, ny, nz]), + np.zeros([nx, ny, nz]), + ) key = random.PRNGKey(init_key) - kx0 = jnp.pi * 2. / qLx - ky0 = jnp.pi * 2. / qLy - kz0 = jnp.pi * 2. / qLz + kx0 = jnp.pi * 2.0 / qLx + ky0 = jnp.pi * 2.0 / qLy + kz0 = jnp.pi * 2.0 / qLz for k in range(-k_tot, k_tot + 1): kz = kz0 * k # from 1 to k_tot @@ -406,21 +443,25 @@ def init_HD(u, xc, yc, zc, mode='shocktube1', direc='x', init_key=2022, continue # random phase key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[3]) # (vi, k) - - uk = 1./jnp.sqrt(kx**2 + ky**2 + kz**2) - kdx = kx * xc[:,None,None] + ky * yc[None,:,None] + kz * zc[None,None,:] + phs = 2.0 * jnp.pi * random.uniform(key, shape=[3]) # (vi, k) + + uk = 1.0 / jnp.sqrt(kx**2 + ky**2 + kz**2) + kdx = ( + kx * xc[:, None, None] + + ky * yc[None, :, None] + + kz * zc[None, None, :] + ) vx += uk * jnp.sin(kdx + phs[0]) vy += uk * jnp.sin(kdx + phs[1]) vz += uk * jnp.sin(kdx + phs[2]) - del(kdx, uk, phs) + del (kdx, uk, phs) # Helmholtz decomposition to subtract expansion: k.vk - dfx, dfy, dfz = 1./qLx, 1./qLy, 1./qLz - fx = dfx * (np.arange(nx) - 1. - nx//2) - fy = dfy * (np.arange(ny) - 1. - ny//2) - fz = dfz * (np.arange(nz) - 1.
- nz//2) + dfx, dfy, dfz = 1.0 / qLx, 1.0 / qLy, 1.0 / qLz + fx = dfx * (np.arange(nx) - 1.0 - nx // 2) + fy = dfy * (np.arange(ny) - 1.0 - ny // 2) + fz = dfz * (np.arange(nz) - 1.0 - nz // 2) vkx = np.fft.fftn(vx) * dx * dy * dz vky = np.fft.fftn(vy) * dx * dy * dz @@ -431,7 +472,7 @@ vky = np.fft.fftshift(vky) vkz = np.fft.fftshift(vkz) - #for k in range(nz): + # for k in range(nz): # for j in range(ny): # for i in range(nx): # ff = (fx[i]**2 + fy[j]**2 + fz[k]**2) @@ -442,14 +483,16 @@ # vky -= fdv * fy[j] * fi # vkz -= fdv * fz[k] * fi - fi = fx[:,None,None]**2 + fy[None,:,None]**2 + fz[None,None,:]**2 - fi = np.where(fi > 1.e-8, 1./fi, 0.) + fi = fx[:, None, None] ** 2 + fy[None, :, None] ** 2 + fz[None, None, :] ** 2 + fi = np.where(fi > 1.0e-8, 1.0 / fi, 0.0) - fdv = (fx[:,None,None] * vkx + fy[None,:,None] * vky + fz[None,None,:] * vkz) * fi - vkx -= fdv * fx[:,None,None] - vky -= fdv * fy[None,:,None] - vkz -= fdv * fz[None,None,:] - del(fi, fdv) + fdv = ( + fx[:, None, None] * vkx + fy[None, :, None] * vky + fz[None, None, :] * vkz + ) * fi + vkx -= fdv * fx[:, None, None] + vky -= fdv * fy[None, :, None] + vkz -= fdv * fz[None, None, :] + del (fi, fdv) # shift back to original order vkx = np.fft.ifftshift(vkx) @@ -467,19 +510,19 @@ vy *= u0 / vtot vz *= u0 / vtot - u = u.at[0].set(d0) - u = u.at[1,2:-2,2:-2,2:-2].set(jnp.array(vx)) - u = u.at[2,2:-2,2:-2,2:-2].set(jnp.array(vy)) - u = u.at[3,2:-2,2:-2,2:-2].add(jnp.array(vz)) - u = u.at[4].add(p0) + u = u.at[0].set(d0) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(jnp.array(vx)) + u = u.at[2, 2:-2, 2:-2, 2:-2].set(jnp.array(vy)) + u = u.at[3, 2:-2, 2:-2, 2:-2].add(jnp.array(vz)) + u = u.at[4].add(p0) - elif mode == 'BlastWave': # Kelvin-Helmholtz instability + elif mode == "BlastWave": # Kelvin-Helmholtz instability """ Stone Gardiner 2009 without B """ nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - db = 1. + db = 1.0 pb = 0.1 - pc = 1.e2 # central region + pc = 1.0e2 # central region dx = xc[1] - xc[0] dy = yc[1] - yc[0] @@ -487,97 +530,112 @@ qLx = dx * nx qLy = dy * ny qLz = dz * nz - qL = (qLx + qLy + qLz)/3. - - #p0 = jnp.ones([nx, ny, nz]) * pb - RR = jnp.sqrt((xc[:,None,None] - xc[nx//2])**2 - + (yc[None,:,None] - yc[ny//2])**2 - + (zc[None,None,:] - zc[nz//2])**2) - p0 = jnp.where(RR > 0.05 * qL, pb, pc) - #for k in range(nz): + qL = (qLx + qLy + qLz) / 3.0 + + # p0 = jnp.ones([nx, ny, nz]) * pb + RR = jnp.sqrt( + (xc[:, None, None] - xc[nx // 2]) ** 2 + + (yc[None, :, None] - yc[ny // 2]) ** 2 + + (zc[None, None, :] - zc[nz // 2]) ** 2 + ) + p0 = jnp.where(RR > 0.05 * qL, pb, pc) + # for k in range(nz): # for j in range(ny): # for i in range(nx): # RR = jnp.sqrt((xc[i] - 0.5 * qLx)**2 + (yc[j] - 0.5 * qLy)**2 + (zc[k] - 0.5 * qLz)**2) # if RR < 0.1 * qL: # p0 = p0.at[i,j,k].set(pc) - u = u.at[0].set(db) - u = u.at[1].set(0.) - u = u.at[2].set(0.) - u = u.at[3].set(0.) - u = u.at[4, 2:-2, 2:-2, 2:-2].set(p0) + u = u.at[0].set(db) + u = u.at[1].set(0.0) + u = u.at[2].set(0.0) + u = u.at[3].set(0.0) + u = u.at[4, 2:-2, 2:-2, 2:-2].set(p0) - elif mode == 'sound_wave': # sound wave + elif mode == "sound_wave": # sound wave nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] gamma = 1.666666666666667 - d0 = 1. - cs = 2.
- p0 = cs**2 * d0/gamma - if direc == 'x': + d0 = 1.0 + cs = 2.0 + p0 = cs**2 * d0 / gamma + if direc == "x": iX, iY, iZ = 1, 2, 3 XC = xc qL = (xc[1] - xc[0]) * nx _u = jnp.zeros_like(u) - elif direc == 'y': + elif direc == "y": iX, iY, iZ = 2, 3, 1 XC = yc qL = (yc[1] - yc[0]) * ny _u = jnp.transpose(u, (0, 2, 3, 1)) - if direc == 'z': + if direc == "z": iX, iY, iZ = 3, 1, 2 XC = zc qL = (zc[1] - zc[0]) * nz _u = jnp.transpose(u, (0, 3, 1, 2)) - kk = 2. * jnp.pi / qL - _u = _u.at[0,2:-2].set(d0 * (1. + 1.e-3 * jnp.sin(kk * XC[:, None, None]))) - _u = _u.at[iX].set((_u[0] - d0) * cs /d0) - _u = _u.at[4].set(p0 + cs**2 * (_u[0] - d0) ) + kk = 2.0 * jnp.pi / qL + _u = _u.at[0, 2:-2].set(d0 * (1.0 + 1.0e-3 * jnp.sin(kk * XC[:, None, None]))) + _u = _u.at[iX].set((_u[0] - d0) * cs / d0) + _u = _u.at[4].set(p0 + cs**2 * (_u[0] - d0)) - if direc == 'x': + if direc == "x": u = _u - elif direc == 'y': + elif direc == "y": u = jnp.transpose(_u, (0, 3, 1, 2)) - elif direc == 'z': + elif direc == "z": u = jnp.transpose(_u, (0, 2, 3, 1)) - elif mode == 'c_discon': # tangent discontinuity + elif mode == "c_discon": # tangent discontinuity nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - d0 = 1. - p0 = 1. + d0 = 1.0 + p0 = 1.0 vy0 = 0.1 - if direc == 'x': + if direc == "x": iX, iY, iZ = 1, 2, 3 XC = xc qL = (xc[1] - xc[0]) * nx _u = jnp.zeros_like(u) - elif direc == 'y': + elif direc == "y": iX, iY, iZ = 2, 3, 1 XC = yc qL = (yc[1] - yc[0]) * ny _u = jnp.transpose(u, (0, 2, 3, 1)) - if direc == 'z': + if direc == "z": iX, iY, iZ = 3, 1, 2 XC = zc qL = (zc[1] - zc[0]) * nz _u = jnp.transpose(u, (0, 3, 1, 2)) - _u = _u.at[0].set(d0) - _u = _u.at[iY, 2:-2].set(vy0 * scipy.special.erf(0.5 * XC[:, None, None] / jnp.sqrt(0.1))) - _u = _u.at[4].set(p0) + _u = _u.at[0].set(d0) + _u = _u.at[iY, 2:-2].set( + vy0 * scipy.special.erf(0.5 * XC[:, None, None] / jnp.sqrt(0.1)) + ) + _u = _u.at[4].set(p0) - if direc == 'x': + if direc == "x": u = _u - elif direc == 'y': + elif direc == "y": u = jnp.transpose(_u, (0, 3, 1, 2)) - elif direc == 'z': + elif direc == "z": u = jnp.transpose(_u, (0, 2, 3, 1)) return u + @partial(jit, static_argnums=(3, 4, 5, 6, 7, 8, 9)) -def init_multi_HD(xc, yc, zc, numbers=10000, k_tot=10, init_key=2022, num_choise_k=2, - if_renorm=False, umax=1.e4, umin=1.e-8): +def init_multi_HD( + xc, + yc, + zc, + numbers=10000, + k_tot=10, + init_key=2022, + num_choise_k=2, + if_renorm=False, + umax=1.0e4, + umin=1.0e-8, +): """ :param xc: cell center coordinate :param mode: initial condition @@ -615,31 +673,37 @@ def _norm(carry): u /= jnp.max(u, axis=1, keepdims=True) # normalize key, subkey = random.split(key) - m_val = random.uniform(key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax)) + m_val = random.uniform( + key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax) + ) m_val = jnp.exp(m_val) key, subkey = random.split(key) - b_val = random.uniform(key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax)) + b_val = random.uniform( + key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax) + ) b_val = jnp.exp(b_val) return u * m_val[:, None] + b_val[:, None], key cond, u, key = carry carry = u, key - u, key = lax.cond(cond==True, _norm, _pass, carry) + u, key = lax.cond(cond == True, _norm, _pass, carry) return cond, u, key - assert yc.shape[0] == 1 and zc.shape[0] == 1, 'ny and nz is assumed to be 1!!' - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!'
+ @partial(jit, static_argnums=(3, 4, 5, 6, 7, 8, 9)) -def init_multi_HD(xc, yc, zc, numbers=10000, k_tot=10, init_key=2022, num_choise_k=2, - if_renorm=False, umax=1.e4, umin=1.e-8): +def init_multi_HD( + xc, + yc, + zc, + numbers=10000, + k_tot=10, + init_key=2022, + num_choise_k=2, + if_renorm=False, + umax=1.0e4, + umin=1.0e-8, +): """ :param xc: cell center coordinate :param mode: initial condition @@ -615,31 +673,37 @@ def _norm(carry): u /= jnp.max(u, axis=1, keepdims=True) # normalize key, subkey = random.split(key) - m_val = random.uniform(key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax)) + m_val = random.uniform( + key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax) + ) m_val = jnp.exp(m_val) key, subkey = random.split(key) - b_val = random.uniform(key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax)) + b_val = random.uniform( + key, shape=[numbers], minval=mt.log(umin), maxval=mt.log(umax) + ) b_val = jnp.exp(b_val) return u * m_val[:, None] + b_val[:, None], key cond, u, key = carry carry = u, key - u, key = lax.cond(cond==True, _norm, _pass, carry) + u, key = lax.cond(cond, _norm, _pass, carry) return cond, u, key - assert yc.shape[0] == 1 and zc.shape[0] == 1, 'ny and nz is assumed to be 1!!' - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert yc.shape[0] == 1 and zc.shape[0] == 1, "ny and nz are assumed to be 1!!" + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" key = random.PRNGKey(init_key) - selected = random.randint(key, shape=[numbers, num_choise_k], minval=0, maxval=k_tot) + selected = random.randint( + key, shape=[numbers, num_choise_k], minval=0, maxval=k_tot + ) selected = nn.one_hot(selected, k_tot, dtype=int).sum(axis=1) - kk = jnp.pi * 2. * jnp.arange(1, k_tot + 1) * selected / (xc[-1] - xc[0]) + kk = jnp.pi * 2.0 * jnp.arange(1, k_tot + 1) * selected / (xc[-1] - xc[0]) amp = random.uniform(key, shape=[numbers, k_tot, 1]) key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[numbers, k_tot, 1]) + phs = 2.0 * jnp.pi * random.uniform(key, shape=[numbers, k_tot, 1]) _u = amp * jnp.sin(kk[:, :, jnp.newaxis] * xc[jnp.newaxis, jnp.newaxis, :] + phs) _u = jnp.sum(_u, axis=1) @@ -667,17 +731,20 @@ def _norm(carry): carry = if_renorm, _u, key _, _u, _ = renormalize(carry) # renormalize value between a given values - return _u[...,None,None] + return _u[..., None, None]
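`init_multi_HD` draws `num_choise_k` wavenumber indices per sample and turns them into a multi-hot mask by summing one-hot vectors; note that a mode drawn twice gets weight 2 rather than 1. A small sketch of just that masking step (hypothetical sizes):

    from jax import nn, random

    key = random.PRNGKey(0)
    sel = random.randint(key, shape=[4, 2], minval=0, maxval=10)  # 2 draws out of 10 modes
    mask = nn.one_hot(sel, 10, dtype=int).sum(axis=1)             # (4, 10); duplicate draws sum to 2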
+ -#@partial(jit, static_argnums=(3, 4, 5, 6)) -def init_multi_HD_shock(xc, yc, zc, numbers=10000, init_key=2022, umax=1.e4, umin=1.e-8): +# @partial(jit, static_argnums=(3, 4, 5, 6)) +def init_multi_HD_shock( + xc, yc, zc, numbers=10000, init_key=2022, umax=1.0e4, umin=1.0e-8 +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert yc.shape[0] == 1 and zc.shape[0] == 1, 'ny and nz is assumed to be 1!!' - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert yc.shape[0] == 1 and zc.shape[0] == 1, "ny and nz are assumed to be 1!!" + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" def select_var(carry): def _func(carry): @@ -688,7 +755,7 @@ def _pass(carry): return carry vmin, vmax = carry - vmin, vmax = lax.cond(vmin > 0., _func, _pass, carry) + vmin, vmax = lax.cond(vmin > 0.0, _func, _pass, carry) return vmin, vmax nx = xc.shape[0] @@ -709,33 +776,45 @@ def _pass(carry): u = jnp.arange(xc.shape[0]) u = jnp.tile(u, (numbers, 1)) - u = jax.vmap(jnp.where, axis_name='i')(u < nx0s, QLs, QRs) - return u[...,None,None] - -#@partial(jit, static_argnums=(4, 5, 6, 7, 8, 9)) -def init_multi_HD_KH(u, xc, yc, zc, numbers=10000, init_key=2022, M0=0.1, dkMx=2., kmax=4., gamma=1.666666667): + u = jax.vmap(jnp.where, axis_name="i")(u < nx0s, QLs, QRs) + return u[..., None, None]
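`init_multi_HD_shock` builds each piecewise-constant shock-tube profile by comparing a cell-index array with a per-sample interface location, so no Python loop over cells is needed. The core idea in isolation (toy values):

    import jax.numpy as jnp

    idx = jnp.arange(8)                      # cell indices
    nx0, QL, QR = 3, 2.0, 0.5                # interface cell and left/right states
    profile = jnp.where(idx < nx0, QL, QR)   # -> [2. 2. 2. 0.5 0.5 0.5 0.5 0.5]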
+ + +# @partial(jit, static_argnums=(4, 5, 6, 7, 8, 9)) +def init_multi_HD_KH( + u, + xc, + yc, + zc, + numbers=10000, + init_key=2022, + M0=0.1, + dkMx=2.0, + kmax=4.0, + gamma=1.666666667, +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert zc.shape[0] == 1, 'nz is assumed to be 1!!' - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert zc.shape[0] == 1, "nz is assumed to be 1!!" + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" def __create_KH_init(u, dk, kk): nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - d0_u = 2./(dk + 1.) + d0_u = 2.0 / (dk + 1.0) d0_d = dk * d0_u d0 = 0.5 * (d0_u + d0_d) - ux = 1. - cs = ux/M0 - p0 = cs**2 * d0/gamma + ux = 1.0 + cs = ux / M0 + p0 = cs**2 * d0 / gamma dx = xc[1] - xc[0] dy = yc[1] - yc[0] qLx = dx * nx qLy = dy * ny - kx = kk * 2. * jnp.pi / qLx - dl = 5.e-3 * qLy + kx = kk * 2.0 * jnp.pi / qLx + dl = 5.0e-3 * qLy # (numbers, nx) bound = 0.5 * qLy + dl * jnp.sin(kx * xc) # assuming yL = 0 @@ -747,40 +826,43 @@ def __create_KH_init(u, dk, kk): vx = vx.at[i, :, :].set(_vx[:, None]) dd = dd.at[i, :, :].set(_dd[:, None]) - u = u.at[0, 2:-2, 2:-2, 2:-2].set(dd) - u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) - u = u.at[2].set(0.) - u = u.at[3].add(0.) - u = u.at[4].add(p0) + u = u.at[0, 2:-2, 2:-2, 2:-2].set(dd) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) + u = u.at[2].set(0.0) + u = u.at[3].add(0.0) + u = u.at[4].add(p0) return u # create random density ratio key = random.PRNGKey(init_key) - dk = random.uniform(key, shape=([numbers, 1]), minval=1. / dkMx, maxval=dkMx) - #create random wave-numbers + dk = random.uniform(key, shape=([numbers, 1]), minval=1.0 / dkMx, maxval=dkMx) + # create random wave-numbers key, subkey = random.split(key) kk = random.randint(key, shape=([numbers, 1]), minval=1, maxval=kmax) - print('vmap...') - u = jax.vmap(__create_KH_init, axis_name='i')(u, dk, kk) + print("vmap...") + u = jax.vmap(__create_KH_init, axis_name="i")(u, dk, kk) return u
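`jax.vmap` maps the single-sample initializer over the leading batch axis of every argument, so `__create_KH_init` can be written for one field and vectorized afterwards. The same pattern with a stripped-down initializer (names and shapes are illustrative):

    import jax
    import jax.numpy as jnp

    def init_one(u, a):                # operates on one sample: u (5, nx), a (1,)
        return u.at[0].set(a[0])

    u_batch = jnp.zeros((16, 5, 8))    # leading axis is the sample index
    a_batch = jnp.ones((16, 1))
    u_batch = jax.vmap(init_one)(u_batch, a_batch)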
-#@partial(jit, static_argnums=(4, 5, 6, 7, 8)) -def init_multi_HD_2DTurb(u, xc, yc, zc, numbers=10000, init_key=2022, M0=0.1, k_tot=4., gamma=1.666666667): + +# @partial(jit, static_argnums=(4, 5, 6, 7, 8)) +def init_multi_HD_2DTurb( + u, xc, yc, zc, numbers=10000, init_key=2022, M0=0.1, k_tot=4.0, gamma=1.666666667 +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert zc.shape[0] == 1, 'nz is assumed to be 1!!' - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert zc.shape[0] == 1, "nz is assumed to be 1!!" + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" def __create_2DTurb_init(u, keys): nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - d0 = 1. - cs = 1./M0 - u0 = 1. # fixed - p0 = cs ** 2 * d0 / gamma + d0 = 1.0 + cs = 1.0 / M0 + u0 = 1.0 # fixed + p0 = cs**2 * d0 / gamma dx = xc[1] - xc[0] dy = yc[1] - yc[0] @@ -788,13 +870,13 @@ def __create_2DTurb_init(u, keys): qLx = dx * nx qLy = dy * ny - ## random velocity field + # random velocity field vx, vy = jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]) key = random.PRNGKey(keys) - kx0 = jnp.pi * 2. / qLx - ky0 = jnp.pi * 2. / qLy + kx0 = jnp.pi * 2.0 / qLx + ky0 = jnp.pi * 2.0 / qLy for j in range(-k_tot, k_tot + 1): ky = ky0 * j # from 1 to k_tot @@ -804,9 +886,9 @@ def __create_2DTurb_init(u, keys): continue # random phase key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[2]) # (vi, k) + phs = 2.0 * jnp.pi * random.uniform(key, shape=[2]) # (vi, k) - uk = 1. / jnp.sqrt(jnp.sqrt(kx ** 2 + ky ** 2)) + uk = 1.0 / jnp.sqrt(jnp.sqrt(kx**2 + ky**2)) kdx = kx * xc[:, None, None] + ky * yc[None, :, None] vx += uk * jnp.sin(kdx + phs[0]) vy += uk * jnp.sin(kdx + phs[1]) @@ -814,9 +896,9 @@ def __create_2DTurb_init(u, keys): del (kdx, uk, phs) # Helmholtz decomposition to subtract expansion: k.vk - dfx, dfy = 1. / qLx, 1. / qLy - fx = dfx * (jnp.arange(nx) - 1. - nx // 2) - fy = dfy * (jnp.arange(ny) - 1. - ny // 2) + dfx, dfy = 1.0 / qLx, 1.0 / qLy + fx = dfx * (jnp.arange(nx) - 1.0 - nx // 2) + fy = dfy * (jnp.arange(ny) - 1.0 - ny // 2) vkx = jnp.fft.fftn(vx) * dx * dy vky = jnp.fft.fftn(vy) * dx * dy @@ -826,7 +908,7 @@ def __create_2DTurb_init(u, keys): vky = jnp.fft.fftshift(vky) fi = fx[:, None, None] ** 2 + fy[None, :, None] ** 2 - fi = jnp.where(fi > 1.e-8, 1. / fi, 0.) + fi = jnp.where(fi > 1.0e-8, 1.0 / fi, 0.0) fdv = (fx[:, None, None] * vkx + fy[None, :, None] * vky) * fi vkx -= fdv * fx[:, None, None] @@ -842,31 +924,50 @@ def __create_2DTurb_init(u, keys): vy = jnp.fft.ifftn(vky).real * dfx * dfy # renormalize total velocity - vtot = jnp.sqrt(vx ** 2 + vy ** 2).mean() + vtot = jnp.sqrt(vx**2 + vy**2).mean() vx *= u0 / vtot vy *= u0 / vtot - u = u.at[0].set(d0) - u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) - u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) - u = u.at[4].add(p0) + u = u.at[0].set(d0) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) + u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) + u = u.at[4].add(p0) return u key = random.PRNGKey(init_key) - keys = random.randint(key, [numbers,], minval=0, maxval=10000000) - u = jax.vmap(__create_2DTurb_init, axis_name='i')(u, keys) + keys = random.randint( + key, + [ + numbers, + ], + minval=0, + maxval=10000000, + ) + u = jax.vmap(__create_2DTurb_init, axis_name="i")(u, keys) return u -def init_multi_HD_2DRand(u, xc, yc, zc, numbers=10000, init_key=2022, M0=0.1, k_tot=4., gamma=1.666666667, - dMx=1.e1, TMx=1.e1): + +def init_multi_HD_2DRand( + u, + xc, + yc, + zc, + numbers=10000, + init_key=2022, + M0=0.1, + k_tot=4.0, + gamma=1.666666667, + dMx=1.0e1, + TMx=1.0e1, +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert zc.shape[0] == 1, 'nz is assumed to be 1!!' - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert zc.shape[0] == 1, "nz is assumed to be 1!!" + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" def _pass(carry): return carry @@ -898,12 +999,17 @@ def __create_2DRand_init(u, d0, T0, delD, delP, keys): qLx = dx * nx qLy = dy * ny - ## random velocity field - d, p, vx, vy = jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]) + # random velocity field + d, p, vx, vy = ( + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + ) key = random.PRNGKey(keys) - kx0 = jnp.pi * 2. / qLx - ky0 = jnp.pi * 2. / qLy + kx0 = jnp.pi * 2.0 / qLx + ky0 = jnp.pi * 2.0 / qLy for j in range(-k_tot, k_tot + 1): ky = ky0 * j # from 1 to k_tot @@ -913,9 +1019,9 @@ def __create_2DRand_init(u, d0, T0, delD, delP, keys): continue # random phase key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[4]) # (vi, k) + phs = 2.0 * jnp.pi * random.uniform(key, shape=[4]) # (vi, k) - uk = 1. / jnp.sqrt(jnp.sqrt(kx ** 2 + ky ** 2)) + uk = 1.0 / jnp.sqrt(jnp.sqrt(kx**2 + ky**2)) kdx = kx * xc[:, None, None] + ky * yc[None, :, None] vx += uk * jnp.sin(kdx + phs[0]) vy += uk * jnp.sin(kdx + phs[1]) @@ -925,33 +1031,41 @@ def __create_2DRand_init(u, d0, T0, delD, delP, keys): del (kdx, uk, phs) # renormalize total velocity - vtot = jnp.sqrt(vx ** 2 + vy ** 2).mean() + vtot = jnp.sqrt(vx**2 + vy**2).mean() vx *= u0 / vtot vy *= u0 / vtot - #d = d0 + delD * d / jnp.abs(d).mean() - #p = p0 + delP * p / jnp.abs(p).mean() - d = d0 * (1. + delD * d / jnp.abs(d).mean()) - p = p0 * (1. + delP * p / jnp.abs(p).mean()) - - u = u.at[0, 2:-2, 2:-2, 2:-2].set(d) - u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) - u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) - u = u.at[4, 2:-2, 2:-2, 2:-2].set(p) + # d = d0 + delD * d / jnp.abs(d).mean() + # p = p0 + delP * p / jnp.abs(p).mean() + d = d0 * (1.0 + delD * d / jnp.abs(d).mean()) + p = p0 * (1.0 + delP * p / jnp.abs(p).mean()) + + u = u.at[0, 2:-2, 2:-2, 2:-2].set(d) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) + u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) + u = u.at[4, 2:-2, 2:-2, 2:-2].set(p) return u key = random.PRNGKey(init_key) - d0 = random.uniform(key, shape=([numbers, 1]), minval=1.e-1, maxval=dMx) + d0 = random.uniform(key, shape=([numbers, 1]), minval=1.0e-1, maxval=dMx) key, subkey = random.split(key) - delD = random.uniform(key, shape=([numbers, 1]), minval=1.e-2, maxval=0.2) + delD = random.uniform(key, shape=([numbers, 1]), minval=1.0e-2, maxval=0.2) key, subkey = random.split(key) - T0 = random.uniform(key, shape=([numbers, 1]), minval=1.e-1, maxval=TMx) + T0 = random.uniform(key, shape=([numbers, 1]), minval=1.0e-1, maxval=TMx) key, subkey = random.split(key) - delP = random.uniform(key, shape=([numbers, 1]), minval=1.e-2, maxval=0.2) + delP = random.uniform(key, shape=([numbers, 1]), minval=1.0e-2, maxval=0.2) key, subkey = random.split(key) - keys = random.randint(key, shape=([numbers, ]), minval=0, maxval=10000000) - u = jax.vmap(__create_2DRand_init, axis_name='i')(u, d0, T0, delD, delP, keys) - + keys = random.randint( + key, + shape=( + [ + numbers, + ] + ), + minval=0, + maxval=10000000, + ) + u = jax.vmap(__create_2DRand_init, axis_name="i")(u, d0, T0, delD, delP, keys) # perform window function key, subkey = random.split(key) @@ -968,26 +1082,35 @@ def __create_2DRand_init(u, d0, T0, delD, delP, keys): carry = cond, mask, _xc, _yc, xL, xR, yL, yR, trns cond, mask, _xc, _yc, xL, xR, yL, yR, trns = vmap(select_W, 0, 0)(carry) - u = u.at[:, :, 2:-2, 2:-2, 2:-2].set(u[:, :, 2:-2, 2:-2, 2:-2] * mask[:, None, :, :, None]) - u = u.at[:, 0, 2:-2, 2:-2, 2:-2].add(d0[:, :, None, None] * (1. - mask[:, :, :, None])) - u = u.at[:, 4, 2:-2, 2:-2, 2:-2].add(d0[:, :, None, None] * T0[:, :, None, None] * (1. - mask[:, :, :, None])) + u = u.at[:, :, 2:-2, 2:-2, 2:-2].set( + u[:, :, 2:-2, 2:-2, 2:-2] * mask[:, None, :, :, None] + ) + u = u.at[:, 0, 2:-2, 2:-2, 2:-2].add( + d0[:, :, None, None] * (1.0 - mask[:, :, :, None]) + ) + u = u.at[:, 4, 2:-2, 2:-2, 2:-2].add( + d0[:, :, None, None] * T0[:, :, None, None] * (1.0 - mask[:, :, :, None]) + ) return u -def init_multi_HD_3DTurb(u, xc, yc, zc, numbers=100, init_key=2022, M0=0.1, k_tot=4., gamma=1.666666667): + +def init_multi_HD_3DTurb( + u, xc, yc, zc, numbers=100, init_key=2022, M0=0.1, k_tot=4.0, gamma=1.666666667 +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" def __create_3DTurb_init(u, keys): nx, ny, nz = xc.shape[0], yc.shape[0], zc.shape[0] - d0 = 1. - cs = 1./M0 - u0 = 1.
# fixed - p0 = cs ** 2 * d0 / gamma + d0 = 1.0 + cs = 1.0 / M0 + u0 = 1.0 # fixed + p0 = cs**2 * d0 / gamma dx = xc[1] - xc[0] dy = yc[1] - yc[0] @@ -997,14 +1120,18 @@ def __create_3DTurb_init(u, keys): qLy = dy * ny qLz = dz * nz - ## random velocity field - vx, vy, vz = jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]) + # random velocity field + vx, vy, vz = ( + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + ) key = random.PRNGKey(keys) - kx0 = jnp.pi * 2. / qLx - ky0 = jnp.pi * 2. / qLy - kz0 = jnp.pi * 2. / qLz + kx0 = jnp.pi * 2.0 / qLx + ky0 = jnp.pi * 2.0 / qLy + kz0 = jnp.pi * 2.0 / qLz for k in range(-k_tot, k_tot + 1): kz = kz0 * k # from 1 to k_tot @@ -1016,21 +1143,25 @@ def __create_3DTurb_init(u, keys): continue # random phase key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[3]) # (vi, k) - - uk = 1./jnp.sqrt(kx**2 + ky**2 + kz**2) - kdx = kx * xc[:,None,None] + ky * yc[None,:,None] + kz * zc[None,None,:] + phs = 2.0 * jnp.pi * random.uniform(key, shape=[3]) # (vi, k) + + uk = 1.0 / jnp.sqrt(kx**2 + ky**2 + kz**2) + kdx = ( + kx * xc[:, None, None] + + ky * yc[None, :, None] + + kz * zc[None, None, :] + ) vx += uk * jnp.sin(kdx + phs[0]) vy += uk * jnp.sin(kdx + phs[1]) vz += uk * jnp.sin(kdx + phs[2]) - del(kdx, uk, phs) + del (kdx, uk, phs) # Helmholtz decomposition to subtract expansion: k.vk - dfx, dfy, dfz = 1./qLx, 1./qLy, 1./qLz - fx = dfx * (jnp.arange(nx) - 1. - nx//2) - fy = dfy * (jnp.arange(ny) - 1. - ny//2) - fz = dfz * (jnp.arange(nz) - 1. - nz//2) + dfx, dfy, dfz = 1.0 / qLx, 1.0 / qLy, 1.0 / qLz + fx = dfx * (jnp.arange(nx) - 1.0 - nx // 2) + fy = dfy * (jnp.arange(ny) - 1.0 - ny // 2) + fz = dfz * (jnp.arange(nz) - 1.0 - nz // 2) vkx = jnp.fft.fftn(vx) * dx * dy * dz vky = jnp.fft.fftn(vy) * dx * dy * dz @@ -1041,14 +1172,16 @@ def __create_3DTurb_init(u, keys): vky = jnp.fft.fftshift(vky) vkz = jnp.fft.fftshift(vkz) - fi = fx[:,None,None]**2 + fy[None,:,None]**2 + fz[None,None,:]**2 - fi = jnp.where(fi > 1.e-8, 1./fi, 0.) 
+ fi = fx[:, None, None] ** 2 + fy[None, :, None] ** 2 + fz[None, None, :] ** 2 + fi = jnp.where(fi > 1.0e-8, 1.0 / fi, 0.0) - fdv = (fx[:,None,None] * vkx + fy[None,:,None] * vky + fz[None,None,:] * vkz) * fi - vkx -= fdv * fx[:,None,None] - vky -= fdv * fy[None,:,None] - vkz -= fdv * fz[None,None,:] - del(fi, fdv) + fdv = ( + fx[:, None, None] * vkx + fy[None, :, None] * vky + fz[None, None, :] * vkz + ) * fi + vkx -= fdv * fx[:, None, None] + vky -= fdv * fy[None, :, None] + vkz -= fdv * fz[None, None, :] + del (fi, fdv) # shift back to original order vkx = jnp.fft.ifftshift(vkx) @@ -1066,27 +1199,46 @@ def __create_3DTurb_init(u, keys): vy *= u0 / vtot vz *= u0 / vtot - u = u.at[0].set(d0) - u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) - u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) - u = u.at[3, 2:-2, 2:-2, 2:-2].set(vz) - u = u.at[4].add(p0) + u = u.at[0].set(d0) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) + u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) + u = u.at[3, 2:-2, 2:-2, 2:-2].set(vz) + u = u.at[4].add(p0) return u key = random.PRNGKey(init_key) - keys = random.randint(key, [numbers,], minval=0, maxval=10000000) - u = jax.vmap(__create_3DTurb_init, axis_name='i')(u, keys) + keys = random.randint( + key, + [ + numbers, + ], + minval=0, + maxval=10000000, + ) + u = jax.vmap(__create_3DTurb_init, axis_name="i")(u, keys) return u
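The Fourier-space step above is a Helmholtz projection: subtracting (f . vk) f / |f|^2 removes the compressive part so the velocity field is divergence-free. The code builds shifted frequency grids by hand; an equivalent numpy sketch using `fftfreq` (2D for brevity, illustrative sizes) is:

    import numpy as np

    rng = np.random.default_rng(0)
    v = rng.standard_normal((2, 32, 32))        # (vx, vy)
    vk = np.fft.fft2(v)                         # transforms the last two axes
    kx = np.fft.fftfreq(32)[:, None]
    ky = np.fft.fftfreq(32)[None, :]
    k2 = np.where(kx**2 + ky**2 > 1e-12, kx**2 + ky**2, 1.0)
    kdv = kx * vk[0] + ky * vk[1]               # f . vk
    vk[0] -= kdv * kx / k2
    vk[1] -= kdv * ky / k2
    assert np.abs(kx * vk[0] + ky * vk[1]).max() < 1e-12  # now divergence-free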
-def init_multi_HD_3DRand(u, xc, yc, zc, numbers=10000, init_key=2022, M0=0.1, k_tot=4., gamma=1.666666667, - dMx=1.e1, TMx=1.e1): + +def init_multi_HD_3DRand( + u, + xc, + yc, + zc, + numbers=10000, + init_key=2022, + M0=0.1, + k_tot=4.0, + gamma=1.666666667, + dMx=1.0e1, + TMx=1.0e1, +): """ :param xc: cell center coordinate :param mode: initial condition :return: 1D scalar function u at cell center """ - assert numbers % jax.device_count() == 0, 'numbers should be : GPUs x integer!!' + assert numbers % jax.device_count() == 0, "numbers should be: GPUs x integer!!" def _pass(carry): return carry @@ -1103,7 +1255,9 @@ def _window(carry): cond, value, xx, yy, zz, xL, xR, yL, yR, zL, zR, trns = carry carry = xx, yy, zz, value, xL, xR, yL, yR, zL, zR, trns - xx, yy, zz, value, xL, xR, yL, yR, zL, zR, trns = lax.cond(cond == 1, _window, _pass, carry) + xx, yy, zz, value, xL, xR, yL, yR, zL, zR, trns = lax.cond( + cond == 1, _window, _pass, carry + ) return cond, value, xx, yy, zz, xL, xR, yL, yR, zL, zR, trns def __create_3DRand_init(u, d0, T0, delD, delP, keys): @@ -1121,14 +1275,19 @@ def __create_3DRand_init(u, d0, T0, delD, delP, keys): qLy = dy * ny qLz = dz * nz - ## random velocity field - d, p, vx, vy, vz = jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]), \ jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]), jnp.zeros([nx, ny, nz]) + # random velocity field + d, p, vx, vy, vz = ( + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + jnp.zeros([nx, ny, nz]), + ) key = random.PRNGKey(keys) - kx0 = jnp.pi * 2. / qLx - ky0 = jnp.pi * 2. / qLy - kz0 = jnp.pi * 2. / qLz + kx0 = jnp.pi * 2.0 / qLx + ky0 = jnp.pi * 2.0 / qLy + kz0 = jnp.pi * 2.0 / qLz for k in range(-k_tot, k_tot + 1): kz = kz0 * k # from 1 to k_tot @@ -1140,47 +1299,58 @@ def __create_3DRand_init(u, d0, T0, delD, delP, keys): continue # random phase key, subkey = random.split(key) - phs = 2. * jnp.pi * random.uniform(key, shape=[5]) # (vi, k) - - uk = 1./jnp.sqrt(kx**2 + ky**2 + kz**2) - kdx = kx * xc[:,None,None] + ky * yc[None,:,None] + kz * zc[None,None,:] + phs = 2.0 * jnp.pi * random.uniform(key, shape=[5]) # (vi, k) + + uk = 1.0 / jnp.sqrt(kx**2 + ky**2 + kz**2) + kdx = ( + kx * xc[:, None, None] + + ky * yc[None, :, None] + + kz * zc[None, None, :] + ) vx += uk * jnp.sin(kdx + phs[0]) vy += uk * jnp.sin(kdx + phs[1]) vz += uk * jnp.sin(kdx + phs[2]) p += uk * jnp.sin(kdx + phs[3]) d += uk * jnp.sin(kdx + phs[4]) - del(kdx, uk, phs) + del (kdx, uk, phs) # renormalize total velocity - vtot = jnp.sqrt(vx ** 2 + vy ** 2 + vz ** 2).mean() + vtot = jnp.sqrt(vx**2 + vy**2 + vz**2).mean() vx *= u0 / vtot vy *= u0 / vtot vz *= u0 / vtot - #d = d0 + delD * d / jnp.abs(d).mean() - #p = p0 + delP * p / jnp.abs(p).mean() - d = d0 * (1. + delD * d / jnp.abs(d).mean()) - p = p0 * (1. + delP * p / jnp.abs(p).mean()) - - u = u.at[0, 2:-2, 2:-2, 2:-2].set(d) - u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) - u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) - u = u.at[3, 2:-2, 2:-2, 2:-2].set(vz) - u = u.at[4, 2:-2, 2:-2, 2:-2].set(p) + # d = d0 + delD * d / jnp.abs(d).mean() + # p = p0 + delP * p / jnp.abs(p).mean() + d = d0 * (1.0 + delD * d / jnp.abs(d).mean()) + p = p0 * (1.0 + delP * p / jnp.abs(p).mean()) + + u = u.at[0, 2:-2, 2:-2, 2:-2].set(d) + u = u.at[1, 2:-2, 2:-2, 2:-2].set(vx) + u = u.at[2, 2:-2, 2:-2, 2:-2].set(vy) + u = u.at[3, 2:-2, 2:-2, 2:-2].set(vz) + u = u.at[4, 2:-2, 2:-2, 2:-2].set(p) return u key = random.PRNGKey(init_key) - d0 = random.uniform(key, shape=([numbers, 1]), minval=1.e-1, maxval=dMx) + d0 = random.uniform(key, shape=([numbers, 1]), minval=1.0e-1, maxval=dMx) key, subkey = random.split(key) - delD = random.uniform(key, shape=([numbers, 1]), minval=1.e-2, maxval=0.2) + delD = random.uniform(key, shape=([numbers, 1]), minval=1.0e-2, maxval=0.2) key, subkey = random.split(key) - T0 = random.uniform(key, shape=([numbers, 1]), minval=1.e-1, maxval=TMx) + T0 = random.uniform(key, shape=([numbers, 1]), minval=1.0e-1, maxval=TMx) key, subkey = random.split(key) - delP = random.uniform(key, shape=([numbers, 1]), minval=1.e-2, maxval=0.2) + delP = random.uniform(key, shape=([numbers, 1]), minval=1.0e-2, maxval=0.2) key, subkey = random.split(key) - keys = random.randint(key, [numbers,], minval=0, maxval=10000000) - u = jax.vmap(__create_3DRand_init, axis_name='i')(u, d0, T0, delD, delP, keys) + keys = random.randint( + key, + [ + numbers, + ], + minval=0, + maxval=10000000, + ) + u = jax.vmap(__create_3DRand_init, axis_name="i")(u, d0, T0, delD, delP, keys) # perform window function key, subkey = random.split(key) @@ -1199,115 +1369,127 @@ def __create_3DRand_init(u, d0, T0, delD, delP, keys): _zc = jnp.repeat(zc[None, :], numbers, axis=0) trns = 0.01 * jnp.ones_like(cond) carry = cond, mask, _xc, _yc, _zc, xL, xR, yL, yR, zL, zR, trns - cond, mask, _xc, _yc, _zc, xL, xR, yL, yR, zL, zR, trns = vmap(select_W, 0, 0)(carry) - - u = u.at[:, :, 2:-2, 2:-2, 2:-2].set(u[:, :, 2:-2, 2:-2, 2:-2] * mask[:, None, :, :, :]) - u = u.at[:, 0, 2:-2, 2:-2, 2:-2].add(d0[:, :, None, None] * (1. - mask[:, :, :, :])) - u = u.at[:, 4, 2:-2, 2:-2, 2:-2].add(d0[:, :, None, None] * T0[:, :, None, None] * (1.
- mask[:, :, :, :])) + cond, mask, _xc, _yc, _zc, xL, xR, yL, yR, zL, zR, trns = vmap(select_W, 0, 0)( + carry + ) + + u = u.at[:, :, 2:-2, 2:-2, 2:-2].set( + u[:, :, 2:-2, 2:-2, 2:-2] * mask[:, None, :, :, :] + ) + u = u.at[:, 0, 2:-2, 2:-2, 2:-2].add( + d0[:, :, None, None] * (1.0 - mask[:, :, :, :]) + ) + u = u.at[:, 4, 2:-2, 2:-2, 2:-2].add( + d0[:, :, None, None] * T0[:, :, None, None] * (1.0 - mask[:, :, :, :]) + ) return u -def bc(u, dx, Ncell, mode='periodic'): - _u = jnp.zeros(Ncell+4) # because of 2nd-order precision in space - _u = _u.at[2:Ncell+2].set(u) - if mode=='periodic': # periodic boundary condition - _u = _u.at[0:2].set(u[-2:]) # left hand side - _u = _u.at[Ncell + 2:Ncell + 4].set(u[0:2]) # right hand side - elif mode=='reflection': - _u = _u.at[0].set(- u[3]) # left hand side - _u = _u.at[1].set(- u[2]) # left hand side - _u = _u.at[-2].set(- u[-3]) # right hand side - _u = _u.at[-1].set(- u[-4]) # right hand side - elif mode=='copy': - _u = _u.at[0].set(u[3]) # left hand side - _u = _u.at[1].set(u[2]) # left hand side - _u = _u.at[-2].set(u[-3]) # right hand side - _u = _u.at[-1].set(u[-4]) # right hand side + +def bc(u, dx, Ncell, mode="periodic"): + _u = jnp.zeros(Ncell + 4) # because of 2nd-order precision in space + _u = _u.at[2 : Ncell + 2].set(u) + if mode == "periodic": # periodic boundary condition + _u = _u.at[0:2].set(u[-2:]) # left hand side + _u = _u.at[Ncell + 2 : Ncell + 4].set(u[0:2]) # right hand side + elif mode == "reflection": + _u = _u.at[0].set(-u[3]) # left hand side + _u = _u.at[1].set(-u[2]) # left hand side + _u = _u.at[-2].set(-u[-3]) # right hand side + _u = _u.at[-1].set(-u[-4]) # right hand side + elif mode == "copy": + _u = _u.at[0].set(u[3]) # left hand side + _u = _u.at[1].set(u[2]) # left hand side + _u = _u.at[-2].set(u[-3]) # right hand side + _u = _u.at[-1].set(u[-4]) # right hand side return _u
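`bc` pads a 1D field with two ghost cells per side (second-order stencils read two neighbours). For the periodic branch the ghost cells simply wrap around, e.g. (toy values):

    import jax.numpy as jnp

    u = jnp.arange(6.0)                    # interior cells
    _u = jnp.zeros(6 + 4).at[2:8].set(u)
    _u = _u.at[0:2].set(u[-2:])            # left ghosts copy the right edge
    _u = _u.at[8:10].set(u[0:2])           # right ghosts copy the left edge
    # _u -> [4. 5. 0. 1. 2. 3. 4. 5. 0. 1.]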
-def bc_2D(_u, mode='trans'): + +def bc_2D(_u, mode="trans"): Nx, Ny = _u.shape - u = jnp.zeros([Nx + 4, Ny + 4]) # because of 2nd-order precision in space - u = u.at[2:-2, 2:-2].set(_u) + u = jnp.zeros([Nx + 4, Ny + 4]) # because of 2nd-order precision in space + u = u.at[2:-2, 2:-2].set(_u) Nx += 2 Ny += 2 - if mode=='periodic': # periodic boundary condition + if mode == "periodic": # periodic boundary condition # left hand side - u = u.at[0:2, 2:-2].set(u[Nx-2:Nx, 2:-2]) # x - u = u.at[2:-2, 0:2].set(u[2:-2, Ny-2:Ny]) # y + u = u.at[0:2, 2:-2].set(u[Nx - 2 : Nx, 2:-2]) # x + u = u.at[2:-2, 0:2].set(u[2:-2, Ny - 2 : Ny]) # y # right hand side - u = u.at[Nx:Nx+2, 2:-2].set(u[2:4, 2:-2]) - u = u.at[2:-2, Ny:Ny+2].set(u[2:-2, 2:4]) - elif mode=='trans': # periodic boundary condition + u = u.at[Nx : Nx + 2, 2:-2].set(u[2:4, 2:-2]) + u = u.at[2:-2, Ny : Ny + 2].set(u[2:-2, 2:4]) + elif mode == "trans": # transmissive boundary condition # left hand side - u = u.at[0, 2:-2].set(u[3, 2:-2]) # x - u = u.at[2:-2, 0].set(u[2:-2, 3]) # y - u = u.at[1, 2:-2].set(u[2, 2:-2]) # x - u = u.at[2:-2, 1].set(u[2:-2, 2]) # y + u = u.at[0, 2:-2].set(u[3, 2:-2]) # x + u = u.at[2:-2, 0].set(u[2:-2, 3]) # y + u = u.at[1, 2:-2].set(u[2, 2:-2]) # x + u = u.at[2:-2, 1].set(u[2:-2, 2]) # y # right hand side - u = u.at[-2, 2:-2].set(u[-3, 2:-2]) - u = u.at[2:-2, -2].set(u[2:-2, -3]) - u = u.at[-1, 2:-2].set(u[-4, 2:-2]) - u = u.at[2:-2, -1].set(u[2:-2, -4]) + u = u.at[-2, 2:-2].set(u[-3, 2:-2]) + u = u.at[2:-2, -2].set(u[2:-2, -3]) + u = u.at[-1, 2:-2].set(u[-4, 2:-2]) + u = u.at[2:-2, -1].set(u[2:-2, -4]) + elif mode == "Neumann": # Neumann boundary condition # left hand side - u = u.at[0, 2:-2].set(0.) # x - u = u.at[2:-2, 0].set(0.) # y - u = u.at[1, 2:-2].set(0.) # x - u = u.at[2:-2, 1].set(0.) # y + u = u.at[0, 2:-2].set(0.0) # x + u = u.at[2:-2, 0].set(0.0) # y + u = u.at[1, 2:-2].set(0.0) # x + u = u.at[2:-2, 1].set(0.0) # y # right hand side - u = u.at[-2, 2:-2].set(0.) - u = u.at[2:-2, -2].set(0.) - u = u.at[-1, 2:-2].set(0.) - u = u.at[2:-2, -1].set(0.) + u = u.at[-2, 2:-2].set(0.0) + u = u.at[2:-2, -2].set(0.0) + u = u.at[-1, 2:-2].set(0.0) + u = u.at[2:-2, -1].set(0.0) return u + def bc_HD(u, mode): _, Nx, Ny, Nz = u.shape Nx -= 2 Ny -= 2 Nz -= 2 - if mode=='periodic': # periodic boundary condition + if mode == "periodic": # periodic boundary condition # left hand side - u = u.at[:, 0:2, 2:-2, 2:-2].set(u[:, Nx-2:Nx, 2:-2, 2:-2]) # x - u = u.at[:, 2:-2, 0:2, 2:-2].set(u[:, 2:-2, Ny-2:Ny, 2:-2]) # y - u = u.at[:, 2:-2, 2:-2, 0:2].set(u[:, 2:-2, 2:-2, Nz-2:Nz]) # z + u = u.at[:, 0:2, 2:-2, 2:-2].set(u[:, Nx - 2 : Nx, 2:-2, 2:-2]) # x + u = u.at[:, 2:-2, 0:2, 2:-2].set(u[:, 2:-2, Ny - 2 : Ny, 2:-2]) # y + u = u.at[:, 2:-2, 2:-2, 0:2].set(u[:, 2:-2, 2:-2, Nz - 2 : Nz]) # z # right hand side - u = u.at[:, Nx:Nx+2, 2:-2, 2:-2].set(u[:, 2:4, 2:-2, 2:-2]) - u = u.at[:, 2:-2, Ny:Ny+2, 2:-2].set(u[:, 2:-2, 2:4, 2:-2]) - u = u.at[:, 2:-2, 2:-2, Nz:Nz+2].set(u[:, 2:-2, 2:-2, 2:4]) + u = u.at[:, Nx : Nx + 2, 2:-2, 2:-2].set(u[:, 2:4, 2:-2, 2:-2]) + u = u.at[:, 2:-2, Ny : Ny + 2, 2:-2].set(u[:, 2:-2, 2:4, 2:-2]) + u = u.at[:, 2:-2, 2:-2, Nz : Nz + 2].set(u[:, 2:-2, 2:-2, 2:4]) - elif mode=='trans': # periodic boundary condition + elif mode == "trans": # transmissive boundary condition # left hand side - u = u.at[:, 0, 2:-2, 2:-2].set(u[:, 3, 2:-2, 2:-2]) # x - u = u.at[:, 2:-2, 0, 2:-2].set(u[:, 2:-2, 3, 2:-2]) # y - u = u.at[:, 2:-2, 2:-2, 0].set(u[:, 2:-2, 2:-2, 3]) # z - u = u.at[:, 1, 2:-2, 2:-2].set(u[:, 2, 2:-2, 2:-2]) # x - u = u.at[:, 2:-2, 1, 2:-2].set(u[:, 2:-2, 2, 2:-2]) # y - u = u.at[:, 2:-2, 2:-2, 1].set(u[:, 2:-2, 2:-2, 2]) # z + u = u.at[:, 0, 2:-2, 2:-2].set(u[:, 3, 2:-2, 2:-2]) # x + u = u.at[:, 2:-2, 0, 2:-2].set(u[:, 2:-2, 3, 2:-2]) # y + u = u.at[:, 2:-2, 2:-2, 0].set(u[:, 2:-2, 2:-2, 3]) # z + u = u.at[:, 1, 2:-2, 2:-2].set(u[:, 2, 2:-2, 2:-2]) # x + u = u.at[:, 2:-2, 1, 2:-2].set(u[:, 2:-2, 2, 2:-2]) # y + u = u.at[:, 2:-2, 2:-2, 1].set(u[:, 2:-2, 2:-2, 2]) # z # right hand side - u = u.at[:, -2, 2:-2, 2:-2].set(u[:, -3, 2:-2, 2:-2]) - u = u.at[:, 2:-2, -2, 2:-2].set(u[:, 2:-2, -3, 2:-2]) - u = u.at[:, 2:-2, 2:-2, -2].set(u[:, 2:-2, 2:-2, -3]) - u = u.at[:, -1, 2:-2, 2:-2].set(u[:, -4, 2:-2, 2:-2]) - u = u.at[:, 2:-2, -1, 2:-2].set(u[:, 2:-2, -4, 2:-2]) - u = u.at[:, 2:-2, 2:-2, -1].set(u[:, 2:-2, 2:-2, -4]) + u = u.at[:, -2, 2:-2, 2:-2].set(u[:, -3, 2:-2, 2:-2]) + u = u.at[:, 2:-2, -2, 2:-2].set(u[:, 2:-2, -3, 2:-2]) + u = u.at[:, 2:-2, 2:-2, -2].set(u[:, 2:-2, 2:-2, -3]) + u = u.at[:, -1, 2:-2, 2:-2].set(u[:, -4, 2:-2, 2:-2]) + u = u.at[:, 2:-2, -1, 2:-2].set(u[:, 2:-2, -4, 2:-2]) + u = u.at[:, 2:-2, 2:-2, -1].set(u[:, 2:-2, 2:-2, -4]) - elif mode=='KHI': # x: periodic, y, z : trans + elif mode == "KHI": # x: periodic, y, z : trans # left hand side - u = u.at[:, 0:2, 2:-2, 2:-2].set(u[:, Nx - 2:Nx, 2:-2, 2:-2]) # x - u = u.at[:, 2:-2, 0, 2:-2].set(u[:, 2:-2, 3, 2:-2]) # y - u = u.at[:, 2:-2, 2:-2, 0].set(u[:, 2:-2, 2:-2, 3]) # z - u = u.at[:, 2:-2, 1, 2:-2].set(u[:, 2:-2, 2, 2:-2]) # y - u = u.at[:, 2:-2, 2:-2, 1].set(u[:,
2:-2, 2:-2, 2]) # z + u = u.at[:, 0:2, 2:-2, 2:-2].set(u[:, Nx - 2 : Nx, 2:-2, 2:-2]) # x + u = u.at[:, 2:-2, 0, 2:-2].set(u[:, 2:-2, 3, 2:-2]) # y + u = u.at[:, 2:-2, 2:-2, 0].set(u[:, 2:-2, 2:-2, 3]) # z + u = u.at[:, 2:-2, 1, 2:-2].set(u[:, 2:-2, 2, 2:-2]) # y + u = u.at[:, 2:-2, 2:-2, 1].set(u[:, 2:-2, 2:-2, 2]) # z # right hand side - u = u.at[:, Nx:Nx + 2, 2:-2, 2:-2].set(u[:, 2:4, 2:-2, 2:-2]) - u = u.at[:, 2:-2, -2, 2:-2].set(u[:, 2:-2, -3, 2:-2]) - u = u.at[:, 2:-2, 2:-2, -2].set(u[:, 2:-2, 2:-2, -3]) - u = u.at[:, 2:-2, -1, 2:-2].set(u[:, 2:-2, -4, 2:-2]) - u = u.at[:, 2:-2, 2:-2, -1].set(u[:, 2:-2, 2:-2, -4]) + u = u.at[:, Nx : Nx + 2, 2:-2, 2:-2].set(u[:, 2:4, 2:-2, 2:-2]) + u = u.at[:, 2:-2, -2, 2:-2].set(u[:, 2:-2, -3, 2:-2]) + u = u.at[:, 2:-2, 2:-2, -2].set(u[:, 2:-2, 2:-2, -3]) + u = u.at[:, 2:-2, -1, 2:-2].set(u[:, 2:-2, -4, 2:-2]) + u = u.at[:, 2:-2, 2:-2, -1].set(u[:, 2:-2, 2:-2, -4]) return u + def bc_HD_vis(u, if_periodic=True): # for viscosity """ for the moment, assuming periodic/copy boundary @@ -1319,119 +1501,144 @@ def bc_HD_vis(u, if_periodic=True): # for viscosity Nz -= 2 if if_periodic: - u = u.at[:, 0:2, 0:2, 2:-2].set(u[:, Nx - 2:Nx, Ny - 2:Ny, 2:-2]) # xByB - u = u.at[:, 0:2, 2:-2, 0:2].set(u[:, Nx - 2:Nx, 2:-2, Nz - 2:Nz]) # xBzB - u = u.at[:, 0:2, Ny:Ny + 2, 2:-2].set(u[:, Nx - 2:Nx, 2:4, 2:-2]) # xByT - u = u.at[:, 0:2, 2:-2, Nz:Nz + 2].set(u[:, Nx - 2:Nx, 2:-2, 2:4]) # xBzT - u = u.at[:, Nx:Nx + 2, 0:2, 2:-2].set(u[:, 2:4, Ny - 2:Ny, 2:-2]) # xTyB - u = u.at[:, Nx:Nx + 2, 2:-2, 0:2].set(u[:, 2:4, 2:-2, Nz - 2:Nz]) # xTzB - u = u.at[:, Nx:Nx + 2, Ny:Ny + 2, 2:-2].set(u[:, 2:4, 2:4, 2:-2]) # xTyT - u = u.at[:, Nx:Nx + 2, 2:-2, Nz:Nz + 2].set(u[:, 2:4, 2:-2, 2:4]) # xTzT - else: # trans - u = u.at[:, 0:2, 0:2, 2:-2].set(u[:, 4:2, 4:2, 2:-2]) # xByT - u = u.at[:, 0:2, 2:-2, 0:2].set(u[:, 4:2, 2:-2, 4:2]) # xBzB - u = u.at[:, 0:2, Ny:Ny + 2, 2:-2].set(u[:, 4:2, Ny:Ny-2, 2:-2]) # xByB - u = u.at[:, 0:2, 2:-2, Nz:Nz + 2].set(u[:, 4:2, 2:-2, Nz:Nz-2]) # xBzT - u = u.at[:, Nx:Nx + 2, 0:2, 2:-2].set(u[:, Nx:Nx-2, 4:2, 2:-2]) # xTyB - u = u.at[:, Nx:Nx + 2, 2:-2, 0:2].set(u[:, Nx:Nx-2, 2:-2, 4:2]) # xTzB - u = u.at[:, Nx:Nx + 2, Ny:Ny + 2, 2:-2].set(u[:, Nx:Nx-2, Ny:Ny-2, 2:-2]) # xTyT - u = u.at[:, Nx:Nx + 2, 2:-2, Nz:Nz + 2].set(u[:, Nx:Nx-2, 2:-2, Nz:Nz-2]) # xTzT + u = u.at[:, 0:2, 0:2, 2:-2].set(u[:, Nx - 2 : Nx, Ny - 2 : Ny, 2:-2]) # xByB + u = u.at[:, 0:2, 2:-2, 0:2].set(u[:, Nx - 2 : Nx, 2:-2, Nz - 2 : Nz]) # xBzB + u = u.at[:, 0:2, Ny : Ny + 2, 2:-2].set(u[:, Nx - 2 : Nx, 2:4, 2:-2]) # xByT + u = u.at[:, 0:2, 2:-2, Nz : Nz + 2].set(u[:, Nx - 2 : Nx, 2:-2, 2:4]) # xBzT + u = u.at[:, Nx : Nx + 2, 0:2, 2:-2].set(u[:, 2:4, Ny - 2 : Ny, 2:-2]) # xTyB + u = u.at[:, Nx : Nx + 2, 2:-2, 0:2].set(u[:, 2:4, 2:-2, Nz - 2 : Nz]) # xTzB + u = u.at[:, Nx : Nx + 2, Ny : Ny + 2, 2:-2].set(u[:, 2:4, 2:4, 2:-2]) # xTyT + u = u.at[:, Nx : Nx + 2, 2:-2, Nz : Nz + 2].set(u[:, 2:4, 2:-2, 2:4]) # xTzT + else: # trans + u = u.at[:, 0:2, 0:2, 2:-2].set(u[:, 2:4, 2:4, 2:-2]) # xByT + u = u.at[:, 0:2, 2:-2, 0:2].set(u[:, 2:4, 2:-2, 2:4]) # xBzB + u = u.at[:, 0:2, Ny : Ny + 2, 2:-2].set(u[:, 2:4, Ny - 2 : Ny, 2:-2]) # xByB + u = u.at[:, 0:2, 2:-2, Nz : Nz + 2].set(u[:, 2:4, 2:-2, Nz - 2 : Nz]) # xBzT + u = u.at[:, Nx : Nx + 2, 0:2, 2:-2].set(u[:, Nx - 2 : Nx, 2:4, 2:-2]) # xTyB + u = u.at[:, Nx : Nx + 2, 2:-2, 0:2].set(u[:, Nx - 2 : Nx, 2:-2, 2:4]) # xTzB + u = u.at[:, Nx : Nx + 2, Ny : Ny + 2, 2:-2].set( + u[:, Nx - 2 : Nx, Ny - 2 : Ny, 2:-2] + ) # xTyT + u = u.at[:, Nx : Nx + 2, 2:-2, Nz : Nz + 2].set( + u[:, Nx - 2 : Nx, 2:-2, Nz - 2 : Nz] + ) # xTzT return u -def VLlimiter(a, b, c, alpha=2.): - return jnp.sign(c)\ - *(0.5 + 0.5*jnp.sign(a*b))\ - *jnp.minimum(alpha*jnp.minimum(jnp.abs(a), jnp.abs(b)), jnp.abs(c)) + +def VLlimiter(a, b, c, alpha=2.0): + return ( + jnp.sign(c) + * (0.5 + 0.5 * jnp.sign(a * b)) + * jnp.minimum(alpha * jnp.minimum(jnp.abs(a), jnp.abs(b)), jnp.abs(c)) + ) + def limiting(u, Ncell, if_second_order): # under construction - duL = u[1:Ncell + 3] - u[0:Ncell + 2] - duR = u[2:Ncell + 4] - u[1:Ncell + 3] - duM = (u[2:Ncell + 4] - u[0:Ncell + 2])*0.5 - gradu = VLlimiter(duL, duR, duM) * if_second_order + du_L = u[1 : Ncell + 3] - u[0 : Ncell + 2] + du_R = u[2 : Ncell + 4] - u[1 : Ncell + 3] + du_M = (u[2 : Ncell + 4] - u[0 : Ncell + 2]) * 0.5 + gradu = VLlimiter(du_L, du_R, du_M) * if_second_order # -1:Ncell - #uL, uR = jnp.zeros(Ncell+4), jnp.zeros(Ncell+4) + # uL, uR = jnp.zeros(Ncell+4), jnp.zeros(Ncell+4) uL, uR = jnp.zeros_like(u), jnp.zeros_like(u) - uL = uL.at[1:Ncell+3].set(u[1:Ncell+3] - 0.5*gradu) # left of cell - uR = uR.at[1:Ncell+3].set(u[1:Ncell+3] + 0.5*gradu) # right of cell + # left of cell + uL = uL.at[1 : Ncell + 3].set(u[1 : Ncell + 3] - 0.5 * gradu) + # right of cell + uR = uR.at[1 : Ncell + 3].set(u[1 : Ncell + 3] + 0.5 * gradu) return uL, uR
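`VLlimiter` is a van Leer-type (monotonized central) slope limiter: it keeps the centred slope in smooth regions, clips it to `alpha` times the smaller one-sided difference near steep gradients, and returns zero at extrema where the one-sided differences disagree in sign. A worked example on a ramp next to a jump (illustrative values):

    import jax.numpy as jnp

    u = jnp.array([0.0, 1.0, 2.0, 10.0])
    dL = u[1:-1] - u[:-2]                  # backward differences
    dR = u[2:] - u[1:-1]                   # forward differences
    dM = 0.5 * (u[2:] - u[:-2])            # centred differences
    grad = (
        jnp.sign(dM)
        * (0.5 + 0.5 * jnp.sign(dL * dR))
        * jnp.minimum(2.0 * jnp.minimum(jnp.abs(dL), jnp.abs(dR)), jnp.abs(dM))
    )
    # grad -> [1., 2.]: full slope in the smooth cell, limited at the jump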
+ def limiting_HD(u, if_second_order): - nd, nx, ny, nz = u.shape + _, nx, _, _ = u.shape uL, uR = u, u nx -= 4 - duL = u[:, 1:nx + 3, :, :] - u[:, 0:nx + 2, :, :] - duR = u[:, 2:nx + 4, :, :] - u[:, 1:nx + 3, :, :] - duM = (u[:, 2:nx + 4, :, :] - u[:, 0:nx + 2, :, :]) * 0.5 - gradu = VLlimiter(duL, duR, duM) * if_second_order + du_L = u[:, 1 : nx + 3, :, :] - u[:, 0 : nx + 2, :, :] + du_R = u[:, 2 : nx + 4, :, :] - u[:, 1 : nx + 3, :, :] + du_M = (u[:, 2 : nx + 4, :, :] - u[:, 0 : nx + 2, :, :]) * 0.5 + gradu = VLlimiter(du_L, du_R, du_M) * if_second_order # -1:Ncell - uL = uL.at[:, 1:nx + 3, :, :].set(u[:, 1:nx + 3, :, :] - 0.5*gradu) # left of cell - uR = uR.at[:, 1:nx + 3, :, :].set(u[:, 1:nx + 3, :, :] + 0.5*gradu) # right of cell - - uL = jnp.where(uL[0] > 0., uL, u) - uL = jnp.where(uL[4] > 0., uL, u) - uR = jnp.where(uR[0] > 0., uR, u) - uR = jnp.where(uR[4] > 0., uR, u) + uL = uL.at[:, 1 : nx + 3, :, :].set( + u[:, 1 : nx + 3, :, :] - 0.5 * gradu + ) # left of cell + uR = uR.at[:, 1 : nx + 3, :, :].set( + u[:, 1 : nx + 3, :, :] + 0.5 * gradu + ) # right of cell + + uL = jnp.where(uL[0] > 0.0, uL, u) + uL = jnp.where(uL[4] > 0.0, uL, u) + uR = jnp.where(uR[0] > 0.0, uR, u) + uR = jnp.where(uR[4] > 0.0, uR, u) return uL, uR + def save_data(u, xc, i_save, save_dir, dt_save=None, if_final=False): if if_final: - jnp.save(save_dir+'/x_coordinate', xc) + jnp.save(save_dir + "/x_coordinate", xc) # - tc = jnp.arange(i_save+1)*dt_save - jnp.save(save_dir+'/t_coordinate', tc) + tc = jnp.arange(i_save + 1) * dt_save + jnp.save(save_dir + "/t_coordinate", tc) # - flnm = save_dir+'/Data_'+str(i_save).zfill(4) + flnm = save_dir + "/Data_" + str(i_save).zfill(4) jnp.save(flnm, u) else: - flnm = save_dir+'/Data_'+str(i_save).zfill(4) + flnm = save_dir + "/Data_" + str(i_save).zfill(4) jnp.save(flnm, u) + def save_data_HD(u, xc, yc, zc, i_save, save_dir, dt_save=None, if_final=False): if if_final: - jnp.save(save_dir+'/x_coordinate', xc) - jnp.save(save_dir+'/y_coordinate', yc) - jnp.save(save_dir+'/z_coordinate', zc) # - tc =
jnp.arange(i_save+1)*dt_save - jnp.save(save_dir+'/t_coordinate', tc) + tc = jnp.arange(i_save + 1) * dt_save + jnp.save(save_dir + "/t_coordinate", tc) # - flnm = save_dir+'/Data_'+str(i_save).zfill(4) + flnm = save_dir + "/Data_" + str(i_save).zfill(4) jnp.save(flnm, u) else: - flnm = save_dir+'/Data_'+str(i_save).zfill(4) + flnm = save_dir + "/Data_" + str(i_save).zfill(4) jnp.save(flnm, u) + def Courant(u, dx): - stability_adv = dx/(jnp.max(jnp.abs(u)) + 1.e-8) + stability_adv = dx / (jnp.max(jnp.abs(u)) + 1.0e-8) return stability_adv -def Courant_diff(dx, epsilon=1.e-3): - stability_dif = 0.5*dx**2/(epsilon + 1.e-8) + +def Courant_diff(dx, epsilon=1.0e-3): + stability_dif = 0.5 * dx**2 / (epsilon + 1.0e-8) return stability_dif -def Courant_diff_2D(dx, dy, epsilon=1.e-3): - stability_dif_x = 0.5*dx**2/(epsilon + 1.e-8) - stability_dif_y = 0.5*dy**2/(epsilon + 1.e-8) + +def Courant_diff_2D(dx, dy, epsilon=1.0e-3): + stability_dif_x = 0.5 * dx**2 / (epsilon + 1.0e-8) + stability_dif_y = 0.5 * dy**2 / (epsilon + 1.0e-8) return jnp.min(jnp.array([stability_dif_x, stability_dif_y])) + def Courant_HD(u, dx, dy, dz, gamma): - cs = jnp.sqrt(gamma*u[4]/u[0]) # sound velocity - stability_adv_x = dx/(jnp.max(cs + jnp.abs(u[1])) + 1.e-8) - stability_adv_y = dy/(jnp.max(cs + jnp.abs(u[2])) + 1.e-8) - stability_adv_z = dz/(jnp.max(cs + jnp.abs(u[3])) + 1.e-8) - stability_adv = jnp.min(jnp.array([stability_adv_x, stability_adv_y, stability_adv_z])) + cs = jnp.sqrt(gamma * u[4] / u[0]) # sound velocity + stability_adv_x = dx / (jnp.max(cs + jnp.abs(u[1])) + 1.0e-8) + stability_adv_y = dy / (jnp.max(cs + jnp.abs(u[2])) + 1.0e-8) + stability_adv_z = dz / (jnp.max(cs + jnp.abs(u[3])) + 1.0e-8) + stability_adv = jnp.min( + jnp.array([stability_adv_x, stability_adv_y, stability_adv_z]) + ) return stability_adv + def Courant_vis_HD(dx, dy, dz, eta, zeta): - #visc = jnp.max(jnp.array([eta, zeta])) - visc = 4. / 3. 
* eta + zeta # maximum - stability_dif_x = 0.5*dx**2/(visc + 1.e-8) - stability_dif_y = 0.5*dy**2/(visc + 1.e-8) - stability_dif_z = 0.5*dz**2/(visc + 1.e-8) - stability_dif = jnp.min(jnp.array([stability_dif_x, stability_dif_y, stability_dif_z])) + # visc = jnp.max(jnp.array([eta, zeta])) + visc = 4.0 / 3.0 * eta + zeta # maximum + stability_dif_x = 0.5 * dx**2 / (visc + 1.0e-8) + stability_dif_y = 0.5 * dy**2 / (visc + 1.0e-8) + stability_dif_z = 0.5 * dz**2 / (visc + 1.0e-8) + stability_dif = jnp.min( + jnp.array([stability_dif_x, stability_dif_y, stability_dif_z]) + ) return stability_dif - diff --git a/pdebench/data_gen/gen_diff_react.py b/pdebench/data_gen/gen_diff_react.py index 1d710df..f175786 100644 --- a/pdebench/data_gen/gen_diff_react.py +++ b/pdebench/data_gen/gen_diff_react.py @@ -1,7 +1,10 @@ #!/usr/bin/env python +from __future__ import annotations import os + import dotenv + # load environment variables from `.env` file if it exists # recursively searches for `.env` in all folders starting from work dir # this allows us to keep defaults local to the machine @@ -19,25 +22,25 @@ os.environ["VECLIB_MAXIMUM_THREADS"] = num_threads os.environ["NUMEXPR_NUM_THREADS"] = num_threads -import dotenv -import hydra -from hydra.utils import get_original_cwd -from omegaconf import DictConfig, OmegaConf import logging import multiprocessing as mp from itertools import repeat -import numpy as np -from pdebench.data_gen.src import utils +import dotenv import h5py +import hydra +import numpy as np +from hydra.utils import get_original_cwd +from omegaconf import DictConfig, OmegaConf +from pdebench.data_gen.src import utils from pdebench.data_gen.uploader import dataverse_upload log = logging.getLogger(__name__) + def simulator(config, i): - from pdebench.data_gen.src import sim_diff_react - + config.sim.seed = i log.info(f"Starting seed {i}") start_time = time.time() @@ -47,7 +50,7 @@ def simulator(config, i): log.info(f"Seed {config.sim.seed} took {duration} to finish") seed_str = str(i).zfill(4) - + while True: try: with h5py.File(utils.expand_path(config.output_path), "a") as data_f: @@ -56,27 +59,37 @@ def simulator(config, i): ## should be by batch and less than 1MB ## lzf compression for float32 is kind of pointless though. 
data_f.create_dataset( - f"{seed_str}/data", data=data_sample, dtype="float32", compression="lzf" + f"{seed_str}/data", + data=data_sample, + dtype="float32", + compression="lzf", ) data_f.create_dataset( - f"{seed_str}/grid/x", data = sim_obj.x, dtype="float32", compression="lzf" + f"{seed_str}/grid/x", + data=sim_obj.x, + dtype="float32", + compression="lzf", ) data_f.create_dataset( - f"{seed_str}/grid/y", data=sim_obj.y, dtype="float32", compression="lzf" + f"{seed_str}/grid/y", + data=sim_obj.y, + dtype="float32", + compression="lzf", ) data_f.create_dataset( - f"{seed_str}/grid/t", data=sim_obj.t, dtype="float32", compression="lzf" + f"{seed_str}/grid/t", + data=sim_obj.t, + dtype="float32", + compression="lzf", ) seed_group = data_f[seed_str] seed_group.attrs["config"] = OmegaConf.to_yaml(config) - except IOError: + except OSError: time.sleep(0.1) continue else: break - - @hydra.main(config_path="configs/", config_name="diff-react") def main(config: DictConfig): @@ -92,17 +105,14 @@ def main(config: DictConfig): temp_path = os.getcwd() os.chdir(get_original_cwd()) - from src import utils - import h5py - # Change back to the hydra working directory os.chdir(temp_path) - + work_path = os.path.dirname(config.work_dir) output_path = os.path.join(work_path, config.data_dir, config.output_path) if not os.path.isdir(output_path): os.makedirs(output_path) - config.output_path = os.path.join(output_path, config.output_path) + '.h5' + config.output_path = os.path.join(output_path, config.output_path) + ".h5" num_samples_init = 0 num_samples_final = 1000 @@ -115,16 +125,12 @@ def main(config: DictConfig): if config.upload: dataverse_upload( file_path=config.output_path, - dataverse_url=os.getenv( - 'DATAVERSE_URL', 'https://darus.uni-stuttgart.de'), - dataverse_token=os.getenv( - 'DATAVERSE_API_TOKEN', ''), + dataverse_url=os.getenv("DATAVERSE_URL", "https://darus.uni-stuttgart.de"), + dataverse_token=os.getenv("DATAVERSE_API_TOKEN", ""), dataverse_dir=config.name, - dataverse_id=os.getenv( - 'DATAVERSE_ID', ''), - log=log) - - return + dataverse_id=os.getenv("DATAVERSE_ID", ""), + log=log, + ) import os diff --git a/pdebench/data_gen/gen_diff_sorp.py b/pdebench/data_gen/gen_diff_sorp.py index dfa2017..ec11951 100644 --- a/pdebench/data_gen/gen_diff_sorp.py +++ b/pdebench/data_gen/gen_diff_sorp.py @@ -1,7 +1,10 @@ #!/usr/bin/env python +from __future__ import annotations import os + import dotenv + # load environment variables from `.env` file if it exists # recursively searches for `.env` in all folders starting from work dir # this allows us to keep defaults local to the machine @@ -19,26 +22,25 @@ os.environ["VECLIB_MAXIMUM_THREADS"] = num_threads os.environ["NUMEXPR_NUM_THREADS"] = num_threads -import dotenv -import hydra -from hydra.utils import get_original_cwd -from omegaconf import DictConfig, OmegaConf import logging import multiprocessing as mp from itertools import repeat -import numpy as np -from pdebench.data_gen.src import utils +import dotenv import h5py +import hydra +import numpy as np +from hydra.utils import get_original_cwd +from omegaconf import DictConfig, OmegaConf +from pdebench.data_gen.src import utils from pdebench.data_gen.uploader import dataverse_upload log = logging.getLogger(__name__) def simulator(config, i): - from pdebench.data_gen.src import sim_diff_sorp - + config.sim.seed = i log.info(f"Starting seed {i}") start_time = time.time() @@ -46,7 +48,7 @@ def simulator(config, i): data_sample = sim_obj.generate_sample() duration = time.time() - start_time 
log.info(f"Seed {config.sim.seed} took {duration} to finish") - + seed_str = str(i).zfill(4) while True: @@ -57,23 +59,30 @@ def simulator(config, i): ## should be by batch and less than 1MB ## lzf compression for float32 is kind of pointless though. data_f.create_dataset( - f"{seed_str}/data", data=data_sample, dtype="float32", compression="lzf" + f"{seed_str}/data", + data=data_sample, + dtype="float32", + compression="lzf", ) data_f.create_dataset( - f"{seed_str}/grid/x", data = sim_obj.x, dtype="float32", compression="lzf" + f"{seed_str}/grid/x", + data=sim_obj.x, + dtype="float32", + compression="lzf", ) data_f.create_dataset( - f"{seed_str}/grid/t", data=sim_obj.t, dtype="float32", compression="lzf" + f"{seed_str}/grid/t", + data=sim_obj.t, + dtype="float32", + compression="lzf", ) seed_group = data_f[seed_str] seed_group.attrs["config"] = OmegaConf.to_yaml(config) - except IOError: + except OSError: time.sleep(0.1) continue else: break - - @hydra.main(config_path="configs/", config_name="diff-sorp") @@ -84,49 +93,41 @@ def main(config: DictConfig): # Imports should be nested inside @hydra.main to optimize tab completion # Read more here: https://github.com/facebookresearch/hydra/issues/934 - + # Change to original working directory to import modules - + temp_path = os.getcwd() os.chdir(get_original_cwd()) - - from src import utils - import h5py - - # Change back to the hydra working directory + + # Change back to the hydra working directory os.chdir(temp_path) - + work_path = os.path.dirname(config.work_dir) output_path = os.path.join(work_path, config.data_dir, config.output_path) if not os.path.isdir(output_path): os.makedirs(output_path) - config.output_path = os.path.join(output_path, config.output_path) + '.h5' - + config.output_path = os.path.join(output_path, config.output_path) + ".h5" + num_samples_init = 0 num_samples_final = 10000 - + pool = mp.Pool(mp.cpu_count()) seed = np.arange(num_samples_init, num_samples_final) seed = seed.tolist() pool.starmap(simulator, zip(repeat(config), seed)) - + if config.upload: dataverse_upload( file_path=config.output_path, - dataverse_url=os.getenv( - 'DATAVERSE_URL', 'https://darus.uni-stuttgart.de'), - dataverse_token=os.getenv( - 'DATAVERSE_API_TOKEN', ''), + dataverse_url=os.getenv("DATAVERSE_URL", "https://darus.uni-stuttgart.de"), + dataverse_token=os.getenv("DATAVERSE_API_TOKEN", ""), dataverse_dir=config.name, - dataverse_id=os.getenv( - 'DATAVERSE_ID', ''), - log=log) - - return + dataverse_id=os.getenv("DATAVERSE_ID", ""), + log=log, + ) import os if __name__ == "__main__": test = main() - \ No newline at end of file diff --git a/pdebench/data_gen/gen_ns_incomp.py b/pdebench/data_gen/gen_ns_incomp.py index 2542965..d66c36f 100644 --- a/pdebench/data_gen/gen_ns_incomp.py +++ b/pdebench/data_gen/gen_ns_incomp.py @@ -1,8 +1,9 @@ #!/usr/bin/env python +from __future__ import annotations -import hydra import dotenv -from omegaconf import DictConfig, OmegaConf +import hydra +from omegaconf import DictConfig # load environment variables from `.env` file if it exists # recursively searches for `.env` in all folders starting from work dir @@ -17,8 +18,9 @@ def main(config: DictConfig): Args: config: This function uses hydra configuration for all parameters. 
""" - + from src import sim_ns_incomp_2d + sim_ns_incomp_2d.ns_sim(config=config, **config) diff --git a/pdebench/data_gen/gen_radial_dam_break.py b/pdebench/data_gen/gen_radial_dam_break.py index 89b0e8a..664cd93 100644 --- a/pdebench/data_gen/gen_radial_dam_break.py +++ b/pdebench/data_gen/gen_radial_dam_break.py @@ -1,7 +1,8 @@ #!/usr/bin/env python +from __future__ import annotations -from copy import deepcopy import os +from copy import deepcopy # load environment variables from `.env` file if it exists # recursively searches for `.env` in all folders starting from work dir @@ -21,25 +22,24 @@ os.environ["NUMEXPR_NUM_THREADS"] = num_threads os.environ["NUMEXPR_MAX_THREADS"] = num_threads -import hydra -from hydra.utils import get_original_cwd -from omegaconf import DictConfig, OmegaConf -import h5py import logging import multiprocessing as mp +import time from itertools import repeat -from pdebench.data_gen.src import utils + +import h5py +import hydra import numpy as np -from pdebench.data_gen.uploader import dataverse_upload -import time +from hydra.utils import get_original_cwd +from omegaconf import DictConfig, OmegaConf +from pdebench.data_gen.src import utils from pdebench.data_gen.src.sim_radial_dam_break import RadialDamBreak2D +from pdebench.data_gen.uploader import dataverse_upload log = logging.getLogger(__name__) def simulator(base_config, i): - - config = deepcopy(base_config) config.sim.seed = i log.info(f"Starting seed {i}") @@ -60,14 +60,14 @@ def simulator(base_config, i): duration = time.time() - start_time seed_str = str(i).zfill(4) log.info(f"Seed {seed_str} took {duration} to finish") - + while True: try: with h5py.File(utils.expand_path(config.output_path), "a") as h5_file: scenario.save_state_to_disk(h5_file, seed_str) seed_group = h5_file[seed_str] seed_group.attrs["config"] = OmegaConf.to_yaml(config) - except IOError: + except OSError: time.sleep(0.1) continue else: @@ -82,7 +82,7 @@ def simulator(base_config, i): dataverse_id=os.getenv("DATAVERSE_ID", ""), log=log, ) - + @hydra.main(config_path="configs/", config_name="radial_dam_break") def main(config: DictConfig): @@ -98,26 +98,24 @@ def main(config: DictConfig): temp_path = os.getcwd() os.chdir(get_original_cwd()) - - # Change back to the hydra working directory + + # Change back to the hydra working directory os.chdir(temp_path) - + work_path = os.path.dirname(config.work_dir) output_path = os.path.join(work_path, config.data_dir, config.output_path) if not os.path.isdir(output_path): os.makedirs(output_path) - config.output_path = os.path.join(output_path, config.output_path) + '.h5' + config.output_path = os.path.join(output_path, config.output_path) + ".h5" num_samples_init = 0 num_samples_final = 10000 - + pool = mp.Pool(mp.cpu_count()) seed = np.arange(num_samples_init, num_samples_final) seed = seed.tolist() pool.starmap(simulator, zip(repeat(config), seed)) - return - if __name__ == "__main__": main() diff --git a/pdebench/data_gen/plot.py b/pdebench/data_gen/plot.py index 02a3c6f..fd952ad 100644 --- a/pdebench/data_gen/plot.py +++ b/pdebench/data_gen/plot.py @@ -1,19 +1,18 @@ -# -*- coding: utf-8 -*- """ Created on Wed May 4 09:53:18 2022 @author: timot """ +from __future__ import annotations - +import h5py import hydra -from omegaconf import DictConfig import numpy as np -import h5py -import matplotlib.pyplot as plt from hydra.utils import get_original_cwd +from omegaconf import DictConfig from pdebench.data_gen.src.plots import plot_data + @hydra.main(config_path="configs/", 
config_name="diff-sorp") def main(config: DictConfig): """ @@ -25,11 +24,11 @@ def main(config: DictConfig): if not os.path.isdir(output_path): os.makedirs(output_path) config.output_path = os.path.join(output_path, config.output_path) - + # Open and load file - data_path = config.output_path + '.h5' + data_path = config.output_path + ".h5" h5_file = h5py.File(data_path, "r") - + if "seed" in config.sim.keys(): # Choose random sample number idx_max = 10000 if config.plot.dim == 1 else 1000 @@ -46,7 +45,7 @@ def main(config: DictConfig): t = np.array(h5_file["grid/t"], dtype="f") t = t[postfix] # data dim = [t, x1, ..., xd, v] - + h5_file.close() os.chdir(get_original_cwd()) @@ -60,8 +59,6 @@ def main(config: DictConfig): config.name + "_" + postfix + ".png", ) - return - import os diff --git a/pdebench/data_gen/src/_attic/grf.py b/pdebench/data_gen/src/_attic/grf.py index 5004f65..5a14504 100644 --- a/pdebench/data_gen/src/_attic/grf.py +++ b/pdebench/data_gen/src/_attic/grf.py @@ -1,17 +1,19 @@ +from __future__ import annotations + import jax import jax.numpy as jnp -def grf( - seed: int = 1234, - xdim: int = 256, - ydim: int = 256, - sigma: float = 0.1, - rho: float = 0.1, - n: int = 1, - ): +def grf( + seed: int = 1234, + xdim: int = 256, + ydim: int = 256, + sigma: float = 0.1, + rho: float = 0.1, + n: int = 1, +): """ - Variables seeting for random + Variables seeding for random seed : random seed sigma : scale(?) @@ -25,21 +27,22 @@ def grf( """ rng = jax.random.PRNGKey(seed) fx, fy = jnp.meshgrid( - jnp.fft.fftfreq(xdim) * xdim, - jnp.fft.rfftfreq(ydim) * ydim, - indexing = 'ij') + jnp.fft.fftfreq(xdim) * xdim, jnp.fft.rfftfreq(ydim) * ydim, indexing="ij" + ) nfx, nfy = fx.shape fnorm = jnp.sqrt(fx**2 + fy**2) - power = jnp.exp(-(fnorm**2/rho)) - gain = jnp.sqrt(sigma**2 * power/power.sum()) # Lazy not calculating normalisation + power = jnp.exp(-(fnorm**2 / rho)) + gain = jnp.sqrt( + sigma**2 * power / power.sum() + ) # Lazy not calculating normalisation noise = ( jax.random.normal(rng, (n, nfx, nfy)) + jax.random.normal(rng, (n, nfx, nfy)) * 1j ) - noise = noise.at[...,0].set(jnp.abs(noise[..., 0])) - ## TODO: This is the rbf kernel; Matern kernel has more plausible smoothness. - ## Matern 3/2 PSD is - #(18 * jnp.sqrt(3)* jnp.pi * sigma**2)/((4 * k^2 * jnp.pi**2 + 3/(rho**2))^(5/2) rho^3) + noise = noise.at[..., 0].set(jnp.abs(noise[..., 0])) + # TODO: This is the rbf kernel; Matern kernel has more plausible smoothness. 
+ # Matern 3/2 PSD is + # (18 * jnp.sqrt(3)* jnp.pi * sigma**2)/((4 * k^2 * jnp.pi**2 + 3/(rho**2))^(5/2) rho^3) field = jnp.fft.irfft2(noise * gain, (xdim, ydim), norm="forward") return field diff --git a/pdebench/data_gen/src/data_io.py b/pdebench/data_gen/src/data_io.py index 64ea494..f3da0da 100644 --- a/pdebench/data_gen/src/data_io.py +++ b/pdebench/data_gen/src/data_io.py @@ -1,47 +1,39 @@ -import os, os.path -import subprocess +from __future__ import annotations + import json import logging +import os +import os.path +import subprocess +import h5py +import numpy as np +from omegaconf import OmegaConf +from phi.field import Field from phi.flow import * -from phi.field import Field from phi.math import Shape -import numpy as np -import h5py - -from omegaconf import DictConfig, OmegaConf - - log = logging.getLogger(__name__) -def dims_for( - n_steps=1000, - grid_size=(100, 100), - frame_int = 1, - n_batch = 1, - **kwargs): +def dims_for(n_steps=1000, grid_size=(100, 100), frame_int=1, n_batch=1, **kwargs): """ return a dict of fields and their shapes """ - n_frames = ((n_steps-1)//frame_int) + 1 + n_frames = ((n_steps - 1) // frame_int) + 1 return dict( - velocity = (n_batch, n_frames, *grid_size, len(grid_size)), - particles= (n_batch, n_frames, *grid_size, 1), - force= (n_batch, *grid_size, len(grid_size)), - t= (n_batch, n_frames), + velocity=(n_batch, n_frames, *grid_size, len(grid_size)), + particles=(n_batch, n_frames, *grid_size, 1), + force=(n_batch, *grid_size, len(grid_size)), + t=(n_batch, n_frames), ) def dict_for(config): spec = dims_for(**config) - data_store = dict( - latest_index = -1, - config = config - ) + data_store = dict(latest_index=-1, config=config) for field_name, full_shape in spec.items(): - data_store[field_name] = np.ndarray(full_shape, dtype='float32') + data_store[field_name] = np.ndarray(full_shape, dtype="float32") return data_store @@ -50,48 +42,49 @@ def h5_for(config): spec = dims_for(**config) log.info(f"spec: {spec}") fname = f"{config['sim_name']}-{config['seed']}.h5" - data_store = h5py.File(fname, 'a') - data_store.attrs['config'] = OmegaConf.to_yaml(config) - data_store.attrs['latestIndex'] = -1 + data_store = h5py.File(fname, "a") + data_store.attrs["config"] = OmegaConf.to_yaml(config) + data_store.attrs["latestIndex"] = -1 for field_name, full_shape in spec.items(): # dataset shape is (batch, t_length, x1, ..., xd, v) - chunk_shape = (1, 1, *full_shape[2:]) # chunk shape in (1, 1, x1, ..., xd, v) + chunk_shape = (1, 1, *full_shape[2:]) # chunk shape in (1, 1, x1, ..., xd, v) # Open a dataset, creating it if it doesn’t exist. - data_store.require_dataset( + data_store.require_dataset( field_name, full_shape, - 'float32', + "float32", compression="lzf", chunks=chunk_shape, - shuffle=True) + shuffle=True, + ) return data_store def to_centre_grid(field: Field) -> CenteredGrid: - ''' + """ resample the input `Field` and return a corresponding `CenterGrid` used because the `StaggeredGrid`, which is usually the Object for velocity, does pack into nice tensors for typical neural nets - ''' + """ if isinstance(field, CenteredGrid): return field return CenteredGrid(field, resolution=field.shape.spatial, bounds=field.bounds) def _get_dim_order(shape: Shape): - ''' + """ Return a tuple of string, represents the order of dimensions e.g. ('batch','x','y','vector') If the current Shape does not have channel dims, fill in "vector" as 1. 
- ''' - batchNames = shape.batch.names if (shape.batch_rank > 0) else ('batch',) - channelNames = shape.channel.names if (shape.channel_rank > 0) else ('vector',) + """ + batchNames = shape.batch.names if (shape.batch_rank > 0) else ("batch",) + channelNames = shape.channel.names if (shape.channel_rank > 0) else ("vector",) return batchNames + shape.spatial.names + channelNames def to_ndarray(field: Field) -> np.ndarray: - ''' + """ Turn the current Field into ndarray, with shape (batch, x1, ..., xd, v) - ''' + """ centered = to_centre_grid(field) order = _get_dim_order(centered.shape) ndarray = centered.values.numpy(order=order) @@ -99,30 +92,33 @@ def to_ndarray(field: Field) -> np.ndarray: def dataverse_upload( - file_path, - dataverse_url, - dataverse_token, - dataverse_id, - dataverse_dir=None, - retry=10): - ''' + file_path, + dataverse_url, + dataverse_token, + dataverse_id, + dataverse_dir=None, + retry=10, +): + """ Upload a file to dataverse - ''' - darus_struct = { - "description":"", - "categories":["Data"], - "restrict": "false" - } + """ + darus_struct = {"description": "", "categories": ["Data"], "restrict": "false"} if dataverse_dir is not None: darus_struct["directoryLabel"] = f"{dataverse_dir}/" cmd = [ "curl", - "-X", "POST", - "-H", f"X-Dataverse-key:{dataverse_token}", - "-F", f"file=@{file_path}", - "-F", 'jsonData='+json.dumps(darus_struct), + "-X", + "POST", + "-H", + f"X-Dataverse-key:{dataverse_token}", + "-F", + f"file=@{file_path}", + "-F", + "jsonData=" + json.dumps(darus_struct), f"{dataverse_url}/api/datasets/:persistentId/add?persistentId={dataverse_id}", - "--retry", str(retry)] + "--retry", + str(retry), + ] log.info(f"upload cmd {cmd}") subprocess.Popen(cmd) log.info(f"upload cmd {os.getcwd()}$ {' '.join(cmd)}") diff --git a/pdebench/data_gen/src/plots.py b/pdebench/data_gen/src/plots.py index 29b95c5..cba9a74 100644 --- a/pdebench/data_gen/src/plots.py +++ b/pdebench/data_gen/src/plots.py @@ -2,11 +2,12 @@ Author : John Kim, Simon Brown, Timothy Praditia PDE Simulation packages """ +from __future__ import annotations + +import imageio import matplotlib.pyplot as plt import numpy as np -import imageio import phi.vis as phivis -import os def plot_data(data, t, dim, channel, t_fraction, config, filename): @@ -37,7 +38,7 @@ def plot_data(data, t, dim, channel, t_fraction, config, filename): plt.savefig(filename) -def save_phi_plot(result, title, filepath, bbox_inches='tight', pad_inches=0): +def save_phi_plot(result, title, filepath, bbox_inches="tight", pad_inches=0): """ save one custom figure from an array """ @@ -47,25 +48,41 @@ def save_phi_plot(result, title, filepath, bbox_inches='tight', pad_inches=0): plt.close() -def phi_plots(results, T_results, title, filepath, scale = 1, bbox_inches='tight', pad_inches=0): +def phi_plots( + results, T_results, title, filepath, scale=1, bbox_inches="tight", pad_inches=0 +): """ Save simulation custom figures, get images list """ images = [] upperfilepath = filepath for i, arr in enumerate(T_results): - filename = '{}.png'.format(title) - if upperfilepath == '': + filename = f"{title}.png" + if upperfilepath == "": filepath = filename else: - filepath = upperfilepath + '/{}'.format(filename) + filepath = upperfilepath + f"/{filename}" save_phi_plot( - scale * results[i], title, filepath, bbox_inches=bbox_inches, pad_inches=pad_inches) + scale * results[i], + title, + filepath, + bbox_inches=bbox_inches, + pad_inches=pad_inches, + ) images.append(imageio.imread(filepath)) return images -def 
save_sim_figures(results, T_results, simulation_name, kinematic_value, filepath, scale = 1, bbox_inches='tight', pad_inches=0): +def save_sim_figures( + results, + T_results, + simulation_name, + kinematic_value, + filepath, + scale=1, + bbox_inches="tight", + pad_inches=0, +): """ save figures, get images list """ @@ -73,14 +90,15 @@ def save_sim_figures(results, T_results, simulation_name, kinematic_value, filep upperfilepath = filepath for i, arr in enumerate(T_results): res = arr[0] - title = '{}_{}_t={}'.format(simulation_name, kinematic_value, round(T_results[i], 2)) - filename = '{}.png'.format(title) - if upperfilepath == '': + title = f"{simulation_name}_{kinematic_value}_t={round(T_results[i], 2)}" + filename = f"{title}.png" + if upperfilepath == "": filepath = filename else: - filepath = upperfilepath + '/{}'.format(filename) + filepath = upperfilepath + f"/{filename}" save_phi_plot( - scale * res, title, filepath, bbox_inches=bbox_inches, pad_inches=pad_inches) + scale * res, title, filepath, bbox_inches=bbox_inches, pad_inches=pad_inches + ) images.append(imageio.imread(filepath)) return images diff --git a/pdebench/data_gen/src/pytorch_dataset.py b/pdebench/data_gen/src/pytorch_dataset.py index 4cd2c6b..942523a 100644 --- a/pdebench/data_gen/src/pytorch_dataset.py +++ b/pdebench/data_gen/src/pytorch_dataset.py @@ -1,7 +1,10 @@ -import h5py -from torch.utils.data import Dataset, DataLoader +from __future__ import annotations + from pathlib import Path + +import h5py from pytorch_lightning import LightningDataModule +from torch.utils.data import DataLoader, Dataset class HDF5Dataset(Dataset): @@ -14,7 +17,7 @@ def __init__(self, dir_path, transform=None): super().__init__() path = Path(dir_path) assert path.is_dir() - files_path = list(path.glob('*.h5')) # all .h5 files' path + files_path = list(path.glob("*.h5")) # all .h5 files' path assert len(files_path) > 0 self.data_info = {} @@ -25,7 +28,7 @@ def __init__(self, dir_path, transform=None): for files_path in files_path: with h5py.File(str(files_path.resolve())) as f: - config = f.attrs.get('config') + config = f.attrs.get("config") for ds_name, ds in f.items(): self.names.append(ds_name) b = ds.shape[0] @@ -60,6 +63,7 @@ def _load_data(self, idx): # PATH_DATASETS = 'dummy_dataset' + class HDF5DatasetLightning(LightningDataModule): def __init__(self, data_dir: str, batch_size: int = 64, transforms=None): super().__init__() @@ -78,7 +82,7 @@ def train_dataloader(self): if __name__ == "__main__": - dir_path = 'download_dataset' # random_force_field--ns_sim--10.h5 in this directory + dir_path = "download_dataset" # random_force_field--ns_sim--10.h5 in this directory # test pytorch dataset dataset = HDF5Dataset(dir_path=dir_path, transform=None) @@ -86,8 +90,8 @@ def train_dataloader(self): dataloader = DataLoader(dataset, batch_size=64, shuffle=True) data, config = next(iter(dataloader)) for i, d in enumerate(data): - print(f'{names[i].upper()} batched data shape: ', d.size()) - print('number of config files: ', len(config)) + print(f"{names[i].upper()} batched data shape: ", d.size()) + print("number of config files: ", len(config)) # test pytorch lightning dataset lightning_dataset = HDF5DatasetLightning(dir_path, batch_size=64, transforms=None) @@ -95,5 +99,5 @@ def train_dataloader(self): lightning_dataloader = lightning_dataset.train_dataloader() data, config = next(iter(lightning_dataloader)) for i, d in enumerate(data): - print(f'{names[i].upper()} batched data shape: ', d.size()) - print('number of config files: ', 
len(config)) + print(f"{names[i].upper()} batched data shape: ", d.size()) + print("number of config files: ", len(config)) diff --git a/pdebench/data_gen/src/sim_diff_react.py b/pdebench/data_gen/src/sim_diff_react.py index 821e7cb..027eb53 100644 --- a/pdebench/data_gen/src/sim_diff_react.py +++ b/pdebench/data_gen/src/sim_diff_react.py @@ -1,25 +1,29 @@ +from __future__ import annotations + +import logging + import numpy as np from scipy.integrate import solve_ivp from scipy.sparse import diags -import logging + class Simulator: - - def __init__(self, - Du: float = 1E-3, - Dv: float = 5E-3, - k: float = 5E-3, - t: float = 50, - tdim: int = 501, - x_left: float = -1.0, - x_right: float = 1.0, - xdim: int = 50, - y_bottom: float = -1.0, - y_top: float = 1.0, - ydim: int = 50, - n: int = 1, - seed: int = 0): - + def __init__( + self, + Du: float = 1e-3, + Dv: float = 5e-3, + k: float = 5e-3, + t: float = 50, + tdim: int = 501, + x_left: float = -1.0, + x_right: float = 1.0, + xdim: int = 50, + y_bottom: float = -1.0, + y_top: float = 1.0, + ydim: int = 50, + n: int = 1, + seed: int = 0, + ): """ Constructor method initializing the parameters for the diffusion reaction problem. @@ -47,70 +51,72 @@ def __init__(self, self.X1 = x_right self.Y0 = y_bottom self.Y1 = y_top - + self.Nx = xdim self.Ny = ydim self.Nt = tdim - - # Calculate grid size and generate grid - self.dx = (self.X1 - self.X0)/(self.Nx) - self.dy = (self.Y1 - self.Y0)/(self.Ny) - - self.x = np.linspace(self.X0 + self.dx/2, self.X1 - self.dx/2, self.Nx) - self.y = np.linspace(self.Y0 + self.dy/2, self.Y1 - self.dy/2, self.Ny) - + + # Calculate grid size and generate grid + self.dx = (self.X1 - self.X0) / (self.Nx) + self.dy = (self.Y1 - self.Y0) / (self.Ny) + + self.x = np.linspace(self.X0 + self.dx / 2, self.X1 - self.dx / 2, self.Nx) + self.y = np.linspace(self.Y0 + self.dy / 2, self.Y1 - self.dy / 2, self.Ny) + # Time steps to store the simulation results self.t = np.linspace(0, self.T, self.Nt) - + # Initialize the logger self.log = logging.getLogger(__name__) - + self.seed = seed - + def generate_sample(self): """ Single sample generation using the parameters of this simulator.
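For orientation, a hedged usage sketch of this diffusion-reaction simulator (grid and time resolution shrunk for a quick run; assumes the pdebench package is importable):

from pdebench.data_gen.src.sim_diff_react import Simulator

sim = Simulator(xdim=32, ydim=32, tdim=51, seed=0)  # coarse, fast settings
sample = sim.generate_sample()
print(sample.shape)  # (51, 32, 32, 2): activator u and inhibitor v in the last axis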
:return: The generated sample as numpy array(t, x, y, num_features) """ - + np.random.seed(self.seed) - - u0 = np.random.randn(self.Nx*self.Ny) - v0 = np.random.randn(self.Nx*self.Ny) - - u0 = u0.reshape(self.Nx*self.Ny) - v0 = v0.reshape(self.Nx*self.Ny) - u0 = np.concatenate((u0,v0)) - + + u0 = np.random.randn(self.Nx * self.Ny) + v0 = np.random.randn(self.Nx * self.Ny) + + u0 = u0.reshape(self.Nx * self.Ny) + v0 = v0.reshape(self.Nx * self.Ny) + u0 = np.concatenate((u0, v0)) + # # Normalize u0 # u0 = 2 * (u0 - u0.min()) / (u0.max() - u0.min()) - 1 # Generate arrays as diagonal inputs to the Laplacian matrix - main_diag = -2*np.ones(self.Nx)/self.dx**2 -2*np.ones(self.Nx)/self.dy**2 - main_diag[0] = -1/self.dx**2 -2/self.dy**2 - main_diag[-1] = -1/self.dx**2 -2/self.dy**2 + main_diag = ( + -2 * np.ones(self.Nx) / self.dx**2 - 2 * np.ones(self.Nx) / self.dy**2 + ) + main_diag[0] = -1 / self.dx**2 - 2 / self.dy**2 + main_diag[-1] = -1 / self.dx**2 - 2 / self.dy**2 main_diag = np.tile(main_diag, self.Ny) - main_diag[:self.Nx] = -2/self.dx**2 -1/self.dy**2 - main_diag[self.Nx*(self.Ny-1):] = -2/self.dx**2 -1/self.dy**2 - main_diag[0] = -1/self.dx**2 -1/self.dy**2 - main_diag[self.Nx-1] = -1/self.dx**2 -1/self.dy**2 - main_diag[self.Nx*(self.Ny-1)] = -1/self.dx**2 -1/self.dy**2 - main_diag[-1] = -1/self.dx**2 -1/self.dy**2 - + main_diag[: self.Nx] = -2 / self.dx**2 - 1 / self.dy**2 + main_diag[self.Nx * (self.Ny - 1) :] = -2 / self.dx**2 - 1 / self.dy**2 + main_diag[0] = -1 / self.dx**2 - 1 / self.dy**2 + main_diag[self.Nx - 1] = -1 / self.dx**2 - 1 / self.dy**2 + main_diag[self.Nx * (self.Ny - 1)] = -1 / self.dx**2 - 1 / self.dy**2 + main_diag[-1] = -1 / self.dx**2 - 1 / self.dy**2 + left_diag = np.ones(self.Nx) left_diag[0] = 0 left_diag = np.tile(left_diag, self.Ny) - left_diag = left_diag[1:]/self.dx**2 - + left_diag = left_diag[1:] / self.dx**2 + right_diag = np.ones(self.Nx) right_diag[-1] = 0 right_diag = np.tile(right_diag, self.Ny) - right_diag = right_diag[:-1]/self.dx**2 - - bottom_diag = np.ones(self.Nx*(self.Ny-1))/self.dy**2 - - top_diag = np.ones(self.Nx*(self.Ny-1))/self.dy**2 - + right_diag = right_diag[:-1] / self.dx**2 + + bottom_diag = np.ones(self.Nx * (self.Ny - 1)) / self.dy**2 + + top_diag = np.ones(self.Nx * (self.Ny - 1)) / self.dy**2 + # Generate the sparse Laplacian matrix diagonals = [main_diag, left_diag, right_diag, bottom_diag, top_diag] offsets = [0, -1, 1, -self.Nx, self.Nx] @@ -120,10 +126,14 @@ def generate_sample(self): prob = solve_ivp(self.rc_ode, (0, self.T), u0, t_eval=self.t) ode_data = prob.y - sample_u = np.transpose(ode_data[:self.Nx*self.Ny]).reshape(-1,self.Ny,self.Nx) - sample_v = np.transpose(ode_data[self.Nx*self.Ny:]).reshape(-1,self.Ny,self.Nx) + sample_u = np.transpose(ode_data[: self.Nx * self.Ny]).reshape( + -1, self.Ny, self.Nx + ) + sample_v = np.transpose(ode_data[self.Nx * self.Ny :]).reshape( + -1, self.Ny, self.Nx + ) - return np.stack((sample_u, sample_v),axis=-1) + return np.stack((sample_u, sample_v), axis=-1) def rc_ode(self, t, y): """ @@ -132,23 +142,23 @@ def rc_ode(self, t, y): :param y: The equation values to solve :return: A finite volume solution """ - + # Separate y into u and v - u = y[:self.Nx*self.Ny] - v = y[self.Nx*self.Ny:] - + u = y[: self.Nx * self.Ny] + v = y[self.Nx * self.Ny :] + # Calculate reaction function for each unknown react_u = u - u**3 - self.k - v react_v = u - v - + # Calculate time derivative for each unknown u_t = react_u + self.Du * (self.lap @ u) v_t = react_v + self.Dv * (self.lap @ v) - + # 
Stack the time derivative into a single array y_t - y_t = np.concatenate((u_t,v_t)) - + y_t = np.concatenate((u_t, v_t)) + # Log the simulation progress # self.log.info('t = ' + str(t)) - + return y_t diff --git a/pdebench/data_gen/src/sim_diff_sorp.py b/pdebench/data_gen/src/sim_diff_sorp.py index 7e84aaa..4160400 100644 --- a/pdebench/data_gen/src/sim_diff_sorp.py +++ b/pdebench/data_gen/src/sim_diff_sorp.py @@ -1,25 +1,28 @@ +from __future__ import annotations + +import logging + import numpy as np from scipy.integrate import solve_ivp from scipy.sparse import diags -import logging + class Simulator: - - def __init__(self, - D: float = 5E-4, - por: float = 0.29, - rho_s: float = 2880, - k_f: float = 3.5E-4, - n_f: float = 0.874, - sol: float = 1.0, - t: float = 2500, - tdim: int = 501, - x_left: float = 0.0, - x_right: float = 1.0, - xdim: int = 50, - n: int = 1, - seed: int = 0): - + def __init__( + self, + D: float = 5e-4, + por: float = 0.29, + rho_s: float = 2880, + k_f: float = 3.5e-4, + n_f: float = 0.874, + sol: float = 1.0, + t: float = 2500, + tdim: int = 501, + x_left: float = 0.0, + x_right: float = 1.0, + xdim: int = 50, + seed: int = 0, + ): """ Constructor method initializing the parameters for the diffusion sorption problem. @@ -34,7 +37,6 @@ def __init__(self, :param x_left: Left end of the 2D simulation field :param x_right: Right end of the 2D simulation field :param xdim: Number of spatial steps between x_left and x_right - :param n: Number of batches """ # Set class parameters @@ -48,82 +50,73 @@ def __init__(self, self.T = t self.X0 = x_left self.X1 = x_right - + self.Nx = xdim self.Nt = tdim - - # Calculate grid size and generate grid - self.dx = (self.X1 - self.X0)/(self.Nx) - self.x = np.linspace(self.X0 + self.dx/2, self.X1 - self.dx/2, self.Nx) - + + # Calculate grid size and generate grid + self.dx = (self.X1 - self.X0) / (self.Nx) + self.x = np.linspace(self.X0 + self.dx / 2, self.X1 - self.dx / 2, self.Nx) + # Time steps to store the simulation results self.t = np.linspace(0, self.T, self.Nt) - + # Initialize the logger self.log = logging.getLogger(__name__) - + self.seed = seed - - def generate_sample(self): + + def generate_sample(self) -> np.ndarray: """ Single sample generation using the parameters of this simulator. :return: The generated sample as numpy array(t, x, y, num_features) """ - np.random.seed(self.seed) - + generator = np.random.default_rng(self.seed) # Generate initial condition - u0 = np.ones(self.Nx) * np.random.uniform(0,0.2) + u0 = np.ones(self.Nx) * generator.uniform(0, 0.2) # Generate arrays as diagonal inputs to the Laplacian matrix - main_diag = -2*np.ones(self.Nx)/self.dx**2 - - left_diag = np.ones(self.Nx-1)/self.dx**2 - - right_diag = np.ones(self.Nx-1)/self.dx**2 - + main_diag = -2 * np.ones(self.Nx) / self.dx**2 + + left_diag = np.ones(self.Nx - 1) / self.dx**2 + + right_diag = np.ones(self.Nx - 1) / self.dx**2 + # Generate the sparse Laplacian matrix diagonals = [main_diag, left_diag, right_diag] offsets = [0, -1, 1] self.lap = diags(diagonals, offsets) - + # Initialize the right hand side to account for the boundary condition self.rhs = np.zeros(self.Nx) # Solve the diffusion reaction problem prob = solve_ivp(self.rc_ode, (0, self.T), u0, t_eval=self.t) ode_data = prob.y - + sample_c = np.transpose(ode_data) - + return np.expand_dims(sample_c, axis=-1) - def rc_ode(self, t, y): + def rc_ode(self, t: float, y): """ Solves a given equation for a particular time step. 
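Both the reaction-diffusion and the sorption simulator follow the same method-of-lines recipe: assemble a sparse Laplacian with `scipy.sparse.diags`, then let `solve_ivp` integrate the resulting ODE system. A self-contained sketch of the recipe on the 1D heat equation (all values illustrative, not from the source):

import numpy as np
from scipy.integrate import solve_ivp
from scipy.sparse import diags

nx = 50
dx = 1.0 / nx
D = 1e-3

# Second-difference Laplacian as a sparse matrix.
lap = diags(
    [np.full(nx, -2.0), np.ones(nx - 1), np.ones(nx - 1)], [0, -1, 1]
) / dx**2

x = np.linspace(dx / 2, 1.0 - dx / 2, nx)  # cell centres, as in the simulators
u0 = np.exp(-((x - 0.5) ** 2) / 0.01)  # Gaussian initial bump

sol = solve_ivp(lambda t, u: D * (lap @ u), (0.0, 1.0), u0, t_eval=[0.0, 0.5, 1.0])
print(sol.y.shape)  # (nx, 3): one column per requested time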
:param t: The current time step :param y: The equation values to solve :return: A finite volume solution """ - - c = y - + # Define left and right boundary conditions left_BC = self.sol - right_BC = (c[-2]-c[-1])/self.dx * self.D - + right_BC = (y[-2] - y[-1]) / self.dx * self.D + # Calculate the Freundlich retardation factor - retardation = 1 + ((1 - self.por)/self.por)*self.rho_s\ - *self.k_f*self.n_f*(c + 1e-6)**(self.n_f-1) - + retardation = 1 + ( + (1 - self.por) / self.por + ) * self.rho_s * self.k_f * self.n_f * (y + 1e-6) ** (self.n_f - 1) + # Calculate the right hand side - self.rhs[0] = self.D/retardation[0]/(self.dx**2)*left_BC - self.rhs[-1] = self.D/retardation[-1]/(self.dx**2)*right_BC - - # Calculate time derivative - c_t = self.D/retardation * (self.lap @ c) + self.rhs - y_t = c_t - - # Log the simulation progress - # self.log.info('t = ' + str(t)) - - return y_t + self.rhs[0] = self.D / retardation[0] / (self.dx**2) * left_BC + self.rhs[-1] = self.D / retardation[-1] / (self.dx**2) * right_BC + + return self.D / retardation * (self.lap @ y) + self.rhs diff --git a/pdebench/data_gen/src/sim_ns_incomp_2d.py b/pdebench/data_gen/src/sim_ns_incomp_2d.py index 0b3f82c..79509f9 100644 --- a/pdebench/data_gen/src/sim_ns_incomp_2d.py +++ b/pdebench/data_gen/src/sim_ns_incomp_2d.py @@ -2,83 +2,120 @@ Author : John Kim, Ran Zhang, Dan MacKinlay PDE Simulation packages """ -from contextlib import nullcontext -from typing import Optional +from __future__ import annotations + import logging import os +from contextlib import nullcontext +from typing import Optional import imageio import numpy as np -from tqdm import tqdm -import hydra from pdebench.data_gen.src import data_io -from pdebench.data_gen.src import image_processor +from tqdm import tqdm -log = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO, filename=__name__) +logging.root.setLevel(logging.INFO) # import wandb -def call_many(fn_list, *args, **kwargs): + +def call_many(fn_list, *args, **kwargs) -> list[callable]: """ Surely there is already a helper function for this somewhere. inverse map? 
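The driver below registers per-frame writer callbacks and fires them all through `call_many`. A toy sketch of that fan-out pattern (callback bodies illustrative):

def call_many(fn_list, *args, **kwargs):
    """Call every function in fn_list with the same arguments."""
    return [fn(*args, **kwargs) for fn in fn_list]

frames = []
callbacks = [
    lambda frame_i, t: frames.append((frame_i, t)),  # e.g. write frame to HDF5
    lambda frame_i, t: print(f"stored frame {frame_i} at t={t}"),  # e.g. log it
]
call_many(callbacks, frame_i=0, t=0.0)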
""" return [fn(*args, **kwargs) for fn in fn_list] - + def ns_sim( - seed: int, - label: Optional[str]=None, - sim_name: str='ns_sim_2d', - particle_extrapolation:str='BOUNDARY', - velocity_extrapolation:str='ZERO', - NU: float=0.01, - scale: float= 10.0, - smoothness: float=3.0, - grid_size=(100,100), - enable_gravity: bool=False, - enable_obstacles: bool=False, - force_extrapolation: str='ZERO', - save_images: bool=True, - save_gif: bool=True, - save_h5:bool=True, - n_steps: int=10, - DT: float=0.01, - frame_int: int=1, - n_batch=1, - backend='jax', - device='GPU', - jit=True, - profile: bool=False, - upload: bool=False, - exec_dir: Optional[str]=None, - artefact_dir: Optional[str] = None, #hackish way of writing artefacts to a good location without fighting hydra's conf interpolation - dataverse: Optional[dict] = None, - config={}, - ): + seed: int, + label: Optional[str] = None, + sim_name: str = "ns_sim_2d", + particle_extrapolation: str = "BOUNDARY", + velocity_extrapolation: str = "ZERO", + NU: float = 0.01, + scale: float = 10.0, + smoothness: float = 3.0, + grid_size=(100, 100), + enable_gravity: bool = False, + enable_obstacles: bool = False, + force_extrapolation: str = "ZERO", + save_images: bool = True, + save_gif: bool = True, + save_h5: bool = True, + n_steps: int = 10, + DT: float = 0.01, + frame_int: int = 1, + n_batch: int = 1, + backend: str = "jax", + device: str = "GPU", + jit: bool = True, + profile: bool = False, + upload: bool = False, + dataverse: Optional[dict] = None, + config: dict | None = None, +): """ Run the actual simulation. """ - # log.info(f"exec_dir {exec_dir}") - # log.info(f"orig_dir {hydra.utils.get_original_cwd()}") - # log.info(f"artefact_dir {artefact_dir}") - # log.info(f"WORKING_DIR {os.getenv('WORKING_DIR')}") - # log.info(f"cwd {os.getcwd()}") - - if backend == 'jax': - from phi.jax.flow import extrapolation, Box, Obstacle, advect, diffuse, fluid, math, Solve, CenteredGrid, StaggeredGrid, Noise, batch - elif backend == 'pytorch': - from phi.torch.flow import extrapolation, Box, Obstacle, advect, diffuse, fluid, math, Solve, CenteredGrid, StaggeredGrid, Noise, batch + if config is None: + config = {} + + if backend == "jax": + from phi.jax.flow import ( + Box, + CenteredGrid, + Noise, + Obstacle, + Solve, + StaggeredGrid, + advect, + batch, + diffuse, + extrapolation, + fluid, + math, + ) + elif backend == "pytorch": from phi.torch import TORCH + from phi.torch.flow import ( + Box, + CenteredGrid, + Noise, + Obstacle, + Solve, + StaggeredGrid, + advect, + batch, + diffuse, + extrapolation, + fluid, + math, + ) + TORCH.set_default_device(device) else: - from phi.flow import extrapolation, Box, Obstacle, advect, diffuse, fluid, math, Solve, CenteredGrid, StaggeredGrid, Noise, batch + from phi.flow import ( + Box, + CenteredGrid, + Noise, + Obstacle, + Solve, + StaggeredGrid, + advect, + batch, + diffuse, + extrapolation, + fluid, + math, + ) # from torch.profiler import profile, record_function, ProfilerActivity - def bounds_select(x, y): + def bounds_select(x: int | None = None, y: int | None = None) -> Box: """ This function generates a 2D Phiflow Box with scale from zero to the number indicated by the bounds or infinite. 
@@ -89,16 +126,15 @@ def bounds_select(x, y): Returns: Box: A Box type of Phiflow """ - if x == None: + if x is None: return Box[:, 0:y] - elif y == None: + if y is None: return Box[0:x, :] - else: - return Box[0:x, 0:y] - - + return Box[0:x, 0:y] - def cauchy_momentum_step(velocity, particles, body_acceleration, NU, DT, obstacles = None): + def cauchy_momentum_step( + velocity, particles, body_acceleration, NU, DT, obstacles: tuple | None = None + ) -> tuple[fluid.Field, fluid.Field]: """ Navier-Stokes Simulation cauchy_momentum_step returns velocity and particles by solving cauchy momentum equation for one step @@ -112,23 +148,25 @@ def cauchy_momentum_step(velocity, particles, body_acceleration, NU, DT, obstacl **kwargs : Other obstacles (Simulation constraints etc) """ # Set empty obstacle as empty tuple - if obstacles == None: + if obstacles is None: obstacles = () # Computing velocity term first # Cauchy-momentum equation - velocity = advect.semi_lagrangian(velocity, velocity, dt=DT) # advection - velocity = diffuse.explicit(velocity, NU, dt=DT) # diffusion - + velocity = advect.semi_lagrangian(velocity, velocity, dt=DT) # advection + velocity = diffuse.explicit(velocity, NU, dt=DT) # diffusion + # Add external body_acceleration, constraints - velocity += DT * particles * body_acceleration # external body_acceleration - velocity = fluid.apply_boundary_conditions(velocity, obstacles) # obstacles - + velocity += DT * particles * body_acceleration # external body_acceleration + velocity = fluid.apply_boundary_conditions(velocity, obstacles) # obstacles + # Make incompressible - velocity, _ = fluid.make_incompressible(velocity, obstacles, solve=Solve('CG-adaptive', 1e-3, 0, x0=None)) # particles - - # Computing particles term next + velocity, _ = fluid.make_incompressible( + velocity, obstacles, solve=Solve("CG-adaptive", 1e-3, 0, x0=None) + ) # particles + + # Computing particles term next particles = advect.semi_lagrangian(particles, velocity, dt=DT) - + return velocity, particles # Setting the random seed for simulation. This is a global seed for Phiflow. @@ -140,18 +178,16 @@ def cauchy_momentum_step(velocity, particles, body_acceleration, NU, DT, obstacl if save_h5: data_store = data_io.h5_for(config) h5_path = data_store.filename + def _store(frame_i, t, particles, velocity, **kwargs): - data_store['particles'][:, frame_i, ...] = data_io.to_ndarray(particles) - data_store['velocity'][:, frame_i, ...] = data_io.to_ndarray(velocity) - data_store['t'][:, frame_i] = t - data_store.attrs['latestIndex'] = frame_i - callbacks.append( - _store - ) - cleanups.append( - lambda *args, **kwargs: data_store.close() - ) - ## Move output to artefacts dir here + data_store["particles"][:, frame_i, ...] = data_io.to_ndarray(particles) + data_store["velocity"][:, frame_i, ...] 
= data_io.to_ndarray(velocity) + data_store["t"][:, frame_i] = t + data_store.attrs["latestIndex"] = frame_i + + callbacks.append(_store) + cleanups.append(lambda *args, **kwargs: data_store.close()) + # Move output to artefacts dir here # if artefact_dir is not None: # cleanups.append( # lambda *args, **kwargs: data_store.close() @@ -162,30 +198,30 @@ def _store(frame_i, t, particles, velocity, **kwargs): lambda *args, **kwargs: data_io.dataverse_upload( file_path=h5_path, dataverse_url=os.getenv( - 'DATAVERSE_URL', 'https://darus.uni-stuttgart.de'), - dataverse_token=os.getenv( - 'DATAVERSE_API_TOKEN', ''), + "DATAVERSE_URL", "https://darus.uni-stuttgart.de" + ), + dataverse_token=os.getenv("DATAVERSE_API_TOKEN", ""), dataverse_dir=label, - dataverse_id=dataverse['dataset_id'], + dataverse_id=dataverse["dataset_id"], ) ) else: data_store = data_io.dict_for(config) + def _store(frame_i, t, particles, velocity, **kwargs): - data_store['particles'][:, frame_i, ...] = data_io.to_ndarray(particles) - data_store['velocity'][:, frame_i, ...] = data_io.to_ndarray(velocity) - data_store['t'][:, frame_i] = t - callbacks.append( - _store - ) + data_store["particles"][:, frame_i, ...] = data_io.to_ndarray(particles) + data_store["velocity"][:, frame_i, ...] = data_io.to_ndarray(velocity) + data_store["t"][:, frame_i] = t + + callbacks.append(_store) if save_images: + def _save_img(frame_i, t, particles, velocity, **kwargs): particles_images.append() velocity_images.append() - callbacks.append( - _save_img - ) + + callbacks.append(_save_img) profile_ctx = nullcontext() if profile: @@ -194,8 +230,7 @@ def _save_img(frame_i, t, particles, velocity, **kwargs): with profile_ctx as prof: # Initialization of the particles (i.e. density of the flow) grid with a Phiflow Noise() method particles = CenteredGrid( - Noise(batch(batch=n_batch), - scale=scale, smoothness=smoothness), + Noise(batch(batch=n_batch), scale=scale, smoothness=smoothness), extrapolation=getattr(extrapolation, particle_extrapolation), x=grid_size[0], y=grid_size[1], @@ -213,17 +248,17 @@ def _save_img(frame_i, t, particles, velocity, **kwargs): # Initialization of the force grid. The force is also a staggered grid with a Phiflow Noise() method or using gravity if enable_gravity: - force = math.tensor(batch(batch=n_batch),[0, -9.81]) + force = math.tensor(batch(batch=n_batch), [0, -9.81]) else: force = StaggeredGrid( - Noise(batch(batch=n_batch),vector=2), + Noise(batch(batch=n_batch), vector=2), extrapolation=getattr(extrapolation, force_extrapolation), x=grid_size[0], y=grid_size[1], bounds=bounds_select(*grid_size), ) - data_store['force'][:,...] = data_io.to_ndarray(force) + data_store["force"][:, ...] = data_io.to_ndarray(force) # Set the obstacles. Obstacles are not enabled by default. 
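A condensed, standalone sketch of the operator-splitting sequence implemented by `cauchy_momentum_step` above (advection, diffusion, pressure projection, then passive transport of the marker field; the body-force and obstacle terms of the full step are omitted, and grid size, NU and DT are illustrative):

from phi.flow import (
    CenteredGrid,
    Noise,
    Solve,
    StaggeredGrid,
    advect,
    diffuse,
    extrapolation,
    fluid,
)

NU, DT = 0.01, 0.01
velocity = StaggeredGrid(0, extrapolation.ZERO, x=64, y=64)  # start at rest
particles = CenteredGrid(Noise(), extrapolation.BOUNDARY, x=64, y=64)

velocity = advect.semi_lagrangian(velocity, velocity, dt=DT)  # advection
velocity = diffuse.explicit(velocity, NU, dt=DT)  # diffusion
velocity, _ = fluid.make_incompressible(
    velocity, (), solve=Solve("CG-adaptive", 1e-3, 0, x0=None)
)  # pressure projection
particles = advect.semi_lagrangian(particles, velocity, dt=DT)  # passive transport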
obstacles = [] @@ -235,44 +270,53 @@ def _save_img(frame_i, t, particles, velocity, **kwargs): # Use "python gen_ns_incomp.py save_gif=false" for not saving .gif animation and only save to pictures call_many( - callbacks, frame_i=0, t=0.0, - velocity=velocity, particles=particles, prof=prof) + callbacks, + frame_i=0, + t=0.0, + velocity=velocity, + particles=particles, + prof=prof, + ) - def sim_step(velocity, particles): - return cauchy_momentum_step( - velocity, particles, force, NU, DT, obstacles) + def sim_step(velocity, particles) -> tuple[fluid.Field, fluid.Field]: + return cauchy_momentum_step(velocity, particles, force, NU, DT, obstacles) if jit: sim_step = math.jit_compile(sim_step) - ts = np.linspace(0, n_steps*DT, n_steps, endpoint=False) - n_steps_actual = ((n_steps-1)//frame_int) * frame_int + 1 + ts = np.linspace(0, n_steps * DT, n_steps, endpoint=False) + n_steps_actual = ((n_steps - 1) // frame_int) * frame_int + 1 ts = ts[1:n_steps_actual] - # log.info("ts: {}".format(ts)) for step, t in enumerate(tqdm(ts), start=1): velocity, particles = sim_step( - velocity, particles,) - + velocity, + particles, + ) + if step % frame_int == 0: - frame_i = step//frame_int - log.info(f"step {step} frame_i {frame_i}") + frame_i = step // frame_int + msg = f"step {step} frame_i {frame_i}" + logging.info(msg) call_many( - callbacks, frame_i=frame_i, t=t, - velocity=velocity, particles=particles, prof=prof) + callbacks, + frame_i=frame_i, + t=t, + velocity=velocity, + particles=particles, + prof=prof, + ) if save_images and save_gif: # Saving images into .gif animation imageio.mimsave( - "{}_velocity.gif".format(sim_name), + f"{sim_name}_velocity.gif", velocity_images, duration=DT, ) imageio.mimsave( - "{}_particles.gif".format(sim_name), + f"{sim_name}_particles.gif", particles_images, duration=DT, ) call_many(cleanups) - - diff --git a/pdebench/data_gen/src/sim_radial_dam_break.py b/pdebench/data_gen/src/sim_radial_dam_break.py index 7d92550..bdbc949 100644 --- a/pdebench/data_gen/src/sim_radial_dam_break.py +++ b/pdebench/data_gen/src/sim_radial_dam_break.py @@ -1,15 +1,15 @@ -from abc import abstractmethod -from abc import ABC +from __future__ import annotations +import logging import os -import sys -import time +from abc import ABC, abstractmethod -import h5py import numpy as np import torch -from clawpack import riemann -from clawpack import pyclaw +from clawpack import pyclaw, riemann + +logging.basicConfig(level=logging.INFO, filename=__name__) +logging.root.setLevel(logging.INFO) class Basic2DScenario(ABC): @@ -66,7 +66,7 @@ def __get_hu(self): def __get_hv(self): return self.claw_state.q[self.momentumId_y, :].tolist() - def register_state_getters(self): + def register_state_getters(self) -> None: self.state_getters = { "h": self.__get_h, "u": self.__get_u, @@ -75,11 +75,11 @@ def register_state_getters(self): "hv": self.__get_hv, } - def add_save_state(self): + def add_save_state(self) -> None: for key, getter in self.state_getters.items(): self.save_state[key].append(getter()) - def init_save_state(self, T, tsteps): + def init_save_state(self, T: float, tsteps: int) -> None: self.save_state = {} self.save_state["x"] = self.domain.grid.x.centers.tolist() self.save_state["y"] = self.domain.grid.y.centers.tolist() @@ -87,7 +87,7 @@ def init_save_state(self, T, tsteps): for key, getter in self.state_getters.items(): self.save_state[key] = [getter()] - def save_state_to_disk(self, data_f, seed_str): + def save_state_to_disk(self, data_f, seed_str) -> None: T = 
np.asarray(self.save_state["t"]) X = np.asarray(self.save_state["x"]) Y = np.asarray(self.save_state["y"]) @@ -98,29 +98,35 @@ def save_state_to_disk(self, data_f, seed_str): data_f.create_dataset(f"{seed_str}/grid/y", data=Y, dtype="f") data_f.create_dataset(f"{seed_str}/grid/t", data=T, dtype="f") - def simulate(self, t): + def simulate(self, t) -> None: if all(v is not None for v in [self.domain, self.claw_state, self.solver]): self.solver.evolve_to_time(self.solution, t) else: - print("Simulate failed: No scenario defined.") + msg = "Simulate failed: No scenario defined." + logging.info(msg) - def run(self, T=1.0, tsteps=20, plot=False): + def run(self, T: float = 1.0, tsteps: int = 20) -> None: self.init_save_state(T, tsteps) self.solution = pyclaw.Solution(self.claw_state, self.domain) - dt = T / tsteps - start = time.time() + dt = T / float(tsteps) + for tstep in range(1, tsteps + 1): t = tstep * dt - # print("Simulating timestep {}/{} at t={:f}".format(tstep, tsteps, t)) self.simulate(t) self.add_save_state() - # print("Simulation took: {}".format(time.time() - start)) class RadialDamBreak2D(Basic2DScenario): name = "RadialDamBreak" - def __init__(self, xdim, ydim, grav=1.0, dam_radius=0.5, inner_height=2.0): + def __init__( + self, + xdim, + ydim, + grav: float = 1.0, + dam_radius: float = 0.5, + inner_height: float = 2.0, + ): self.depthId = 0 self.momentumId_x = 1 self.momentumId_y = 2 @@ -132,7 +138,7 @@ def __init__(self, xdim, ydim, grav=1.0, dam_radius=0.5, inner_height=2.0): super().__init__() # self.state_getters['bathymetry'] = self.__get_bathymetry - def setup_solver(self): + def setup_solver(self) -> None: rs = riemann.shallow_roe_with_efix_2D self.solver = pyclaw.ClawSolver2D(rs) self.solver.limiters = pyclaw.limiters.tvd.MC @@ -143,7 +149,7 @@ def setup_solver(self): self.momentumId_x = 1 self.momentumId_y = 2 - def create_domain(self): + def create_domain(self) -> None: self.xlower = -2.5 self.xupper = 2.5 self.ylower = -2.5 @@ -155,7 +161,7 @@ def create_domain(self): self.domain = pyclaw.Domain([x, y]) self.claw_state = pyclaw.State(self.domain, self.solver.num_eqn) - def set_boundary_conditions(self): + def set_boundary_conditions(self) -> None: """ Sets homogeneous Neumann boundary conditions at each end for q=(u, h*u) and for the bathymetry (auxiliary variable). 
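The scenario class above wires pyclaw together in a fixed order: Riemann solver, domain and state, boundary conditions, initial conditions, then `evolve_to_time` inside `run`. A condensed sketch of that wiring (resolution and times illustrative; the API calls are the ones used in this file):

from clawpack import pyclaw, riemann

solver = pyclaw.ClawSolver2D(riemann.shallow_roe_with_efix_2D)
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0] = solver.bc_upper[0] = pyclaw.BC.extrap  # zero-gradient in x
solver.bc_lower[1] = solver.bc_upper[1] = pyclaw.BC.extrap  # zero-gradient in y

x = pyclaw.Dimension(-2.5, 2.5, 64, name="x")
y = pyclaw.Dimension(-2.5, 2.5, 64, name="y")
domain = pyclaw.Domain([x, y])
state = pyclaw.State(domain, solver.num_eqn)
state.problem_data["grav"] = 1.0
# ... fill state.q with initial h, hu, hv, then wrap in pyclaw.Solution and
# advance with solver.evolve_to_time(solution, t), as run() does above.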
@@ -165,8 +171,7 @@ def set_boundary_conditions(self): self.solver.bc_lower[1] = pyclaw.BC.extrap self.solver.bc_upper[1] = pyclaw.BC.extrap - @staticmethod - def initial_h(coords): + def initial_h(self, coords): x0 = 0.0 y0 = 0.0 x = coords[:, 0] @@ -177,17 +182,17 @@ def initial_h(coords): return h_in * (r <= self.dam_radius) + h_out * (r > self.dam_radius) @staticmethod - def initial_momentum_x(coords): + def initial_momentum_x() -> torch.Tensor: return torch.tensor(0.0) @staticmethod - def initial_momentum_y(coords): + def initial_momentum_y() -> torch.Tensor: return torch.tensor(0.0) def __get_bathymetry(self): return self.claw_state.aux[0, :].tolist() - def set_initial_conditions(self): + def set_initial_conditions(self) -> None: self.claw_state.problem_data["grav"] = self.grav xc = self.claw_state.grid.x.centers diff --git a/pdebench/data_gen/src/utils.py b/pdebench/data_gen/src/utils.py index 1193d91..8e284bc 100644 --- a/pdebench/data_gen/src/utils.py +++ b/pdebench/data_gen/src/utils.py @@ -1,17 +1,17 @@ -import logging -import warnings -from typing import List, Sequence -import os +from __future__ import annotations + import glob +import os from pprint import pprint from omegaconf import DictConfig, OmegaConf + def expand_path(path, unique=True): """ Resolve a path that may contain variables and user home directory references. """ - return os.path.expandvars(os.path.expanduser(path)) + return os.path.expandvars(os.path.expanduser(path)) def matching_paths(glob_exp): @@ -28,22 +28,25 @@ def resolve_path(path, idx=None, unique=True): if "unique" is True, and there are many matches, panic. Otherwise return the result at index "idx", which could reasonably be 0 or -1; if it is, we sort the list of files """ - matches = matching_paths(path) + matches = matching_paths(path) if idx is None: idx = 0 else: matches = sorted(matches) - + if unique and len(matches) > 1: - raise ValueError("Too many matches for glob: {}".format(path)) + raise ValueError(f"Too many matches for glob: {path}") else: try: return matches[idx] except IndexError: - raise FileNotFoundError("No matches for glob: {}".format(path)) + raise FileNotFoundError(f"No matches for glob: {path}") -def print_config(config: DictConfig, resolve: bool = True,): +def print_config( + config: DictConfig, + resolve: bool = True, +): """ basic pretty-printer for omegaconf configs """ diff --git a/pdebench/data_gen/src/vorticity.py b/pdebench/data_gen/src/vorticity.py new file mode 100644 index 0000000..385e5f3 --- /dev/null +++ b/pdebench/data_gen/src/vorticity.py @@ -0,0 +1,149 @@ +r""" Generate vorticity field :math:`\boldsymbol{\omega} = \nabla \times \boldsymbol{v}` given + velocity field :math:`\boldsymbol{v}` using numerical approximation. + +Assuming the velocity field of shape [n, sx, sy, sz, 3] (5D) consists of a trajectory on an equidistant grid, +the vorticity field is calculated by using spectral derivatives and the Fast Fourier Transform +such that :math:`\mathcal{F}\{\frac{\partial f}{\partial x}\} = i \omega \mathcal{F}\{f\}`. + +The code is inspired by +Brunton, S. L., & Kutz, J. N. (2022). Data-Driven Science and Engineering: Machine Learning, Dynamical Systems, and Control (2nd ed.). +and adapted to operate on 5D data. + +This module provides the functions + - compute_spectral_vorticity_np (numpy) + - compute_spectral_vorticity_jnp (jax.numpy) + +for approximating the vorticity field.
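The spectral-derivative identity this module relies on is easy to sanity-check in one dimension: differentiating sin(x) through the FFT should return cos(x) to near machine precision. A small numpy sketch (grid size illustrative):

import numpy as np

n = 128
x = np.linspace(0.0, 2.0 * np.pi, n, endpoint=False)
f = np.sin(x)

kappa = 2.0 * np.pi * np.fft.fftfreq(n, d=x[1] - x[0])  # angular wavenumbers
df = np.fft.ifft(1j * kappa * np.fft.fft(f)).real  # spectral d/dx

assert np.allclose(df, np.cos(x), atol=1e-10)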
+""" +from __future__ import annotations + +import jax +import jax.numpy as jnp +import numpy as np + + +def compute_spectral_vorticity_np( + velocities: np.ndarray, dx: float, dy: float, dz: float +) -> np.ndarray: + r"""Compute vorcitity field of a [n, sx, sy, sz, 3] field, + where n denotes the number of timesteps, sx, sy, sz are the number + of bins in x-, y-, z-direction. + + :param velocities: 5D Velocity field of shape [n, sx, sy, sz, 3]. + :param lx: length x-direction + :type lx: float + :param ly: length y-direction + :type ly: float + :param lz: length z-direction + :type lz: float + :type velocities: np.ndarray + :raises ValueError: If `velocities` is not a 5D array or + of shape [n, sx, sy, sz, 3]. + :return: Vorticity field :math:`\boldsymbol{\omega} \in \mathbb{R}^{n \times s_x \times s_y \times s_z \times 3}` + :rtype: np.ndarray + """ + + if velocities.ndim != 5 or velocities.shape[-1] != 3: + msg = "Expected 5D array of shape [n, sx, sy, sz, 3]!" + raise ValueError(msg) + + dx = abs(dx) + dy = abs(dy) + dz = abs(dz) + + vx = velocities[..., 0] + vy = velocities[..., 1] + vz = velocities[..., 2] + + fxy = np.fft.fft(vx, axis=2) + fyx = np.fft.fft(vy, axis=1) + fxz = np.fft.fft(vx, axis=3) + fzx = np.fft.fft(vz, axis=1) + fyz = np.fft.fft(vy, axis=3) + fzy = np.fft.fft(vz, axis=2) + + kappa_xy = 2.0 * np.pi * np.fft.fftfreq(n=fxy.shape[2], d=dy) + kappa_yx = 2.0 * np.pi * np.fft.fftfreq(n=fyx.shape[1], d=dx) + kappa_xz = 2.0 * np.pi * np.fft.fftfreq(n=fxz.shape[3], d=dz) + kappa_zx = 2.0 * np.pi * np.fft.fftfreq(n=fzx.shape[1], d=dx) + kappa_yz = 2.0 * np.pi * np.fft.fftfreq(n=fyz.shape[3], d=dz) + kappa_zy = 2.0 * np.pi * np.fft.fftfreq(n=fzy.shape[2], d=dy) + + vxy = np.fft.ifft(1j * kappa_xy[None, None, :, None] * fxy, axis=2).real + vyx = np.fft.ifft(1j * kappa_yx[None, :, None, None] * fyx, axis=1).real + vxz = np.fft.ifft(1j * kappa_xz[None, None, None, :] * fxz, axis=3).real + vzx = np.fft.ifft(1j * kappa_zx[None, :, None, None] * fzx, axis=1).real + vyz = np.fft.ifft(1j * kappa_yz[None, None, None, :] * fyz, axis=3).real + vzy = np.fft.ifft(1j * kappa_zy[None, None, :, None] * fzy, axis=2).real + + omega_x = vzy - vyz + omega_y = vxz - vzx + omega_z = vyx - vxy + + return np.concatenate( + [omega_x[..., None], omega_y[..., None], omega_z[..., None]], axis=-1 + ) + + +@jax.jit # type: ignore[misc] +def compute_spectral_vorticity_jnp( + velocities: jnp.ndarray, dx: float, dy: float, dz: float +) -> jnp.ndarray: + r"""Compute vorcitity field of a [n, sx, sy, sz, 3] field, + where n denotes the number of timesteps, sx, sy, sz are the number + of bins in x-, y-, z-direction. In this case computations are performed on GPU + + :param velocities: 5D Velocity field of shape [n, sx, sy, sz, 3]. + :param lx: length x-direction + :type lx: float + :param ly: length y-direction + :type ly: float + :param lz: length z-direction + :type lz: float + :type velocities: np.ndarray + :raises ValueError: If `velocities` is not a 5D array or + of shape [n, sx, sy, sz, 3]. + :return: Vorticity field :math:`\boldsymbol{\omega} \in \mathbb{R}^{n \times s_x \times s_y \times s_z \times 3}` + :rtype: np.ndarray + """ + + if velocities.ndim != 5 or velocities.shape[-1] != 3: + msg = "Expected 5D array of shape [n, sx, sy, sz, 3]!" 
+ raise ValueError(msg) + + dx = abs(dx) + dy = abs(dy) + dz = abs(dz) + + vx = velocities[..., 0] + vy = velocities[..., 1] + vz = velocities[..., 2] + + fxy = jnp.fft.fft(vx, axis=2) + fyx = jnp.fft.fft(vy, axis=1) + fxz = jnp.fft.fft(vx, axis=3) + fzx = jnp.fft.fft(vz, axis=1) + fyz = jnp.fft.fft(vy, axis=3) + fzy = jnp.fft.fft(vz, axis=2) + + kappa_xy = 2.0 * jnp.pi * jnp.fft.fftfreq(n=fxy.shape[2], d=dy) + kappa_yx = 2.0 * jnp.pi * jnp.fft.fftfreq(n=fyx.shape[1], d=dx) + kappa_xz = 2.0 * jnp.pi * jnp.fft.fftfreq(n=fxz.shape[3], d=dz) + kappa_zx = 2.0 * jnp.pi * jnp.fft.fftfreq(n=fzx.shape[1], d=dx) + kappa_yz = 2.0 * jnp.pi * jnp.fft.fftfreq(n=fyz.shape[3], d=dz) + kappa_zy = 2.0 * jnp.pi * jnp.fft.fftfreq(n=fzy.shape[2], d=dy) + + vxy = jnp.fft.ifft(1j * kappa_xy[None, None, :, None] * fxy, axis=2).real + vyx = jnp.fft.ifft(1j * kappa_yx[None, :, None, None] * fyx, axis=1).real + vxz = jnp.fft.ifft(1j * kappa_xz[None, None, None, :] * fxz, axis=3).real + vzx = jnp.fft.ifft(1j * kappa_zx[None, :, None, None] * fzx, axis=1).real + vyz = jnp.fft.ifft(1j * kappa_yz[None, None, None, :] * fyz, axis=3).real + vzy = jnp.fft.ifft(1j * kappa_zy[None, None, :, None] * fzy, axis=2).real + + omega_x = vzy - vyz + omega_y = vxz - vzx + omega_z = vyx - vxy + + return jnp.concatenate( + [omega_x[..., None], omega_y[..., None], omega_z[..., None]], axis=-1 + ) diff --git a/pdebench/data_gen/uploader.py b/pdebench/data_gen/uploader.py index c111903..02eb262 100644 --- a/pdebench/data_gen/uploader.py +++ b/pdebench/data_gen/uploader.py @@ -1,29 +1,42 @@ -import subprocess +from __future__ import annotations + import json +import subprocess + def dataverse_upload( - file_path, - dataverse_url, - dataverse_token, - dataverse_dir, - dataverse_id, - log, - retry=10): - ''' + file_path, + dataverse_url, + dataverse_token, + dataverse_dir, + dataverse_id, + log, + retry=10, +): + """ Upload a file to dataverse - ''' + """ cmd = [ "curl", - "-X", "POST", - "-H", f"X-Dataverse-key:{dataverse_token}", - "-F", f"file=@{file_path}", - "-F", 'jsonData='+json.dumps({ - "description":"", - "directoryLabel":f"{dataverse_dir}/", - "categories":["Data"], - "restrict": "false" - }), + "-X", + "POST", + "-H", + f"X-Dataverse-key:{dataverse_token}", + "-F", + f"file=@{file_path}", + "-F", + "jsonData=" + + json.dumps( + { + "description": "", + "directoryLabel": f"{dataverse_dir}/", + "categories": ["Data"], + "restrict": "false", + } + ), f"{dataverse_url}/api/datasets/:persistentId/add?persistentId={dataverse_id}", - "--retry", str(retry)] + "--retry", + str(retry), + ] log.info(cmd) - subprocess.Popen(cmd) \ No newline at end of file + subprocess.Popen(cmd) diff --git a/pdebench/data_gen/velocity2vorticity.py b/pdebench/data_gen/velocity2vorticity.py new file mode 100644 index 0000000..8181dc4 --- /dev/null +++ b/pdebench/data_gen/velocity2vorticity.py @@ -0,0 +1,104 @@ +""" Convert a velocity field to a vorticity field, assuming a 3D CFD example was downloaded. + The resulting .hdf5 file does not store pressure and density.
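A short usage sketch of the jax variant defined above (array sizes and spacings illustrative); the output keeps the input's shape, with the three curl components stacked in the last axis:

import jax.numpy as jnp
from pdebench.data_gen.src.vorticity import compute_spectral_vorticity_jnp

vel = jnp.zeros((2, 16, 16, 16, 3))  # [n, sx, sy, sz, 3]; contents illustrative
omega = compute_spectral_vorticity_jnp(vel, 0.1, 0.1, 0.1)
print(omega.shape)  # (2, 16, 16, 16, 3)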
+""" +from __future__ import annotations + +import argparse +from pathlib import Path + +import h5py as h5 +import jax.numpy as jnp +import numpy as np +from tqdm import tqdm + +from .src.vorticity import ( + compute_spectral_vorticity_jnp, +) + + +def convert_velocity() -> None: + parser = argparse.ArgumentParser("Convert velocity field to vorticity!") + parser.add_argument( + "-d", + "--data", + type=str, + required=True, + dest="data", + help=" Specify path to .hdf5 data file", + ) + + args = parser.parse_args() + + if not Path(args.data).exists(): + msg = f"{args.data} does not exist!" + raise FileNotFoundError(msg) + + h5file = h5.File(args.data, "r") + fname = args.data.split("/")[-1] + fname = fname.split(".hdf5")[0] + outpath = str(Path(args.data).parent / fname) + "_vorticity.hdf5" + dx = h5file["x-coordinate"][1] - h5file["x-coordinate"][0] + dy = h5file["y-coordinate"][1] - h5file["y-coordinate"][0] + dz = h5file["z-coordinate"][1] - h5file["z-coordinate"][0] + + if not Path(str(outpath)).exists(): + outfile = h5.File(str(outpath), "a") + outfile.create_dataset( + "omega_x", shape=h5file["Vx"].shape, dtype=h5file["Vx"].dtype + ) + outfile.create_dataset( + "omega_y", shape=h5file["Vy"].shape, dtype=h5file["Vy"].dtype + ) + outfile.create_dataset( + "omega_z", shape=h5file["Vz"].shape, dtype=h5file["Vz"].dtype + ) + outfile.create_dataset( + "t-coordinate", + shape=h5file["t-coordinate"].shape, + dtype=h5file["t-coordinate"].dtype, + ) + outfile.create_dataset( + "x-coordinate", + shape=h5file["x-coordinate"].shape, + dtype=h5file["x-coordinate"].dtype, + ) + outfile.create_dataset( + "y-coordinate", + shape=h5file["y-coordinate"].shape, + dtype=h5file["y-coordinate"].dtype, + ) + outfile.create_dataset( + "z-coordinate", + shape=h5file["z-coordinate"].shape, + dtype=h5file["z-coordinate"].dtype, + ) + + xcoords = h5file["x-coordinate"][:] + ycoords = h5file["y-coordinate"][:] + zcoords = h5file["z-coordinate"][:] + + outfile["t-coordinate"][:] = h5file["t-coordinate"][:] + outfile["x-coordinate"][:] = xcoords + outfile["y-coordinate"][:] = ycoords + outfile["z-coordinate"][:] = zcoords + + trials = h5file["Vx"].shape[0] + + for i in tqdm(range(trials), total=trials): + vx = h5file["Vx"][i][:] + vy = h5file["Vy"][i][:] + vz = h5file["Vz"][i][:] + velocity = np.concatenate( + [vx[..., None], vy[..., None], vz[..., None]], axis=-1 + ) + + vorticity = compute_spectral_vorticity_jnp( + jnp.array(velocity), dx, dy, dz) + + outfile["omega_x"][i] = np.array(vorticity[..., 0]) + outfile["omega_y"][i] = np.array(vorticity[..., 1]) + outfile["omega_z"][i] = np.array(vorticity[..., 2]) + + +if __name__ == "__main__": + convert_velocity() diff --git a/pdebench/models/analyse_result_forward.py b/pdebench/models/analyse_result_forward.py index 6397843..e33b16e 100644 --- a/pdebench/models/analyse_result_forward.py +++ b/pdebench/models/analyse_result_forward.py @@ -144,97 +144,113 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
""" -import pandas as pd -import numpy as np +from __future__ import annotations + import glob + import _pickle as cPickle import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + def main(): # get results - files = glob.glob('./*pickle') + files = glob.glob("./*pickle") files.sort() - + # metric names - var_names = ['MSE', 'normalized MSE', 'Conservation MSE', 'Maximum Error', 'MSE at boundary', - 'MSE FT low', 'MSE FT mid', 'MSE FT high'] - + var_names = [ + "MSE", + "normalized MSE", + "Conservation MSE", + "Maximum Error", + "MSE at boundary", + "MSE FT low", + "MSE FT mid", + "MSE FT high", + ] + # define index index1, index2, index3 = [], [], [] for j, fl in enumerate(files): - with open(fl, 'rb') as f: - title = fl.split('\\')[-1][:-7].split('_') + with open(fl, "rb") as f: + title = fl.split("\\")[-1][:-7].split("_") print(title) - if title[0] == '1D': - if title[1] == 'CFD': + if title[0] == "1D": + if title[1] == "CFD": index1.append(title[0] + title[1]) - index2.append(title[3] + title[4] + '_' + title[2] + '_' + title[5]) + index2.append(title[3] + title[4] + "_" + title[2] + "_" + title[5]) index3.append(title[7]) else: index1.append(title[1]) index2.append(title[3]) index3.append(title[4]) - elif title[0] == '2D': - if title[1] == 'CFD': + elif title[0] == "2D": + if title[1] == "CFD": index1.append(title[0] + title[1]) - index2.append(title[3] + title[3] + title[4] + '_' + title[2] + '_' + title[6]) + index2.append( + title[3] + title[3] + title[4] + "_" + title[2] + "_" + title[6] + ) index3.append(title[9]) else: index1.append(title[1]) index2.append(title[2]) index3.append(title[4]) - elif title[0] == '3D': + elif title[0] == "3D": index1.append(title[0] + title[1]) - index2.append(title[3] + title[4] + title[5] + '_' + title[2] + '_' + title[6]) + index2.append( + title[3] + title[4] + title[5] + "_" + title[2] + "_" + title[6] + ) index3.append(title[8]) else: index1.append(title[0]) index2.append(title[1] + title[2]) index3.append(title[3]) indexes = [index1, index2, index3] - + # create dataframe data = np.zeros([len(files), 8]) for j, fl in enumerate(files): - with open(fl, 'rb') as f: + with open(fl, "rb") as f: test = cPickle.load(f) for i, var in enumerate(test): - if i==5: + if i == 5: data[j, i:] = var else: data[j, i] = var - - index = pd.MultiIndex.from_arrays(indexes, names=('PDE', 'param', 'model')) + + index = pd.MultiIndex.from_arrays(indexes, names=("PDE", "param", "model")) data = pd.DataFrame(data, columns=var_names, index=index) - data.to_csv('Results.csv') - + data.to_csv("Results.csv") + pdes = index.get_level_values(0).drop_duplicates() num_pdes = len(pdes) models = index.get_level_values(2).drop_duplicates() num_models = len(models) x = np.arange(num_pdes) - + if num_models == 1: width = 0.5 else: - width = 0.5/(num_models-1) - - fig, ax = plt.subplots(figsize=(8,6)) + width = 0.5 / (num_models - 1) + + fig, ax = plt.subplots(figsize=(8, 6)) for i in range(num_models): - pos = x-0.3 + 0.5/(num_models-1)*i - ax.bar(pos, data[data.index.isin([models[i]],level=2)]['MSE'], width) - + pos = x - 0.3 + 0.5 / (num_models - 1) * i + ax.bar(pos, data[data.index.isin([models[i]], level=2)]["MSE"], width) + ax.set_xticks(x) - ax.set_xticklabels(pdes,fontsize=30) - ax.tick_params(axis='y',labelsize=30) - ax.set_yscale('log') - ax.set_xlabel('PDEs',fontsize=30) - ax.set_ylabel('MSE',fontsize=30) - fig.legend(models,loc=8,ncol=num_models,fontsize=20) - plt.tight_layout(rect=[0,0.1,1,1]) - plt.savefig('Results.pdf') - + ax.set_xticklabels(pdes, 
fontsize=30) + ax.tick_params(axis="y", labelsize=30) + ax.set_yscale("log") + ax.set_xlabel("PDEs", fontsize=30) + ax.set_ylabel("MSE", fontsize=30) + fig.legend(models, loc=8, ncol=num_models, fontsize=20) + plt.tight_layout(rect=[0, 0.1, 1, 1]) + plt.savefig("Results.pdf") + if __name__ == "__main__": main() - print("Done.") \ No newline at end of file + print("Done.") diff --git a/pdebench/models/analyse_result_inverse.py b/pdebench/models/analyse_result_inverse.py index 9111613..a0ad534 100644 --- a/pdebench/models/analyse_result_inverse.py +++ b/pdebench/models/analyse_result_inverse.py @@ -144,40 +144,45 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ -import pandas as pd -import numpy as np -import glob -import _pickle as cPickle +from __future__ import annotations + import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + def main(): - filename = 'inverse.csv' + filename = "inverse.csv" data = pd.read_csv(filename) - pdes = data['pde'].drop_duplicates() + pdes = data["pde"].drop_duplicates() num_pdes = len(pdes) models = list(data.columns.values[-2:]) num_models = len(models) x = np.arange(num_pdes) - width = 0.5/(num_models) - - fig, ax = plt.subplots(figsize=(8,6)) + width = 0.5 / (num_models) + + fig, ax = plt.subplots(figsize=(8, 6)) for i in range(num_models): - pos = x - 0.125 + 0.5/(num_models)*i - ax.bar(pos, data[data.iloc[:,1] == 'mean'][models[i]], - yerr = data[data.iloc[:,1] == 'std'][models[i]], width=width) + pos = x - 0.125 + 0.5 / (num_models) * i + ax.bar( + pos, + data[data.iloc[:, 1] == "mean"][models[i]], + yerr=data[data.iloc[:, 1] == "std"][models[i]], + width=width, + ) print(width, pos) - + ax.set_xticks(x) - ax.set_xticklabels(pdes,rotation=45,fontsize=30) - ax.tick_params(axis='y',labelsize=30) - ax.set_yscale('log') - ax.set_xlabel('PDEs',fontsize=30) - ax.set_ylabel('MSE',fontsize=30) - fig.legend(models,loc=1,ncol=num_models,fontsize=20) + ax.set_xticklabels(pdes, rotation=45, fontsize=30) + ax.tick_params(axis="y", labelsize=30) + ax.set_yscale("log") + ax.set_xlabel("PDEs", fontsize=30) + ax.set_ylabel("MSE", fontsize=30) + fig.legend(models, loc=1, ncol=num_models, fontsize=20) plt.tight_layout() - plt.savefig('ResultsInverse.pdf') - + plt.savefig("ResultsInverse.pdf") + if __name__ == "__main__": main() - print("Done.") \ No newline at end of file + print("Done.") diff --git a/pdebench/models/config/README.md b/pdebench/models/config/README.md index c3d2b2d..41a1a85 100644 --- a/pdebench/models/config/README.md +++ b/pdebench/models/config/README.md @@ -1,37 +1,40 @@ # Config Documentation -This is the documentation of the config files that were used to generate the provided [pre-trained models](https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2987). -Since the default config files for all problems are already provided, this file only provides the values for the arguments that need to be changed. -N/A values mean that the default values can be used. -The complete explanation of the arguments can be found in the [README file](/README.md) +This is the documentation of the config files that were used to generate the +provided +[pre-trained models](https://darus.uni-stuttgart.de/dataset.xhtml?persistentId=doi:10.18419/darus-2987). +Since the default config files for all problems are already provided, this file +only provides the values for the arguments that need to be changed. N/A values +mean that the default values can be used. 
The complete explanation of the +arguments can be found in the [README file](/README.md) -| Pre-trained model | Config filename | model_name| filename (data) | ar_mode | pushforward | unroll_step | modes | width | -| :--- | :---- | :--- | :--- | :--- | :--- | ---: | ---: | ---: | -| 1D_diff-sorp_NA_NA_FNO.pt | config_diff-sorp.yaml | FNO | 1D_diff-sorp_NA_NA | N/A | N/A | N/A | 16 | 64 | -| 1D_diff-sorp_NA_NA_Unet-1-step.pt | config_diff-sorp.yaml | Unet | 1D_diff-sorp_NA_NA | False | False | N/A | N/A | N/A | -| 1D_diff-sorp_NA_NA_Unet-AR.pt | config_diff-sorp.yaml | Unet | 1D_diff-sorp_NA_NA | True | False | N/A | N/A | N/A | -| 1D_diff-sorp_NA_NA_Unet-PF-20.pt | config_diff-sorp.yaml | Unet | 1D_diff-sorp_NA_NA | True | True | 20 | N/A | N/A | -| 1D_diff-sorp_NA_NA_0001.h5_PINN.pt-15000.pt | config_pinn_diff-sorp.yaml | PINN | 1D_diff-sorp_NA_NA.h5 | N/A | N/A | N/A | N/A | N/A | -| 1D_CFD_Shock_trans_Train_FNO.pt | config_1DCFD.yaml | FNO | 1D_CFD_Shock_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5 | N/A | N/A | N/A | 12 | 20 | -| 1D_CFD_Shock_trans_Train_Unet.pt | config_1DCFD.yaml | Unet | 1D_CFD_Shock_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5 | True | True | 20 | N/A | N/A | -| ReacDiff_Nu1.0_Rho2.0_FNO.pt | config_ReacDiff.yaml | FNO | ReacDiff_Nu1.0_Rho2.0.hdf5 | N/A | N/A | N/A | 12 | 20 | -| ReacDiff_Nu1.0_Rho2.0_Unet.pt | config_ReacDiff.yaml | Unet | ReacDiff_Nu1.0_Rho2.0.hdf5 | True | True | 10 | N/A | N/A | -| 1D_Advection_Sols_beta4.0_FNO.pt | config_Adv.yaml | FNO | 1D_Advection_Sols_beta4.0.hdf5 | N/A | N/A | N/A | 12 | 20 | -| 1D_Advection_Sols_beta4.0_Unet.pt | config_Adv.yaml | Unet | 1D_Advection_Sols_beta4.0.hdf5 | True | True | 20 | N/A | N/A | -| 1D_Advection_Sols_beta4.0_PINN.pt-15000.pt | config_pinn_pde1d.yaml | PINN | 1D_Advection_Sols_beta4.0.hdf5 | N/A | N/A | N/A | N/A | N/A | -| 1D_Burgers_Sols_Nu1.0_FNO.pt | config_Bgs.yaml | FNO | 1D_Burgers_Sols_Nu1.0.hdf5 | N/A | N/A | N/A | 12 | 20 | -| 1D_Burgers_Sols_Nu1.0_Unet-PF-20.pt | config_Bgs.yaml | Unet | 1D_Burgers_Sols_Nu1.0.hdf5 | True | True | 20 | N/A | N/A | -| 2D_diff-react_NA_NA_FNO.pt | config_diff-react.yaml | FNO | 2D_diff-react_NA_NA | N/A | N/A | N/A | 12 | 20 | -| 2D_diff-react_NA_NA_Unet-1-step.pt | config_diff-react.yaml | Unet | 2D_diff-react_NA_NA | False | False | N/A | N/A | N/A | -| 2D_diff-react_NA_NA_Unet-AR.pt | config_diff-react.yaml | Unet | 2D_diff-react_NA_NA | True | False | N/A | N/A | N/A | -| 2D_diff-react_NA_NA_Unet-PF-20.pt | config_diff-react.yaml | Unet | 2D_diff-react_NA_NA | True | True | 20 | N/A | N/A | -| 2D_diff-react_NA_NA_0000.h5_PINN.pt-15000.pt | config_pinn_diff-react.yaml | PINN | 2D_diff-react_NA_NA.h5 | N/A | N/A | N/A | N/A | N/A | -| 2D_rdb_NA_NA_FNO.pt | config_rdb.yaml | FNO | 2D_rdb_NA_NA | N/A | N/A | N/A | 12 | 20 | -| 2D_rdb_NA_NA_Unet-1-step.pt | config_rdb.yaml | Unet | 2D_rdb_NA_NA | False | False | N/A | N/A | N/A | -| 2D_rdb_NA_NA_Unet-AR.pt | config_rdb.yaml | Unet | 2D_rdb_NA_NA | True | False | N/A | N/A | N/A | -| 2D_rdb_NA_NA_Unet-PF-20.pt | config_rdb.yaml | Unet | 2D_rdb_NA_NA | True | True | 20 | N/A | N/A | -| 2D_rdb_NA_NA_0000.h5_PINN.pt-15000.pt | config_pinn_swe2d.yaml | PINN | 2D_rdb_NA_NA.h5 | N/A | N/A | N/A | N/A | N/A | -| 2D_DarcyFlow_beta0.01_Train_FNO.pt | config_Darcy.yaml | FNO | 2D_DarcyFlow_beta0.01_Train.hdf5 | N/A | N/A | N/A | 12 | 20 | -| 2D_DarcyFlow_beta0.01_Train_Unet_PF_1.pt | config_Darcy.yaml | Unet | 2D_DarcyFlow_beta0.01_Train.hdf5 | False | False | N/A | N/A | N/A | -| 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train_FNO.pt 
| config_3DCFD.yaml | FNO | 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train.hdf5 | N/A | N/A | N/A | 12 | 20 | -| 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train_Unet-PF-20.pt | config_3DCFD.yaml | Unet | 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train.hdf5 | True | True | 20 | N/A | N/A | \ No newline at end of file +| Pre-trained model | Config filename | model_name | filename (data) | ar_mode | pushforward | unroll_step | modes | width | +| :--------------------------------------------------------------- | :-------------------------- | :--------- | :------------------------------------------------------ | :------ | :---------- | ----------: | ----: | ----: | +| 1D_diff-sorp_NA_NA_FNO.pt | config_diff-sorp.yaml | FNO | 1D_diff-sorp_NA_NA | N/A | N/A | N/A | 16 | 64 | +| 1D_diff-sorp_NA_NA_Unet-1-step.pt | config_diff-sorp.yaml | Unet | 1D_diff-sorp_NA_NA | False | False | N/A | N/A | N/A | +| 1D_diff-sorp_NA_NA_Unet-AR.pt | config_diff-sorp.yaml | Unet | 1D_diff-sorp_NA_NA | True | False | N/A | N/A | N/A | +| 1D_diff-sorp_NA_NA_Unet-PF-20.pt | config_diff-sorp.yaml | Unet | 1D_diff-sorp_NA_NA | True | True | 20 | N/A | N/A | +| 1D_diff-sorp_NA_NA_0001.h5_PINN.pt-15000.pt | config_pinn_diff-sorp.yaml | PINN | 1D_diff-sorp_NA_NA.h5 | N/A | N/A | N/A | N/A | N/A | +| 1D_CFD_Shock_trans_Train_FNO.pt | config_1DCFD.yaml | FNO | 1D_CFD_Shock_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5 | N/A | N/A | N/A | 12 | 20 | +| 1D_CFD_Shock_trans_Train_Unet.pt | config_1DCFD.yaml | Unet | 1D_CFD_Shock_Eta1.e-8_Zeta1.e-8_trans_Train.hdf5 | True | True | 20 | N/A | N/A | +| ReacDiff_Nu1.0_Rho2.0_FNO.pt | config_ReacDiff.yaml | FNO | ReacDiff_Nu1.0_Rho2.0.hdf5 | N/A | N/A | N/A | 12 | 20 | +| ReacDiff_Nu1.0_Rho2.0_Unet.pt | config_ReacDiff.yaml | Unet | ReacDiff_Nu1.0_Rho2.0.hdf5 | True | True | 10 | N/A | N/A | +| 1D_Advection_Sols_beta4.0_FNO.pt | config_Adv.yaml | FNO | 1D_Advection_Sols_beta4.0.hdf5 | N/A | N/A | N/A | 12 | 20 | +| 1D_Advection_Sols_beta4.0_Unet.pt | config_Adv.yaml | Unet | 1D_Advection_Sols_beta4.0.hdf5 | True | True | 20 | N/A | N/A | +| 1D_Advection_Sols_beta4.0_PINN.pt-15000.pt | config_pinn_pde1d.yaml | PINN | 1D_Advection_Sols_beta4.0.hdf5 | N/A | N/A | N/A | N/A | N/A | +| 1D_Burgers_Sols_Nu1.0_FNO.pt | config_Bgs.yaml | FNO | 1D_Burgers_Sols_Nu1.0.hdf5 | N/A | N/A | N/A | 12 | 20 | +| 1D_Burgers_Sols_Nu1.0_Unet-PF-20.pt | config_Bgs.yaml | Unet | 1D_Burgers_Sols_Nu1.0.hdf5 | True | True | 20 | N/A | N/A | +| 2D_diff-react_NA_NA_FNO.pt | config_diff-react.yaml | FNO | 2D_diff-react_NA_NA | N/A | N/A | N/A | 12 | 20 | +| 2D_diff-react_NA_NA_Unet-1-step.pt | config_diff-react.yaml | Unet | 2D_diff-react_NA_NA | False | False | N/A | N/A | N/A | +| 2D_diff-react_NA_NA_Unet-AR.pt | config_diff-react.yaml | Unet | 2D_diff-react_NA_NA | True | False | N/A | N/A | N/A | +| 2D_diff-react_NA_NA_Unet-PF-20.pt | config_diff-react.yaml | Unet | 2D_diff-react_NA_NA | True | True | 20 | N/A | N/A | +| 2D_diff-react_NA_NA_0000.h5_PINN.pt-15000.pt | config_pinn_diff-react.yaml | PINN | 2D_diff-react_NA_NA.h5 | N/A | N/A | N/A | N/A | N/A | +| 2D_rdb_NA_NA_FNO.pt | config_rdb.yaml | FNO | 2D_rdb_NA_NA | N/A | N/A | N/A | 12 | 20 | +| 2D_rdb_NA_NA_Unet-1-step.pt | config_rdb.yaml | Unet | 2D_rdb_NA_NA | False | False | N/A | N/A | N/A | +| 2D_rdb_NA_NA_Unet-AR.pt | config_rdb.yaml | Unet | 2D_rdb_NA_NA | True | False | N/A | N/A | N/A | +| 2D_rdb_NA_NA_Unet-PF-20.pt | config_rdb.yaml | Unet | 2D_rdb_NA_NA | True | True | 20 | N/A | N/A | +| 2D_rdb_NA_NA_0000.h5_PINN.pt-15000.pt | 
config_pinn_swe2d.yaml | PINN | 2D_rdb_NA_NA.h5 | N/A | N/A | N/A | N/A | N/A | +| 2D_DarcyFlow_beta0.01_Train_FNO.pt | config_Darcy.yaml | FNO | 2D_DarcyFlow_beta0.01_Train.hdf5 | N/A | N/A | N/A | 12 | 20 | +| 2D_DarcyFlow_beta0.01_Train_Unet_PF_1.pt | config_Darcy.yaml | Unet | 2D_DarcyFlow_beta0.01_Train.hdf5 | False | False | N/A | N/A | N/A | +| 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train_FNO.pt | config_3DCFD.yaml | FNO | 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train.hdf5 | N/A | N/A | N/A | 12 | 20 | +| 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train_Unet-PF-20.pt | config_3DCFD.yaml | Unet | 3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train.hdf5 | True | True | 20 | N/A | N/A | diff --git a/pdebench/models/config/args/config_1DCFD.yaml b/pdebench/models/config/args/config_1DCFD.yaml index 87863a1..d8d6524 100644 --- a/pdebench/models/config/args/config_1DCFD.yaml +++ b/pdebench/models/config/args/config_1DCFD.yaml @@ -1,4 +1,4 @@ -model_name: 'FNO' +model_name: "FNO" if_training: True continue_training: False num_workers: 2 @@ -6,7 +6,7 @@ batch_size: 100 initial_step: 10 t_train: 100 model_update: 2 -filename: '1D_CFD_periodic_Train.hdf5' +filename: "1D_CFD_periodic_Train.hdf5" single_file: True reduced_resolution: 8 reduced_resolution_t: 5 @@ -34,8 +34,8 @@ mcmc_warmup_steps: 10 mcmc_num_chains: 1 num_samples_max: 1000 in_channels_hid: 64 -inverse_model_type: InitialConditionInterp +inverse_model_type: InitialConditionInterp #Inverse grad inverse_epochs: 100 inverse_learning_rate: 0.2 -inverse_verbose_flag: False \ No newline at end of file +inverse_verbose_flag: False diff --git a/pdebench/models/config/args/config_2DCFD.yaml b/pdebench/models/config/args/config_2DCFD.yaml index 0ae0ce7..c276c0f 100644 --- a/pdebench/models/config/args/config_2DCFD.yaml +++ b/pdebench/models/config/args/config_2DCFD.yaml @@ -1,4 +1,4 @@ -model_name: 'FNO' +model_name: "FNO" if_training: True continue_training: False batch_size: 20 @@ -6,7 +6,7 @@ unroll_step: 20 num_workers: 2 t_train: 21 model_update: 2 -filename: '2D_CFD_M0.1_Eta0.01_Zeta0.01_periodic_128_Train.hdf5' +filename: "2D_CFD_M0.1_Eta0.01_Zeta0.01_periodic_128_Train.hdf5" single_file: True reduced_resolution: 2 reduced_resolution_t: 1 @@ -22,4 +22,4 @@ modes: 12 width: 20 scheduler_step: 100 scheduler_gamma: 0.5 -initial_step: 10 \ No newline at end of file +initial_step: 10 diff --git a/pdebench/models/config/args/config_3DCFD.yaml b/pdebench/models/config/args/config_3DCFD.yaml index 2bbcfc9..25df7aa 100644 --- a/pdebench/models/config/args/config_3DCFD.yaml +++ b/pdebench/models/config/args/config_3DCFD.yaml @@ -1,11 +1,11 @@ -model_name: 'Unet' +model_name: "Unet" if_training: True t_train: 21 continue_training: False batch_size: 5 unroll_step: 20 model_update: 1 -filename: '3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train.hdf5' +filename: "3D_CFD_Rand_M1.0_Eta1e-08_Zeta1e-08_periodic_Train.hdf5" single_file: True reduced_resolution: 2 reduced_resolution_t: 1 @@ -21,4 +21,4 @@ modes: 12 width: 20 scheduler_step: 100 scheduler_gamma: 0.5 -initial_step: 10 # should be the same value to unroll_step ?? \ No newline at end of file +initial_step: 10 # should be the same value to unroll_step ?? 
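These `args/*.yaml` files are flat key-value configs, so they can be inspected or overridden programmatically. A hedged sketch with OmegaConf (already a dependency of this repo), assuming the working directory is `pdebench/models/config`:

from omegaconf import OmegaConf

cfg = OmegaConf.load("args/config_Adv.yaml")
# Override a couple of fields, e.g. for a quick smoke test.
cfg = OmegaConf.merge(cfg, OmegaConf.create({"model_name": "FNO", "epochs": 10}))
print(cfg.model_name, cfg.filename, cfg.epochs)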
diff --git a/pdebench/models/config/args/config_Adv.yaml b/pdebench/models/config/args/config_Adv.yaml index 37d8f5f..54129d6 100644 --- a/pdebench/models/config/args/config_Adv.yaml +++ b/pdebench/models/config/args/config_Adv.yaml @@ -1,27 +1,27 @@ -model_name: 'Unet' -if_training: True -continue_training: False -batch_size: 50 -unroll_step: 20 -t_train: 200 -model_update: 1 -filename: '1D_Advection_Sols_beta4.0.hdf5' -single_file: True -reduced_resolution: 4 -reduced_resolution_t: 5 -reduced_batch: 1 -epochs: 500 -learning_rate: 1.e-3 -num_workers: 0 -#Unet -in_channels: 1 -out_channels: 1 -ar_mode: True -pushforward: True -#FNO -num_channels: 1 -modes: 12 -width: 20 -scheduler_step: 100 -scheduler_gamma: 0.5 -initial_step: 10 \ No newline at end of file +model_name: "Unet" +if_training: True +continue_training: False +batch_size: 50 +unroll_step: 20 +t_train: 200 +model_update: 1 +filename: "1D_Advection_Sols_beta4.0.hdf5" +single_file: True +reduced_resolution: 4 +reduced_resolution_t: 5 +reduced_batch: 1 +epochs: 500 +learning_rate: 1.e-3 +num_workers: 0 +#Unet +in_channels: 1 +out_channels: 1 +ar_mode: True +pushforward: True +#FNO +num_channels: 1 +modes: 12 +width: 20 +scheduler_step: 100 +scheduler_gamma: 0.5 +initial_step: 10 diff --git a/pdebench/models/config/args/config_Bgs.yaml b/pdebench/models/config/args/config_Bgs.yaml index 43e6917..3702b3b 100644 --- a/pdebench/models/config/args/config_Bgs.yaml +++ b/pdebench/models/config/args/config_Bgs.yaml @@ -1,27 +1,27 @@ -model_name: 'Unet' -if_training: True -continue_training: False -batch_size: 50 -t_train: 200 -model_update: 1 -filename: '1D_Burgers_Sols_Nu1.0.hdf5' -single_file: True -reduced_resolution: 4 -reduced_resolution_t: 5 -reduced_batch: 1 -epochs: 500 -learning_rate: 1.e-3 -num_workers: 0 -#Unet -in_channels: 1 -out_channels: 1 -ar_mode: True -pushforward: True -unroll_step: 20 -#FNO -num_channels: 1 -modes: 12 -width: 20 -scheduler_step: 100 -scheduler_gamma: 0.5 -initial_step: 10 \ No newline at end of file +model_name: "Unet" +if_training: True +continue_training: False +batch_size: 50 +t_train: 200 +model_update: 1 +filename: "1D_Burgers_Sols_Nu1.0.hdf5" +single_file: True +reduced_resolution: 4 +reduced_resolution_t: 5 +reduced_batch: 1 +epochs: 500 +learning_rate: 1.e-3 +num_workers: 0 +#Unet +in_channels: 1 +out_channels: 1 +ar_mode: True +pushforward: True +unroll_step: 20 +#FNO +num_channels: 1 +modes: 12 +width: 20 +scheduler_step: 100 +scheduler_gamma: 0.5 +initial_step: 10 diff --git a/pdebench/models/config/args/config_Darcy.yaml b/pdebench/models/config/args/config_Darcy.yaml index bca0bfa..5bd9879 100644 --- a/pdebench/models/config/args/config_Darcy.yaml +++ b/pdebench/models/config/args/config_Darcy.yaml @@ -1,4 +1,4 @@ -model_name: 'FNO' +model_name: "FNO" if_training: True continue_training: False num_workers: 2 @@ -6,7 +6,7 @@ batch_size: 50 initial_step: 10 t_train: 1 model_update: 2 -filename: '2D_DarcyFlow_beta0.01_Train.hdf5' +filename: "2D_DarcyFlow_beta0.01_Train.hdf5" single_file: True reduced_resolution: 2 reduced_resolution_t: 1 @@ -34,8 +34,8 @@ mcmc_warmup_steps: 10 mcmc_num_chains: 1 num_samples_max: 1000 in_channels_hid: 64 -inverse_model_type: InitialConditionInterp +inverse_model_type: InitialConditionInterp #Inverse grad inverse_epochs: 100 inverse_learning_rate: 0.2 -inverse_verbose_flag: False \ No newline at end of file +inverse_verbose_flag: False diff --git a/pdebench/models/config/args/config_ReacDiff.yaml b/pdebench/models/config/args/config_ReacDiff.yaml index 
7b8f045..5ebfe91 100644 --- a/pdebench/models/config/args/config_ReacDiff.yaml +++ b/pdebench/models/config/args/config_ReacDiff.yaml @@ -1,10 +1,10 @@ -model_name: 'Unet' +model_name: "Unet" if_training: True continue_training: False batch_size: 50 t_train: 30 model_update: 1 -filename: 'ReacDiff_Nu0.5_Rho1.0.hdf5' +filename: "ReacDiff_Nu0.5_Rho1.0.hdf5" single_file: True reduced_resolution: 4 reduced_resolution_t: 1 @@ -24,4 +24,4 @@ modes: 12 width: 20 scheduler_step: 100 scheduler_gamma: 0.5 -initial_step: 5 \ No newline at end of file +initial_step: 5 diff --git a/pdebench/models/config/args/config_diff-react.yaml b/pdebench/models/config/args/config_diff-react.yaml index d7a8abe..a650a07 100644 --- a/pdebench/models/config/args/config_diff-react.yaml +++ b/pdebench/models/config/args/config_diff-react.yaml @@ -1,4 +1,4 @@ -model_name: 'FNO' +model_name: "FNO" if_training: False continue_training: False num_workers: 2 @@ -6,7 +6,7 @@ batch_size: 5 initial_step: 10 t_train: 101 model_update: 10 -filename: '2D_diff-react_NA_NA' +filename: "2D_diff-react_NA_NA" single_file: False reduced_resolution: 1 reduced_resolution_t: 1 @@ -34,7 +34,7 @@ mcmc_warmup_steps: 10 mcmc_num_chains: 1 num_samples_max: 1000 in_channels_hid: 64 -inverse_model_type: InitialConditionInterp +inverse_model_type: InitialConditionInterp #Inverse grad inverse_epochs: 100 inverse_learning_rate: 0.2 @@ -47,4 +47,4 @@ x_max: 1 y_min: -1 y_max: 1 t_min: 0 -t_max: 5 \ No newline at end of file +t_max: 5 diff --git a/pdebench/models/config/args/config_diff-sorp.yaml b/pdebench/models/config/args/config_diff-sorp.yaml index c352fc5..7ed4371 100644 --- a/pdebench/models/config/args/config_diff-sorp.yaml +++ b/pdebench/models/config/args/config_diff-sorp.yaml @@ -1,4 +1,4 @@ -model_name: 'FNO' +model_name: "FNO" if_training: False continue_training: False num_workers: 2 @@ -6,7 +6,7 @@ batch_size: 50 initial_step: 10 t_train: 101 model_update: 10 -filename: '1D_diff-sorp_NA_NA' +filename: "1D_diff-sorp_NA_NA" single_file: False reduced_resolution: 1 reduced_resolution_t: 1 @@ -34,7 +34,7 @@ mcmc_warmup_steps: 10 mcmc_num_chains: 1 num_samples_max: 1000 in_channels_hid: 64 -inverse_model_type: InitialConditionInterp +inverse_model_type: InitialConditionInterp #Inverse grad inverse_epochs: 100 inverse_learning_rate: 0.2 @@ -47,4 +47,4 @@ x_max: 1 y_min: 0 y_max: 1 t_min: 0 -t_max: 500 \ No newline at end of file +t_max: 500 diff --git a/pdebench/models/config/args/config_pinn_CFD1d.yaml b/pdebench/models/config/args/config_pinn_CFD1d.yaml index 4efa594..3b688a5 100644 --- a/pdebench/models/config/args/config_pinn_CFD1d.yaml +++ b/pdebench/models/config/args/config_pinn_CFD1d.yaml @@ -1,15 +1,15 @@ -model_name: 'PINN' -scenario: 'pde1D' +model_name: "PINN" +scenario: "pde1D" model_update: 500 -filename: '1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5' +filename: "1D_CFD_Rand_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5" epochs: 15000 input_ch: 2 output_ch: 3 learning_rate: 1.e-3 -root_path: '../data' +root_path: "../data" val_num: 10 if_periodic_bc: True period: 5000 val_time: 1.0 val_batch_idx: 10 -aux_params: [1.6666666667] \ No newline at end of file +aux_params: [1.6666666667] diff --git a/pdebench/models/config/args/config_pinn_diff-react.yaml b/pdebench/models/config/args/config_pinn_diff-react.yaml index 60f71cd..f251aac 100644 --- a/pdebench/models/config/args/config_pinn_diff-react.yaml +++ b/pdebench/models/config/args/config_pinn_diff-react.yaml @@ -1,15 +1,15 @@ -model_name: 'PINN' +model_name: "PINN" -scenario: 
'diff-react' +scenario: "diff-react" model_update: 500 -filename: '2D_diff-react_NA_NA/2D_diff-react_NA_NA.h5' +filename: "2D_diff-react_NA_NA/2D_diff-react_NA_NA.h5" epochs: 100 learning_rate: 1.e-3 -seed: '0000' +seed: "0000" # unused arguments input_ch: 0 output_ch: 1 -root_path: '.' +root_path: "." val_num: 1 if_periodic_bc: False -aux_params: 0 \ No newline at end of file +aux_params: 0 diff --git a/pdebench/models/config/args/config_pinn_diff-sorp.yaml b/pdebench/models/config/args/config_pinn_diff-sorp.yaml index 8bc0701..d3e6512 100644 --- a/pdebench/models/config/args/config_pinn_diff-sorp.yaml +++ b/pdebench/models/config/args/config_pinn_diff-sorp.yaml @@ -1,15 +1,15 @@ -model_name: 'PINN' +model_name: "PINN" -scenario: 'diff-sorp' +scenario: "diff-sorp" model_update: 500 -filename: '1D_diff-sorp_NA_NA/1D_diff-sorp_NA_NA.h5' +filename: "1D_diff-sorp_NA_NA/1D_diff-sorp_NA_NA.h5" epochs: 15000 learning_rate: 1.e-3 -seed: '0000' +seed: "0000" # unused arguments input_ch: 0 output_ch: 1 -root_path: '.' +root_path: "." val_num: 1 if_periodic_bc: False -aux_params: 0 \ No newline at end of file +aux_params: 0 diff --git a/pdebench/models/config/args/config_pinn_pde1d.yaml b/pdebench/models/config/args/config_pinn_pde1d.yaml index b0013ef..b76d3cc 100644 --- a/pdebench/models/config/args/config_pinn_pde1d.yaml +++ b/pdebench/models/config/args/config_pinn_pde1d.yaml @@ -1,15 +1,15 @@ -model_name: 'PINN' -scenario: 'pde1D' +model_name: "PINN" +scenario: "pde1D" model_update: 500 -filename: '1D_Advection_Sols_beta0.1.hdf5' +filename: "1D_Advection_Sols_beta0.1.hdf5" epochs: 15000 input_ch: 2 output_ch: 1 learning_rate: 1.e-3 -root_path: '../data' +root_path: "../data" val_num: 10 if_periodic_bc: True period: 5000 val_time: 2.0 val_batch_idx: 10 -aux_params: [0.1] \ No newline at end of file +aux_params: [0.1] diff --git a/pdebench/models/config/args/config_pinn_swe2d.yaml b/pdebench/models/config/args/config_pinn_swe2d.yaml index b4fe9b0..bff678a 100644 --- a/pdebench/models/config/args/config_pinn_swe2d.yaml +++ b/pdebench/models/config/args/config_pinn_swe2d.yaml @@ -1,15 +1,15 @@ -model_name: 'PINN' +model_name: "PINN" -scenario: 'swe2d' +scenario: "swe2d" model_update: 500 -filename: '2D_rdb_NA_NA/2D_rdb_NA_NA.h5' +filename: "2D_rdb_NA_NA/2D_rdb_NA_NA.h5" epochs: 15000 learning_rate: 1.e-3 -seed: '0000' +seed: "0000" # unused arguments input_ch: 0 output_ch: 1 -root_path: '.' +root_path: "." 
val_num: 1 if_periodic_bc: False aux_params: 0 diff --git a/pdebench/models/config/args/config_rdb.yaml b/pdebench/models/config/args/config_rdb.yaml index d58fd4d..1690d9d 100644 --- a/pdebench/models/config/args/config_rdb.yaml +++ b/pdebench/models/config/args/config_rdb.yaml @@ -1,4 +1,4 @@ -model_name: 'FNO' +model_name: "FNO" if_training: False continue_training: False num_workers: 2 @@ -6,7 +6,7 @@ batch_size: 5 initial_step: 10 t_train: 101 model_update: 10 -filename: '2D_rdb_NA_NA' +filename: "2D_rdb_NA_NA" single_file: False reduced_resolution: 1 reduced_resolution_t: 1 @@ -34,7 +34,7 @@ mcmc_warmup_steps: 10 mcmc_num_chains: 1 num_samples_max: 1000 in_channels_hid: 64 -inverse_model_type: InitialConditionInterp +inverse_model_type: InitialConditionInterp #Inverse grad inverse_epochs: 100 inverse_learning_rate: 0.2 @@ -47,4 +47,4 @@ x_max: 2.5 y_min: -2.5 y_max: 2.5 t_min: 0 -t_max: 1 \ No newline at end of file +t_max: 1 diff --git a/pdebench/models/config/config.yaml b/pdebench/models/config/config.yaml index 198ad5c..63ca75a 100644 --- a/pdebench/models/config/config.yaml +++ b/pdebench/models/config/config.yaml @@ -7,55 +7,55 @@ hydra: output_subdir: null run: dir: . - + args: - model_name: 'FNO' - if_training: False - continue_training: False - num_workers: 2 - batch_size: 5 - initial_step: 10 - t_train: 101 - model_update: 10 - filename: '2D_diff-react_NA_NA' - single_file: False - reduced_resolution: 1 - reduced_resolution_t: 1 - reduced_batch: 1 - epochs: 500 - learning_rate: 1.e-3 - scheduler_step: 100 - scheduler_gamma: 0.5 - #Unet - in_channels: 2 - out_channels: 2 - ar_mode: True - pushforward: True - unroll_step: 20 - #FNO - num_channels: 2 - modes: 12 - width: 20 - #Inverse - base_path: ../data/ - training_type: autoregressive - #Inverse MCMC - mcmc_num_samples: 20 - mcmc_warmup_steps: 10 - mcmc_num_chains: 1 - num_samples_max: 1000 - in_channels_hid: 64 - inverse_model_type: InitialConditionInterp - #Inverse grad - inverse_epochs: 100 - inverse_learning_rate: 0.2 - inverse_verbose_flag: False - #Plotting - plot: False - channel_plot: 0 # Which channel/variable to be plotted - x_min: -1 - x_max: 1 - y_min: -1 - y_max: 1 - t_min: 0 - t_max: 5 \ No newline at end of file + model_name: "FNO" + if_training: False + continue_training: False + num_workers: 2 + batch_size: 5 + initial_step: 10 + t_train: 101 + model_update: 10 + filename: "2D_diff-react_NA_NA" + single_file: False + reduced_resolution: 1 + reduced_resolution_t: 1 + reduced_batch: 1 + epochs: 500 + learning_rate: 1.e-3 + scheduler_step: 100 + scheduler_gamma: 0.5 + #Unet + in_channels: 2 + out_channels: 2 + ar_mode: True + pushforward: True + unroll_step: 20 + #FNO + num_channels: 2 + modes: 12 + width: 20 + #Inverse + base_path: ../data/ + training_type: autoregressive + #Inverse MCMC + mcmc_num_samples: 20 + mcmc_warmup_steps: 10 + mcmc_num_chains: 1 + num_samples_max: 1000 + in_channels_hid: 64 + inverse_model_type: InitialConditionInterp + #Inverse grad + inverse_epochs: 100 + inverse_learning_rate: 0.2 + inverse_verbose_flag: False + #Plotting + plot: False + channel_plot: 0 # Which channel/variable to be plotted + x_min: -1 + x_max: 1 + y_min: -1 + y_max: 1 + t_min: 0 + t_max: 5 diff --git a/pdebench/models/config/config_darcy.yaml b/pdebench/models/config/config_darcy.yaml index 02bc9dc..4272742 100644 --- a/pdebench/models/config/config_darcy.yaml +++ b/pdebench/models/config/config_darcy.yaml @@ -9,15 +9,15 @@ hydra: dir: . 
args: - model_name: 'FNO' + model_name: "FNO" if_training: True continue_training: False num_workers: 2 batch_size: 50 initial_step: 1 - t_train: 1 # steady-state + t_train: 1 # steady-state model_update: 2 - filename: '2D_DarcyFlow_beta0.01_Train.hdf5' + filename: "2D_DarcyFlow_beta0.01_Train.hdf5" single_file: True reduced_resolution: 1 reduced_resolution_t: 1 @@ -37,15 +37,15 @@ args: modes: 12 width: 20 #Inverse - data_path: '../data/2D/DarcyFlow/Train/' - training_type: 'single' #autoregressive + data_path: "../data/2D/DarcyFlow/Train/" + training_type: "single" #autoregressive #Inverse MCMC mcmc_num_samples: 20 mcmc_warmup_steps: 10 mcmc_num_chains: 1 num_samples_max: 1000 in_channels_hid: 64 - inverse_model_type: InitialConditionInterp + inverse_model_type: InitialConditionInterp #Inverse grad inverse_epochs: 100 inverse_learning_rate: 0.2 @@ -54,8 +54,8 @@ args: plot: False channel_plot: 0 # Which channel/variable to be plotted x_min: -1 - x_max: 1 # spatial dimension x: [-1, 1] + x_max: 1 # spatial dimension x: [-1, 1] y_min: -1 - y_max: 1 # spatial dimension y: [-1, 1] + y_max: 1 # spatial dimension y: [-1, 1] t_min: 0 - t_max: 5 # time dimension t: [0, 5] + t_max: 5 # time dimension t: [0, 5] diff --git a/pdebench/models/config/config_rdb.yaml b/pdebench/models/config/config_rdb.yaml index ffa9ea0..e28a163 100644 --- a/pdebench/models/config/config_rdb.yaml +++ b/pdebench/models/config/config_rdb.yaml @@ -9,53 +9,53 @@ hydra: dir: . args: - model_name: 'FNO' - if_training: False - continue_training: False - num_workers: 2 - batch_size: 5 - initial_step: 10 - t_train: 101 - model_update: 2 - filename: '2D_rdb_NA_NA.h5' - single_file: True - data_path: '/path/to/swe2d/h5' - reduced_resolution: 1 - reduced_resolution_t: 1 - reduced_batch: 1 - epochs: 500 - learning_rate: 1.e-3 - scheduler_step: 100 - scheduler_gamma: 0.5 - #Unet - in_channels: 1 - out_channels: 1 - ar_mode: True - pushforward: True - unroll_step: 20 - #FNO - num_channels: 1 - modes: 12 - width: 20 - #Inverse - training_type: 'autoregressive' - #Inverse MCMC - mcmc_num_samples: 20 - mcmc_warmup_steps: 10 - mcmc_num_chains: 1 - num_samples_max: 1000 - in_channels_hid: 64 - inverse_model_type: InitialConditionInterp - #Inverse grad - inverse_epochs: 100 - inverse_learning_rate: 0.2 - inverse_verbose_flag: False - #Plotting - plot: False - channel_plot: 0 # Which channel/variable to be plotted - x_min: -2.5 - x_max: 2.5 - y_min: -2.5 - y_max: 2.5 - t_min: 0 - t_max: 1 + model_name: "FNO" + if_training: False + continue_training: False + num_workers: 2 + batch_size: 5 + initial_step: 10 + t_train: 101 + model_update: 2 + filename: "2D_rdb_NA_NA.h5" + single_file: True + data_path: "/path/to/swe2d/h5" + reduced_resolution: 1 + reduced_resolution_t: 1 + reduced_batch: 1 + epochs: 500 + learning_rate: 1.e-3 + scheduler_step: 100 + scheduler_gamma: 0.5 + #Unet + in_channels: 1 + out_channels: 1 + ar_mode: True + pushforward: True + unroll_step: 20 + #FNO + num_channels: 1 + modes: 12 + width: 20 + #Inverse + training_type: "autoregressive" + #Inverse MCMC + mcmc_num_samples: 20 + mcmc_warmup_steps: 10 + mcmc_num_chains: 1 + num_samples_max: 1000 + in_channels_hid: 64 + inverse_model_type: InitialConditionInterp + #Inverse grad + inverse_epochs: 100 + inverse_learning_rate: 0.2 + inverse_verbose_flag: False + #Plotting + plot: False + channel_plot: 0 # Which channel/variable to be plotted + x_min: -2.5 + x_max: 2.5 + y_min: -2.5 + y_max: 2.5 + t_min: 0 + t_max: 1 diff --git a/pdebench/models/config/results.yaml 
b/pdebench/models/config/results.yaml index 12e1728..67195b9 100644 --- a/pdebench/models/config/results.yaml +++ b/pdebench/models/config/results.yaml @@ -7,13 +7,18 @@ hydra: output_subdir: null run: dir: . - -args: - model_names: [FNO, Unet] - base_path : /home/alesiani/python/pde_benchmark/pdebench/data/ - inverse_model_type : InitialConditionInterp - filenames : [/1D/Advection/Train/1D_Advection_Sols_beta4.0.hdf5,/1D/Burgers/Train/1D_Burgers_Sols_Nu1.0.hdf5, /1D/ReactionDiffusion/Train/ReacDiff_Nu1.0_Rho2.0.hdf5, /1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5] - shortfilenames : [Advection, Burgers, ReacDiff, CFD] - results_values : [ mseloss_pred_u0 ] - result_filename : csv/results_inverse.csv +args: + model_names: [FNO, Unet] + base_path: /home/alesiani/python/pde_benchmark/pdebench/data/ + inverse_model_type: InitialConditionInterp + filenames: + [ + /1D/Advection/Train/1D_Advection_Sols_beta4.0.hdf5, + /1D/Burgers/Train/1D_Burgers_Sols_Nu1.0.hdf5, + /1D/ReactionDiffusion/Train/ReacDiff_Nu1.0_Rho2.0.hdf5, + /1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5, + ] + shortfilenames: [Advection, Burgers, ReacDiff, CFD] + results_values: [mseloss_pred_u0] + result_filename: csv/results_inverse.csv diff --git a/pdebench/models/fno/fno.py b/pdebench/models/fno/fno.py index 6371228..806d194 100644 --- a/pdebench/models/fno/fno.py +++ b/pdebench/models/fno/fno.py @@ -26,11 +26,11 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +from __future__ import annotations import torch -import torch.nn as nn -import numpy as np import torch.nn.functional as F +from torch import nn class SpectralConv1d(nn.Module): @@ -38,15 +38,21 @@ def __init__(self, in_channels, out_channels, modes1): super(SpectralConv1d, self).__init__() """ - 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. + 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. 
""" self.in_channels = in_channels self.out_channels = out_channels - self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1 + self.modes1 = ( + # Number of Fourier modes to multiply, at most floor(N/2) + 1 + modes1 + ) - self.scale = (1 / (in_channels*out_channels)) - self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat)) + self.scale = 1 / (in_channels * out_channels) + self.weights1 = nn.Parameter( + self.scale + * torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat) + ) # Complex multiplication def compl_mul1d(self, input, weights): @@ -55,17 +61,26 @@ def compl_mul1d(self, input, weights): def forward(self, x): batchsize = x.shape[0] - #Compute Fourier coeffcients up to factor of e^(- something constant) + # Compute Fourier coefficients up to factor of e^(- something constant) x_ft = torch.fft.rfft(x) - # Multiply relevant Fourier modes - out_ft = torch.zeros(batchsize, self.out_channels, x.size(-1)//2 + 1, device=x.device, dtype=torch.cfloat) - out_ft[:, :, :self.modes1] = self.compl_mul1d(x_ft[:, :, :self.modes1], self.weights1) - - #Return to physical space + # Multiply relevant Fourier modes + out_ft = torch.zeros( + batchsize, + self.out_channels, + x.size(-1) // 2 + 1, + device=x.device, + dtype=torch.cfloat, + ) + out_ft[:, :, : self.modes1] = self.compl_mul1d( + x_ft[:, :, : self.modes1], self.weights1 + ) + + # Return to physical space x = torch.fft.irfft(out_ft, n=x.size(-1)) return x + class FNO1d(nn.Module): def __init__(self, num_channels, modes=16, width=64, initial_step=10): super(FNO1d, self).__init__() @@ -76,7 +91,7 @@ def __init__(self, num_channels, modes=16, width=64, initial_step=10): 2. 4 layers of the integral operators u' = (W + K)(u). W defined by self.w; K defined by self.conv . 3. Project from the channel space to the output space by self.fc1 and self.fc2 . - + input: the solution of the initial condition and location (a(x), x) input shape: (batchsize, x=s, c=2) output: the solution of a later timestep @@ -85,8 +100,10 @@ def __init__(self, num_channels, modes=16, width=64, initial_step=10): self.modes1 = modes self.width = width - self.padding = 2 # pad the domain if input is non-periodic - self.fc0 = nn.Linear(initial_step*num_channels+1, self.width) # input channel is 2: (a(x), x) + self.padding = 2 # pad the domain if input is non-periodic + self.fc0 = nn.Linear( + initial_step * num_channels + 1, self.width + ) # input channel is 2: (a(x), x) self.conv0 = SpectralConv1d(self.width, self.width, self.modes1) self.conv1 = SpectralConv1d(self.width, self.width, self.modes1) @@ -105,8 +122,9 @@ def forward(self, x, grid): x = torch.cat((x, grid), dim=-1) x = self.fc0(x) x = x.permute(0, 2, 1) - - x = F.pad(x, [0, self.padding]) # pad the domain if input is non-periodic + + # pad the domain if input is non-periodic + x = F.pad(x, [0, self.padding]) x1 = self.conv0(x) x2 = self.w0(x) @@ -127,7 +145,7 @@ def forward(self, x, grid): x2 = self.w3(x) x = x1 + x2 - x = x[..., :-self.padding] + x = x[..., : -self.padding] x = x.permute(0, 2, 1) x = self.fc1(x) x = F.gelu(x) @@ -140,17 +158,30 @@ def __init__(self, in_channels, out_channels, modes1, modes2): super(SpectralConv2d_fast, self).__init__() """ - 2D Fourier layer. It does FFT, linear transform, and Inverse FFT. + 2D Fourier layer. It does FFT, linear transform, and Inverse FFT. 
""" self.in_channels = in_channels self.out_channels = out_channels - self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1 + self.modes1 = ( + # Number of Fourier modes to multiply, at most floor(N/2) + 1 + modes1 + ) self.modes2 = modes2 - self.scale = (1 / (in_channels * out_channels)) - self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat)) - self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat)) + self.scale = 1 / (in_channels * out_channels) + self.weights1 = nn.Parameter( + self.scale + * torch.rand( + in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat + ) + ) + self.weights2 = nn.Parameter( + self.scale + * torch.rand( + in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat + ) + ) # Complex multiplication def compl_mul2d(self, input, weights): @@ -159,20 +190,30 @@ def compl_mul2d(self, input, weights): def forward(self, x): batchsize = x.shape[0] - #Compute Fourier coeffcients up to factor of e^(- something constant) + # Compute Fourier coefficients up to factor of e^(- something constant) x_ft = torch.fft.rfft2(x) # Multiply relevant Fourier modes - out_ft = torch.zeros(batchsize, self.out_channels, x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device) - out_ft[:, :, :self.modes1, :self.modes2] = \ - self.compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1) - out_ft[:, :, -self.modes1:, :self.modes2] = \ - self.compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2) - - #Return to physical space + out_ft = torch.zeros( + batchsize, + self.out_channels, + x.size(-2), + x.size(-1) // 2 + 1, + dtype=torch.cfloat, + device=x.device, + ) + out_ft[:, :, : self.modes1, : self.modes2] = self.compl_mul2d( + x_ft[:, :, : self.modes1, : self.modes2], self.weights1 + ) + out_ft[:, :, -self.modes1 :, : self.modes2] = self.compl_mul2d( + x_ft[:, :, -self.modes1 :, : self.modes2], self.weights2 + ) + + # Return to physical space x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1))) return x + class FNO2d(nn.Module): def __init__(self, num_channels, modes1=12, modes2=12, width=20, initial_step=10): super(FNO2d, self).__init__() @@ -183,7 +224,7 @@ def __init__(self, num_channels, modes1=12, modes2=12, width=20, initial_step=10 2. 4 layers of the integral operators u' = (W + K)(u). W defined by self.w; K defined by self.conv . 3. Project from the channel space to the output space by self.fc1 and self.fc2 . 
- + input: the solution of the previous 10 timesteps + 2 locations (u(t-10, x, y), ..., u(t-1, x, y), x, y) input shape: (batchsize, x, y, c) output: the solution of the next timestep @@ -193,14 +234,22 @@ def __init__(self, num_channels, modes1=12, modes2=12, width=20, initial_step=10 self.modes1 = modes1 self.modes2 = modes2 self.width = width - self.padding = 2 # pad the domain if input is non-periodic - self.fc0 = nn.Linear(initial_step*num_channels+2, self.width) + self.padding = 2 # pad the domain if input is non-periodic + self.fc0 = nn.Linear(initial_step * num_channels + 2, self.width) # input channel is 12: the solution of the previous 10 timesteps + 2 locations (u(t-10, x, y), ..., u(t-1, x, y), x, y) - self.conv0 = SpectralConv2d_fast(self.width, self.width, self.modes1, self.modes2) - self.conv1 = SpectralConv2d_fast(self.width, self.width, self.modes1, self.modes2) - self.conv2 = SpectralConv2d_fast(self.width, self.width, self.modes1, self.modes2) - self.conv3 = SpectralConv2d_fast(self.width, self.width, self.modes1, self.modes2) + self.conv0 = SpectralConv2d_fast( + self.width, self.width, self.modes1, self.modes2 + ) + self.conv1 = SpectralConv2d_fast( + self.width, self.width, self.modes1, self.modes2 + ) + self.conv2 = SpectralConv2d_fast( + self.width, self.width, self.modes1, self.modes2 + ) + self.conv3 = SpectralConv2d_fast( + self.width, self.width, self.modes1, self.modes2 + ) self.w0 = nn.Conv2d(self.width, self.width, 1) self.w1 = nn.Conv2d(self.width, self.width, 1) self.w2 = nn.Conv2d(self.width, self.width, 1) @@ -214,7 +263,7 @@ def forward(self, x, grid): x = torch.cat((x, grid), dim=-1) x = self.fc0(x) x = x.permute(0, 3, 1, 2) - + # Pad tensor with boundary condition x = F.pad(x, [0, self.padding, 0, self.padding]) @@ -237,34 +286,77 @@ def forward(self, x, grid): x2 = self.w3(x) x = x1 + x2 - x = x[..., :-self.padding, :-self.padding] # Unpad the tensor + x = x[..., : -self.padding, : -self.padding] # Unpad the tensor x = x.permute(0, 2, 3, 1) x = self.fc1(x) x = F.gelu(x) x = self.fc2(x) - + return x.unsqueeze(-2) - + class SpectralConv3d(nn.Module): def __init__(self, in_channels, out_channels, modes1, modes2, modes3): super(SpectralConv3d, self).__init__() """ - 3D Fourier layer. It does FFT, linear transform, and Inverse FFT. + 3D Fourier layer. It does FFT, linear transform, and Inverse FFT. 
""" self.in_channels = in_channels self.out_channels = out_channels - self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1 + self.modes1 = ( + # Number of Fourier modes to multiply, at most floor(N/2) + 1 + modes1 + ) self.modes2 = modes2 self.modes3 = modes3 - self.scale = (1 / (in_channels * out_channels)) - self.weights1 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat)) - self.weights2 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat)) - self.weights3 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat)) - self.weights4 = nn.Parameter(self.scale * torch.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3, dtype=torch.cfloat)) + self.scale = 1 / (in_channels * out_channels) + self.weights1 = nn.Parameter( + self.scale + * torch.rand( + in_channels, + out_channels, + self.modes1, + self.modes2, + self.modes3, + dtype=torch.cfloat, + ) + ) + self.weights2 = nn.Parameter( + self.scale + * torch.rand( + in_channels, + out_channels, + self.modes1, + self.modes2, + self.modes3, + dtype=torch.cfloat, + ) + ) + self.weights3 = nn.Parameter( + self.scale + * torch.rand( + in_channels, + out_channels, + self.modes1, + self.modes2, + self.modes3, + dtype=torch.cfloat, + ) + ) + self.weights4 = nn.Parameter( + self.scale + * torch.rand( + in_channels, + out_channels, + self.modes1, + self.modes2, + self.modes3, + dtype=torch.cfloat, + ) + ) # Complex multiplication def compl_mul3d(self, input, weights): @@ -273,26 +365,41 @@ def compl_mul3d(self, input, weights): def forward(self, x): batchsize = x.shape[0] - #Compute Fourier coeffcients up to factor of e^(- something constant) - x_ft = torch.fft.rfftn(x, dim=[-3,-2,-1]) + # Compute Fourier coefficients up to factor of e^(- something constant) + x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1]) # Multiply relevant Fourier modes - out_ft = torch.zeros(batchsize, self.out_channels, x.size(-3), x.size(-2), x.size(-1)//2 + 1, dtype=torch.cfloat, device=x.device) - out_ft[:, :, :self.modes1, :self.modes2, :self.modes3] = \ - self.compl_mul3d(x_ft[:, :, :self.modes1, :self.modes2, :self.modes3], self.weights1) - out_ft[:, :, -self.modes1:, :self.modes2, :self.modes3] = \ - self.compl_mul3d(x_ft[:, :, -self.modes1:, :self.modes2, :self.modes3], self.weights2) - out_ft[:, :, :self.modes1, -self.modes2:, :self.modes3] = \ - self.compl_mul3d(x_ft[:, :, :self.modes1, -self.modes2:, :self.modes3], self.weights3) - out_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3] = \ - self.compl_mul3d(x_ft[:, :, -self.modes1:, -self.modes2:, :self.modes3], self.weights4) - - #Return to physical space + out_ft = torch.zeros( + batchsize, + self.out_channels, + x.size(-3), + x.size(-2), + x.size(-1) // 2 + 1, + dtype=torch.cfloat, + device=x.device, + ) + out_ft[:, :, : self.modes1, : self.modes2, : self.modes3] = self.compl_mul3d( + x_ft[:, :, : self.modes1, : self.modes2, : self.modes3], self.weights1 + ) + out_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3] = self.compl_mul3d( + x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3], self.weights2 + ) + out_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3] = self.compl_mul3d( + x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3], self.weights3 + ) + out_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3] = self.compl_mul3d( + x_ft[:, :, 
-self.modes1 :, -self.modes2 :, : self.modes3], self.weights4 + ) + + # Return to physical space x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1))) return x + class FNO3d(nn.Module): - def __init__(self, num_channels, modes1=8, modes2=8, modes3=8, width=20, initial_step=10): + def __init__( + self, num_channels, modes1=8, modes2=8, modes3=8, width=20, initial_step=10 + ): super(FNO3d, self).__init__() """ @@ -301,7 +408,7 @@ def __init__(self, num_channels, modes1=8, modes2=8, modes3=8, width=20, initial 2. 4 layers of the integral operators u' = (W + K)(u). W defined by self.w; K defined by self.conv . 3. Project from the channel space to the output space by self.fc1 and self.fc2 . - + input: the solution of the first 10 timesteps + 3 locations (u(1, x, y), ..., u(10, x, y), x, y, t). It's a constant function in time, except for the last index. input shape: (batchsize, x=64, y=64, t=40, c=13) output: the solution of the next 40 timesteps @@ -312,14 +419,22 @@ def __init__(self, num_channels, modes1=8, modes2=8, modes3=8, width=20, initial self.modes2 = modes2 self.modes3 = modes3 self.width = width - self.padding = 6 # pad the domain if input is non-periodic - self.fc0 = nn.Linear(initial_step*num_channels+3, self.width) + self.padding = 6 # pad the domain if input is non-periodic + self.fc0 = nn.Linear(initial_step * num_channels + 3, self.width) # input channel is 12: the solution of the first 10 timesteps + 3 locations (u(1, x, y), ..., u(10, x, y), x, y, t) - self.conv0 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3) - self.conv1 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3) - self.conv2 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3) - self.conv3 = SpectralConv3d(self.width, self.width, self.modes1, self.modes2, self.modes3) + self.conv0 = SpectralConv3d( + self.width, self.width, self.modes1, self.modes2, self.modes3 + ) + self.conv1 = SpectralConv3d( + self.width, self.width, self.modes1, self.modes2, self.modes3 + ) + self.conv2 = SpectralConv3d( + self.width, self.width, self.modes1, self.modes2, self.modes3 + ) + self.conv3 = SpectralConv3d( + self.width, self.width, self.modes1, self.modes2, self.modes3 + ) self.w0 = nn.Conv3d(self.width, self.width, 1) self.w1 = nn.Conv3d(self.width, self.width, 1) self.w2 = nn.Conv3d(self.width, self.width, 1) @@ -337,8 +452,9 @@ def forward(self, x, grid): x = torch.cat((x, grid), dim=-1) x = self.fc0(x) x = x.permute(0, 4, 1, 2, 3) - - x = F.pad(x, [0, self.padding]) # pad the domain if input is non-periodic + + # pad the domain if input is non-periodic + x = F.pad(x, [0, self.padding]) x1 = self.conv0(x) x2 = self.w0(x) @@ -359,9 +475,9 @@ def forward(self, x, grid): x2 = self.w3(x) x = x1 + x2 - x = x[..., :-self.padding] - x = x.permute(0, 2, 3, 4, 1) # pad the domain if input is non-periodic + x = x[..., : -self.padding] + x = x.permute(0, 2, 3, 4, 1) # pad the domain if input is non-periodic x = self.fc1(x) x = F.gelu(x) x = self.fc2(x) - return x.unsqueeze(-2) \ No newline at end of file + return x.unsqueeze(-2) diff --git a/pdebench/models/fno/train.py b/pdebench/models/fno/train.py index a3f06c1..25c40bf 100644 --- a/pdebench/models/fno/train.py +++ b/pdebench/models/fno/train.py @@ -1,182 +1,212 @@ -import sys -import torch -import numpy as np -import pickle -import torch.nn as nn -import torch.nn.functional as F - -import operator -from functools import reduce -from functools import partial +from __future__ import 
annotations +import pickle from timeit import default_timer +import numpy as np +import torch +from torch import nn + # torch.manual_seed(0) # np.random.seed(0) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") from pdebench.models.fno.fno import FNO1d, FNO2d, FNO3d -from pdebench.models.fno.utils import FNODatasetSingle, FNODatasetMult +from pdebench.models.fno.utils import FNODatasetMult, FNODatasetSingle from pdebench.models.metrics import metrics -def run_training(if_training, - continue_training, - num_workers, - modes, - width, - initial_step, - t_train, - num_channels, - batch_size, - epochs, - learning_rate, - scheduler_step, - scheduler_gamma, - model_update, - flnm, - single_file, - reduced_resolution, - reduced_resolution_t, - reduced_batch, - plot, - channel_plot, - x_min, - x_max, - y_min, - y_max, - t_min, - t_max, - base_path='../data/', - training_type='autoregressive' - ): - - print(f'Epochs = {epochs}, learning rate = {learning_rate}, scheduler step = {scheduler_step}, scheduler gamma = {scheduler_gamma}') - + +def run_training( + if_training, + continue_training, + num_workers, + modes, + width, + initial_step, + t_train, + num_channels, + batch_size, + epochs, + learning_rate, + scheduler_step, + scheduler_gamma, + model_update, + flnm, + single_file, + reduced_resolution, + reduced_resolution_t, + reduced_batch, + plot, + channel_plot, + x_min, + x_max, + y_min, + y_max, + t_min, + t_max, + base_path="../data/", + training_type="autoregressive", +): + print( + f"Epochs = {epochs}, learning rate = {learning_rate}, scheduler step = {scheduler_step}, scheduler gamma = {scheduler_gamma}" + ) + ################################################################ # load data ################################################################ - + if single_file: # filename - model_name = flnm[:-5] + '_FNO' + model_name = flnm[:-5] + "_FNO" print("FNODatasetSingle") # Initialize the dataset and dataloader - train_data = FNODatasetSingle(flnm, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - initial_step=initial_step, - saved_folder = base_path - ) - val_data = FNODatasetSingle(flnm, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - initial_step=initial_step, - if_test=True, - saved_folder = base_path - ) - + train_data = FNODatasetSingle( + flnm, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + initial_step=initial_step, + saved_folder=base_path, + ) + val_data = FNODatasetSingle( + flnm, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + initial_step=initial_step, + if_test=True, + saved_folder=base_path, + ) + else: # filename - model_name = flnm + '_FNO' - + model_name = flnm + "_FNO" + print("FNODatasetMult") - train_data = FNODatasetMult(flnm, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - saved_folder = base_path - ) - val_data = FNODatasetMult(flnm, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - if_test=True, - saved_folder = base_path) - - train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, - num_workers=num_workers, shuffle=True) - val_loader = 
torch.utils.data.DataLoader(val_data, batch_size=batch_size, - num_workers=num_workers, shuffle=False) - + train_data = FNODatasetMult( + flnm, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + saved_folder=base_path, + ) + val_data = FNODatasetMult( + flnm, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + if_test=True, + saved_folder=base_path, + ) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True + ) + val_loader = torch.utils.data.DataLoader( + val_data, batch_size=batch_size, num_workers=num_workers, shuffle=False + ) + ################################################################ # training and evaluation ################################################################ - + _, _data, _ = next(iter(val_loader)) dimensions = len(_data.shape) - print('Spatial Dimension', dimensions - 3) + print("Spatial Dimension", dimensions - 3) if dimensions == 4: - model = FNO1d(num_channels=num_channels, - width=width, - modes=modes, - initial_step=initial_step).to(device) + model = FNO1d( + num_channels=num_channels, + width=width, + modes=modes, + initial_step=initial_step, + ).to(device) elif dimensions == 5: - model = FNO2d(num_channels=num_channels, - width=width, - modes1=modes, - modes2=modes, - initial_step=initial_step).to(device) + model = FNO2d( + num_channels=num_channels, + width=width, + modes1=modes, + modes2=modes, + initial_step=initial_step, + ).to(device) elif dimensions == 6: - model = FNO3d(num_channels=num_channels, - width=width, - modes1=modes, - modes2=modes, - modes3=modes, - initial_step=initial_step).to(device) - + model = FNO3d( + num_channels=num_channels, + width=width, + modes1=modes, + modes2=modes, + modes3=modes, + initial_step=initial_step, + ).to(device) + # Set maximum time step of the data to train if t_train > _data.shape[-2]: t_train = _data.shape[-2] model_path = model_name + ".pt" - + total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - print(f'Total parameters = {total_params}') - - optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4) - scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma) - + print(f"Total parameters = {total_params}") + + optimizer = torch.optim.Adam( + model.parameters(), lr=learning_rate, weight_decay=1e-4 + ) + scheduler = torch.optim.lr_scheduler.StepLR( + optimizer, step_size=scheduler_step, gamma=scheduler_gamma + ) + loss_fn = nn.MSELoss(reduction="mean") loss_val_min = np.infty - + start_epoch = 0 if not if_training: checkpoint = torch.load(model_path, map_location=device) - model.load_state_dict(checkpoint['model_state_dict']) + model.load_state_dict(checkpoint["model_state_dict"]) model.to(device) model.eval() - Lx, Ly, Lz = 1., 1., 1. 
- errs = metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, - model_name, x_min, x_max, y_min, y_max, - t_min, t_max, initial_step=initial_step) - pickle.dump(errs, open(model_name+'.pickle', "wb")) - + Lx, Ly, Lz = 1.0, 1.0, 1.0 + errs = metrics( + val_loader, + model, + Lx, + Ly, + Lz, + plot, + channel_plot, + model_name, + x_min, + x_max, + y_min, + y_max, + t_min, + t_max, + initial_step=initial_step, + ) + pickle.dump(errs, open(model_name + ".pickle", "wb")) + return # If desired, restore the network by loading the weights saved in the .pt # file if continue_training: - print('Restoring model (that is the network\'s weights) from file...') + print("Restoring model (that is the network's weights) from file...") checkpoint = torch.load(model_path, map_location=device) - model.load_state_dict(checkpoint['model_state_dict']) + model.load_state_dict(checkpoint["model_state_dict"]) model.to(device) model.train() - + # Load optimizer state dict - optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) for state in optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.to(device) - - start_epoch = checkpoint['epoch'] - loss_val_min = checkpoint['loss'] - + + start_epoch = checkpoint["epoch"] + loss_val_min = checkpoint["loss"] + for ep in range(start_epoch, epochs): model.train() t1 = default_timer() @@ -184,7 +214,7 @@ def run_training(if_training, train_l2_full = 0 for xx, yy, grid in train_loader: loss = 0 - + # xx: input tensor (first few time steps) [b, x1, ..., xd, t_init, v] # yy: target tensor [b, x1, ..., xd, t, v] # grid: meshgrid [b, x1, ..., xd, dims] @@ -199,16 +229,15 @@ def run_training(if_training, inp_shape = list(xx.shape) inp_shape = inp_shape[:-2] inp_shape.append(-1) - - if training_type in ['autoregressive']: + + if training_type in ["autoregressive"]: # Autoregressive loop for t in range(initial_step, t_train): - # Reshape input tensor into [b, x1, ..., xd, t_init*v] inp = xx.reshape(inp_shape) - + # Extract target at current time step - y = yy[..., t:t+1, :] + y = yy[..., t : t + 1, :] # Model run im = model(inp, grid) @@ -216,11 +245,11 @@ def run_training(if_training, # Loss calculation _batch = im.size(0) loss += loss_fn(im.reshape(_batch, -1), y.reshape(_batch, -1)) - + # Concatenate the prediction at current time step into the # prediction tensor pred = torch.cat((pred, im), -2) - + # Concatenate the prediction at the current time step to be used # as input for the next time step xx = torch.cat((xx[..., 1:, :], im), dim=-2) @@ -230,21 +259,21 @@ def run_training(if_training, _yy = yy[..., :t_train, :] # if t_train is not -1 l2_full = loss_fn(pred.reshape(_batch, -1), _yy.reshape(_batch, -1)) train_l2_full += l2_full.item() - + optimizer.zero_grad() loss.backward() optimizer.step() - if training_type in ['single']: - x = xx[..., 0 , :] - y = yy[..., t_train-1:t_train, :] + if training_type in ["single"]: + x = xx[..., 0, :] + y = yy[..., t_train - 1 : t_train, :] pred = model(x, grid) _batch = yy.size(0) loss += loss_fn(pred.reshape(_batch, -1), y.reshape(_batch, -1)) - + train_l2_step += loss.item() train_l2_full += loss.item() - + optimizer.zero_grad() loss.backward() optimizer.step() @@ -258,57 +287,65 @@ def run_training(if_training, xx = xx.to(device) yy = yy.to(device) grid = grid.to(device) - - if training_type in ['autoregressive']: + + if training_type in ["autoregressive"]: pred = yy[..., :initial_step, :] inp_shape = list(xx.shape) 
inp_shape = inp_shape[:-2] inp_shape.append(-1) - + for t in range(initial_step, yy.shape[-2]): inp = xx.reshape(inp_shape) - y = yy[..., t:t+1, :] + y = yy[..., t : t + 1, :] im = model(inp, grid) _batch = im.size(0) - loss += loss_fn(im.reshape(_batch, -1), y.reshape(_batch, -1)) + loss += loss_fn( + im.reshape(_batch, -1), y.reshape(_batch, -1) + ) pred = torch.cat((pred, im), -2) - + xx = torch.cat((xx[..., 1:, :], im), dim=-2) - + val_l2_step += loss.item() _batch = yy.size(0) _pred = pred[..., initial_step:t_train, :] _yy = yy[..., initial_step:t_train, :] - val_l2_full += loss_fn(_pred.reshape(_batch, -1), _yy.reshape(_batch, -1)).item() + val_l2_full += loss_fn( + _pred.reshape(_batch, -1), _yy.reshape(_batch, -1) + ).item() - if training_type in ['single']: - x = xx[..., 0 , :] - y = yy[..., t_train-1:t_train, :] + if training_type in ["single"]: + x = xx[..., 0, :] + y = yy[..., t_train - 1 : t_train, :] pred = model(x, grid) _batch = yy.size(0) loss += loss_fn(pred.reshape(_batch, -1), y.reshape(_batch, -1)) - + val_l2_step += loss.item() val_l2_full += loss.item() - - if val_l2_full < loss_val_min: + + if val_l2_full < loss_val_min: loss_val_min = val_l2_full - torch.save({ - 'epoch': ep, - 'model_state_dict': model.state_dict(), - 'optimizer_state_dict': optimizer.state_dict(), - 'loss': loss_val_min - }, model_path) - - + torch.save( + { + "epoch": ep, + "model_state_dict": model.state_dict(), + "optimizer_state_dict": optimizer.state_dict(), + "loss": loss_val_min, + }, + model_path, + ) + t2 = default_timer() scheduler.step() - print('epoch: {0}, loss: {1:.5f}, t2-t1: {2:.5f}, trainL2: {3:.5f}, testL2: {4:.5f}'\ - .format(ep, loss.item(), t2 - t1, train_l2_full, val_l2_full)) - + print( + "epoch: {0}, loss: {1:.5f}, t2-t1: {2:.5f}, trainL2: {3:.5f}, testL2: {4:.5f}".format( + ep, loss.item(), t2 - t1, train_l2_full, val_l2_full + ) + ) + + if __name__ == "__main__": - run_training() print("Done.") - diff --git a/pdebench/models/fno/utils.py b/pdebench/models/fno/utils.py index 03f45eb..01b507e 100644 --- a/pdebench/models/fno/utils.py +++ b/pdebench/models/fno/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ @@ -147,29 +146,32 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
""" +from __future__ import annotations -import torch -from torch.utils.data import Dataset, IterableDataset -from torch.utils.data import DataLoader +import math as mt import os -import glob + import h5py import numpy as np -import math as mt +import torch +from torch.utils.data import Dataset + class FNODatasetSingle(Dataset): - def __init__(self, filename, - initial_step=10, - saved_folder='../data/', - reduced_resolution=1, - reduced_resolution_t=1, - reduced_batch=1, - if_test=False, - test_ratio=0.1, - num_samples_max = -1 - ): + def __init__( + self, + filename, + initial_step=10, + saved_folder="../data/", + reduced_resolution=1, + reduced_resolution_t=1, + reduced_batch=1, + if_test=False, + test_ratio=0.1, + num_samples_max=-1, + ): """ - + :param filename: filename that contains the dataset :type filename: STR :param filenum: array containing indices of filename included in the dataset @@ -178,120 +180,218 @@ def __init__(self, filename, :type initial_step: INT, optional """ - + # Define path to files root_path = os.path.join(os.path.abspath(saved_folder), filename) - if filename[-2:] != 'h5': - print(f".HDF5 file extension is assumed hereafter") - - with h5py.File(root_path, 'r') as f: + if filename[-2:] != "h5": + print(".HDF5 file extension is assumed hereafter") + + with h5py.File(root_path, "r") as f: keys = list(f.keys()) keys.sort() - if 'tensor' not in keys: - _data = np.array(f['density'], dtype=np.float32) # batch, time, x,... + if "tensor" not in keys: + _data = np.array( + f["density"], dtype=np.float32 + ) # batch, time, x,... idx_cfd = _data.shape - if len(idx_cfd)==3: # 1D - self.data = np.zeros([idx_cfd[0]//reduced_batch, - idx_cfd[2]//reduced_resolution, - mt.ceil(idx_cfd[1]/reduced_resolution_t), - 3], - dtype=np.float32) - #density - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + if len(idx_cfd) == 3: # 1D + self.data = np.zeros( + [ + idx_cfd[0] // reduced_batch, + idx_cfd[2] // reduced_resolution, + mt.ceil(idx_cfd[1] / reduced_resolution_t), + 3, + ], + dtype=np.float32, + ) + # density + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) - self.data[...,0] = _data # batch, x, t, ch + self.data[..., 0] = _data # batch, x, t, ch # pressure - _data = np.array(f['pressure'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + _data = np.array( + f["pressure"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) - self.data[...,1] = _data # batch, x, t, ch + self.data[..., 1] = _data # batch, x, t, ch # Vx - _data = np.array(f['Vx'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + _data = np.array( + f["Vx"], dtype=np.float32 + ) # batch, time, x,... 
+ _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) - self.data[...,2] = _data # batch, x, t, ch + self.data[..., 2] = _data # batch, x, t, ch self.grid = np.array(f["x-coordinate"], dtype=np.float32) - self.grid = torch.tensor(self.grid[::reduced_resolution], dtype=torch.float).unsqueeze(-1) + self.grid = torch.tensor( + self.grid[::reduced_resolution], dtype=torch.float + ).unsqueeze(-1) print(self.data.shape) - if len(idx_cfd)==4: # 2D - self.data = np.zeros([idx_cfd[0]//reduced_batch, - idx_cfd[2]//reduced_resolution, - idx_cfd[3]//reduced_resolution, - mt.ceil(idx_cfd[1]/reduced_resolution_t), - 4], - dtype=np.float32) + if len(idx_cfd) == 4: # 2D + self.data = np.zeros( + [ + idx_cfd[0] // reduced_batch, + idx_cfd[2] // reduced_resolution, + idx_cfd[3] // reduced_resolution, + mt.ceil(idx_cfd[1] / reduced_resolution_t), + 4, + ], + dtype=np.float32, + ) # density - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,0] = _data # batch, x, t, ch + self.data[..., 0] = _data # batch, x, t, ch # pressure - _data = np.array(f['pressure'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["pressure"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,1] = _data # batch, x, t, ch + self.data[..., 1] = _data # batch, x, t, ch # Vx - _data = np.array(f['Vx'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["Vx"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,2] = _data # batch, x, t, ch + self.data[..., 2] = _data # batch, x, t, ch # Vy - _data = np.array(f['Vy'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["Vy"], dtype=np.float32 + ) # batch, time, x,... 
+ _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,3] = _data # batch, x, t, ch + self.data[..., 3] = _data # batch, x, t, ch x = np.array(f["x-coordinate"], dtype=np.float32) y = np.array(f["y-coordinate"], dtype=np.float32) x = torch.tensor(x, dtype=torch.float) y = torch.tensor(y, dtype=torch.float) - X, Y = torch.meshgrid(x, y, indexing='ij') - self.grid = torch.stack((X, Y), axis=-1)[::reduced_resolution, ::reduced_resolution] - - if len(idx_cfd)==5: # 3D - self.data = np.zeros([idx_cfd[0]//reduced_batch, - idx_cfd[2]//reduced_resolution, - idx_cfd[3]//reduced_resolution, - idx_cfd[4]//reduced_resolution, - mt.ceil(idx_cfd[1]/reduced_resolution_t), - 5], - dtype=np.float32) + X, Y = torch.meshgrid(x, y, indexing="ij") + self.grid = torch.stack((X, Y), axis=-1)[ + ::reduced_resolution, ::reduced_resolution + ] + + if len(idx_cfd) == 5: # 3D + self.data = np.zeros( + [ + idx_cfd[0] // reduced_batch, + idx_cfd[2] // reduced_resolution, + idx_cfd[3] // reduced_resolution, + idx_cfd[4] // reduced_resolution, + mt.ceil(idx_cfd[1] / reduced_resolution_t), + 5, + ], + dtype=np.float32, + ) # density - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,0] = _data # batch, x, t, ch + self.data[..., 0] = _data # batch, x, t, ch # pressure - _data = np.array(f['pressure'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["pressure"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,1] = _data # batch, x, t, ch + self.data[..., 1] = _data # batch, x, t, ch # Vx - _data = np.array(f['Vx'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["Vx"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,2] = _data # batch, x, t, ch + self.data[..., 2] = _data # batch, x, t, ch # Vy - _data = np.array(f['Vy'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["Vy"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,3] = _data # batch, x, t, ch + self.data[..., 3] = _data # batch, x, t, ch # Vz - _data = np.array(f['Vz'], dtype=np.float32) # batch, time, x,... 
- _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["Vz"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,4] = _data # batch, x, t, ch + self.data[..., 4] = _data # batch, x, t, ch x = np.array(f["x-coordinate"], dtype=np.float32) y = np.array(f["y-coordinate"], dtype=np.float32) @@ -299,33 +399,55 @@ def __init__(self, filename, x = torch.tensor(x, dtype=torch.float) y = torch.tensor(y, dtype=torch.float) z = torch.tensor(z, dtype=torch.float) - X, Y, Z = torch.meshgrid(x, y, z, indexing='ij') - self.grid = torch.stack((X, Y, Z), axis=-1)[::reduced_resolution,\ - ::reduced_resolution,\ - ::reduced_resolution] - + X, Y, Z = torch.meshgrid(x, y, z, indexing="ij") + self.grid = torch.stack((X, Y, Z), axis=-1)[ + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] + else: # scalar equations ## data dim = [t, x1, ..., xd, v] - _data = np.array(f['tensor'], dtype=np.float32) # batch, time, x,... + _data = np.array( + f["tensor"], dtype=np.float32 + ) # batch, time, x,... if len(_data.shape) == 3: # 1D - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) self.data = _data[:, :, :, None] # batch, x, t, ch self.grid = np.array(f["x-coordinate"], dtype=np.float32) - self.grid = torch.tensor(self.grid[::reduced_resolution], dtype=torch.float).unsqueeze(-1) + self.grid = torch.tensor( + self.grid[::reduced_resolution], dtype=torch.float + ).unsqueeze(-1) if len(_data.shape) == 4: # 2D Darcy flow # u: label - _data = _data[::reduced_batch,:,::reduced_resolution,::reduced_resolution] + _data = _data[ + ::reduced_batch, + :, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :, :], (0, 2, 3, 1)) - #if _data.shape[-1]==1: # if nt==1 + # if _data.shape[-1]==1: # if nt==1 # _data = np.tile(_data, (1, 1, 1, 2)) self.data = _data # nu: input - _data = np.array(f['nu'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch, None,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["nu"], dtype=np.float32 + ) # batch, time, x,... 
+ _data = _data[ + ::reduced_batch, + None, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :, :], (0, 2, 3, 1)) self.data = np.concatenate([_data, self.data], axis=-1) @@ -335,31 +457,52 @@ def __init__(self, filename, y = np.array(f["y-coordinate"], dtype=np.float32) x = torch.tensor(x, dtype=torch.float) y = torch.tensor(y, dtype=torch.float) - X, Y = torch.meshgrid(x, y, indexing='ij') - self.grid = torch.stack((X, Y), axis=-1)[::reduced_resolution, ::reduced_resolution] + X, Y = torch.meshgrid(x, y, indexing="ij") + self.grid = torch.stack((X, Y), axis=-1)[ + ::reduced_resolution, ::reduced_resolution + ] + + elif filename[-2:] == "h5": # SWE-2D (RDB) + print(".H5 file extension is assumed hereafter") - elif filename[-2:] == 'h5': # SWE-2D (RDB) - print(f".H5 file extension is assumed hereafter") - - with h5py.File(root_path, 'r') as f: + with h5py.File(root_path, "r") as f: keys = list(f.keys()) keys.sort() - data_arrays = [np.array(f[key]['data'], dtype=np.float32) for key in keys] - _data = torch.from_numpy(np.stack(data_arrays, axis=0)) # [batch, nt, nx, ny, nc] - _data = _data[::reduced_batch, ::reduced_resolution_t, ::reduced_resolution, ::reduced_resolution, ...] - _data = torch.permute(_data, (0, 2, 3, 1, 4)) # [batch, nx, ny, nt, nc] - gridx, gridy = np.array(f['0023']['grid']['x'], dtype=np.float32), np.array(f['0023']['grid']['y'], dtype=np.float32) - mgridX, mgridY = np.meshgrid(gridx, gridy, indexing='ij') - _grid = torch.stack((torch.from_numpy(mgridX), torch.from_numpy(mgridY)), axis=-1) + + data_arrays = [ + np.array(f[key]["data"], dtype=np.float32) for key in keys + ] + _data = torch.from_numpy( + np.stack(data_arrays, axis=0) + ) # [batch, nt, nx, ny, nc] + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ..., + ] + _data = torch.permute(_data, (0, 2, 3, 1, 4)) # [batch, nx, ny, nt, nc] + gridx, gridy = ( + np.array(f["0023"]["grid"]["x"], dtype=np.float32), + np.array(f["0023"]["grid"]["y"], dtype=np.float32), + ) + mgridX, mgridY = np.meshgrid(gridx, gridy, indexing="ij") + _grid = torch.stack( + (torch.from_numpy(mgridX), torch.from_numpy(mgridY)), axis=-1 + ) _grid = _grid[::reduced_resolution, ::reduced_resolution, ...] 
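+                # NOTE: the spatial grid and the time stamps are read from the fixed sample group "0023"; this assumes every sample in the RDB file shares the same grid and time discretisation.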
- _tsteps_t = torch.from_numpy(np.array(f['0023']['grid']['t'], dtype=np.float32)) + _tsteps_t = torch.from_numpy( + np.array(f["0023"]["grid"]["t"], dtype=np.float32) + ) + tsteps_t = _tsteps_t[::reduced_resolution_t] self.data = _data self.grid = _grid self.tsteps_t = tsteps_t - if num_samples_max>0: - num_samples_max = min(num_samples_max, self.data.shape[0]) + if num_samples_max > 0: + num_samples_max = min(num_samples_max, self.data.shape[0]) else: num_samples_max = self.data.shape[0] @@ -376,23 +519,25 @@ def __init__(self, filename, def __len__(self): return len(self.data) - + def __getitem__(self, idx): - - return self.data[idx,...,:self.initial_step,:], self.data[idx], self.grid + return self.data[idx, ..., : self.initial_step, :], self.data[idx], self.grid class FNODatasetMult(Dataset): - def __init__(self, filename, - initial_step=10, - saved_folder='../data/', - reduced_resolution=1, - reduced_resolution_t=1, - reduced_batch=1, - if_test=False, test_ratio=0.1 - ): + def __init__( + self, + filename, + initial_step=10, + saved_folder="../data/", + reduced_resolution=1, + reduced_resolution_t=1, + reduced_batch=1, + if_test=False, + test_ratio=0.1, + ): """ - + :param filename: filename that contains the dataset :type filename: STR :param filenum: array containing indices of filename included in the dataset @@ -401,64 +546,63 @@ def __init__(self, filename, :type initial_step: INT, optional """ - + # Define path to files self.file_path = os.path.abspath(saved_folder + filename + ".h5") - + # Extract list of seeds - with h5py.File(self.file_path, 'r') as h5_file: + with h5py.File(self.file_path, "r") as h5_file: data_list = sorted(h5_file.keys()) - test_idx = int(len(data_list) * (1-test_ratio)) + test_idx = int(len(data_list) * (1 - test_ratio)) if if_test: self.data_list = np.array(data_list[test_idx:]) else: self.data_list = np.array(data_list[:test_idx]) - + # Time steps used as initial conditions self.initial_step = initial_step def __len__(self): return len(self.data_list) - + def __getitem__(self, idx): - # Open file and read data - with h5py.File(self.file_path, 'r') as h5_file: + with h5py.File(self.file_path, "r") as h5_file: seed_group = h5_file[self.data_list[idx]] - + # data dim = [t, x1, ..., xd, v] - data = np.array(seed_group["data"], dtype='f') + data = np.array(seed_group["data"], dtype="f") data = torch.tensor(data, dtype=torch.float) - + # convert to [x1, ..., xd, t, v] - permute_idx = list(range(1,len(data.shape)-1)) + permute_idx = list(range(1, len(data.shape) - 1)) permute_idx.extend(list([0, -1])) data = data.permute(permute_idx) - + # Extract spatial dimension of data - dim = len(data.shape) - 2 - + dim = len(data.shape) - 2 + # x, y and z are 1-D arrays # Convert the spatial coordinates to meshgrid if dim == 1: - grid = np.array(seed_group["grid"]["x"], dtype='f') + grid = np.array(seed_group["grid"]["x"], dtype="f") grid = torch.tensor(grid, dtype=torch.float).unsqueeze(-1) elif dim == 2: - x = np.array(seed_group["grid"]["x"], dtype='f') - y = np.array(seed_group["grid"]["y"], dtype='f') + x = np.array(seed_group["grid"]["x"], dtype="f") + y = np.array(seed_group["grid"]["y"], dtype="f") x = torch.tensor(x, dtype=torch.float) y = torch.tensor(y, dtype=torch.float) - X, Y = torch.meshgrid(x, y, indexing='ij') - grid = torch.stack((X,Y),axis=-1) + X, Y = torch.meshgrid(x, y, indexing="ij") + grid = torch.stack((X, Y), axis=-1) elif dim == 3: - x = np.array(seed_group["grid"]["x"], dtype='f') - y = np.array(seed_group["grid"]["y"], dtype='f') - z = 
np.array(seed_group["grid"]["z"], dtype='f') + x = np.array(seed_group["grid"]["x"], dtype="f") + y = np.array(seed_group["grid"]["y"], dtype="f") + z = np.array(seed_group["grid"]["z"], dtype="f") x = torch.tensor(x, dtype=torch.float) y = torch.tensor(y, dtype=torch.float) z = torch.tensor(z, dtype=torch.float) - X, Y, Z = torch.meshgrid(x, y, z, indexing='ij') - grid = torch.stack((X,Y,Z),axis=-1) - - return data[...,:self.initial_step,:], data, grid + X, Y, Z = torch.meshgrid(x, y, z, indexing="ij") + grid = torch.stack((X, Y, Z), axis=-1) + + return data[..., : self.initial_step, :], data, grid diff --git a/pdebench/models/inverse/inverse.py b/pdebench/models/inverse/inverse.py index 44c4cff..bfe3e34 100644 --- a/pdebench/models/inverse/inverse.py +++ b/pdebench/models/inverse/inverse.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- """ File: inverse.py Authors: Francesco Alesiani (makoto.takamoto@neclab.eu) - Dan MacKinlay (Dan.MacKinlay@data61.csiro.au) + Dan MacKinlay (Dan.MacKinlay@data61.csiro.au) NEC Laboratories Europe GmbH, Copyright (c) , All rights reserved. @@ -145,49 +144,45 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations -import sys +import pyro +import pyro.distributions as dist import torch -import numpy as np -import pickle -import torch.nn as nn import torch.nn.functional as F - -import operator -from functools import reduce -from functools import partial - -import pyro +from numpy import prod from pyro.nn import PyroModule, PyroSample -import pyro.distributions as dist -from pyro.infer import MCMC, NUTS -from pyro import poutine +from torch import nn class ElementStandardScaler: - def fit(self, x): - self.mean = x.mean() - self.std = x.std(unbiased=False) - def transform(self, x): - eps = 1e-20 - x = x - self.mean - x = x/(self.std + eps) - return x - def fit_transform(self, x): - self.fit(x) - return self.transform(x) + def fit(self, x): + self.mean = x.mean() + self.std = x.std(unbiased=False) + + def transform(self, x): + eps = 1e-20 + x = x - self.mean + x = x / (self.std + eps) + return x + + def fit_transform(self, x): + self.fit(x) + return self.transform(x) + class ProbRasterLatent(PyroModule): def __init__( - self, - process_predictor: "nn.Module", - dims = (256,256), - latent_dims = (16,16), - interpolation = "bilinear", - prior_scale = 0.01, - obs_scale = 0.01, - prior_std = 0.01, - device=None): + self, + process_predictor: nn.Module, + dims=(256, 256), + latent_dims=(16, 16), + interpolation="bilinear", + prior_scale=0.01, + obs_scale=0.01, + prior_std=0.01, + device=None, + ): super().__init__() self.dims = dims self.device = device @@ -200,64 +195,50 @@ def __init__( self.obs_scale = torch.tensor(obs_scale, device=self.device, dtype=torch.float) self.process_predictor = process_predictor process_predictor.train(False) - ## Do not fit the process predictor weights + # Do not fit the process predictor weights for param in self.process_predictor.parameters(): param.requires_grad = False - _m,_s = torch.tensor([0], device=self.device, dtype=torch.float), torch.tensor([self.prior_std], device=self.device, dtype=torch.float) - self.latent = PyroSample(dist.Normal(_m,_s).expand(latent_dims).to_event(2)) - print(self.latent_dims,self.dims) + _m, _s = ( + torch.tensor([0], device=self.device, dtype=torch.float), + torch.tensor([self.prior_std], device=self.device, dtype=torch.float), + ) + self.latent = PyroSample(dist.Normal(_m, _s).expand(latent_dims).to_event(2)) + print(self.latent_dims, self.dims) def 
get_latent(self): - if self.latent_dims==self.dims: + if self.latent_dims == self.dims: return self.latent.unsqueeze(0) # `mini-batch x channels x [optional depth] x [optional height] x width`. - l = F.interpolate( + l = F.interpolate( self.latent.unsqueeze(1), self.dims, mode=self.interpolation, - align_corners=False + align_corners=False, ).squeeze(0) #squeeze/unsqueeze is because of weird interpolate semantics + ).squeeze(0) # squeeze/unsqueeze is because of weird interpolate semantics return l - def latent2source(self,latent): - if latent.shape==self.dims: + def latent2source(self, latent): + if latent.shape == self.dims: return latent.unsqueeze(0) # `mini-batch x channels x [optional depth] x [optional height] x width`. - l = F.interpolate( - latent.unsqueeze(1), - self.dims, - mode=self.interpolation, - align_corners=False - ).squeeze(0) #squeeze/unsqueeze is because of weird interpolate semantics + l = F.interpolate( + latent.unsqueeze(1), self.dims, mode=self.interpolation, align_corners=False + ).squeeze(0) # squeeze/unsqueeze is because of weird interpolate semantics return l def forward(self, grid, y=None): - #overwrite process predictor batch with my own latent + # overwrite process predictor batch with my own latent x = self.get_latent() # print("forward:x.shape,grid.shape=",x.shape,grid.shape) - mean = self.process_predictor(x.to(self.device),grid.to(self.device)) - o = pyro.sample( - "obs", dist.Normal(mean, self.obs_scale).to_event(2), - obs=y) - return o - - -import sys -import torch -import numpy as np -import pickle -import torch.nn as nn -import torch.nn.functional as F + mean = self.process_predictor(x.to(self.device), grid.to(self.device)) + o = pyro.sample("obs", dist.Normal(mean, self.obs_scale).to_event(2), obs=y) + return o -import operator -from functools import reduce -from functools import partial -from numpy import prod class InitialConditionInterp(nn.Module): """ InitialConditionInterp - Class for the inital conditions using interpoliation. Works for 1d,2d and 3d + Class for the initial conditions using interpolation. Works for 1d, 2d and 3d model_ic = InitialConditionInterp([16],[8]) model_ic = InitialConditionInterp([16,16],[8,8]) @@ -265,30 +246,31 @@ class InitialConditionInterp(nn.Module): June 2022, F.Alesiani """ + def __init__(self, dims, hidden_dim): super(InitialConditionInterp, self).__init__() self.spatial_dim = len(hidden_dim) - self.dims = [1]+dims if len(dims)==1 else dims + self.dims = [1] + dims if len(dims) == 1 else dims # self.dims = [1,1,1]+dims - self.hidden_dim = [1]+hidden_dim if len(hidden_dim)==1 else hidden_dim - self.interpolation = "bilinear" if len(hidden_dim)<3 else "trilinear" - self.scale = (1 / prod(hidden_dim)) - self.latent = nn.Parameter(self.scale * torch.rand(1, 1, *self.hidden_dim, dtype=torch.float)) + self.hidden_dim = [1] + hidden_dim if len(hidden_dim) == 1 else hidden_dim + self.interpolation = "bilinear" if len(hidden_dim) < 3 else "trilinear" + self.scale = 1 / prod(hidden_dim) + self.latent = nn.Parameter( + self.scale * torch.rand(1, 1, *self.hidden_dim, dtype=torch.float) + ) # print(self.latent.shape) - def latent2source(self,latent): - if latent.shape[2:]==self.dims: + def latent2source(self, latent): + if latent.shape[2:] == self.dims: return latent # `mini-batch x channels x [optional depth] x [optional height] x width`.
- l = F.interpolate( - latent, - self.dims, - mode=self.interpolation, - align_corners=False - ) + l = F.interpolate( + latent, self.dims, mode=self.interpolation, align_corners=False + ) return l.view(self.dims) + def forward(self): x = self.latent2source(self.latent) if self.spatial_dim == 1: - x = x.squeeze(0) - return x \ No newline at end of file + x = x.squeeze(0) + return x diff --git a/pdebench/models/inverse/train.py b/pdebench/models/inverse/train.py index 30a2f0f..fa4b110 100644 --- a/pdebench/models/inverse/train.py +++ b/pdebench/models/inverse/train.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- """ File: inverse.py Authors: Francesco Alesiani (makoto.takamoto@neclab.eu) - Dan MacKinlay (Dan.MacKinlay@data61.csiro.au) + Dan MacKinlay (Dan.MacKinlay@data61.csiro.au) NEC Laboratories Europe GmbH, Copyright (c) , All rights reserved. @@ -145,266 +144,310 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations -import sys -import torch -import numpy as np -import pickle -import torch.nn as nn -import torch.nn.functional as F - -import operator -from functools import reduce -from functools import partial - -import pyro -from pyro.nn import PyroModule, PyroSample -import pyro.distributions as dist -from pyro.infer import MCMC, NUTS -from pyro import poutine - +import logging from timeit import default_timer - -import sys, os import hydra +import pandas as pd +import torch from omegaconf import DictConfig -from omegaconf import OmegaConf -from omegaconf import open_dict - - -import pdebench as pde -from pdebench.models.fno.fno import FNO1d,FNO2d,FNO3d -from pdebench.models.fno.utils import FNODatasetSingle, FNODatasetMult - +from pdebench.models.fno.fno import FNO1d, FNO2d, FNO3d +from pdebench.models.fno.utils import FNODatasetSingle +from pdebench.models.inverse.inverse import ( + ElementStandardScaler, + InitialConditionInterp, + ProbRasterLatent, +) +from pdebench.models.metrics import inverse_metrics from pdebench.models.unet.unet import UNet1d, UNet2d, UNet3d -from pdebench.models.unet.utils import UNetDatasetSingle,UNetDatasetMult - -from pdebench.models import metrics -from pdebench.models.metrics import LpLoss,FftLpLoss,FftMseLoss,inverse_metrics -import pandas as pd - +from pdebench.models.unet.utils import UNetDatasetSingle +from pyro.infer import MCMC, NUTS +from torch import nn +from tqdm import tqdm -from pdebench.models.inverse.inverse import ProbRasterLatent, ElementStandardScaler, InitialConditionInterp -from pdebench.models.inverse.utils import plot_ic_solution_mcmc +logging.basicConfig(level=logging.INFO, filename=__name__) +logging.root.setLevel(logging.INFO) -from torch.distributions.normal import Normal -from tqdm import tqdm +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -def load_model(model,model_path, device): +def load_model(model, model_path, device): checkpoint = torch.load(model_path, map_location=device) - model.load_state_dict(checkpoint['model_state_dict']) + model.load_state_dict(checkpoint["model_state_dict"]) model.to(device) model.eval() return model -@hydra.main(config_path='../config', config_name='config') +@hydra.main(config_path="../config", config_name="config") def main(cfg: DictConfig): - print(cfg.args.filename) - print(cfg.args) - - # we use the test data - if cfg.args.model_name in ['FNO']: - inverse_data = FNODatasetSingle(cfg.args.filename, - saved_folder = cfg.args.base_path, - 
reduced_resolution=cfg.args.reduced_resolution, - reduced_resolution_t=cfg.args.reduced_resolution_t, - reduced_batch=cfg.args.reduced_batch, - initial_step=cfg.args.initial_step, - if_test=True, - num_samples_max = cfg.args.num_samples_max - ) - - _data, _, _ = next(iter(inverse_loader)) + logging.info(cfg.args.filename) + logging.info(cfg.args) + + # we use the test data + if cfg.args.model_name in ["FNO"]: + inverse_data = FNODatasetSingle( + cfg.args.filename, + saved_folder=cfg.args.base_path, + reduced_resolution=cfg.args.reduced_resolution, + reduced_resolution_t=cfg.args.reduced_resolution_t, + reduced_batch=cfg.args.reduced_batch, + initial_step=cfg.args.initial_step, + if_test=True, + num_samples_max=cfg.args.num_samples_max, + ) + + inverse_loader = torch.utils.data.DataLoader( + inverse_data, batch_size=1, shuffle=False + ) + _data, _, _ = next(iter(inverse_loader)) dimensions = len(_data.shape) spatial_dim = dimensions - 3 - if cfg.args.model_name in ['UNET','Unet']: - inverse_data = UNetDatasetSingle(cfg.args.filename, - saved_folder = cfg.args.base_path, - reduced_resolution=cfg.args.reduced_resolution, - reduced_resolution_t=cfg.args.reduced_resolution_t, - reduced_batch=cfg.args.reduced_batch, - initial_step=cfg.args.initial_step, - if_test=True, - num_samples_max = cfg.args.num_samples_max) - - inverse_loader = torch.utils.data.DataLoader(inverse_data, batch_size=1,shuffle=False) - _data, _ = next(iter(inverse_loader)) + if cfg.args.model_name in ["UNET", "Unet"]: + inverse_data = UNetDatasetSingle( + cfg.args.filename, + saved_folder=cfg.args.base_path, + reduced_resolution=cfg.args.reduced_resolution, + reduced_resolution_t=cfg.args.reduced_resolution_t, + reduced_batch=cfg.args.reduced_batch, + initial_step=cfg.args.initial_step, + if_test=True, + num_samples_max=cfg.args.num_samples_max, + ) + + inverse_loader = torch.utils.data.DataLoader( + inverse_data, batch_size=1, shuffle=False + ) + _data, _ = next(iter(inverse_loader)) dimensions = len(_data.shape) spatial_dim = dimensions - 3 - initial_step = cfg.args.initial_step t_train = cfg.args.t_train - - model_name = cfg.args.filename[:-5] + '_' + cfg.args.model_name + + model_name = cfg.args.filename[:-5] + "_" + cfg.args.model_name model_path = cfg.args.base_path + model_name + ".pt" - if cfg.args.model_name in ['FNO']: + if cfg.args.model_name in ["FNO"]: if dimensions == 4: - print(cfg.args.num_channels) - model = FNO1d(num_channels=cfg.args.num_channels, - width=cfg.args.width, - modes=cfg.args.modes, - initial_step=cfg.args.initial_step).to(device) + logging.info(cfg.args.num_channels) + model = FNO1d( + num_channels=cfg.args.num_channels, + width=cfg.args.width, + modes=cfg.args.modes, + initial_step=cfg.args.initial_step, + ).to(device) if dimensions == 5: - model = FNO2d(num_channels=cfg.args.num_channels, - width=cfg.args.width, - modes1=cfg.args.modes, - modes2=cfg.args.modes, - initial_step=cfg.args.initial_step).to(device) + model = FNO2d( + num_channels=cfg.args.num_channels, + width=cfg.args.width, + modes1=cfg.args.modes, + modes2=cfg.args.modes, + initial_step=cfg.args.initial_step, + ).to(device) if dimensions == 6: - model = FNO3d(num_channels=cfg.args.num_channels, - width=cfg.args.width, - modes1=cfg.args.modes, - modes2=cfg.args.modes, - modes3=cfg.args.modes, - initial_step=cfg.args.initial_step).to(device) - - if cfg.args.model_name in ['UNET','Unet']: + model = FNO3d( + num_channels=cfg.args.num_channels, + width=cfg.args.width, + modes1=cfg.args.modes, + modes2=cfg.args.modes, + modes3=cfg.args.modes, + initial_step=cfg.args.initial_step, + ).to(device) + + if cfg.args.model_name in 
["UNET", "Unet"]: if dimensions == 4: model = UNet1d(cfg.args.in_channels, cfg.args.out_channels).to(device) elif dimensions == 5: model = UNet2d(cfg.args.in_channels, cfg.args.out_channels).to(device) elif dimensions == 6: - model = UNet3d(cfg.args.in_channels, cfg.args.out_channels).to(device) + model = UNet3d(cfg.args.in_channels, cfg.args.out_channels).to(device) - model = load_model(model,model_path, device) + model = load_model(model, model_path, device) model.eval() - if cfg.args.inverse_model_type in ['ProbRasterLatent']: - assert(spatial_dim==1), "give me time" - if spatial_dim==1: - ns,nx,nt,nc = _data.shape + if cfg.args.inverse_model_type in ["ProbRasterLatent"]: + assert spatial_dim == 1, "give me time" + if spatial_dim == 1: + ns, nx, nt, nc = _data.shape model_inverse = ProbRasterLatent( model.to(device), - dims=[nx,1], - latent_dims = [1,cfg.args.in_channels_hid,1], - prior_scale = 0.1, - obs_scale = 0.01, - prior_std = 0.01, - device=device - ) - - if cfg.args.inverse_model_type in ['InitialConditionInterp']: + dims=[nx, 1], + latent_dims=[1, cfg.args.in_channels_hid, 1], + prior_scale=0.1, + obs_scale=0.01, + prior_std=0.01, + device=device, + ) + + if cfg.args.inverse_model_type in ["InitialConditionInterp"]: loss_fn = nn.MSELoss(reduction="mean") - input_dims = list(_data.shape[1:1+spatial_dim]) - latent_dims = len(input_dims)*[cfg.args.in_channels_hid] - if cfg.args.num_channels> 1: - input_dims=input_dims+[cfg.args.num_channels] - latent_dims=latent_dims+[cfg.args.num_channels] - print(input_dims,latent_dims) - model_ic = InitialConditionInterp(input_dims,latent_dims).to(device) - model.to(device) + input_dims = list(_data.shape[1 : 1 + spatial_dim]) + latent_dims = len(input_dims) * [cfg.args.in_channels_hid] + if cfg.args.num_channels > 1: + input_dims = [*input_dims, cfg.args.num_channels] + latent_dims = [*latent_dims, cfg.args.num_channels] + model_ic = InitialConditionInterp(input_dims, latent_dims).to(device) + model.to(device) scaler = ElementStandardScaler() loss_fn = nn.MSELoss(reduction="mean") - inverse_u0_l2_full,inverse_y_l2_full = 0,0 + inverse_u0_l2_full, inverse_y_l2_full = 0, 0 all_metric = [] t1 = default_timer() - for ks,sample in enumerate(inverse_loader): - if cfg.args.model_name in ['FNO']: + for ks, sample in enumerate(inverse_loader): + if cfg.args.model_name in ["FNO"]: (xx, yy, grid) = sample xx = xx.to(device) yy = yy.to(device) grid = grid.to(device) - model_ = lambda x, grid: model(x,grid) - if cfg.args.model_name in ['UNET','Unet']: + def model_(x, grid): + return model(x, grid) + + if cfg.args.model_name in ["UNET", "Unet"]: (xx, yy) = sample grid = None xx = xx.to(device) yy = yy.to(device) - model_ = lambda x, grid: model(x.permute([0, 2, 1])).permute([0, 2, 1]) - num_samples = ks + 1 - loss = 0 + def model_(x, grid): + return model(x.permute([0, 2, 1])).permute([0, 2, 1]) + num_samples = ks + 1 - x = xx[..., 0 , :] - y = yy[..., t_train:t_train+1 , :] + x = xx[..., 0, :] + y = yy[..., t_train : t_train + 1, :] - if ks==0: - print(x.shape,y.shape) + if ks == 0: + msg = f"{x.shape}, {y.shape}" + logging.info(msg) - #scale the input and output + # scale the input and output x = scaler.fit_transform(x) y = scaler.transform(y) - if cfg.args.inverse_model_type in ['ProbRasterLatent']: - #Create model + if cfg.args.inverse_model_type in ["ProbRasterLatent"]: + # Create model model_inverse.to(device) - nuts_kernel = NUTS(model_inverse, full_mass=False, max_tree_depth=5, jit_compile=True) # high performacne config - - mcmc = MCMC(nuts_kernel, 
num_samples=cfg.args.mcmc_num_samples, warmup_steps=cfg.args.mcmc_warmup_steps, num_chains=cfg.args.mcmc_num_chains,disable_progbar=True) + nuts_kernel = NUTS( + model_inverse, full_mass=False, max_tree_depth=5, jit_compile=True + ) # high performance config + + mcmc = MCMC( + nuts_kernel, + num_samples=cfg.args.mcmc_num_samples, + warmup_steps=cfg.args.mcmc_warmup_steps, + num_chains=cfg.args.mcmc_num_chains, + disable_progbar=True, + ) mcmc.run(grid, y) - mc_samples = {k: v.detach().cpu().numpy() for k, v in mcmc.get_samples().items()} + mc_samples = { + k: v.detach().cpu().numpy() for k, v in mcmc.get_samples().items() + } # get the initial solution - latent = torch.tensor(mc_samples['latent']) + latent = torch.tensor(mc_samples["latent"]) u0 = model_inverse.latent2source(latent[0]).to(device) pred_u0 = model(u0, grid) - if cfg.args.inverse_model_type in ['InitialConditionInterp']: - optimizer = torch.optim.Adam(model_ic.parameters(), lr=cfg.args.inverse_learning_rate, weight_decay=1e-4) + if cfg.args.inverse_model_type in ["InitialConditionInterp"]: + optimizer = torch.optim.Adam( + model_ic.parameters(), + lr=cfg.args.inverse_learning_rate, + weight_decay=1e-4, + ) # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma) if cfg.args.inverse_verbose_flag: _iter = tqdm(range(cfg.args.inverse_epochs)) else: _iter = range(cfg.args.inverse_epochs) - for epoch in _iter: - if cfg.args.num_channels>1: + for _ in _iter: + if cfg.args.num_channels > 1: u0 = model_ic().unsqueeze(0) else: u0 = model_ic().unsqueeze(0).unsqueeze(-1) - - pred_u0 = model_(u0,grid) - - loss_u0 = loss_fn(pred_u0,y) + + pred_u0 = model_(u0, grid) + + loss_u0 = loss_fn(pred_u0, y) optimizer.zero_grad() loss_u0.backward() optimizer.step() t2 = default_timer() if cfg.args.inverse_verbose_flag: - _iter.set_description(f"loss={loss_u0.item()}, t2-t1= {t2-t1}") + _iter.set_description(f"loss={loss_u0.item()}, t2-t1= {t2-t1}") - #compute losses + # compute losses loss_u0 = loss_fn(u0.reshape(1, -1), x.reshape(1, -1)).item() loss_y = loss_fn(pred_u0.reshape(1, -1), y.reshape(1, -1)).item() inverse_u0_l2_full += loss_u0 inverse_y_l2_full += loss_y - metric = inverse_metrics(u0,x,pred_u0,y) - metric['sample'] = ks + metric = inverse_metrics(u0, x, pred_u0, y) + metric["sample"] = ks + + all_metric += [metric] - all_metric+=[metric] - t2 = default_timer() - print('samples: {}, loss_u0: {:.5f},loss_y: {:.5f}, t2-t1: {:.5f}, mse_inverse_u0_L2: {:.5f}, mse_inverse_y_L2: {:.5f}'\ - .format(ks+1, loss_u0, loss_y, t2 - t1, inverse_u0_l2_full/num_samples, inverse_y_l2_full/num_samples)) + msg = ", ".join( + [ + f"samples: {ks + 1}", + f"loss_u0: {loss_u0:.5f}", + f"loss_y: {loss_y:.5f}", + f"t2-t1: {t2 - t1:.5f}", + f"mse_inverse_u0_L2: {inverse_u0_l2_full / num_samples:.5f}", + f"mse_inverse_y_L2: {inverse_y_l2_full / num_samples:.5f}", + ] + ) + logging.info(msg) df_metric = pd.DataFrame(all_metric) - inverse_metric_filename = cfg.args.base_path + cfg.args.filename[:-5] + '_' + cfg.args.model_name +'_'+cfg.args.inverse_model_type + ".csv" - print("saving in :", inverse_metric_filename) + inverse_metric_filename = ( + cfg.args.base_path + + cfg.args.filename[:-5] + + "_" + + cfg.args.model_name + + "_" + + cfg.args.inverse_model_type + + ".csv" + ) + msg = f"saving in : {inverse_metric_filename}" + logging.info(msg) df_metric.to_csv(inverse_metric_filename) - inverse_metric_filename = cfg.args.base_path + cfg.args.filename[:-5] + '_' + cfg.args.model_name +'_'+cfg.args.inverse_model_type+ 
".pickle" - print("saving in :", inverse_metric_filename) + inverse_metric_filename = ( + cfg.args.base_path + + cfg.args.filename[:-5] + + "_" + + cfg.args.model_name + + "_" + + cfg.args.inverse_model_type + + ".pickle" + ) + msg = f"saving in : {inverse_metric_filename}" + logging.info(msg) df_metric.to_pickle(inverse_metric_filename) - inverse_metric_filename = cfg.args.base_path + cfg.args.filename[:-5] + '_' + cfg.args.model_name +'_'+cfg.args.inverse_model_type+ "_stats.csv" - print("saving in :", inverse_metric_filename) + inverse_metric_filename = ( + cfg.args.base_path + + cfg.args.filename[:-5] + + "_" + + cfg.args.model_name + + "_" + + cfg.args.inverse_model_type + + "_stats.csv" + ) + msg = f"saving in : {inverse_metric_filename}" + logging.info(msg) df_metric = df_metric.describe() df_metric.to_csv(inverse_metric_filename) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/pdebench/models/inverse/utils.py b/pdebench/models/inverse/utils.py index 7e4f0ce..2e3a74d 100644 --- a/pdebench/models/inverse/utils.py +++ b/pdebench/models/inverse/utils.py @@ -1,10 +1,9 @@ -# -*- coding: utf-8 -*- """ File: utils.py Authors: Francesco Alesiani (makoto.takamoto@neclab.eu) - Dan MacKinlay (Dan.MacKinlay@data61.csiro.au) + Dan MacKinlay (Dan.MacKinlay@data61.csiro.au) NEC Laboratories Europe GmbH, Copyright (c) , All rights reserved. @@ -145,73 +144,93 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ +from __future__ import annotations +import logging +import hydra import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from omegaconf import DictConfig +from scipy.signal import welch + +logging.basicConfig(level=logging.INFO, filename=__name__) +logging.root.setLevel(logging.INFO) -def plot_ic_solution_mcmc(latent,x,y,grid,model_inverse,model,device,fname_save="IC_inverse_problem_mcmc.pdf"): + +def plot_ic_solution_mcmc( + latent, + x, + y, + grid, + model_inverse, + model, + device, + fname_save="IC_inverse_problem_mcmc.pdf", +): """ Plots the prediction of the initial condition estimated using MCMC from the latent with the model "model" y = model(x) - y[i] = model(latent[i]), i =0, ... + y[i] = model(latent[i]), i =0, ... 
June 2022, F.Alesiani - """ - fig, axes = plt.subplots(1,2,figsize=(15,7)) - ax = axes[0] + """ + fig, axes = plt.subplots(1, 2, figsize=(15, 7)) + ax = axes[0] u0 = model_inverse.latent2source(latent[0]).to(device) pred_u0 = model(u0, grid) - ax.plot(u0.detach().cpu().flatten(),'r',label="Predicted Initial Condition") + ax.plot(u0.detach().cpu().flatten(), "r", label="Predicted Initial Condition") for _latent in latent: u0 = model_inverse.latent2source(_latent).to(device) - ax.plot(u0.detach().cpu().flatten(),'r',alpha=0.1) - ax.plot(x.detach().cpu().flatten(),'b--',label="True Initial Condition") + ax.plot(u0.detach().cpu().flatten(), "r", alpha=0.1) + ax.plot(x.detach().cpu().flatten(), "b--", label="True Initial Condition") ax.legend() # plt.show() - ax = axes[1] - ax.plot(pred_u0.detach().cpu().flatten(),'r',label="Predicted forward value") - ax.plot(y.detach().cpu().flatten(),'b--',label="True forward value") + ax = axes[1] + ax.plot(pred_u0.detach().cpu().flatten(), "r", label="Predicted forward value") + ax.plot(y.detach().cpu().flatten(), "b--", label="True forward value") for _latent in latent: u0 = model_inverse.latent2source(_latent).to(device) pred_u0 = model(u0, grid) - ax.plot(pred_u0.detach().cpu().flatten(),'r',alpha=0.1) + ax.plot(pred_u0.detach().cpu().flatten(), "r", alpha=0.1) ax.legend() if fname_save: - plt.savefig(fname_save, bbox_inches='tight') - + plt.savefig(fname_save, bbox_inches="tight") -def plot_ic_solution_grad(model_ic,x,y,grid,model,device,fname_save="IC_inverse_problem_grad.pdf"): +def plot_ic_solution_grad( + model_ic, x, y, grid, model, device, fname_save="IC_inverse_problem_grad.pdf" +): """ Plots the prediction of the initial condition estimated using model_ic with the model "model" y = model(x) y' = model(model_ic()) June 2022, F.Alesiani - """ + """ - fig, axes = plt.subplots(1,2,figsize=(15,7)) - ax = axes[0] + fig, axes = plt.subplots(1, 2, figsize=(15, 7)) + ax = axes[0] u0 = model_ic().to(device).unsqueeze(0).unsqueeze(-1) pred_u0 = model(u0, grid) - ax.plot(u0.detach().cpu().flatten(),'r',label="Predicted Initial Condition") - ax.plot(x.detach().cpu().flatten(),'b--',label="True Initial Condition") + ax.plot(u0.detach().cpu().flatten(), "r", label="Predicted Initial Condition") + ax.plot(x.detach().cpu().flatten(), "b--", label="True Initial Condition") ax.legend() # plt.show() - ax = axes[1] - ax.plot(pred_u0.detach().cpu().flatten(),'r',label="Predicted forward value") - ax.plot(y.detach().cpu().flatten(),'b--',label="True forward value") + ax = axes[1] + ax.plot(pred_u0.detach().cpu().flatten(), "r", label="Predicted forward value") + ax.plot(y.detach().cpu().flatten(), "b--", label="True forward value") ax.legend() if fname_save: - plt.savefig(fname_save, bbox_inches='tight') + plt.savefig(fname_save, bbox_inches="tight") -from scipy.signal import welch -import matplotlib.pyplot as plt - -def plot_ic_solution_grad_psd(model_ic,x,y,grid,model,device,fname_save="IC_inverse_problem_grad_psd.pdf"): +def plot_ic_solution_grad_psd( + model_ic, x, y, grid, model, device, fname_save="IC_inverse_problem_grad_psd.pdf" +): """ Plots the prediction of the initial condition estimated using model_ic with the model "model" y = model(x) @@ -219,19 +238,19 @@ def plot_ic_solution_grad_psd(model_ic,x,y,grid,model,device,fname_save="IC_inve It also shows the power density June 2022, F.Alesiani - """ - fig, axes = plt.subplots(1,3,figsize=(22,7)) - ax = axes[0] + """ + fig, axes = plt.subplots(1, 3, figsize=(22, 7)) + ax = axes[0] u0 = 
model_ic().to(device).unsqueeze(0).unsqueeze(-1) pred_u0 = model(u0, grid) - ax.plot(u0.detach().cpu().flatten(),'r',label="Predicted Initial Condition") - ax.plot(x.detach().cpu().flatten(),'b--',label="True Initial Condition") + ax.plot(u0.detach().cpu().flatten(), "r", label="Predicted Initial Condition") + ax.plot(x.detach().cpu().flatten(), "b--", label="True Initial Condition") ax.legend() # plt.show() - ax = axes[1] - ax.plot(pred_u0.detach().cpu().flatten(),'r',label="Predicted forward value") - ax.plot(y.detach().cpu().flatten(),'b--',label="True forward value") + ax = axes[1] + ax.plot(pred_u0.detach().cpu().flatten(), "r", label="Predicted forward value") + ax.plot(y.detach().cpu().flatten(), "b--", label="True forward value") ax.legend() _u0 = u0.detach().cpu().flatten() @@ -239,76 +258,90 @@ def plot_ic_solution_grad_psd(model_ic,x,y,grid,model,device,fname_save="IC_inve fz = u0.shape[1] - fu,puu = welch(_u0,fz) - fx,pxx = welch(_x,fz) + fu, puu = welch(_u0, fz) + fx, pxx = welch(_x, fz) - ax = axes[2] - ax.semilogy(fu,puu,'r',label="predicted u0") - ax.semilogy(fx,pxx,'b--',label="x true") - ax.set_xlabel('spatial frequency') - ax.set_ylabel('PSD') + ax = axes[2] + ax.semilogy(fu, puu, "r", label="predicted u0") + ax.semilogy(fx, pxx, "b--", label="x true") + ax.set_xlabel("spatial frequency") + ax.set_ylabel("PSD") ax.legend() if fname_save: - plt.savefig(fname_save, bbox_inches='tight') - + plt.savefig(fname_save, bbox_inches="tight") -import sys, os -import hydra -from omegaconf import DictConfig -from omegaconf import OmegaConf -from omegaconf import open_dict -import pandas as pd -import numpy as np - - -def get_metric_name(filename,model_name, base_path,inverse_model_type): +def get_metric_name(filename, model_name, base_path, inverse_model_type): """ returns the name convention for the result file June 2022, F.Alesiani """ - inverse_metric_filename = base_path + filename[:-5] + '_' + model_name +'_'+ inverse_model_type + ".pickle" - return inverse_metric_filename - -def read_results(model_names,inverse_model_type, base_path, filenames,shortfilenames, verbose=False): + return ( + base_path + + filename[:-5] + + "_" + + model_name + + "_" + + inverse_model_type + + ".pickle" + ) + + +def read_results( + model_names, inverse_model_type, base_path, filenames, shortfilenames, verbose=False +): """ - reads and merges the result files. + reads and merges the result files. Shortnames are used for the name of the dataset as alternative to the file name. 
June 2022, F.Alesiani """ dfs = [] for model_name in model_names: - for filename,shortfilename in zip(filenames,shortfilenames): + for filename, shortfilename in zip(filenames, shortfilenames): # print(filename) - inverse_metric_filename = get_metric_name(filename,model_name, base_path,inverse_model_type) - if verbose: print ("reading resul file: ",inverse_metric_filename) - df = pd.read_pickle(inverse_metric_filename) - df['model'] = model_name - df['pde'] = shortfilename - dfs+=[df] - keys = ['pde','model'] - df = pd.concat(dfs,axis=0) - return df, keys - -@hydra.main(config_path='../config', config_name='results') + inverse_metric_filename = get_metric_name( + filename, model_name, base_path, inverse_model_type + ) + if verbose: + msg = f"reading result file: {inverse_metric_filename}" + logging.info(msg) + + dframe = pd.read_pickle(inverse_metric_filename) + dframe["model"] = model_name + dframe["pde"] = shortfilename + dfs += [dframe] + keys = ["pde", "model"] + dframe = pd.concat(dfs, axis=0) + return dframe, keys + + +@hydra.main(config_path="../config", config_name="results") def process_results(cfg: DictConfig): """ - reads and merges the result files and aggregate the results with the selected values. The results are aggregated by datafile. + reads and merges the result files and aggregates the results with the selected values. The results are aggregated by datafile. June 2022, F.Alesiani - """ - print(cfg.args) - - df, keys = read_results(cfg.args.model_names,cfg.args.inverse_model_type, cfg.args.base_path, cfg.args.filenames, cfg.args.shortfilenames) + """ + logging.info(cfg.args) + + df, keys = read_results( + cfg.args.model_names, + cfg.args.inverse_model_type, + cfg.args.base_path, + cfg.args.filenames, + cfg.args.shortfilenames, + ) df1p3 = df[keys + list(cfg.args.results_values)] - df2p3 = df1p3.groupby(by=keys).agg([np.mean,np.std]).reset_index() - print("saving results into: ", cfg.args.base_path + cfg.args.result_filename) - df2p3.to_csv(cfg.args.base_path + cfg.args.result_filename) + df2p3 = df1p3.groupby(by=keys).agg([np.mean, np.std]).reset_index() + msg = f"saving results into: {cfg.args.base_path + cfg.args.result_filename}" + logging.info(msg) + df2p3.to_csv(cfg.args.base_path + cfg.args.result_filename) if __name__ == "__main__": process_results() - print("Done.") \ No newline at end of file + msg = "Done." + logging.info(msg) diff --git a/pdebench/models/metrics.py b/pdebench/models/metrics.py index 11697c8..913faab 100644 --- a/pdebench/models/metrics.py +++ b/pdebench/models/metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ @@ -145,16 +144,20 @@ 
""" +from __future__ import annotations -import torch -import numpy as np import math as mt + import matplotlib.pyplot as plt +import numpy as np +import torch from mpl_toolkits.axes_grid1 import make_axes_locatable +from torch import nn + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=12): +def metric_func(pred, target, if_mean=True, Lx=1.0, Ly=1.0, Lz=1.0, iLow=4, iHigh=12): """ code for calculate metrics discussed in the Brain-storming session RMSE, normalized RMSE, max error, RMSE at the boundaries, conserved variables, RMSE in Fourier space, temporal sensitivity @@ -175,14 +178,25 @@ def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=1 nb, nc, nt = idxs[0], idxs[1], idxs[-1] # RMSE - err_mean = torch.sqrt(torch.mean((pred.view([nb, nc, -1, nt]) - target.view([nb, nc, -1, nt])) ** 2, dim=2)) + err_mean = torch.sqrt( + torch.mean( + (pred.view([nb, nc, -1, nt]) - target.view([nb, nc, -1, nt])) ** 2, dim=2 + ) + ) err_RMSE = torch.mean(err_mean, axis=0) nrm = torch.sqrt(torch.mean(target.view([nb, nc, -1, nt]) ** 2, dim=2)) err_nRMSE = torch.mean(err_mean / nrm, dim=0) - err_CSV = torch.sqrt(torch.mean( - (torch.sum(pred.view([nb, nc, -1, nt]), dim=2) - torch.sum(target.view([nb, nc, -1, nt]), dim=2)) ** 2, - dim=0)) + err_CSV = torch.sqrt( + torch.mean( + ( + torch.sum(pred.view([nb, nc, -1, nt]), dim=2) + - torch.sum(target.view([nb, nc, -1, nt]), dim=2) + ) + ** 2, + dim=0, + ) + ) if len(idxs) == 4: nx = idxs[2] err_CSV /= nx @@ -193,20 +207,27 @@ def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=1 nx, ny, nz = idxs[2:5] err_CSV /= nx * ny * nz # worst case in all the data - err_Max = torch.max(torch.max( - torch.abs(pred.view([nb, nc, -1, nt]) - target.view([nb, nc, -1, nt])), dim=2)[0], dim=0)[0] + err_Max = torch.max( + torch.max( + torch.abs(pred.view([nb, nc, -1, nt]) - target.view([nb, nc, -1, nt])), + dim=2, + )[0], + dim=0, + )[0] if len(idxs) == 4: # 1D err_BD = (pred[:, :, 0, :] - target[:, :, 0, :]) ** 2 err_BD += (pred[:, :, -1, :] - target[:, :, -1, :]) ** 2 - err_BD = torch.mean(torch.sqrt(err_BD / 2.), dim=0) + err_BD = torch.mean(torch.sqrt(err_BD / 2.0), dim=0) elif len(idxs) == 5: # 2D nx, ny = idxs[2:4] err_BD_x = (pred[:, :, 0, :, :] - target[:, :, 0, :, :]) ** 2 err_BD_x += (pred[:, :, -1, :, :] - target[:, :, -1, :, :]) ** 2 err_BD_y = (pred[:, :, :, 0, :] - target[:, :, :, 0, :]) ** 2 err_BD_y += (pred[:, :, :, -1, :] - target[:, :, :, -1, :]) ** 2 - err_BD = (torch.sum(err_BD_x, dim=-2) + torch.sum(err_BD_y, dim=-2)) / (2 * nx + 2 * ny) + err_BD = (torch.sum(err_BD_x, dim=-2) + torch.sum(err_BD_y, dim=-2)) / ( + 2 * nx + 2 * ny + ) err_BD = torch.mean(torch.sqrt(err_BD), dim=0) elif len(idxs) == 6: # 3D nx, ny, nz = idxs[2:5] @@ -216,9 +237,11 @@ def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=1 err_BD_y += (pred[:, :, :, -1, :] - target[:, :, :, -1, :]) ** 2 err_BD_z = (pred[:, :, :, :, 0] - target[:, :, :, :, 0]) ** 2 err_BD_z += (pred[:, :, :, :, -1] - target[:, :, :, :, -1]) ** 2 - err_BD = torch.sum(err_BD_x.view([nb, -1, nt]), dim=-2) \ - + torch.sum(err_BD_y.view([nb, -1, nt]), dim=-2) \ - + torch.sum(err_BD_z.view([nb, -1, nt]), dim=-2) + err_BD = ( + torch.sum(err_BD_x.view([nb, -1, nt]), dim=-2) + + torch.sum(err_BD_y.view([nb, -1, nt]), dim=-2) + + torch.sum(err_BD_z.view([nb, -1, nt]), 
dim=-2) + ) err_BD = err_BD / (2 * nx * ny + 2 * ny * nz + 2 * nz * nx) err_BD = torch.mean(torch.sqrt(err_BD), dim=0) @@ -226,7 +249,9 @@ def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=1 nx = idxs[2] pred_F = torch.fft.rfft(pred, dim=2) target_F = torch.fft.rfft(target, dim=2) - _err_F = torch.sqrt(torch.mean(torch.abs(pred_F - target_F) ** 2, axis=0)) / nx * Lx + _err_F = ( + torch.sqrt(torch.mean(torch.abs(pred_F - target_F) ** 2, axis=0)) / nx * Lx + ) if len(idxs) == 5: # 2D pred_F = torch.fft.fftn(pred, dim=[2, 3]) target_F = torch.fft.fftn(target, dim=[2, 3]) @@ -235,7 +260,7 @@ def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=1 err_F = torch.zeros([nb, nc, min(nx // 2, ny // 2), nt]).to(device) for i in range(nx // 2): for j in range(ny // 2): - it = mt.floor(mt.sqrt(i ** 2 + j ** 2)) + it = mt.floor(mt.sqrt(i**2 + j**2)) if it > min(nx // 2, ny // 2) - 1: continue err_F[:, :, it] += _err_F[:, :, i, j] @@ -249,30 +274,49 @@ def metric_func(pred, target, if_mean=True, Lx=1., Ly=1., Lz=1., iLow=4, iHigh=1 for i in range(nx // 2): for j in range(ny // 2): for k in range(nz // 2): - it = mt.floor(mt.sqrt(i ** 2 + j ** 2 + k ** 2)) + it = mt.floor(mt.sqrt(i**2 + j**2 + k**2)) if it > min(nx // 2, ny // 2, nz // 2) - 1: continue err_F[:, :, it] += _err_F[:, :, i, j, k] _err_F = torch.sqrt(torch.mean(err_F, axis=0)) / (nx * ny * nz) * Lx * Ly * Lz err_F = torch.zeros([nc, 3, nt]).to(device) - err_F[:,0] += torch.mean(_err_F[:,:iLow], dim=1) # low freq - err_F[:,1] += torch.mean(_err_F[:,iLow:iHigh], dim=1) # middle freq - err_F[:,2] += torch.mean(_err_F[:,iHigh:], dim=1) # high freq + err_F[:, 0] += torch.mean(_err_F[:, :iLow], dim=1) # low freq + err_F[:, 1] += torch.mean(_err_F[:, iLow:iHigh], dim=1) # middle freq + err_F[:, 2] += torch.mean(_err_F[:, iHigh:], dim=1) # high freq if if_mean: - return torch.mean(err_RMSE, dim=[0, -1]), \ - torch.mean(err_nRMSE, dim=[0, -1]), \ - torch.mean(err_CSV, dim=[0, -1]), \ - torch.mean(err_Max, dim=[0, -1]), \ - torch.mean(err_BD, dim=[0, -1]), \ - torch.mean(err_F, dim=[0, -1]) + return ( + torch.mean(err_RMSE, dim=[0, -1]), + torch.mean(err_nRMSE, dim=[0, -1]), + torch.mean(err_CSV, dim=[0, -1]), + torch.mean(err_Max, dim=[0, -1]), + torch.mean(err_BD, dim=[0, -1]), + torch.mean(err_F, dim=[0, -1]), + ) else: return err_RMSE, err_nRMSE, err_CSV, err_Max, err_BD, err_F -def metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, model_name, x_min, - x_max, y_min, y_max, t_min, t_max, mode='FNO', initial_step=None, ): - if mode=='Unet': + +def metrics( + val_loader, + model, + Lx, + Ly, + Lz, + plot, + channel_plot, + model_name, + x_min, + x_max, + y_min, + y_max, + t_min, + t_max, + mode="FNO", + initial_step=None, +): + if mode == "Unet": with torch.no_grad(): itot = 0 for xx, yy in val_loader: @@ -287,24 +331,36 @@ def metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, model_name, x_min for t in range(initial_step, yy.shape[-2]): inp = xx.reshape(inp_shape) temp_shape = [0, -1] - temp_shape.extend([i for i in range(1,len(inp.shape)-1)]) + temp_shape.extend([i for i in range(1, len(inp.shape) - 1)]) inp = inp.permute(temp_shape) - - y = yy[..., t:t+1, :] - + + y = yy[..., t : t + 1, :] + temp_shape = [0] - temp_shape.extend([i for i in range(2,len(inp.shape))]) + temp_shape.extend([i for i in range(2, len(inp.shape))]) temp_shape.append(1) im = model(inp).permute(temp_shape).unsqueeze(-2) pred = torch.cat((pred, im), -2) xx = torch.cat((xx[..., 1:, :], im), dim=-2) - 
_err_RMSE, _err_nRMSE, _err_CSV, _err_Max, _err_BD, _err_F \ - = metric_func(pred, yy, if_mean=True, Lx=Lx, Ly=Ly, Lz=Lz) + ( + _err_RMSE, + _err_nRMSE, + _err_CSV, + _err_Max, + _err_BD, + _err_F, + ) = metric_func(pred, yy, if_mean=True, Lx=Lx, Ly=Ly, Lz=Lz) if itot == 0: - err_RMSE, err_nRMSE, err_CSV, err_Max, err_BD, err_F \ - = _err_RMSE, _err_nRMSE, _err_CSV, _err_Max, _err_BD, _err_F + err_RMSE, err_nRMSE, err_CSV, err_Max, err_BD, err_F = ( + _err_RMSE, + _err_nRMSE, + _err_CSV, + _err_Max, + _err_BD, + _err_F, + ) pred_plot = pred[:1] target_plot = yy[:1] val_l2_time = torch.zeros(yy.shape[-2]).to(device) @@ -315,15 +371,17 @@ def metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, model_name, x_min err_Max += _err_Max err_BD += _err_BD err_F += _err_F - - mean_dim = [i for i in range(len(yy.shape)-2)] + + mean_dim = [i for i in range(len(yy.shape) - 2)] mean_dim.append(-1) mean_dim = tuple(mean_dim) - val_l2_time += torch.sqrt(torch.mean((pred-yy)**2, dim=mean_dim)) - + val_l2_time += torch.sqrt( + torch.mean((pred - yy) ** 2, dim=mean_dim) + ) + itot += 1 - elif mode=='FNO': + elif mode == "FNO": with torch.no_grad(): itot = 0 for xx, yy, grid in val_loader: @@ -338,16 +396,28 @@ def metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, model_name, x_min for t in range(initial_step, yy.shape[-2]): inp = xx.reshape(inp_shape) - y = yy[..., t:t + 1, :] + y = yy[..., t : t + 1, :] im = model(inp, grid) pred = torch.cat((pred, im), -2) xx = torch.cat((xx[..., 1:, :], im), dim=-2) - _err_RMSE, _err_nRMSE, _err_CSV, _err_Max, _err_BD, _err_F \ - = metric_func(pred, yy, if_mean=True, Lx=Lx, Ly=Ly, Lz=Lz) + ( + _err_RMSE, + _err_nRMSE, + _err_CSV, + _err_Max, + _err_BD, + _err_F, + ) = metric_func(pred, yy, if_mean=True, Lx=Lx, Ly=Ly, Lz=Lz) if itot == 0: - err_RMSE, err_nRMSE, err_CSV, err_Max, err_BD, err_F \ - = _err_RMSE, _err_nRMSE, _err_CSV, _err_Max, _err_BD, _err_F + err_RMSE, err_nRMSE, err_CSV, err_Max, err_BD, err_F = ( + _err_RMSE, + _err_nRMSE, + _err_CSV, + _err_Max, + _err_BD, + _err_F, + ) pred_plot = pred[:1] target_plot = yy[:1] val_l2_time = torch.zeros(yy.shape[-2]).to(device) @@ -358,108 +428,135 @@ def metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, model_name, x_min err_Max += _err_Max err_BD += _err_BD err_F += _err_F - - mean_dim = [i for i in range(len(yy.shape)-2)] + + mean_dim = [i for i in range(len(yy.shape) - 2)] mean_dim.append(-1) mean_dim = tuple(mean_dim) - val_l2_time += torch.sqrt(torch.mean((pred-yy)**2, dim=mean_dim)) + val_l2_time += torch.sqrt( + torch.mean((pred - yy) ** 2, dim=mean_dim) + ) itot += 1 elif mode == "PINN": raise NotImplementedError + err_RMSE = np.array(err_RMSE.data.cpu() / itot) + err_nRMSE = np.array(err_nRMSE.data.cpu() / itot) + err_CSV = np.array(err_CSV.data.cpu() / itot) + err_Max = np.array(err_Max.data.cpu() / itot) + err_BD = np.array(err_BD.data.cpu() / itot) + err_F = np.array(err_F.data.cpu() / itot) + print(f"RMSE: {err_RMSE:.5f}") + print(f"normalized RMSE: {err_nRMSE:.5f}") + print(f"RMSE of conserved variables: {err_CSV:.5f}") + print(f"Maximum value of rms error: {err_Max:.5f}") + print(f"RMSE at boundaries: {err_BD:.5f}") + print(f"RMSE in Fourier space: {err_F}") + + val_l2_time = val_l2_time / itot - err_RMSE = np.array(err_RMSE.data.cpu()/itot) - err_nRMSE = np.array(err_nRMSE.data.cpu()/itot) - err_CSV = np.array(err_CSV.data.cpu()/itot) - err_Max = np.array(err_Max.data.cpu()/itot) - err_BD = np.array(err_BD.data.cpu()/itot) - err_F = np.array(err_F.data.cpu()/itot) - print('RMSE: 
{0:.5f}'.format(err_RMSE)) - print('normalized RMSE: {0:.5f}'.format(err_nRMSE)) - print('RMSE of conserved variables: {0:.5f}'.format(err_CSV)) - print('Maximum value of rms error: {0:.5f}'.format(err_Max)) - print('RMSE at boundaries: {0:.5f}'.format(err_BD)) - print('RMSE in Fourier space: {0}'.format(err_F)) - - val_l2_time = val_l2_time/itot - if plot: dim = len(yy.shape) - 3 plt.ioff() if dim == 1: - - fig, ax = plt.subplots(figsize=(6.5,6)) - h = ax.imshow(pred_plot[...,channel_plot].squeeze().detach().cpu(), - extent=[t_min, t_max, x_min, x_max], origin='lower', aspect='auto') - h.set_clim(target_plot[...,channel_plot].min(), target_plot[...,channel_plot].max()) + fig, ax = plt.subplots(figsize=(6.5, 6)) + h = ax.imshow( + pred_plot[..., channel_plot].squeeze().detach().cpu(), + extent=[t_min, t_max, x_min, x_max], + origin="lower", + aspect="auto", + ) + h.set_clim( + target_plot[..., channel_plot].min(), + target_plot[..., channel_plot].max(), + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cbar = fig.colorbar(h, cax=cax) cbar.ax.tick_params(labelsize=30) ax.set_title("Prediction", fontsize=30) - ax.tick_params(axis='x',labelsize=30) - ax.tick_params(axis='y',labelsize=30) + ax.tick_params(axis="x", labelsize=30) + ax.tick_params(axis="y", labelsize=30) ax.set_ylabel("$x$", fontsize=30) ax.set_xlabel("$t$", fontsize=30) plt.tight_layout() - filename = model_name + '_pred.pdf' + filename = model_name + "_pred.pdf" plt.savefig(filename) - - fig, ax = plt.subplots(figsize=(6.5,6)) - h = ax.imshow(target_plot[...,channel_plot].squeeze().detach().cpu(), - extent=[t_min, t_max, x_min, x_max], origin='lower', aspect='auto') - h.set_clim(target_plot[...,channel_plot].min(), target_plot[...,channel_plot].max()) + + fig, ax = plt.subplots(figsize=(6.5, 6)) + h = ax.imshow( + target_plot[..., channel_plot].squeeze().detach().cpu(), + extent=[t_min, t_max, x_min, x_max], + origin="lower", + aspect="auto", + ) + h.set_clim( + target_plot[..., channel_plot].min(), + target_plot[..., channel_plot].max(), + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cbar = fig.colorbar(h, cax=cax) cbar.ax.tick_params(labelsize=30) ax.set_title("Data", fontsize=30) - ax.tick_params(axis='x',labelsize=30) - ax.tick_params(axis='y',labelsize=30) + ax.tick_params(axis="x", labelsize=30) + ax.tick_params(axis="y", labelsize=30) ax.set_ylabel("$x$", fontsize=30) ax.set_xlabel("$t$", fontsize=30) plt.tight_layout() - filename = model_name + '_data.pdf' + filename = model_name + "_data.pdf" plt.savefig(filename) - + elif dim == 2: - - fig, ax = plt.subplots(figsize=(6.5,6)) - h = ax.imshow(pred_plot[...,-1,channel_plot].squeeze().t().detach().cpu(), - extent=[x_min, x_max, y_min, y_max], origin='lower', aspect='auto') - h.set_clim(target_plot[...,-1,channel_plot].min(), target_plot[...,-1,channel_plot].max()) + fig, ax = plt.subplots(figsize=(6.5, 6)) + h = ax.imshow( + pred_plot[..., -1, channel_plot].squeeze().t().detach().cpu(), + extent=[x_min, x_max, y_min, y_max], + origin="lower", + aspect="auto", + ) + h.set_clim( + target_plot[..., -1, channel_plot].min(), + target_plot[..., -1, channel_plot].max(), + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cbar = fig.colorbar(h, cax=cax) cbar.ax.tick_params(labelsize=30) ax.set_title("Prediction", fontsize=30) - ax.tick_params(axis='x',labelsize=30) - ax.tick_params(axis='y',labelsize=30) + ax.tick_params(axis="x", labelsize=30) + 
ax.tick_params(axis="y", labelsize=30) ax.set_ylabel("$y$", fontsize=30) ax.set_xlabel("$x$", fontsize=30) plt.tight_layout() - filename = model_name + '_pred.pdf' + filename = model_name + "_pred.pdf" plt.savefig(filename) - - fig, ax = plt.subplots(figsize=(6.5,6)) - h = ax.imshow(target_plot[...,-1,channel_plot].squeeze().t().detach().cpu(), - extent=[x_min, x_max, y_min, y_max], origin='lower', aspect='auto') - h.set_clim(target_plot[...,-1,channel_plot].min(), target_plot[...,-1,channel_plot].max()) + + fig, ax = plt.subplots(figsize=(6.5, 6)) + h = ax.imshow( + target_plot[..., -1, channel_plot].squeeze().t().detach().cpu(), + extent=[x_min, x_max, y_min, y_max], + origin="lower", + aspect="auto", + ) + h.set_clim( + target_plot[..., -1, channel_plot].min(), + target_plot[..., -1, channel_plot].max(), + ) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cbar = fig.colorbar(h, cax=cax) cbar.ax.tick_params(labelsize=30) ax.set_title("Data", fontsize=30) - ax.tick_params(axis='x',labelsize=30) - ax.tick_params(axis='y',labelsize=30) + ax.tick_params(axis="x", labelsize=30) + ax.tick_params(axis="y", labelsize=30) ax.set_ylabel("$y$", fontsize=30) ax.set_xlabel("$x$", fontsize=30) plt.tight_layout() - filename = model_name + '_data.pdf' + filename = model_name + "_data.pdf" plt.savefig(filename) - + # plt.figure(figsize=(8,8)) # plt.semilogy(torch.arange(initial_step,yy.shape[-2]), # val_l2_time[initial_step:].detach().cpu()) @@ -469,234 +566,236 @@ def metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, model_name, x_min # plt.tight_layout() # filename = model_name + '_mse_time.pdf' # plt.savefig(filename) - - filename = model_name + 'mse_time.npz' - np.savez(filename, t=torch.arange(initial_step,yy.shape[-2]).cpu(), - mse=val_l2_time[initial_step:].detach().cpu()) + + filename = model_name + "mse_time.npz" + np.savez( + filename, + t=torch.arange(initial_step, yy.shape[-2]).cpu(), + mse=val_l2_time[initial_step:].detach().cpu(), + ) return err_RMSE, err_nRMSE, err_CSV, err_Max, err_BD, err_F -# LpLoss Function -class LpLoss(object): +# LpLoss Function +class LpLoss: """ - Lp loss function + Lp loss function """ - def __init__(self, p=2, reduction='mean'): + + def __init__(self, p=2, reduction="mean"): super(LpLoss, self).__init__() - #Dimension and Lp-norm type are postive + # Dimension and Lp-norm type are positive assert p > 0 self.p = p self.reduction = reduction + def __call__(self, x, y, eps=1e-20): num_examples = x.size()[0] - _diff = x.view(num_examples,-1) - y.view(num_examples,-1) + _diff = x.view(num_examples, -1) - y.view(num_examples, -1) _diff = torch.norm(_diff, self.p, 1) - _norm = eps + torch.norm(y.view(num_examples,-1), self.p, 1) - if self.reduction in ['mean']: - return torch.mean(_diff/_norm) - if self.reduction in ['sum']: - return torch.sum(_diff/_norm) - return _diff/_norm - -# FftLoss Function -class FftLpLoss(object): + _norm = eps + torch.norm(y.view(num_examples, -1), self.p, 1) + if self.reduction in ["mean"]: + return torch.mean(_diff / _norm) + if self.reduction in ["sum"]: + return torch.sum(_diff / _norm) + return _diff / _norm + + +# FftLoss Function +class FftLpLoss: """ loss function in Fourier space June 2022, F.Alesiani """ - def __init__(self, p=2, reduction='mean'): + + def __init__(self, p=2, reduction="mean"): super(FftLpLoss, self).__init__() - #Dimension and Lp-norm type are postive + # Dimension and Lp-norm type are positive assert p > 0 self.p = p self.reduction = reduction - def 
__call__(self, x, y, flow=None,fhigh=None, eps=1e-20): + + def __call__(self, x, y, flow=None, fhigh=None, eps=1e-20): num_examples = x.size()[0] others_dims = x.shape[1:] - dims = list(range(1,len(x.shape))) - xf = torch.fft.fftn(x,dim=dims) - yf = torch.fft.fftn(y,dim=dims) - if flow is None: flow = 0 - if fhigh is None: fhigh = np.max(xf.shape[1:]) - - if len(others_dims) ==1: - xf = xf[:,flow:fhigh] - yf = yf[:,flow:fhigh] - if len(others_dims) ==2: - xf = xf[:,flow:fhigh,flow:fhigh] - yf = yf[:,flow:fhigh,flow:fhigh] - if len(others_dims) ==3: - xf = xf[:,flow:fhigh,flow:fhigh,flow:fhigh] - yf = yf[:,flow:fhigh,flow:fhigh,flow:fhigh] - if len(others_dims) ==4: - xf = xf[:,flow:fhigh,flow:fhigh,flow:fhigh,flow:fhigh] - yf = yf[:,flow:fhigh,flow:fhigh,flow:fhigh,flow:fhigh] + dims = list(range(1, len(x.shape))) + xf = torch.fft.fftn(x, dim=dims) + yf = torch.fft.fftn(y, dim=dims) + if flow is None: + flow = 0 + if fhigh is None: + fhigh = np.max(xf.shape[1:]) + + if len(others_dims) == 1: + xf = xf[:, flow:fhigh] + yf = yf[:, flow:fhigh] + if len(others_dims) == 2: + xf = xf[:, flow:fhigh, flow:fhigh] + yf = yf[:, flow:fhigh, flow:fhigh] + if len(others_dims) == 3: + xf = xf[:, flow:fhigh, flow:fhigh, flow:fhigh] + yf = yf[:, flow:fhigh, flow:fhigh, flow:fhigh] + if len(others_dims) == 4: + xf = xf[:, flow:fhigh, flow:fhigh, flow:fhigh, flow:fhigh] + yf = yf[:, flow:fhigh, flow:fhigh, flow:fhigh, flow:fhigh] _diff = xf - yf.reshape(xf.shape) - _diff = torch.norm(_diff.reshape(num_examples,-1), self.p, 1) - _norm = eps + torch.norm(yf.reshape(num_examples,-1), self.p, 1) - - if self.reduction in ['mean']: - return torch.mean(_diff/_norm) - if self.reduction in ['sum']: - return torch.sum(_diff/_norm) - return _diff/_norm - -import torch.nn.functional as F -# FftLoss Function -class FftMseLoss(object): + _diff = torch.norm(_diff.reshape(num_examples, -1), self.p, 1) + _norm = eps + torch.norm(yf.reshape(num_examples, -1), self.p, 1) + + if self.reduction in ["mean"]: + return torch.mean(_diff / _norm) + if self.reduction in ["sum"]: + return torch.sum(_diff / _norm) + return _diff / _norm + + +# FftLoss Function +class FftMseLoss: """ loss function in Fourier space June 2022, F.Alesiani """ - def __init__(self, reduction='mean'): + + def __init__(self, reduction="mean"): super(FftMseLoss, self).__init__() - #Dimension and Lp-norm type are postive + # Dimension and Lp-norm type are positive self.reduction = reduction - def __call__(self, x, y, flow=None,fhigh=None, eps=1e-20): + + def __call__(self, x, y, flow=None, fhigh=None, eps=1e-20): num_examples = x.size()[0] others_dims = x.shape[1:-2] for d in others_dims: - assert (d>1), "we expect the dimension to be the same and greater the 1" + assert d > 1, "we expect the dimensions to be the same and greater than 1" # print(others_dims) - dims = list(range(1,len(x.shape)-1)) - xf = torch.fft.fftn(x,dim=dims) - yf = torch.fft.fftn(y,dim=dims) - if flow is None: flow = 0 - if fhigh is None: fhigh = np.max(xf.shape[1:]) - - if len(others_dims) ==1: - xf = xf[:,flow:fhigh] - yf = yf[:,flow:fhigh] - if len(others_dims) ==2: - xf = xf[:,flow:fhigh,flow:fhigh] - yf = yf[:,flow:fhigh,flow:fhigh] - if len(others_dims) ==3: - xf = xf[:,flow:fhigh,flow:fhigh,flow:fhigh] - yf = yf[:,flow:fhigh,flow:fhigh,flow:fhigh] - if len(others_dims) ==4: - xf = xf[:,flow:fhigh,flow:fhigh,flow:fhigh,flow:fhigh] - yf = yf[:,flow:fhigh,flow:fhigh,flow:fhigh,flow:fhigh] + dims = list(range(1, len(x.shape) - 1)) + xf = torch.fft.fftn(x, dim=dims) + yf = torch.fft.fftn(y, 
-import torch.nn as nn -def inverse_metrics(u0,x,pred_u0,y): +def inverse_metrics(u0, x, pred_u0, y): """ - computes all the metrics in the base and fourier space + computes all the metrics in the base and Fourier space - u0: esimated initial condition, - pred_u0: prediction from the esimated initial condition, pred_u0 = model(u0) + u0: estimated initial condition, + pred_u0: prediction from the estimated initial condition, pred_u0 = model(u0) x: true initial condition y: true prediction, y = model(x) June 2022, F.Alesiani """ - + mseloss_fn = nn.MSELoss(reduction="mean") - l2loss_fn = LpLoss(p=2,reduction='mean') - l3loss_fn = LpLoss(p=3,reduction='mean') - + l2loss_fn = LpLoss(p=2, reduction="mean") + l3loss_fn = LpLoss(p=3, reduction="mean") + fftmseloss_fn = FftMseLoss(reduction="mean") - fftl2loss_fn = FftLpLoss(p=2,reduction="mean") - fftl3loss_fn = FftLpLoss(p=3,reduction="mean") + fftl2loss_fn = FftLpLoss(p=2, reduction="mean") + fftl3loss_fn = FftLpLoss(p=3, reduction="mean") - #initial condition + # initial condition mseloss_u0 = mseloss_fn(u0.view(1, -1), x.view(1, -1)).item() l2loss_u0 = l2loss_fn(u0.view(1, -1), x.view(1, -1)).item() l3loss_u0 = l3loss_fn(u0.view(1, -1), x.view(1, -1)).item() - - - fmid = u0.shape[1]//4 + + fmid = u0.shape[1] // 4 fftmseloss_u0 = fftmseloss_fn(u0, x).item() fftmseloss_low_u0 = fftmseloss_fn(u0, x, 0, fmid).item() - fftmseloss_mid_u0 = fftmseloss_fn(u0, x, fmid, 2*fmid).item() - fftmseloss_hi_u0 = fftmseloss_fn(u0, x, 2*fmid).item() - + fftmseloss_mid_u0 = fftmseloss_fn(u0, x, fmid, 2 * fmid).item() + fftmseloss_hi_u0 = fftmseloss_fn(u0, x, 2 * fmid).item() + fftl2loss_u0 = fftl2loss_fn(u0, x).item() fftl2loss_low_u0 = fftl2loss_fn(u0, x, 0, fmid).item() - fftl2loss_mid_u0 = fftl2loss_fn(u0, x, fmid, 2*fmid).item() - fftl2loss_hi_u0 = fftl2loss_fn(u0, x, 2*fmid).item() + fftl2loss_mid_u0 = fftl2loss_fn(u0, x, fmid, 2 * fmid).item() + fftl2loss_hi_u0 = fftl2loss_fn(u0, x, 2 * fmid).item() fftl3loss_u0 = fftl3loss_fn(u0, x).item() fftl3loss_low_u0 = fftl3loss_fn(u0, x, 0, fmid).item() - fftl3loss_mid_u0 = fftl3loss_fn(u0, x, fmid, 2*fmid).item() - fftl3loss_hi_u0 = fftl3loss_fn(u0, x, 2*fmid).item() + fftl3loss_mid_u0 = fftl3loss_fn(u0, x, fmid, 2 * fmid).item() + fftl3loss_hi_u0 = fftl3loss_fn(u0, x, 2 * fmid).item() - #prediction + # prediction mseloss_pred_u0 = mseloss_fn(pred_u0.reshape(1, -1), y.reshape(1, -1)).item() l2loss_pred_u0 = l2loss_fn(pred_u0.reshape(1, -1), y.reshape(1, -1)).item() l3loss_pred_u0 = l3loss_fn(pred_u0.reshape(1, -1), y.reshape(1, -1)).item() - fmid = pred_u0.shape[1]//4 + fmid = pred_u0.shape[1] // 4 pred_u0 = pred_u0.squeeze(-1) y = y.squeeze(-1) - + fftmseloss_pred_u0 = fftmseloss_fn(pred_u0, y).item()
fftmseloss_low_pred_u0 = fftmseloss_fn(pred_u0, y, 0, fmid).item() - fftmseloss_mid_pred_u0 = fftmseloss_fn(pred_u0, y, fmid, 2*fmid).item() - fftmseloss_hi_pred_u0 = fftmseloss_fn(pred_u0, y, 2*fmid).item() - + fftmseloss_mid_pred_u0 = fftmseloss_fn(pred_u0, y, fmid, 2 * fmid).item() + fftmseloss_hi_pred_u0 = fftmseloss_fn(pred_u0, y, 2 * fmid).item() + fftl2loss_pred_u0 = fftl2loss_fn(pred_u0, y).item() fftl2loss_low_pred_u0 = fftl2loss_fn(pred_u0, y, 0, fmid).item() - fftl2loss_mid_pred_u0 = fftl2loss_fn(pred_u0, y, fmid, 2*fmid).item() - fftl2loss_hi_pred_u0= fftl2loss_fn(pred_u0, y, 2*fmid).item() + fftl2loss_mid_pred_u0 = fftl2loss_fn(pred_u0, y, fmid, 2 * fmid).item() + fftl2loss_hi_pred_u0 = fftl2loss_fn(pred_u0, y, 2 * fmid).item() fftl3loss_pred_u0 = fftl3loss_fn(pred_u0, y).item() fftl3loss_low_pred_u0 = fftl3loss_fn(pred_u0, y, 0, fmid).item() - fftl3loss_mid_pred_u0 = fftl3loss_fn(pred_u0, y, fmid, 2*fmid).item() - fftl3loss_hi_pred_u0 = fftl3loss_fn(pred_u0, y, 2*fmid).item() + fftl3loss_mid_pred_u0 = fftl3loss_fn(pred_u0, y, fmid, 2 * fmid).item() + fftl3loss_hi_pred_u0 = fftl3loss_fn(pred_u0, y, 2 * fmid).item() metric = { - 'mseloss_u0': mseloss_u0 - ,'l2loss_u0': l2loss_u0 - ,'l3loss_u0': l3loss_u0 - - ,'mseloss_pred_u0': mseloss_pred_u0 - ,'l2loss_pred_u0': l2loss_pred_u0 - ,'l3loss_pred_u0': l3loss_pred_u0 - - - ,'fftmseloss_u0': fftmseloss_u0 - ,'fftmseloss_low_u0': fftmseloss_low_u0 - ,'fftmseloss_mid_u0': fftmseloss_mid_u0 - ,'fftmseloss_hi_u0': fftmseloss_hi_u0 - - - - ,'fftmseloss_pred_u0': fftmseloss_pred_u0 - ,'fftmseloss_low_pred_u0': fftmseloss_low_pred_u0 - ,'fftmseloss_mid_pred_u0': fftmseloss_mid_pred_u0 - ,'fftmseloss_hi_pred_u0': fftmseloss_hi_pred_u0 - - ,'fftl2loss_u0': fftl2loss_u0 - ,'fftl2loss_low_u0': fftl2loss_low_u0 - ,'fftl2loss_mid_u0': fftl2loss_mid_u0 - ,'fftl2loss_hi_u0': fftl2loss_hi_u0 - - ,'fftl2loss_pred_u0': fftl2loss_pred_u0 - ,'fftl2loss_low_pred_u0': fftl2loss_low_pred_u0 - ,'fftl2loss_mid_pred_u0': fftl2loss_mid_pred_u0 - ,'fftl2loss_hi_pred_u0': fftl2loss_hi_pred_u0 - - ,'fftl3loss_u0': fftl3loss_u0 - ,'fftl3loss_low_u0': fftl3loss_low_u0 - ,'fftl3loss_mid_u0': fftl3loss_mid_u0 - ,'fftl3loss_hi_u0': fftl3loss_hi_u0 - - ,'fftl3loss_pred_u0': fftl3loss_pred_u0 - ,'fftl3loss_low_pred_u0': fftl3loss_low_pred_u0 - ,'fftl3loss_mid_pred_u0': fftl3loss_mid_pred_u0 - ,'fftl3loss_hi_pred_u0': fftl3loss_hi_pred_u0 - } + "mseloss_u0": mseloss_u0, + "l2loss_u0": l2loss_u0, + "l3loss_u0": l3loss_u0, + "mseloss_pred_u0": mseloss_pred_u0, + "l2loss_pred_u0": l2loss_pred_u0, + "l3loss_pred_u0": l3loss_pred_u0, + "fftmseloss_u0": fftmseloss_u0, + "fftmseloss_low_u0": fftmseloss_low_u0, + "fftmseloss_mid_u0": fftmseloss_mid_u0, + "fftmseloss_hi_u0": fftmseloss_hi_u0, + "fftmseloss_pred_u0": fftmseloss_pred_u0, + "fftmseloss_low_pred_u0": fftmseloss_low_pred_u0, + "fftmseloss_mid_pred_u0": fftmseloss_mid_pred_u0, + "fftmseloss_hi_pred_u0": fftmseloss_hi_pred_u0, + "fftl2loss_u0": fftl2loss_u0, + "fftl2loss_low_u0": fftl2loss_low_u0, + "fftl2loss_mid_u0": fftl2loss_mid_u0, + "fftl2loss_hi_u0": fftl2loss_hi_u0, + "fftl2loss_pred_u0": fftl2loss_pred_u0, + "fftl2loss_low_pred_u0": fftl2loss_low_pred_u0, + "fftl2loss_mid_pred_u0": fftl2loss_mid_pred_u0, + "fftl2loss_hi_pred_u0": fftl2loss_hi_pred_u0, + "fftl3loss_u0": fftl3loss_u0, + "fftl3loss_low_u0": fftl3loss_low_u0, + "fftl3loss_mid_u0": fftl3loss_mid_u0, + "fftl3loss_hi_u0": fftl3loss_hi_u0, + "fftl3loss_pred_u0": fftl3loss_pred_u0, + "fftl3loss_low_pred_u0": fftl3loss_low_pred_u0, + "fftl3loss_mid_pred_u0": fftl3loss_mid_pred_u0, + "fftl3loss_hi_pred_u0": fftl3loss_hi_pred_u0, + } return metric
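inverse_metrics therefore returns a single flat dictionary covering base-space and band-limited Fourier metrics for both the recovered initial condition and its rollout, which makes it easy to dump into a logger or CSV. A small usage sketch (all tensors are random placeholders; real inputs come from the inverse pipeline):

import torch

u0 = torch.rand(1, 128, 33, 1)  # estimated initial condition (placeholder shape)
x = torch.rand(1, 128, 33, 1)  # true initial condition
pred_u0 = torch.rand(1, 128, 33, 1)  # model rollout from u0
y = torch.rand(1, 128, 33, 1)  # true rollout
scores = inverse_metrics(u0, x, pred_u0, y)
print(scores["l2loss_u0"], scores["fftl2loss_hi_pred_u0"])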
"fftl3loss_mid_pred_u0": fftl3loss_mid_pred_u0, + "fftl3loss_hi_pred_u0": fftl3loss_hi_pred_u0, + } return metric diff --git a/pdebench/models/pinn/pde_definitions.py b/pdebench/models/pinn/pde_definitions.py index 9369cf3..834fd37 100644 --- a/pdebench/models/pinn/pde_definitions.py +++ b/pdebench/models/pinn/pde_definitions.py @@ -1,6 +1,7 @@ +from __future__ import annotations + import deepxde as dde import numpy as np -import torch def reaction_1(u1, u2): @@ -14,7 +15,6 @@ def reaction_2(u1, u2): def pde_diffusion_reaction(x, y): - d1 = 1e-3 d2 = 5e-3 @@ -55,8 +55,8 @@ def pde_diffusion_sorption(x, y): ) return du1_t - D / retardation_factor * du1_xx - - + + def pde_swe1d(): raise NotImplementedError @@ -85,15 +85,18 @@ def pde_swe2d(x, y): return eq1 + eq2 + eq3 + def pde_adv1d(x, y, beta): dy_x = dde.grad.jacobian(y, x, i=0, j=0) dy_t = dde.grad.jacobian(y, x, i=0, j=1) return dy_t + beta * dy_x + def pde_diffusion_reaction_1d(x, y, nu, rho): dy_t = dde.grad.jacobian(y, x, i=0, j=1) dy_xx = dde.grad.hessian(y, x, i=0, j=0) - return dy_t - nu * dy_xx - rho * y * (1. - y) + return dy_t - nu * dy_xx - rho * y * (1.0 - y) + def pde_burgers1D(x, y, nu): dy_x = dde.grad.jacobian(y, x, i=0, j=0) @@ -101,11 +104,12 @@ def pde_burgers1D(x, y, nu): dy_xx = dde.grad.hessian(y, x, i=0, j=0) return dy_t + y * dy_x - nu / np.pi * dy_xx + def pde_CFD1d(x, y, gamma): h = y[..., 0].unsqueeze(1) # rho u = y[..., 1].unsqueeze(1) # v p = y[..., 2].unsqueeze(1) # p - E = p/(gamma - 1.) + 0.5 * h * u**2 + E = p / (gamma - 1.0) + 0.5 * h * u**2 E = E.unsqueeze(1) Fx = u * (E + p) Fx = Fx.unsqueeze(1) @@ -125,12 +129,13 @@ def pde_CFD1d(x, y, gamma): return eq1 + eq2 + eq3 + def pde_CFD2d(x, y, gamma): h = y[..., 0].unsqueeze(1) # rho ux = y[..., 1].unsqueeze(1) # vx uy = y[..., 2].unsqueeze(1) # vy p = y[..., 3].unsqueeze(1) # p - E = p/(gamma - 1.) + 0.5 * h * (ux**2 + uy**2) + E = p / (gamma - 1.0) + 0.5 * h * (ux**2 + uy**2) E = E.unsqueeze(1) Fx = ux * (E + p) Fx = Fx.unsqueeze(1) @@ -160,13 +165,14 @@ def pde_CFD2d(x, y, gamma): return eq1 + eq2 + eq3 + eq4 + def pde_CFD3d(x, y, gamma): h = y[..., 0].unsqueeze(1) # rho ux = y[..., 1].unsqueeze(1) # vx uy = y[..., 2].unsqueeze(1) # vy uz = y[..., 3].unsqueeze(1) # vz p = y[..., 4].unsqueeze(1) # p - E = p/(gamma - 1.) 
+ 0.5 * h * (ux**2 + uy**2 + uz**2) + E = p / (gamma - 1.0) + 0.5 * h * (ux**2 + uy**2 + uz**2) E = E.unsqueeze(1) Fx = ux * (E + p) Fx = Fx.unsqueeze(1) @@ -206,4 +212,4 @@ def pde_CFD3d(x, y, gamma): eq4 = h * (uz_t + ux * uz_x + uy * uz_y + uz * uz_z) - p_z eq5 = E_t + Fx_x + Fy_y + Fz_z - return eq1 + eq2 + eq3 + eq4 + eq5 \ No newline at end of file + return eq1 + eq2 + eq3 + eq4 + eq5 diff --git a/pdebench/models/pinn/train.py b/pdebench/models/pinn/train.py index 4d3ec2f..07de94a 100644 --- a/pdebench/models/pinn/train.py +++ b/pdebench/models/pinn/train.py @@ -1,37 +1,32 @@ """Backend supported: tensorflow.compat.v1, tensorflow, pytorch""" -import deepxde as dde -import numpy as np +from __future__ import annotations + import pickle + +import deepxde as dde import matplotlib.pyplot as plt -import os, sys +import numpy as np import torch - -from typing import Tuple - -from pdebench.models.pinn.utils import ( - PINNDatasetRadialDambreak, - PINNDatasetDiffReact, - PINNDataset2D, - PINNDatasetDiffSorption, - PINNDatasetBump, - PINNDataset1Dpde, - PINNDataset2Dpde, - PINNDataset3Dpde, -) +from pdebench.models.metrics import metric_func from pdebench.models.pinn.pde_definitions import ( - pde_diffusion_reaction, - pde_swe2d, - pde_diffusion_sorption, - pde_swe1d, pde_adv1d, - pde_diffusion_reaction_1d, pde_burgers1D, pde_CFD1d, pde_CFD2d, pde_CFD3d, + pde_diffusion_reaction, + pde_diffusion_reaction_1d, + pde_diffusion_sorption, + pde_swe2d, +) +from pdebench.models.pinn.utils import ( + PINNDataset1Dpde, + PINNDataset2D, + PINNDataset2Dpde, + PINNDataset3Dpde, + PINNDatasetDiffReact, + PINNDatasetDiffSorption, + PINNDatasetRadialDambreak, ) - -from pdebench.models.metrics import metrics, metric_func def setup_diffusion_sorption(filename, seed): @@ -93,6 +88,7 @@ def transform_output(x, y): return model, dataset + def setup_diffusion_reaction(filename, seed): # TODO: read from dataset config file geom = dde.geometry.Rectangle((-1, -1), (1, 1)) @@ -134,8 +130,7 @@ def setup_diffusion_reaction(filename, seed): return model, dataset -def setup_swe_2d(filename, seed) -> Tuple[dde.Model, PINNDataset2D]: - +def setup_swe_2d(filename, seed) -> tuple[dde.Model, PINNDataset2D]: dataset = PINNDatasetRadialDambreak(filename, seed) # TODO: read from dataset config file @@ -182,50 +177,63 @@ def setup_swe_2d(filename, seed) -> Tuple[dde.Model, PINNDataset2D]: return model, dataset + def _boundary_r(x, on_boundary, xL, xR): - return (on_boundary and np.isclose(x[0], xL)) or (on_boundary and np.isclose(x[0], xR)) - -def setup_pde1D(filename="1D_Advection_Sols_beta0.1.hdf5", - root_path='data', - val_batch_idx=-1, - input_ch=2, - output_ch=1, - hidden_ch=40, - xL=0., - xR=1., - if_periodic_bc=True, - aux_params=[0.1]): + return (on_boundary and np.isclose(x[0], xL)) or ( + on_boundary and np.isclose(x[0], xR) + ) + +def setup_pde1D( + filename="1D_Advection_Sols_beta0.1.hdf5", + root_path="data", + val_batch_idx=-1, + input_ch=2, + output_ch=1, + hidden_ch=40, + xL=0.0, + xR=1.0, + if_periodic_bc=True, + aux_params=[0.1], +): # TODO: read from dataset config file geom = dde.geometry.Interval(xL, xR) boundary_r = lambda x, on_boundary: _boundary_r(x, on_boundary, xL, xR) - if filename[0] == 'R': timedomain = dde.geometry.TimeDomain(0, 1.0) - pde = lambda x, y : pde_diffusion_reaction_1d(x, y, aux_params[0], aux_params[1]) + if filename[0] == "R": timedomain = dde.geometry.TimeDomain(0, 1.0) + pde = lambda x, y: pde_diffusion_reaction_1d(x, y, aux_params[0], aux_params[1]) else: - if filename.split('_')[1][0]=='A': + if filename.split("_")[1][0]
== "A": timedomain = dde.geometry.TimeDomain(0, 2.0) pde = lambda x, y: pde_adv1d(x, y, aux_params[0]) - elif filename.split('_')[1][0] == 'B': + elif filename.split("_")[1][0] == "B": timedomain = dde.geometry.TimeDomain(0, 2.0) pde = lambda x, y: pde_burgers1D(x, y, aux_params[0]) - elif filename.split('_')[1][0]=='C': + elif filename.split("_")[1][0] == "C": timedomain = dde.geometry.TimeDomain(0, 1.0) pde = lambda x, y: pde_CFD1d(x, y, aux_params[0]) geomtime = dde.geometry.GeometryXTime(geom, timedomain) - dataset = PINNDataset1Dpde(filename, root_path=root_path, val_batch_idx=val_batch_idx) + dataset = PINNDataset1Dpde( + filename, root_path=root_path, val_batch_idx=val_batch_idx + ) # prepare initial condition initial_input, initial_u = dataset.get_initial_condition() - if filename.split('_')[1][0] == 'C': - ic_data_d = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[:,0].unsqueeze(1), component=0) - ic_data_v = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[:,1].unsqueeze(1), component=1) - ic_data_p = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[:,2].unsqueeze(1), component=2) + if filename.split("_")[1][0] == "C": + ic_data_d = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[:, 0].unsqueeze(1), component=0 + ) + ic_data_v = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[:, 1].unsqueeze(1), component=1 + ) + ic_data_p = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[:, 2].unsqueeze(1), component=2 + ) else: ic_data_u = dde.icbc.PointSetBC(initial_input.cpu(), initial_u, component=0) # prepare boundary condition if if_periodic_bc: - if filename.split('_')[1][0] == 'C': + if filename.split("_")[1][0] == "C": bc_D = dde.icbc.PeriodicBC(geomtime, 0, boundary_r) bc_V = dde.icbc.PeriodicBC(geomtime, 1, boundary_r) bc_P = dde.icbc.PeriodicBC(geomtime, 2, boundary_r) @@ -250,7 +258,9 @@ def setup_pde1D(filename="1D_Advection_Sols_beta0.1.hdf5", ) else: ic = dde.icbc.IC( - geomtime, lambda x: -np.sin(np.pi * x[:, 0:1]), lambda _, on_initial: on_initial + geomtime, + lambda x: -np.sin(np.pi * x[:, 0:1]), + lambda _, on_initial: on_initial, ) bd_input, bd_uL, bd_uR = dataset.get_boundary_condition() bc_data_uL = dde.icbc.PointSetBC(bd_input.cpu(), bd_uL, component=0) @@ -264,74 +274,104 @@ def setup_pde1D(filename="1D_Advection_Sols_beta0.1.hdf5", num_boundary=1000, num_initial=5000, ) - net = dde.nn.FNN([input_ch] + [hidden_ch] * 6 + [output_ch], "tanh", "Glorot normal") + net = dde.nn.FNN( + [input_ch] + [hidden_ch] * 6 + [output_ch], "tanh", "Glorot normal" + ) model = dde.Model(data, net) return model, dataset -def setup_CFD2D(filename="2D_CFD_RAND_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5", - root_path='data', - val_batch_idx=-1, - input_ch=2, - output_ch=4, - hidden_ch=40, - xL=0., - xR=1., - yL=0., - yR=1., - if_periodic_bc=True, - aux_params=[1.6667]): +def setup_CFD2D( + filename="2D_CFD_RAND_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5", + root_path="data", + val_batch_idx=-1, + input_ch=2, + output_ch=4, + hidden_ch=40, + xL=0.0, + xR=1.0, + yL=0.0, + yR=1.0, + if_periodic_bc=True, + aux_params=[1.6667], +): # TODO: read from dataset config file geom = dde.geometry.Rectangle((-1, -1), (1, 1)) - timedomain = dde.geometry.TimeDomain(0., 1.0) + timedomain = dde.geometry.TimeDomain(0.0, 1.0) pde = lambda x, y: pde_CFD2d(x, y, aux_params[0]) geomtime = dde.geometry.GeometryXTime(geom, timedomain) - dataset = PINNDataset2Dpde(filename, root_path=root_path, val_batch_idx=val_batch_idx) + dataset = PINNDataset2Dpde( + filename, root_path=root_path, 
val_batch_idx=val_batch_idx + ) # prepare initial condition initial_input, initial_u = dataset.get_initial_condition() - ic_data_d = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,0].unsqueeze(1), component=0) - ic_data_vx = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,1].unsqueeze(1), component=1) - ic_data_vy = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,2].unsqueeze(1), component=2) - ic_data_p = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,3].unsqueeze(1), component=3) + ic_data_d = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 0].unsqueeze(1), component=0 + ) + ic_data_vx = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 1].unsqueeze(1), component=1 + ) + ic_data_vy = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 2].unsqueeze(1), component=2 + ) + ic_data_p = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 3].unsqueeze(1), component=3 + ) # prepare boundary condition bc = dde.icbc.PeriodicBC(geomtime, lambda x: 0, lambda _, on_boundary: on_boundary) data = dde.data.TimePDE( geomtime, pde, - [ic_data_d, ic_data_vx, ic_data_vy, ic_data_p],#, bc], + [ic_data_d, ic_data_vx, ic_data_vy, ic_data_p], # , bc], num_domain=1000, num_boundary=1000, num_initial=5000, ) - net = dde.nn.FNN([input_ch] + [hidden_ch] * 6 + [output_ch], "tanh", "Glorot normal") + net = dde.nn.FNN( + [input_ch] + [hidden_ch] * 6 + [output_ch], "tanh", "Glorot normal" + ) model = dde.Model(data, net) return model, dataset -def setup_CFD3D(filename="3D_CFD_RAND_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5", - root_path='data', - val_batch_idx=-1, - input_ch=2, - output_ch=4, - hidden_ch=40, - aux_params=[1.6667]): +def setup_CFD3D( + filename="3D_CFD_RAND_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5", + root_path="data", + val_batch_idx=-1, + input_ch=2, + output_ch=4, + hidden_ch=40, + aux_params=[1.6667], +): # TODO: read from dataset config file - geom = dde.geometry.Cuboid((0., 0., 0.), (1., 1., 1.)) - timedomain = dde.geometry.TimeDomain(0., 1.0) + geom = dde.geometry.Cuboid((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) + timedomain = dde.geometry.TimeDomain(0.0, 1.0) - pde = lambda x, y: pde_CFD2d(x, y, aux_params[0]) + pde = lambda x, y: pde_CFD3d(x, y, aux_params[0]) geomtime = dde.geometry.GeometryXTime(geom, timedomain) - dataset = PINNDataset3Dpde(filename, root_path=root_path, val_batch_idx=val_batch_idx) + dataset = PINNDataset3Dpde( + filename, root_path=root_path, val_batch_idx=val_batch_idx + ) # prepare initial condition initial_input, initial_u = dataset.get_initial_condition() - ic_data_d = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,0].unsqueeze(1), component=0) - ic_data_vx = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,1].unsqueeze(1), component=1) - ic_data_vy = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,2].unsqueeze(1), component=2) - ic_data_vz = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,3].unsqueeze(1), component=3) - ic_data_p = dde.icbc.PointSetBC(initial_input.cpu(), initial_u[...,4].unsqueeze(1), component=4) + ic_data_d = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 0].unsqueeze(1), component=0 + ) + ic_data_vx = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 1].unsqueeze(1), component=1 + ) + ic_data_vy = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 2].unsqueeze(1), component=2 + ) + ic_data_vz = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 3].unsqueeze(1), component=3 + ) + ic_data_p = dde.icbc.PointSetBC( + initial_input.cpu(), initial_u[..., 4].unsqueeze(1), component=4 + )
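Each dde.icbc.PointSetBC call above anchors one output component of the network to observed data at a fixed set of points; this is how these CFD setups impose their initial condition. A standalone sketch of the pattern (array shapes and names are illustrative, assuming deepxde is installed):

import numpy as np
import deepxde as dde

# hypothetical sample: 100 space-time points (x, y, z, t=0) and the observed density there
points = np.random.rand(100, 4)
rho_obs = np.random.rand(100, 1)
ic_rho = dde.icbc.PointSetBC(points, rho_obs, component=0)  # ties output component 0 to rho_obs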
# prepare boundary condition bc = dde.icbc.PeriodicBC(geomtime, lambda x: 0, lambda _, on_boundary: on_boundary) data = dde.data.TimePDE( @@ -342,16 +382,29 @@ def setup_CFD3D(filename="3D_CFD_RAND_Eta1.e-8_Zeta1.e-8_periodic_Train.hdf5", num_boundary=1000, num_initial=5000, ) - net = dde.nn.FNN([input_ch] + [hidden_ch] * 6 + [output_ch], "tanh", "Glorot normal") + net = dde.nn.FNN( + [input_ch] + [hidden_ch] * 6 + [output_ch], "tanh", "Glorot normal" + ) model = dde.Model(data, net) return model, dataset -def _run_training(scenario, epochs, learning_rate, model_update, flnm, - input_ch, output_ch, - root_path, val_batch_idx, if_periodic_bc, aux_params, - if_single_run, - seed): + +def _run_training( + scenario, + epochs, + learning_rate, + model_update, + flnm, + input_ch, + output_ch, + root_path, + val_batch_idx, + if_periodic_bc, + aux_params, + if_single_run, + seed, +): if scenario == "swe2d": model, dataset = setup_swe_2d(filename=flnm, seed=seed) n_components = 1 @@ -362,32 +415,38 @@ def _run_training(scenario, epochs, learning_rate, model_update, flnm, model, dataset = setup_diffusion_sorption(filename=flnm, seed=seed) n_components = 1 elif scenario == "pde1D": - model, dataset = setup_pde1D(filename=flnm, - root_path=root_path, - input_ch=input_ch, - output_ch=output_ch, - val_batch_idx=val_batch_idx, - if_periodic_bc=if_periodic_bc, - aux_params=aux_params) - if flnm.split('_')[1][0] == 'C': + model, dataset = setup_pde1D( + filename=flnm, + root_path=root_path, + input_ch=input_ch, + output_ch=output_ch, + val_batch_idx=val_batch_idx, + if_periodic_bc=if_periodic_bc, + aux_params=aux_params, + ) + if flnm.split("_")[1][0] == "C": n_components = 3 else: n_components = 1 elif scenario == "CFD2D": - model, dataset = setup_CFD2D(filename=flnm, - root_path=root_path, - input_ch=input_ch, - output_ch=output_ch, - val_batch_idx=val_batch_idx, - aux_params=aux_params) + model, dataset = setup_CFD2D( + filename=flnm, + root_path=root_path, + input_ch=input_ch, + output_ch=output_ch, + val_batch_idx=val_batch_idx, + aux_params=aux_params, + ) n_components = 4 - elif scenario == "CFD3D": - model, dataset = setup_CFD3D(filename=flnm, - root_path=root_path, - input_ch=input_ch, - output_ch=output_ch, - val_batch_idx=val_batch_idx, - aux_params=aux_params) + elif scenario == "CFD3D": + model, dataset = setup_CFD3D( + filename=flnm, + root_path=root_path, + input_ch=input_ch, + output_ch=output_ch, + val_batch_idx=val_batch_idx, + aux_params=aux_params, + ) n_components = 5 else: raise NotImplementedError(f"PINN training not implemented for {scenario}") @@ -455,21 +514,54 @@ def _run_training(scenario, epochs, learning_rate, model_update, flnm, else: return test_pred, test_gt, model_name -def run_training(scenario, epochs, learning_rate, model_update, flnm, - input_ch=1, output_ch=1, - root_path='../data/', val_num=10, if_periodic_bc=True, aux_params=[None], seed='0000'): +def run_training( + scenario, + epochs, + learning_rate, + model_update, + flnm, + input_ch=1, + output_ch=1, + root_path="../data/", + val_num=10, + if_periodic_bc=True, + aux_params=[None], + seed="0000", +): if val_num == 1: # single job - _run_training(scenario, epochs, learning_rate, model_update, flnm, - input_ch, output_ch, - root_path, -val_num, if_periodic_bc, aux_params, - if_single_run=True, seed=seed) + _run_training( + scenario, + epochs, + learning_rate, + model_update, + flnm, + input_ch, + output_ch, + root_path, + -val_num, + if_periodic_bc, + aux_params, + if_single_run=True, + seed=seed, + ) else: for
val_batch_idx in range(-1, -val_num, -1): - test_pred, test_gt, model_name = _run_training(scenario, epochs, learning_rate, model_update, flnm, - input_ch, output_ch, - root_path, val_batch_idx, if_periodic_bc, aux_params, - if_single_run=False, seed=seed) + test_pred, test_gt, model_name = _run_training( + scenario, + epochs, + learning_rate, + model_update, + flnm, + input_ch, + output_ch, + root_path, + val_batch_idx, + if_periodic_bc, + aux_params, + if_single_run=False, + seed=seed, + ) if val_batch_idx == -1: pred, target = test_pred.unsqueeze(0), test_gt.unsqueeze(0) else: diff --git a/pdebench/models/pinn/utils.py b/pdebench/models/pinn/utils.py index ba11980..91d13a3 100644 --- a/pdebench/models/pinn/utils.py +++ b/pdebench/models/pinn/utils.py @@ -1,18 +1,17 @@ -# -*- coding: utf-8 -*- """ Created on Wed Apr 20 09:43:15 2022 @author: timot """ +from __future__ import annotations -import numpy as np -import torch -from torch.utils.data import Dataset -from torch.utils.data import DataLoader import os + import h5py -from omegaconf import DictConfig, OmegaConf +import numpy as np +import torch import yaml +from torch.utils.data import Dataset class PINNDataset1D(Dataset): @@ -319,8 +318,9 @@ def get_initial_condition(self): return (self.data_input[:Nx, :], np.expand_dims(u0, 1)) + class PINNDataset1Dpde(Dataset): - def __init__(self, filename, root_path='data', val_batch_idx=-1): + def __init__(self, filename, root_path="data", val_batch_idx=-1): """ :param filename: filename that contains the dataset :type filename: STR @@ -343,9 +343,10 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1): # main data keys = list(h5_file.keys()) keys.sort() - if 'tensor' in keys: - self.data_output = torch.tensor(np.array(h5_file["tensor"][val_batch_idx]), - dtype=torch.float) + if "tensor" in keys: + self.data_output = torch.tensor( + np.array(h5_file["tensor"][val_batch_idx]), dtype=torch.float + ) # permute from [t, x] -> [x, t] self.data_output = self.data_output.T @@ -358,12 +359,14 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1): _data1 = np.array(h5_file["density"][val_batch_idx]) _data2 = np.array(h5_file["Vx"][val_batch_idx]) _data3 = np.array(h5_file["pressure"][val_batch_idx]) - _data = np.concatenate([_data1[...,None], _data2[...,None], _data3[...,None]], axis=-1) + _data = np.concatenate( + [_data1[..., None], _data2[..., None], _data3[..., None]], axis=-1 + ) # permute from [t, x] -> [x, t] _data = np.transpose(_data, (1, 0, 2)) self.data_output = torch.tensor(_data, dtype=torch.float) - del(_data, _data1, _data2, _data3) + del (_data, _data1, _data2, _data3) # for init/boundary conditions self.init_data = self.data_output[:, 0] @@ -371,7 +374,7 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1): self.bd_data_R = self.data_output[-1] self.tdim = self.data_output.size(1) - self.data_grid_t = self.data_grid_t[:self.tdim] + self.data_grid_t = self.data_grid_t[: self.tdim] XX, TT = torch.meshgrid( [self.data_grid_x, self.data_grid_t], @@ -381,18 +384,18 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1): self.data_input = torch.vstack([XX.ravel(), TT.ravel()]).T h5_file.close() - if 'tensor' in keys: + if "tensor" in keys: self.data_output = self.data_output.reshape(-1, 1) else: self.data_output = self.data_output.reshape(-1, 3) def get_initial_condition(self): # return (self.data_grid_x[:, None], self.init_data) - return (self.data_input[::self.tdim, :], self.init_data) + return (self.data_input[:: self.tdim, :], 
self.init_data) def get_boundary_condition(self): # return (self.data_grid_t[:self.nt, None], self.bd_data_L, self.bd_data_R) - return (self.data_input[:self.xdim, :], self.bd_data_L, self.bd_data_R) + return (self.data_input[: self.xdim, :], self.bd_data_L, self.bd_data_R) def get_test_data(self, n_last_time_steps, n_components=1): n_x = len(self.data_grid_x) @@ -442,8 +445,9 @@ def __len__(self): def __getitem__(self, idx): return self.data_input[idx, :], self.data_output[idx] + class PINNDataset2Dpde(Dataset): - def __init__(self, filename, root_path='data', val_batch_idx=-1, rdc_x=9, rdc_y=9): + def __init__(self, filename, root_path="data", val_batch_idx=-1, rdc_x=9, rdc_y=9): """ :param filename: filename that contains the dataset :type filename: STR @@ -476,24 +480,31 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1, rdc_x=9, rdc_y= _data2 = np.array(h5_file["Vx"][val_batch_idx]) _data3 = np.array(h5_file["Vy"][val_batch_idx]) _data4 = np.array(h5_file["pressure"][val_batch_idx]) - _data = np.concatenate([_data1[...,None], _data2[...,None], _data3[...,None], _data4[...,None]], - axis=-1) + _data = np.concatenate( + [ + _data1[..., None], + _data2[..., None], + _data3[..., None], + _data4[..., None], + ], + axis=-1, + ) # permute from [t, x, y, v] -> [x, y, t, v] _data = np.transpose(_data, (1, 2, 0, 3)) _data = _data[::rdc_x, ::rdc_y] self.data_output = torch.tensor(_data, dtype=torch.float) - del(_data, _data1, _data2, _data3, _data4) + del (_data, _data1, _data2, _data3, _data4) # for init/boundary conditions self.init_data = self.data_output[..., 0, :] self.bd_data_xL = self.data_output[0] self.bd_data_xR = self.data_output[-1] - self.bd_data_yL = self.data_output[:,0] - self.bd_data_yR = self.data_output[:,-1] + self.bd_data_yL = self.data_output[:, 0] + self.bd_data_yR = self.data_output[:, -1] self.tdim = self.data_output.size(2) - self.data_grid_t = self.data_grid_t[:self.tdim] + self.data_grid_t = self.data_grid_t[: self.tdim] XX, YY, TT = torch.meshgrid( [self.data_grid_x, self.data_grid_y, self.data_grid_t], @@ -507,14 +518,17 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1, rdc_x=9, rdc_y= def get_initial_condition(self): # return (self.data_grid_x[:, None], self.init_data) - return (self.data_input[::self.tdim, :], self.init_data) + return (self.data_input[:: self.tdim, :], self.init_data) def get_boundary_condition(self): # return (self.data_grid_t[:self.nt, None], self.bd_data_L, self.bd_data_R) - return (self.data_input[:self.xdim*self.ydim, :], - self.bd_data_xL, self.bd_data_xR, - self.bd_data_yL, self.bd_data_yR - ) + return ( + self.data_input[: self.xdim * self.ydim, :], + self.bd_data_xL, + self.bd_data_xR, + self.bd_data_yL, + self.bd_data_yR, + ) def get_test_data(self, n_last_time_steps, n_components=1): n_x = len(self.data_grid_x) @@ -565,8 +579,11 @@ def __len__(self): def __getitem__(self, idx): return self.data_input[idx, :], self.data_output[idx].unsqueeze(1) + class PINNDataset3Dpde(Dataset): - def __init__(self, filename, root_path='data', val_batch_idx=-1, rdc_x=2, rdc_y=2, rdc_z=2): + def __init__( + self, filename, root_path="data", val_batch_idx=-1, rdc_x=2, rdc_y=2, rdc_z=2 + ): """ :param filename: filename that contains the dataset :type filename: STR @@ -607,48 +624,62 @@ def __init__(self, filename, root_path='data', val_batch_idx=-1, rdc_x=2, rdc_y= _data3 = np.array(h5_file["Vy"][val_batch_idx]) _data4 = np.array(h5_file["Vz"][val_batch_idx]) _data5 = np.array(h5_file["pressure"][val_batch_idx]) - _data = 
np.concatenate([_data1[...,None], _data2[...,None], _data3[...,None], _data4[...,None], _data5[...,None]], - axis=-1) + _data = np.concatenate( + [ + _data1[..., None], + _data2[..., None], + _data3[..., None], + _data4[..., None], + _data5[..., None], + ], + axis=-1, + ) # permute from [t, x, y, z, v] -> [x, y, z, t, v] _data = np.transpose(_data, (1, 2, 3, 0, 4)) _data = _data[::rdc_x, ::rdc_y, ::rdc_z] self.data_output = torch.tensor(_data, dtype=torch.float) - del(_data, _data1, _data2, _data3, _data4, _data5) + del (_data, _data1, _data2, _data3, _data4, _data5) # for init/boundary conditions self.init_data = self.data_output[..., 0, :] self.bd_data_xL = self.data_output[0] self.bd_data_xR = self.data_output[-1] - self.bd_data_yL = self.data_output[:,0] - self.bd_data_yR = self.data_output[:,-1] - self.bd_data_zL = self.data_output[:,:,0] - self.bd_data_zR = self.data_output[:,:,-1] + self.bd_data_yL = self.data_output[:, 0] + self.bd_data_yR = self.data_output[:, -1] + self.bd_data_zL = self.data_output[:, :, 0] + self.bd_data_zR = self.data_output[:, :, -1] self.tdim = self.data_output.size(3) - self.data_grid_t = self.data_grid_t[:self.tdim] + self.data_grid_t = self.data_grid_t[: self.tdim] XX, YY, ZZ, TT = torch.meshgrid( [self.data_grid_x, self.data_grid_y, self.data_grid_z, self.data_grid_t], indexing="ij", ) - self.data_input = torch.vstack([XX.ravel(), YY.ravel(), ZZ.ravel(), TT.ravel()]).T + self.data_input = torch.vstack( + [XX.ravel(), YY.ravel(), ZZ.ravel(), TT.ravel()] + ).T h5_file.close() self.data_output = self.data_output.reshape(-1, 5) def get_initial_condition(self): # return (self.data_grid_x[:, None], self.init_data) - return (self.data_input[::self.tdim, :], self.init_data) + return (self.data_input[:: self.tdim, :], self.init_data) def get_boundary_condition(self): # return (self.data_grid_t[:self.nt, None], self.bd_data_L, self.bd_data_R) - return (self.data_input[:self.xdim*self.ydim*self.zdim, :], - self.bd_data_xL, self.bd_data_xR, - self.bd_data_yL, self.bd_data_yR, - self.bd_data_zL, self.bd_data_zR, - ) + return ( + self.data_input[: self.xdim * self.ydim * self.zdim, :], + self.bd_data_xL, + self.bd_data_xR, + self.bd_data_yL, + self.bd_data_yR, + self.bd_data_zL, + self.bd_data_zR, + ) def get_test_data(self, n_last_time_steps, n_components=1): n_x = len(self.data_grid_x) @@ -671,7 +702,12 @@ def get_test_data(self, n_last_time_steps, n_components=1): test_output = test_output[:, :, :, -n_last_time_steps:, :] test_input = torch.vstack( - [test_input_x.ravel(), test_input_y.ravel(), test_input_z.ravel(), test_input_t.ravel()] + [ + test_input_x.ravel(), + test_input_y.ravel(), + test_input_z.ravel(), + test_input_t.ravel(), + ] ).T # stack depending on number of output components @@ -692,7 +728,9 @@ def unravel_tensor(self, raveled_tensor, n_last_time_steps, n_components=1): n_x = len(self.data_grid_x) n_y = len(self.data_grid_y) n_z = len(self.data_grid_z) - return raveled_tensor.reshape((1, n_x, n_y, n_z, n_last_time_steps, n_components)) + return raveled_tensor.reshape( + (1, n_x, n_y, n_z, n_last_time_steps, n_components) + ) def generate_plot_input(self, time=1.0): return None @@ -701,4 +739,4 @@ def __len__(self): return len(self.data_output) def __getitem__(self, idx): - return self.data_input[idx, :], self.data_output[idx].unsqueeze(1) \ No newline at end of file + return self.data_input[idx, :], self.data_output[idx].unsqueeze(1) diff --git a/pdebench/models/run_forward_1D.sh b/pdebench/models/run_forward_1D.sh index 9128b3e..82b2acd 100644 --- 
a/pdebench/models/run_forward_1D.sh +++ b/pdebench/models/run_forward_1D.sh @@ -1,3 +1,4 @@ +#!/bin/bash ## 'FNO' # Advection CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_Adv.yaml ++args.filename='1D_Advection_Sols_beta0.1.hdf5' ++args.model_name='FNO' @@ -65,7 +66,7 @@ CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d.yaml ++args.filename='ReacDiff_Nu0.5_Rho10.0.hdf5' ++args.aux_params=[0.5,10.] ++args.val_time=0.5 CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d.yaml ++args.filename='ReacDiff_Nu2.0_Rho1.0.hdf5' ++args.aux_params=[2.,1.] ++args.val_time=0.5 CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d.yaml ++args.filename='ReacDiff_Nu2.0_Rho10.0.hdf5' ++args.aux_params=[2.,10.] ++args.val_time=0.5 -# Burgers Eq. +# Burgers Eq. CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d.yaml ++args.filename='1D_Burgers_Sols_Nu0.001.hdf5' ++args.aux_params=[0.001] CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d.yaml ++args.filename='1D_Burgers_Sols_Nu0.01.hdf5' ++args.aux_params=[0.01] CUDA_VISIBLE_DEVICES='0' python3 train_models_forward.py +args=config_pinn_pde1d.yaml ++args.filename='1D_Burgers_Sols_Nu0.1.hdf5' ++args.aux_params=[0.1] diff --git a/pdebench/models/run_inverse.sh b/pdebench/models/run_inverse.sh index 2242dde..fe828e7 100644 --- a/pdebench/models/run_inverse.sh +++ b/pdebench/models/run_inverse.sh @@ -1,27 +1,25 @@ -# /bin/bash +#! /bin/bash # F.Alesiani, 2022, June 6th # Train forward model HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/Advection/Train/1D_Advection_Sols_beta4.0.hdf5' ++args.model_name='FNO' HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/Burgers/Train/1D_Burgers_Sols_Nu1.0.hdf5' ++args.model_name='FNO' HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/ReactionDiffusion/Train/ReacDiff_Nu1.0_Rho2.0.hdf5' ++args.model_name='FNO' -HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='FNO' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 +HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='FNO' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/Advection/Train/1D_Advection_Sols_beta4.0.hdf5' ++args.model_name='Unet' HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/Burgers/Train/1D_Burgers_Sols_Nu1.0.hdf5' ++args.model_name='Unet' HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/ReactionDiffusion/Train/ReacDiff_Nu1.0_Rho2.0.hdf5' ++args.model_name='Unet' -HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='Unet' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 +HYDRA_FULL_ERROR=1 python3 train_models_inverse.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='Unet' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 # Inverse HYDRA_FULL_ERROR=1 python3 inverse/train.py 
++args.filename='/1D/Advection/Train/1D_Advection_Sols_beta4.0.hdf5' ++args.model_name='FNO' HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/Burgers/Train/1D_Burgers_Sols_Nu1.0.hdf5' ++args.model_name='FNO' HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/ReactionDiffusion/Train/ReacDiff_Nu1.0_Rho2.0.hdf5' ++args.model_name='FNO' -HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='FNO' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 +HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='FNO' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/Advection/Train/1D_Advection_Sols_beta4.0.hdf5' ++args.model_name='Unet' HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/Burgers/Train/1D_Burgers_Sols_Nu1.0.hdf5' ++args.model_name='Unet' HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/ReactionDiffusion/Train/ReacDiff_Nu1.0_Rho2.0.hdf5' ++args.model_name='Unet' -HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='Unet' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 - - +HYDRA_FULL_ERROR=1 python3 inverse/train.py ++args.filename='/1D/CFD/Train/1D_CFD_Shock_trans_Train.hdf5' ++args.model_name='Unet' ++args.in_channels=3 ++args.out_channels=3 ++args.num_channels=3 ++args.final_time=5 diff --git a/pdebench/models/train_models_forward.py b/pdebench/models/train_models_forward.py index 5ebd40a..8ca3f75 100644 --- a/pdebench/models/train_models_forward.py +++ b/pdebench/models/train_models_forward.py @@ -145,22 +145,17 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. """ -import sys, os +from __future__ import annotations + import hydra from omegaconf import DictConfig -import operator -from functools import reduce -from functools import partial - -from timeit import default_timer - - @hydra.main(version_base="1.2", config_path="config", config_name="config_rdb") def main(cfg: DictConfig): if cfg.args.model_name == "FNO": from pdebench.models.fno.train import run_training as run_training_FNO + print("FNO") run_training_FNO( if_training=cfg.args.if_training, @@ -195,6 +190,7 @@ def main(cfg: DictConfig): ) elif cfg.args.model_name == "Unet": from pdebench.models.unet.train import run_training as run_training_Unet + print("Unet") run_training_Unet( if_training=cfg.args.if_training, @@ -231,6 +227,7 @@ def main(cfg: DictConfig): elif cfg.args.model_name == "PINN": # not importing globally as DeepXDE changes some global PyTorch settings from pdebench.models.pinn.train import run_training as run_training_PINN + print("PINN") run_training_PINN( scenario=cfg.args.scenario, @@ -244,7 +241,7 @@ def main(cfg: DictConfig): root_path=cfg.args.root_path, val_num=cfg.args.val_num, if_periodic_bc=cfg.args.if_periodic_bc, - aux_params=cfg.args.aux_params + aux_params=cfg.args.aux_params, ) diff --git a/pdebench/models/train_models_inverse.py b/pdebench/models/train_models_inverse.py index c5099be..918b9c6 100644 --- a/pdebench/models/train_models_inverse.py +++ b/pdebench/models/train_models_inverse.py @@ -144,90 +144,86 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
""" -import sys, os +from __future__ import annotations + import hydra from omegaconf import DictConfig - -import operator -from functools import reduce -from functools import partial - -from timeit import default_timer - from pdebench.models.fno.train import run_training as run_training_FNO from pdebench.models.pinn.train import run_training as run_training_PINN from pdebench.models.unet.train import run_training as run_training_Unet -@hydra.main(config_path='config', config_name='config') +@hydra.main(config_path="config", config_name="config") def main(cfg: DictConfig): print(cfg.args) - if cfg.args.model_name=='FNO': - print('FNO') - run_training_FNO(if_training=cfg.args.if_training, - continue_training=cfg.args.continue_training, - num_workers=cfg.args.num_workers, - modes=cfg.args.modes, - width=cfg.args.width, - initial_step=cfg.args.initial_step, - t_train=cfg.args.t_train, - num_channels=cfg.args.num_channels, - batch_size=cfg.args.batch_size, - epochs=cfg.args.epochs, - learning_rate=cfg.args.learning_rate, - scheduler_step=cfg.args.scheduler_step, - scheduler_gamma=cfg.args.scheduler_gamma, - model_update=cfg.args.model_update, - flnm=cfg.args.filename, - single_file=cfg.args.single_file, - reduced_resolution=cfg.args.reduced_resolution, - reduced_resolution_t=cfg.args.reduced_resolution_t, - reduced_batch=cfg.args.reduced_batch, - plot=cfg.args.plot, - channel_plot=cfg.args.channel_plot, - x_min=cfg.args.x_min, - x_max=cfg.args.x_max, - y_min=cfg.args.y_min, - y_max=cfg.args.y_max, - t_min=cfg.args.t_min, - t_max=cfg.args.t_max, - base_path = cfg.args.base_path, - training_type = cfg.args.training_type - ) - elif cfg.args.model_name=='Unet': - print('Unet') - run_training_Unet(if_training=cfg.args.if_training, - continue_training=cfg.args.continue_training, - num_workers=cfg.args.num_workers, - initial_step=cfg.args.initial_step, - t_train=cfg.args.t_train, - in_channels=cfg.args.in_channels, - out_channels=cfg.args.out_channels, - batch_size=cfg.args.batch_size, - unroll_step=cfg.args.unroll_step, - ar_mode=cfg.args.ar_mode, - pushforward=cfg.args.pushforward, - epochs=cfg.args.epochs, - learning_rate=cfg.args.learning_rate, - scheduler_step=cfg.args.scheduler_step, - scheduler_gamma=cfg.args.scheduler_gamma, - model_update=cfg.args.model_update, - flnm=cfg.args.filename, - single_file=cfg.args.single_file, - reduced_resolution=cfg.args.reduced_resolution, - reduced_resolution_t=cfg.args.reduced_resolution_t, - reduced_batch=cfg.args.reduced_batch, - plot=cfg.args.plot, - channel_plot=cfg.args.channel_plot, - x_min=cfg.args.x_min, - x_max=cfg.args.x_max, - y_min=cfg.args.y_min, - y_max=cfg.args.y_max, - t_min=cfg.args.t_min, - t_max=cfg.args.t_max, - base_path = cfg.args.base_path, - training_type = cfg.args.training_type - ) + if cfg.args.model_name == "FNO": + print("FNO") + run_training_FNO( + if_training=cfg.args.if_training, + continue_training=cfg.args.continue_training, + num_workers=cfg.args.num_workers, + modes=cfg.args.modes, + width=cfg.args.width, + initial_step=cfg.args.initial_step, + t_train=cfg.args.t_train, + num_channels=cfg.args.num_channels, + batch_size=cfg.args.batch_size, + epochs=cfg.args.epochs, + learning_rate=cfg.args.learning_rate, + scheduler_step=cfg.args.scheduler_step, + scheduler_gamma=cfg.args.scheduler_gamma, + model_update=cfg.args.model_update, + flnm=cfg.args.filename, + single_file=cfg.args.single_file, + reduced_resolution=cfg.args.reduced_resolution, + reduced_resolution_t=cfg.args.reduced_resolution_t, + 
reduced_batch=cfg.args.reduced_batch, + plot=cfg.args.plot, + channel_plot=cfg.args.channel_plot, + x_min=cfg.args.x_min, + x_max=cfg.args.x_max, + y_min=cfg.args.y_min, + y_max=cfg.args.y_max, + t_min=cfg.args.t_min, + t_max=cfg.args.t_max, + base_path=cfg.args.base_path, + training_type=cfg.args.training_type, + ) + elif cfg.args.model_name == "Unet": + print("Unet") + run_training_Unet( + if_training=cfg.args.if_training, + continue_training=cfg.args.continue_training, + num_workers=cfg.args.num_workers, + initial_step=cfg.args.initial_step, + t_train=cfg.args.t_train, + in_channels=cfg.args.in_channels, + out_channels=cfg.args.out_channels, + batch_size=cfg.args.batch_size, + unroll_step=cfg.args.unroll_step, + ar_mode=cfg.args.ar_mode, + pushforward=cfg.args.pushforward, + epochs=cfg.args.epochs, + learning_rate=cfg.args.learning_rate, + scheduler_step=cfg.args.scheduler_step, + scheduler_gamma=cfg.args.scheduler_gamma, + model_update=cfg.args.model_update, + flnm=cfg.args.filename, + single_file=cfg.args.single_file, + reduced_resolution=cfg.args.reduced_resolution, + reduced_resolution_t=cfg.args.reduced_resolution_t, + reduced_batch=cfg.args.reduced_batch, + plot=cfg.args.plot, + channel_plot=cfg.args.channel_plot, + x_min=cfg.args.x_min, + x_max=cfg.args.x_max, + y_min=cfg.args.y_min, + y_max=cfg.args.y_max, + t_min=cfg.args.t_min, + t_max=cfg.args.t_max, + base_path=cfg.args.base_path, + training_type=cfg.args.training_type, + ) elif cfg.args.model_name == "PINN": print("PINN") run_training_PINN( @@ -239,6 +235,7 @@ def main(cfg: DictConfig): seed=cfg.args.seed, ) + if __name__ == "__main__": main() - print("Done.") \ No newline at end of file + print("Done.") diff --git a/pdebench/models/unet/train.py b/pdebench/models/unet/train.py index 7b301c9..30bdcfa 100644 --- a/pdebench/models/unet/train.py +++ b/pdebench/models/unet/train.py @@ -1,361 +1,402 @@ -# -*- coding: utf-8 -*- +from __future__ import annotations -import sys -import torch -import numpy as np +import logging import pickle -import torch.nn as nn -import torch.nn.functional as F - -import operator -from functools import reduce -from functools import partial - +from pathlib import Path from timeit import default_timer +import numpy as np +import torch +from pdebench.models.metrics import metrics +from pdebench.models.unet.unet import UNet1d, UNet2d, UNet3d +from pdebench.models.unet.utils import UNetDatasetMult, UNetDatasetSingle +from torch import nn + # torch.manual_seed(0) # np.random.seed(0) +logging.basicConfig(level=logging.INFO, filename=__name__) +logging.root.setLevel(logging.INFO) -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -from pdebench.models.unet.unet import UNet1d, UNet2d, UNet3d -from pdebench.models.unet.utils import UNetDatasetSingle, UNetDatasetMult -from pdebench.models.metrics import metrics -def run_training(if_training, - continue_training, - num_workers, - initial_step, - t_train, - in_channels, - out_channels, - batch_size, - unroll_step, - ar_mode, - pushforward, - epochs, - learning_rate, - scheduler_step, - scheduler_gamma, - model_update, - flnm, - single_file, - reduced_resolution, - reduced_resolution_t, - reduced_batch, - plot, - channel_plot, - x_min, - x_max, - y_min, - y_max, - t_min, - t_max, - base_path='../data/', - training_type='autoregressive' - ): - - print(f'Epochs = {epochs}, learning rate = {learning_rate}, scheduler step = {scheduler_step}, scheduler gamma = 
{scheduler_gamma}') - +def run_training( + if_training, + continue_training, + num_workers, + initial_step, + t_train, + in_channels, + out_channels, + batch_size, + unroll_step, + ar_mode, + pushforward, + epochs, + learning_rate, + scheduler_step, + scheduler_gamma, + model_update, + flnm, + single_file, + reduced_resolution, + reduced_resolution_t, + reduced_batch, + plot, + channel_plot, + x_min, + x_max, + y_min, + y_max, + t_min, + t_max, + base_path="../data/", + training_type="autoregressive", +): + msg = f"Epochs = {epochs}, learning rate = {learning_rate}, scheduler step = {scheduler_step}, scheduler gamma = {scheduler_gamma}" + logging.info(msg) + ################################################################ # load data ################################################################ - + if single_file: # filename - model_name = flnm[:-5] + '_Unet' - + model_name = flnm[:-5] + "_Unet" + # Initialize the dataset and dataloader - train_data = UNetDatasetSingle(flnm, - saved_folder=base_path, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - initial_step=initial_step) - val_data = UNetDatasetSingle(flnm, - saved_folder=base_path, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - initial_step=initial_step, - if_test=True) - + train_data = UNetDatasetSingle( + flnm, + saved_folder=base_path, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + initial_step=initial_step, + ) + val_data = UNetDatasetSingle( + flnm, + saved_folder=base_path, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + initial_step=initial_step, + if_test=True, + ) + else: # filename - model_name = flnm + '_Unet' - - train_data = UNetDatasetMult(flnm, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - saved_folder=base_path) - val_data = UNetDatasetMult(flnm, - reduced_resolution=reduced_resolution, - reduced_resolution_t=reduced_resolution_t, - reduced_batch=reduced_batch, - if_test=True, - saved_folder=base_path) - - train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, - num_workers=num_workers, shuffle=True) - val_loader = torch.utils.data.DataLoader(val_data, batch_size=batch_size, - num_workers=num_workers, shuffle=False) - + model_name = flnm + "_Unet" + + train_data = UNetDatasetMult( + flnm, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + saved_folder=base_path, + ) + val_data = UNetDatasetMult( + flnm, + reduced_resolution=reduced_resolution, + reduced_resolution_t=reduced_resolution_t, + reduced_batch=reduced_batch, + if_test=True, + saved_folder=base_path, + ) + + train_loader = torch.utils.data.DataLoader( + train_data, batch_size=batch_size, num_workers=num_workers, shuffle=True + ) + val_loader = torch.utils.data.DataLoader( + val_data, batch_size=batch_size, num_workers=num_workers, shuffle=False + ) + ################################################################ # training and evaluation ################################################################ - - #model = UNet2d(in_channels, out_channels).to(device) + + # model = UNet2d(in_channels, out_channels).to(device) _, _data = next(iter(val_loader)) dimensions = len(_data.shape) - print('Spatial Dimension', 
dimensions - 3) - if training_type in ['autoregressive']: + msg = f"Spatial Dimension: {dimensions - 3}" + logging.info(msg) + if training_type in ["autoregressive"]: if dimensions == 4: - model = UNet1d(in_channels*initial_step, out_channels).to(device) + model = UNet1d(in_channels * initial_step, out_channels).to(device) elif dimensions == 5: - model = UNet2d(in_channels*initial_step, out_channels).to(device) + model = UNet2d(in_channels * initial_step, out_channels).to(device) elif dimensions == 6: - model = UNet3d(in_channels*initial_step, out_channels).to(device) - if training_type in ['single']: + model = UNet3d(in_channels * initial_step, out_channels).to(device) + if training_type in ["single"]: if dimensions == 4: model = UNet1d(in_channels, out_channels).to(device) elif dimensions == 5: model = UNet2d(in_channels, out_channels).to(device) elif dimensions == 6: model = UNet3d(in_channels, out_channels).to(device) - + # Set maximum time step of the data to train - if t_train > _data.shape[-2]: - t_train = _data.shape[-2] + t_train = min(t_train, _data.shape[-2]) # Set maximum of unrolled time step for the pushforward trick if t_train - unroll_step < 1: unroll_step = t_train - 1 - if training_type in ['autoregressive']: + if training_type in ["autoregressive"]: if ar_mode: if pushforward: - model_name = model_name + '-PF-' + str(unroll_step) + model_name = model_name + "-PF-" + str(unroll_step) if not pushforward: unroll_step = _data.shape[-2] - model_name = model_name + '-AR' + model_name = model_name + "-AR" else: - model_name = model_name + '-1-step' - + model_name = model_name + "-1-step" + model_path = model_name + ".pt" - + total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - print(f'Total parameters = {total_params}') - - optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4) - scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma) - + msg = f"Total parameters = {total_params}" + logging.info(msg) + + optimizer = torch.optim.Adam( + model.parameters(), lr=learning_rate, weight_decay=1e-4 + ) + scheduler = torch.optim.lr_scheduler.StepLR( + optimizer, step_size=scheduler_step, gamma=scheduler_gamma + ) + loss_fn = nn.MSELoss(reduction="mean") - loss_val_min = np.infty + loss_val_min = np.inf + start_epoch = 0 if not if_training: checkpoint = torch.load(model_path, map_location=device) - model.load_state_dict(checkpoint['model_state_dict']) + model.load_state_dict(checkpoint["model_state_dict"]) model.to(device) model.eval() - Lx, Ly, Lz = 1., 1., 1. - errs = metrics(val_loader, model, Lx, Ly, Lz, plot, channel_plot, - model_name, x_min, x_max, y_min, y_max, - t_min, t_max, mode='Unet', initial_step=initial_step) - pickle.dump(errs, open(model_name+'.pickle', "wb")) - + Lx, Ly, Lz = 1.0, 1.0, 1.0 + errs = metrics( + val_loader, + model, + Lx, + Ly, + Lz, + plot, + channel_plot, + model_name, + x_min, + x_max, + y_min, + y_max, + t_min, + t_max, + mode="Unet", + initial_step=initial_step, + ) + pickle.dump(errs, Path(model_name + ".pickle").open("wb")) + return # If desired, restore the network by loading the weights saved in the .pt # file if continue_training: - print('Restoring model (that is the network\'s weights) from file...') + msg = "Restoring model (that is the network's weights) from file..."
+ logging.info(msg) checkpoint = torch.load(model_path, map_location=device) - model.load_state_dict(checkpoint['model_state_dict']) + model.load_state_dict(checkpoint["model_state_dict"]) model.to(device) model.train() - + # Load optimizer state dict - optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) for state in optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.to(device) - - start_epoch = checkpoint['epoch'] - loss_val_min = checkpoint['loss'] - print('start training...') - + start_epoch = checkpoint["epoch"] + loss_val_min = checkpoint["loss"] + + msg = "start training..." + logging.info(msg) + if ar_mode: - for ep in range(start_epoch, epochs): model.train() t1 = default_timer() train_l2_step = 0 train_l2_full = 0 - + for xx, yy in train_loader: loss = 0 - + # xx: input tensor (first few time steps) [b, x1, ..., xd, t_init, v] # yy: target tensor [b, x1, ..., xd, t, v] # grid: meshgrid [b, x1, ..., xd, dims] - xx = xx.to(device) - yy = yy.to(device) - - if training_type in ['autoregressive']: + xx_tensor = xx.to(device) + yy_tensor = yy.to(device) + if training_type in ["autoregressive"]: # Initialize the prediction tensor - pred = yy[..., :initial_step, :] + pred = yy_tensor[..., :initial_step, :] - + # Extract shape of the input tensor for reshaping (i.e. stacking the # time and channels dimension together) - inp_shape = list(xx.shape) + inp_shape = list(xx_tensor.shape) inp_shape = inp_shape[:-2] inp_shape.append(-1) - + # Autoregressive loop for t in range(initial_step, t_train): - - if t < t_train-unroll_step: + if t < t_train - unroll_step: with torch.no_grad(): # Reshape input tensor into [b, x1, ..., xd, t_init*v] - inp = xx.reshape(inp_shape) + inp = xx_tensor.reshape(inp_shape) temp_shape = [0, -1] - temp_shape.extend([i for i in range(1,len(inp.shape)-1)]) + temp_shape.extend(list(range(1, len(inp.shape) - 1))) inp = inp.permute(temp_shape) - + # Extract target at current time step - y = yy[..., t:t+1, :] - + y = yy_tensor[..., t : t + 1, :] + # Model run temp_shape = [0] - temp_shape.extend([i for i in range(2,len(inp.shape))]) + temp_shape.extend(list(range(2, len(inp.shape)))) temp_shape.append(1) im = model(inp).permute(temp_shape).unsqueeze(-2) - + # Concatenate the prediction at current time step into the # prediction tensor pred = torch.cat((pred, im), -2) - + # Concatenate the prediction at the current time step to be used # as input for the next time step - xx = torch.cat((xx[..., 1:, :], im), dim=-2) - + xx_tensor = torch.cat( + (xx_tensor[..., 1:, :], im), dim=-2 + ) + else: # Reshape input tensor into [b, x1, ..., xd, t_init*v] - inp = xx.reshape(inp_shape) + inp = xx_tensor.reshape(inp_shape) temp_shape = [0, -1] - temp_shape.extend([i for i in range(1,len(inp.shape)-1)]) + temp_shape.extend(list(range(1, len(inp.shape) - 1))) inp = inp.permute(temp_shape) - + # Extract target at current time step - y = yy[..., t:t+1, :] - + y = yy_tensor[..., t : t + 1, :] + # Model run temp_shape = [0] - temp_shape.extend([i for i in range(2,len(inp.shape))]) + temp_shape.extend(list(range(2, len(inp.shape)))) temp_shape.append(1) im = model(inp).permute(temp_shape).unsqueeze(-2) - + # Loss calculation - loss += loss_fn(im.reshape(batch_size, -1), y.reshape(batch_size, -1)) - + loss += loss_fn( + im.reshape(batch_size, -1), y.reshape(batch_size, -1) + ) + # Concatenate the prediction at current time step into the # prediction tensor pred = torch.cat((pred, im), -2)
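The two branches in the loop above are the pushforward trick: the first t_train - unroll_step rollout steps run inside torch.no_grad(), and only the final unroll_step predictions contribute gradients. A condensed sketch of the idea, assuming a generic one-step model f and a target sequence y of shape [b, x, t, v] (all names here are illustrative, not part of the patch):

import torch

def pushforward_loss(f, y, t_train, unroll_step, initial_step=1):
    xx = y[..., :initial_step, :]  # seed the rollout with the first time steps
    loss = 0.0
    for t in range(initial_step, t_train):
        if t < t_train - unroll_step:
            with torch.no_grad():  # early steps: rollout only, gradients detached
                im = f(xx)
        else:
            im = f(xx)  # the last unroll_step steps carry the gradient
            loss = loss + torch.mean((im - y[..., t : t + 1, :]) ** 2)
        xx = torch.cat((xx[..., 1:, :], im), dim=-2)  # slide the input window
    return loss

Detaching the early rollout keeps memory bounded and exposes training to the model's own prediction distribution, which is the stabilization the "-PF-" model variants rely on.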
prediction at the current time step to be used # as input for the next time step - xx = torch.cat((xx[..., 1:, :], im), dim=-2) - + xx_tensor = torch.cat((xx_tensor[..., 1:, :], im), dim=-2) + train_l2_step += loss.item() - _batch = yy.size(0) - _yy = yy[..., :t_train, :] + _batch = yy_tensor.size(0) + _yy = yy_tensor[..., :t_train, :] l2_full = loss_fn(pred.reshape(_batch, -1), _yy.reshape(_batch, -1)) train_l2_full += l2_full.item() - + optimizer.zero_grad() loss.backward() optimizer.step() - if training_type in ['single']: - x = xx[..., 0 , :] - y = yy[..., t_train-1:t_train, :] + if training_type in ["single"]: + x = xx_tensor[..., 0, :] + y = yy_tensor[..., t_train - 1 : t_train, :] pred = model(x.permute([0, 2, 1])).permute([0, 2, 1]) _batch = yy.size(0) loss += loss_fn(pred.reshape(_batch, -1), y.reshape(_batch, -1)) - + train_l2_step += loss.item() train_l2_full += loss.item() - + optimizer.zero_grad() loss.backward() optimizer.step() - if ep % model_update == 0: val_l2_step = 0 val_l2_full = 0 with torch.no_grad(): for xx, yy in val_loader: loss = 0 - xx = xx.to(device) - yy = yy.to(device) - - if training_type in ['autoregressive']: - pred = yy[..., :initial_step, :] + xx_tensor = xx.to(device) + yy_tensor = yy.to(device) + + if training_type in ["autoregressive"]: + pred = yy_tensor[..., :initial_step, :] - inp_shape = list(xx.shape) + inp_shape = list(xx_tensor.shape) inp_shape = inp_shape[:-2] inp_shape.append(-1) - + for t in range(initial_step, t_train): - inp = xx.reshape(inp_shape) + inp = xx_tensor.reshape(inp_shape) temp_shape = [0, -1] - temp_shape.extend([i for i in range(1,len(inp.shape)-1)]) + temp_shape.extend(list(range(1, len(inp.shape) - 1))) inp = inp.permute(temp_shape) - y = yy[..., t:t+1, :] + y = yy_tensor[..., t : t + 1, :] temp_shape = [0] - temp_shape.extend([i for i in range(2,len(inp.shape))]) + temp_shape.extend(list(range(2, len(inp.shape)))) temp_shape.append(1) im = model(inp).permute(temp_shape).unsqueeze(-2) - loss += loss_fn(im.reshape(batch_size, -1), y.reshape(batch_size, -1)) - + loss += loss_fn( + im.reshape(batch_size, -1), + y.reshape(batch_size, -1), + ) + pred = torch.cat((pred, im), -2) - - xx = torch.cat((xx[..., 1:, :], im), dim=-2) - + + xx_tensor = torch.cat( + (xx_tensor[..., 1:, :], im), dim=-2 + ) + val_l2_step += loss.item() _batch = yy.size(0) _pred = pred[..., initial_step:t_train, :] - _yy = yy[..., initial_step:t_train, :] - val_l2_full += loss_fn(_pred.reshape(_batch, -1), _yy.reshape(_batch, -1)).item() - - if training_type in ['single']: - x = xx[..., 0 , :] - y = yy[..., t_train-1:t_train, :] + _yy = yy_tensor[..., initial_step:t_train, :] + val_l2_full += loss_fn( + _pred.reshape(_batch, -1), _yy.reshape(_batch, -1) + ).item() + + if training_type in ["single"]: + x = xx_tensor[..., 0, :] + y = yy_tensor[..., t_train - 1 : t_train, :] pred = model(x.permute([0, 2, 1])).permute([0, 2, 1]) _batch = yy.size(0) loss += loss_fn(pred.reshape(_batch, -1), y.reshape(_batch, -1)) - + val_l2_step += loss.item() val_l2_full += loss.item() - if val_l2_full < loss_val_min: + if val_l2_full < loss_val_min: loss_val_min = val_l2_full - torch.save({ - 'epoch': ep, - 'model_state_dict': model.state_dict(), - 'optimizer_state_dict': optimizer.state_dict(), - 'loss': loss_val_min - }, model_path) - + torch.save( + { + "epoch": ep, + "model_state_dict": model.state_dict(), + "optimizer_state_dict": optimizer.state_dict(), + "loss": loss_val_min, + }, + model_path, + ) + t2 = default_timer() scheduler.step() - print('epoch: {0}, loss: {1:.5f}, t2-t1: {2:.5f}, trainL2: {3:.5f}, testL2: {4:.5f}'\ -
.format(ep, loss.item(), t2 - t1, train_l2_step, val_l2_step)) + msg = f"epoch: {ep}, loss: {loss.item():.5f}, t2-t1: {t2 - t1:.5f}, trainL2: {train_l2_step:.5f}, testL2: {val_l2_step:.5f}" + logging.info(msg) else: for ep in range(start_epoch, epochs): @@ -363,115 +404,125 @@ def run_training(if_training, t1 = default_timer() train_l2_step = 0 train_l2_full = 0 - + for xx, yy in train_loader: loss = 0 - + # xx: input tensor (first few time steps) [b, x1, ..., xd, t_init, v] # yy: target tensor [b, x1, ..., xd, t, v] - xx = xx.to(device) - yy = yy.to(device) - + xx_tensor = xx.to(device) + yy_tensor = yy.to(device) + # Initialize the prediction tensor - pred = yy[..., :initial_step, :] - + pred = yy_tensor[..., :initial_step, :] + # Extract shape of the input tensor for reshaping (i.e. stacking the # time and channels dimension together) - inp_shape = list(xx.shape) + inp_shape = list(xx_tensor.shape) inp_shape = inp_shape[:-2] inp_shape.append(-1) - + # Autoregressive loop for t in range(initial_step, t_train): - # Reshape input tensor into [b, x1, ..., xd, t_init*v] - inp = yy[..., t-initial_step:t, :].reshape(inp_shape) + inp = yy_tensor[..., t - initial_step : t, :].reshape(inp_shape) temp_shape = [0, -1] - temp_shape.extend([i for i in range(1,len(inp.shape)-1)]) + temp_shape.extend(list(range(1, len(inp.shape) - 1))) inp = inp.permute(temp_shape) inp = torch.normal(inp, 0.001) - + # Extract target at current time step - y = yy[..., t:t+1, :] - + y = yy_tensor[..., t : t + 1, :] + # Model run temp_shape = [0] - temp_shape.extend([i for i in range(2,len(inp.shape))]) + temp_shape.extend(list(range(2, len(inp.shape)))) temp_shape.append(1) im = model(inp).permute(temp_shape).unsqueeze(-2) - + # Loss calculation - loss += loss_fn(im.reshape(batch_size, -1), y.reshape(batch_size, -1)) - + loss += loss_fn( + im.reshape(batch_size, -1), y.reshape(batch_size, -1) + ) + # Concatenate the prediction at current time step into the # prediction tensor pred = torch.cat((pred, im), -2) - + # Concatenate the prediction at the current time step to be used # as input for the next time step # xx = torch.cat((xx[..., 1:, :], im), dim=-2) - + train_l2_step += loss.item() _batch = yy.size(0) - _yy = yy[..., :t_train, :] # if t_train is not -1 + _yy = yy_tensor[..., :t_train, :] # if t_train is not -1 l2_full = loss_fn(pred.reshape(_batch, -1), _yy.reshape(_batch, -1)) train_l2_full += l2_full.item() - + optimizer.zero_grad() loss.backward() optimizer.step() - + if ep % model_update == 0 or ep == epochs: val_l2_step = 0 val_l2_full = 0 with torch.no_grad(): for xx, yy in val_loader: loss = 0 - xx = xx.to(device) - yy = yy.to(device) - - pred = yy[..., :initial_step, :] - inp_shape = list(xx.shape) + xx_tensor = xx.to(device) + yy_tensor = yy.to(device) + + pred = yy_tensor[..., :initial_step, :] + inp_shape = list(xx_tensor.shape) inp_shape = inp_shape[:-2] inp_shape.append(-1) - + for t in range(initial_step, t_train): - inp = yy[..., t-initial_step:t, :].reshape(inp_shape) + inp = yy_tensor[..., t - initial_step : t, :].reshape( + inp_shape + ) temp_shape = [0, -1] - temp_shape.extend([i for i in range(1,len(inp.shape)-1)]) + temp_shape.extend(list(range(1, len(inp.shape) - 1))) inp = inp.permute(temp_shape) - y = yy[..., t:t+1, :] + y = yy_tensor[..., t : t + 1, :] temp_shape = [0] - temp_shape.extend([i for i in range(2,len(inp.shape))]) + temp_shape.extend(list(range(2, len(inp.shape)))) temp_shape.append(1) im = model(inp).permute(temp_shape).unsqueeze(-2) - loss += loss_fn(im.reshape(batch_size, 
-1), y.reshape(batch_size, -1)) - + loss += loss_fn( + im.reshape(batch_size, -1), y.reshape(batch_size, -1) + ) + pred = torch.cat((pred, im), -2) - + val_l2_step += loss.item() _batch = yy.size(0) _pred = pred[..., initial_step:t_train, :] - _yy = yy[..., initial_step:t_train, :] # if t_train is not -1 - val_l2_full += loss_fn(_pred.reshape(_batch, -1), _yy.reshape(_batch, -1)).item() - - if val_l2_full < loss_val_min: + # if t_train is not -1 + _yy = yy_tensor[..., initial_step:t_train, :] + val_l2_full += loss_fn( + _pred.reshape(_batch, -1), _yy.reshape(_batch, -1) + ).item() + + if val_l2_full < loss_val_min: loss_val_min = val_l2_full - torch.save({ - 'epoch': ep, - 'model_state_dict': model.state_dict(), - 'optimizer_state_dict': optimizer.state_dict(), - 'loss': loss_val_min - }, model_path) - - + torch.save( + { + "epoch": ep, + "model_state_dict": model.state_dict(), + "optimizer_state_dict": optimizer.state_dict(), + "loss": loss_val_min, + }, + model_path, + ) + t2 = default_timer() scheduler.step() - print('epoch: {0}, loss: {1:.5f}, t2-t1: {2:.5f}, trainL2: {3:.5f}, testL2: {4:.5f}'\ - .format(ep, loss.item(), t2 - t1, train_l2_step, val_l2_step)) + msg = f"epoch: {ep}, loss: {loss.item():.5f}, t2-t1: {t2 - t1:.5f}, trainL2: {train_l2_step:.5f}, testL2: {val_l2_step:.5f}" + logging.info(msg) if __name__ == "__main__": - run_training() - print("Done.") \ No newline at end of file + msg = "Done." + logging.info(msg) diff --git a/pdebench/models/unet/unet.py b/pdebench/models/unet/unet.py index 7bd828b..f06e7e1 100644 --- a/pdebench/models/unet/unet.py +++ b/pdebench/models/unet/unet.py @@ -18,14 +18,15 @@ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. """ +from __future__ import annotations + from collections import OrderedDict import torch -import torch.nn as nn +from torch import nn class UNet1d(nn.Module): - def __init__(self, in_channels=3, out_channels=1, init_features=32): super(UNet1d, self).__init__() @@ -119,7 +120,6 @@ def _block(in_channels, features, name): class UNet2d(nn.Module): - def __init__(self, in_channels=3, out_channels=1, init_features=32): super(UNet2d, self).__init__() @@ -210,10 +210,9 @@ def _block(in_channels, features, name): ] ) ) - -class UNet3d(nn.Module): +class UNet3d(nn.Module): def __init__(self, in_channels=3, out_channels=1, init_features=32): super(UNet3d, self).__init__() @@ -303,4 +302,4 @@ def _block(in_channels, features, name): (name + "tanh2", nn.Tanh()), ] ) - ) \ No newline at end of file + ) diff --git a/pdebench/models/unet/utils.py b/pdebench/models/unet/utils.py index d38c242..d8c61cf 100644 --- a/pdebench/models/unet/utils.py +++ b/pdebench/models/unet/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ @@ -147,165 +146,252 @@ THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY. 
""" +from __future__ import annotations -import torch -from torch.utils.data import Dataset -from torch.utils.data import DataLoader +import math as mt import os -import glob + import h5py import numpy as np -import math as mt +import torch +from torch.utils.data import Dataset + class UNetDatasetSingle(Dataset): - def __init__(self, filename, - initial_step=10, - saved_folder='../data/', - reduced_resolution=1, - reduced_resolution_t=1, - reduced_batch=1, - if_test=False, - test_ratio=0.1, - num_samples_max = -1): + def __init__( + self, + filename, + initial_step=10, + saved_folder="../data/", + reduced_resolution=1, + reduced_resolution_t=1, + reduced_batch=1, + if_test=False, + test_ratio=0.1, + num_samples_max=-1, + ): """ - + :param filename: filename that contains the dataset :type filename: STR :param filenum: array containing indices of filename included in the dataset :type filenum: ARRAY """ - + # Define path to files root_path = os.path.abspath(saved_folder + filename) - assert filename[-2:] != 'h5', 'HDF5 data is assumed!!' - - with h5py.File(root_path, 'r') as f: + assert filename[-2:] != "h5", "HDF5 data is assumed!!" + + with h5py.File(root_path, "r") as f: keys = list(f.keys()) keys.sort() - if 'tensor' not in keys: - _data = np.array(f['density'], dtype=np.float32) # batch, time, x,... + if "tensor" not in keys: + _data = np.array(f["density"], dtype=np.float32) # batch, time, x,... idx_cfd = _data.shape - if len(idx_cfd)==3: # 1D - self.data = np.zeros([idx_cfd[0]//reduced_batch, - idx_cfd[2]//reduced_resolution, - mt.ceil(idx_cfd[1]/reduced_resolution_t), - 3], - dtype=np.float32) - #density - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + if len(idx_cfd) == 3: # 1D + self.data = np.zeros( + [ + idx_cfd[0] // reduced_batch, + idx_cfd[2] // reduced_resolution, + mt.ceil(idx_cfd[1] / reduced_resolution_t), + 3, + ], + dtype=np.float32, + ) + # density + _data = _data[ + ::reduced_batch, ::reduced_resolution_t, ::reduced_resolution + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) - self.data[...,0] = _data # batch, x, t, ch + self.data[..., 0] = _data # batch, x, t, ch # pressure - _data = np.array(f['pressure'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + _data = np.array( + f["pressure"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, ::reduced_resolution_t, ::reduced_resolution + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) - self.data[...,1] = _data # batch, x, t, ch + self.data[..., 1] = _data # batch, x, t, ch # Vx - _data = np.array(f['Vx'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + _data = np.array(f["Vx"], dtype=np.float32) # batch, time, x,... 
+ _data = _data[ + ::reduced_batch, ::reduced_resolution_t, ::reduced_resolution + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) - self.data[...,2] = _data # batch, x, t, ch - - if len(idx_cfd)==4: # 2D - self.data = np.zeros([idx_cfd[0]//reduced_batch, - idx_cfd[2]//reduced_resolution, - idx_cfd[3]//reduced_resolution, - mt.ceil(idx_cfd[1]/reduced_resolution_t), - 4], - dtype=np.float32) + self.data[..., 2] = _data # batch, x, t, ch + + if len(idx_cfd) == 4: # 2D + self.data = np.zeros( + [ + idx_cfd[0] // reduced_batch, + idx_cfd[2] // reduced_resolution, + idx_cfd[3] // reduced_resolution, + mt.ceil(idx_cfd[1] / reduced_resolution_t), + 4, + ], + dtype=np.float32, + ) # density - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,0] = _data # batch, x, t, ch + self.data[..., 0] = _data # batch, x, t, ch # pressure - _data = np.array(f['pressure'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["pressure"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,1] = _data # batch, x, t, ch + self.data[..., 1] = _data # batch, x, t, ch # Vx - _data = np.array(f['Vx'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = np.array(f["Vx"], dtype=np.float32) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,2] = _data # batch, x, t, ch + self.data[..., 2] = _data # batch, x, t, ch # Vy - _data = np.array(f['Vy'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution] + _data = np.array(f["Vy"], dtype=np.float32) # batch, time, x,... 
+ _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 1)) - self.data[...,3] = _data # batch, x, t, ch - - if len(idx_cfd)==5: # 3D - self.data = np.zeros([idx_cfd[0]//reduced_batch, - idx_cfd[2]//reduced_resolution, - idx_cfd[3]//reduced_resolution, - idx_cfd[4]//reduced_resolution, - mt.ceil(idx_cfd[1]/reduced_resolution_t), - 5], - dtype=np.float32) + self.data[..., 3] = _data # batch, x, t, ch + + if len(idx_cfd) == 5: # 3D + self.data = np.zeros( + [ + idx_cfd[0] // reduced_batch, + idx_cfd[2] // reduced_resolution, + idx_cfd[3] // reduced_resolution, + idx_cfd[4] // reduced_resolution, + mt.ceil(idx_cfd[1] / reduced_resolution_t), + 5, + ], + dtype=np.float32, + ) # density - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,0] = _data # batch, x, t, ch + self.data[..., 0] = _data # batch, x, t, ch # pressure - _data = np.array(f['pressure'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array( + f["pressure"], dtype=np.float32 + ) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,1] = _data # batch, x, t, ch + self.data[..., 1] = _data # batch, x, t, ch # Vx - _data = np.array(f['Vx'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array(f["Vx"], dtype=np.float32) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,2] = _data # batch, x, t, ch + self.data[..., 2] = _data # batch, x, t, ch # Vy - _data = np.array(f['Vy'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array(f["Vy"], dtype=np.float32) # batch, time, x,... + _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,3] = _data # batch, x, t, ch + self.data[..., 3] = _data # batch, x, t, ch # Vz - _data = np.array(f['Vz'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution,::reduced_resolution,::reduced_resolution] + _data = np.array(f["Vz"], dtype=np.float32) # batch, time, x,... 
+ _data = _data[ + ::reduced_batch, + ::reduced_resolution_t, + ::reduced_resolution, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data, (0, 2, 3, 4, 1)) - self.data[...,4] = _data # batch, x, t, ch + self.data[..., 4] = _data # batch, x, t, ch else: # scalar equations ## data dim = [t, x1, ..., xd, v] - _data = np.array(f['tensor'], dtype=np.float32) # batch, time, x,... + _data = np.array(f["tensor"], dtype=np.float32) # batch, time, x,... if len(_data.shape) == 3: # 1D - _data = _data[::reduced_batch,::reduced_resolution_t,::reduced_resolution] + _data = _data[ + ::reduced_batch, ::reduced_resolution_t, ::reduced_resolution + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :], (0, 2, 1)) self.data = _data[:, :, :, None] # batch, x, t, ch if len(_data.shape) == 4: # 2D Darcy flow # u: label - _data = _data[::reduced_batch,:,::reduced_resolution,::reduced_resolution] + _data = _data[ + ::reduced_batch, :, ::reduced_resolution, ::reduced_resolution + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :, :], (0, 2, 3, 1)) - #if _data.shape[-1]==1: # if nt==1 + # if _data.shape[-1]==1: # if nt==1 # _data = np.tile(_data, (1, 1, 1, 2)) self.data = _data # nu: input - _data = np.array(f['nu'], dtype=np.float32) # batch, time, x,... - _data = _data[::reduced_batch, None,::reduced_resolution,::reduced_resolution] + _data = np.array(f["nu"], dtype=np.float32) # batch, time, x,... + _data = _data[ + ::reduced_batch, + None, + ::reduced_resolution, + ::reduced_resolution, + ] ## convert to [x1, ..., xd, t, v] _data = np.transpose(_data[:, :, :, :], (0, 2, 3, 1)) self.data = np.concatenate([_data, self.data], axis=-1) self.data = self.data[:, :, :, :, None] # batch, x, y, t, ch - if num_samples_max>0: - num_samples_max = min(num_samples_max,self.data.shape[0]) + if num_samples_max > 0: + num_samples_max = min(num_samples_max, self.data.shape[0]) else: num_samples_max = self.data.shape[0] @@ -317,29 +403,30 @@ def __init__(self, filename, # Time steps used as initial conditions self.initial_step = initial_step - - self.data = torch.tensor(self.data) + self.data = torch.tensor(self.data) def __len__(self): return len(self.data) - + def __getitem__(self, idx): - - return self.data[idx,...,:self.initial_step,:], self.data[idx] - + return self.data[idx, ..., : self.initial_step, :], self.data[idx] + class UNetDatasetMult(Dataset): - def __init__(self, filename, - initial_step=10, - saved_folder='../data/', - reduced_resolution=1, - reduced_resolution_t=1, - reduced_batch=1, - if_test=False, test_ratio=0.1 - ): + def __init__( + self, + filename, + initial_step=10, + saved_folder="../data/", + reduced_resolution=1, + reduced_resolution_t=1, + reduced_batch=1, + if_test=False, + test_ratio=0.1, + ): """ - + :param filename: filename that contains the dataset :type filename: STR :param filenum: array containing indices of filename included in the dataset @@ -348,39 +435,38 @@ def __init__(self, filename, :type initial_step: INT, optional """ - + # Define path to files self.file_path = os.path.abspath(saved_folder + filename + ".h5") - + # Extract list of seeds - with h5py.File(self.file_path, 'r') as h5_file: + with h5py.File(self.file_path, "r") as h5_file: data_list = sorted(h5_file.keys()) - test_idx = int(len(data_list) * (1-test_ratio)) + test_idx = int(len(data_list) * (1 - test_ratio)) if if_test: self.data_list = np.array(data_list[test_idx:]) else: self.data_list = np.array(data_list[:test_idx]) - 
+ # Time steps used as initial conditions self.initial_step = initial_step def __len__(self): return len(self.data_list) - + def __getitem__(self, idx): - # Open file and read data - with h5py.File(self.file_path, 'r') as h5_file: + with h5py.File(self.file_path, "r") as h5_file: seed_group = h5_file[self.data_list[idx]] - + # data dim = [t, x1, ..., xd, v] - data = np.array(seed_group["data"], dtype='f') + data = np.array(seed_group["data"], dtype="f") data = torch.tensor(data, dtype=torch.float) - + # convert to [x1, ..., xd, t, v] - permute_idx = list(range(1,len(data.shape)-1)) + permute_idx = list(range(1, len(data.shape) - 1)) permute_idx.extend(list([0, -1])) data = data.permute(permute_idx) - - return data[...,:self.initial_step,:], data \ No newline at end of file + + return data[..., : self.initial_step, :], data diff --git a/pyproject.toml b/pyproject.toml index 3e1d3f1..9ff674d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [build-system] -requires = ["setuptools"] -build-backend = "setuptools.build_meta" +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" [project] requires-python = ">=3.9,<3.11" @@ -19,26 +19,23 @@ authors = [ ] license = {file = "LICENSE.txt"} dependencies = [ - "scipy", - "matplotlib", - "h5py", - "pandas", - "python-dotenv", - "hydra-core", + "scipy", + "numpy<2", + "matplotlib", + "h5py", + "pandas", + "python-dotenv", + "hydra-core", "torch~=1.13.0", - "torchvision~=0.14.1", - "deepxde~=1.1.3", - "pyro-ppl", + "torchvision~=0.14.1", + "deepxde~=1.1.3", + "pyro-ppl", "tqdm", ] -[project.urls] -Homepage = "https://github.com/pdebench/PDEBenchm" -Documentation = "https://github.com/pdebench/PDEBench" -Repository = "https://github.com/pdebench/PDEBench" - [project.optional-dependencies] datagen310 = [ + "six", "clawpack@git+https://github.com/clawpack/clawpack.git@d619d6835ce128a0421aa52d70d2a6c9d9d1ce93", "dash", "phiflow", @@ -50,6 +47,7 @@ datagen310 = [ "jaxlib @ https://storage.googleapis.com/jax-releases/cuda11/jaxlib-0.4.11+cuda11.cudnn86-cp310-cp310-manylinux2014_x86_64.whl", ] datagen39 = [ + "six", "clawpack@git+https://github.com/clawpack/clawpack.git@d619d6835ce128a0421aa52d70d2a6c9d9d1ce93", "dash", "phiflow", @@ -61,5 +59,144 @@ datagen39 = [ "jaxlib @ https://storage.googleapis.com/jax-releases/cuda11/jaxlib-0.4.11+cuda11.cudnn86-cp39-cp39-manylinux2014_x86_64.whl" ] +test = [ + "pytest >=6", + "pytest-cov >=3", + "jax", # cpu only + "nox" ] + +docs = [ + "sphinx>=7.0", + "myst_parser>=0.13", + "sphinx_copybutton", + "sphinx_autodoc_typehints", + "furo>=2023.08.17" +] + +dev = ["anybadge", + "ruff", + "pytest", + "pytest-coverage", + "pytest-mypy", + "hatchling", + "nox", + "pre-commit"] + +[project.urls] +Homepage = "https://github.com/pdebench/PDEBench" +Documentation = "https://github.com/pdebench/PDEBench" +Repository = "https://github.com/pdebench/PDEBench" + +[tool.hatch] +version.source = "vcs" +build.hooks.vcs.version-file = "pdebench/_version.py" +build.include = [ + "pdebench" ] +metadata.allow-direct-references = true + +[tool.hatch.envs.default] +features = ["test"] +scripts.test = "pytest {args}" + + +[tool.pytest.ini_options] +minversion = "6.0" +addopts = ["-ra", "--showlocals", "--strict-markers", "--strict-config"] +xfail_strict = true +filterwarnings = [ + "error", +] +log_cli_level = "INFO" +testpaths = [ + "tests", +] + + +[tool.coverage] +run.source = ["pdebench"] +report.exclude_also = [ + '\.\.\.', + 'if typing.TYPE_CHECKING:', +] + +[tool.mypy] +files = ["pdebench", "tests"]
+python_version = "3.9" +warn_unused_configs = true +strict = true +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] +warn_unreachable = true +disallow_untyped_defs = false +disallow_incomplete_defs = false +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "pdebench.*" +disallow_untyped_defs = true +disallow_incomplete_defs = true + +[tool.ruff] +src = ["pdebench"] + +[tool.ruff.format] +exclude = ["*.pyi"] + +[tool.ruff.lint] +extend-select = [ + "B", # flake8-bugbear + "I", # isort + "ARG", # flake8-unused-arguments + "C4", # flake8-comprehensions + "EM", # flake8-errmsg + "ICN", # flake8-import-conventions + "G", # flake8-logging-format + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # pylint + "PT", # flake8-pytest-style + "PTH", # flake8-use-pathlib + "RET", # flake8-return + "RUF", # Ruff-specific + "SIM", # flake8-simplify + "T20", # flake8-print + "UP", # pyupgrade + "YTT", # flake8-2020 + "EXE", # flake8-executable + "NPY", # NumPy specific rules + "PD", # pandas-vet +] +ignore = [ + "PLR09", # Too many <...> + "PLR2004", # Magic value used in comparison + "ISC001", # Conflicts with formatter + "UP007" +] +isort.required-imports = ["from __future__ import annotations"] +# Uncomment if using a _compat.typing backport +# typing-modules = ["pdebench._compat.typing"] + +[tool.ruff.lint.per-file-ignores] +"tests/**" = ["T20"] +"noxfile.py" = ["T20"] + + +[tool.pylint] +py-version = "3.9" +ignore-paths = [".*/_version.py"] +reports.output-format = "colorized" +similarities.ignore-imports = "yes" +messages_control.disable = [ + "design", + "fixme", + "line-too-long", + "missing-module-docstring", + "wrong-import-position", +] + [tool.setuptools.dynamic] -readme = {file = ["README.md"], content-type = "text/markdown"} \ No newline at end of file +readme = {file = ["README.md"], content-type = "text/markdown"} + +[project.scripts] +velocity2vorticity = "pdebench.data_gen.velocity2vorticity:convert_velocity" diff --git a/tests/test_vorticity.py b/tests/test_vorticity.py new file mode 100644 index 0000000..b984b20 --- /dev/null +++ b/tests/test_vorticity.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import jax.numpy as jnp +import numpy as np +import pytest +from pdebench.data_gen.src.vorticity import ( + compute_spectral_vorticity_jnp, + compute_spectral_vorticity_np, +) + + +@pytest.fixture() +def generate_random_spectral_velvor() -> tuple[np.ndarray, np.ndarray]: + """Generate random 5D velocity- and corresponding vorticity field + + :return: Velocity- and vorticity field + :rtype: tuple[np.ndarray, np.ndarray] + """ + generator = np.random.default_rng(seed=None) + vel = generator.uniform(size=(10, 16, 32, 32, 3)) + vx = vel[..., 0] + vy = vel[..., 1] + vz = vel[..., 2] + + fxy = np.fft.fft(vx, axis=2) + fxz = np.fft.fft(vx, axis=3) + fyx = np.fft.fft(vy, axis=1) + fyz = np.fft.fft(vy, axis=3) + fzx = np.fft.fft(vz, axis=1) + fzy = np.fft.fft(vz, axis=2) + + kappa_xy = 2.0 * np.pi * np.fft.fftfreq(vel.shape[2], 1.0 / vel.shape[2]) + kappa_xz = 2.0 * np.pi * np.fft.fftfreq(vel.shape[3], 1.0 / vel.shape[3]) + kappa_yx = 2.0 * np.pi * np.fft.fftfreq(vel.shape[1], 1.0 / vel.shape[1]) + kappa_yz = 2.0 * np.pi * np.fft.fftfreq(vel.shape[3], 1.0 / vel.shape[3]) + kappa_zx = 2.0 * np.pi * np.fft.fftfreq(vel.shape[1], 1.0 / vel.shape[1]) + kappa_zy = 2.0 * np.pi * np.fft.fftfreq(vel.shape[2], 1.0 / vel.shape[2]) + + vxy = np.fft.ifft(1j * kappa_xy[None, None, :, None] * fxy, axis=2).real + vyx = np.fft.ifft(1j * kappa_yx[None, :,
None, None] * fyx, axis=1).real + vxz = np.fft.ifft(1j * kappa_xz[None, None, None, :] * fxz, axis=3).real + vzx = np.fft.ifft(1j * kappa_zx[None, :, None, None] * fzx, axis=1).real + vyz = np.fft.ifft(1j * kappa_yz[None, None, None, :] * fyz, axis=3).real + vzy = np.fft.ifft(1j * kappa_zy[None, None, :, None] * fzy, axis=2).real + + omegax = vzy - vyz + omegay = vxz - vzx + omegaz = vyx - vxy + + omega = np.concatenate( + [omegax[..., None], omegay[..., None], omegaz[..., None]], axis=-1 + ) + + return vel, omega + + +def test_vorticity_np(generate_random_spectral_velvor) -> None: + """Test approximated vorticity by spectral derivation""" + vel, vort = generate_random_spectral_velvor + dx = 1.0 / vel.shape[1] + dy = 1.0 / vel.shape[2] + dz = 1.0 / vel.shape[3] + + vort_np = compute_spectral_vorticity_np(vel, dx, dy, dz) + np.testing.assert_almost_equal(vort_np, vort) + + +def test_vorticity_jnp(generate_random_spectral_velvor) -> None: + """Test approximated vorticity by spectral derivation""" + vel, vort = generate_random_spectral_velvor + dx = 1.0 / vel.shape[1] + dy = 1.0 / vel.shape[2] + dz = 1.0 / vel.shape[3] + + vort_jnp = compute_spectral_vorticity_jnp(jnp.array(vel), dx, dy, dz) + np.testing.assert_almost_equal(np.array(vort_jnp), vort, decimal=4)
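For reference, the identity these tests exercise is the spectral curl, omega = nabla x v: each partial derivative is taken by multiplying the Fourier transform of a velocity component by i*kappa along the corresponding axis and transforming back. The sketch below only restates that identity in generic NumPy; it is not the repository's `compute_spectral_vorticity_np`, and the `[batch, x, y, z, component]` axis layout is an assumption carried over from the test fixture above.

```python
import numpy as np


def spectral_derivative(field: np.ndarray, axis: int) -> np.ndarray:
    """Differentiate `field` along `axis` by multiplying with i*kappa in Fourier space."""
    n = field.shape[axis]
    kappa = 2.0 * np.pi * np.fft.fftfreq(n, 1.0 / n)  # angular wavenumbers
    shape = [1] * field.ndim
    shape[axis] = n  # broadcast kappa along the differentiation axis
    f_hat = np.fft.fft(field, axis=axis)
    return np.fft.ifft(1j * kappa.reshape(shape) * f_hat, axis=axis).real


def curl(vel: np.ndarray) -> np.ndarray:
    """Curl of vel[..., (vx, vy, vz)] with spatial axes (1, 2, 3)."""
    vx, vy, vz = vel[..., 0], vel[..., 1], vel[..., 2]
    omega_x = spectral_derivative(vz, 2) - spectral_derivative(vy, 3)  # dvz/dy - dvy/dz
    omega_y = spectral_derivative(vx, 3) - spectral_derivative(vz, 1)  # dvx/dz - dvz/dx
    omega_z = spectral_derivative(vy, 1) - spectral_derivative(vx, 2)  # dvy/dx - dvx/dy
    return np.stack([omega_x, omega_y, omega_z], axis=-1)
```

Applied to the fixture's random velocity field, `curl(vel)` reproduces the `omega` the tests compare against, which is why `compute_spectral_vorticity_np` is checked to full precision while the JAX variant is allowed a looser `decimal=4` tolerance.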
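Similarly, the `-PF-` checkpoints written by `run_training` in the train.py diff above come from the pushforward trick: the first `t_train - unroll_step` autoregressive steps are rolled out under `torch.no_grad()`, so the supervised steps receive inputs that carry realistic accumulated rollout error, and only the final `unroll_step` steps contribute gradients. The following is a self-contained sketch of just that control flow; the `ToyStep` model and the helper name `pushforward_loss` are hypothetical stand-ins, not the repository's UNet, with the `[batch, x, t, v]` layout assumed from the training loop.

```python
import torch
from torch import nn


class ToyStep(nn.Module):
    """Toy stand-in: predict the next time slice from the last `initial_step` slices."""

    def __init__(self, initial_step: int, channels: int):
        super().__init__()
        self.lin = nn.Linear(initial_step * channels, channels)

    def forward(self, xx: torch.Tensor) -> torch.Tensor:
        b, nx, t, v = xx.shape  # [b, x, t_init, v]
        return self.lin(xx.reshape(b, nx, t * v)).unsqueeze(-2)  # [b, x, 1, v]


def pushforward_loss(model, xx, yy, loss_fn, initial_step, t_train, unroll_step):
    loss = torch.zeros(())
    for t in range(initial_step, t_train):
        if t < t_train - unroll_step:
            # Early steps: roll forward without gradients so later inputs
            # carry the model's own accumulated error.
            with torch.no_grad():
                im = model(xx)
        else:
            # Last `unroll_step` steps: keep the graph and accumulate the loss.
            im = model(xx)
            loss = loss + loss_fn(im, yy[..., t : t + 1, :])
        # Slide the window: drop the oldest slice, append the prediction.
        xx = torch.cat((xx[..., 1:, :], im), dim=-2)
    return loss


if __name__ == "__main__":
    b, nx, t_total, v, t_init = 2, 16, 10, 3, 4
    yy = torch.randn(b, nx, t_total, v)
    model = ToyStep(t_init, v)
    loss = pushforward_loss(
        model, yy[..., :t_init, :], yy, nn.MSELoss(), t_init, t_total, unroll_step=2
    )
    loss.backward()  # gradients flow only through the last two unrolled steps
```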