From 9225f15824fa78de87f68a97b839fcf3a78adc3c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 18 Jul 2023 17:39:27 -0600 Subject: [PATCH] Update develop-ref after #2248 and #2254 (#2255) Co-authored-by: johnhg Co-authored-by: Julie Prestopnik Co-authored-by: cristianastan2 Co-authored-by: John Halley Gotway Co-authored-by: bikegeek Co-authored-by: Lisa Goodrich Co-authored-by: Julie Prestopnik Co-authored-by: George McCabe <23407799+georgemccabe@users.noreply.github.com> Co-authored-by: Hank Fisher Co-authored-by: Dan Adriaansen Co-authored-by: jprestop Co-authored-by: Tracy Hertneky Co-authored-by: Giovanni Rosa Co-authored-by: j-opatz <59586397+j-opatz@users.noreply.github.com> Co-authored-by: Mrinal Biswas Co-authored-by: j-opatz Co-authored-by: Daniel Adriaansen Co-authored-by: Jonathan Vigh Co-authored-by: bikegeek <3753118+bikegeek@users.noreply.github.com> Co-authored-by: Will Mayfield <59745143+willmayfield@users.noreply.github.com> Co-authored-by: lisagoodrich <33230218+lisagoodrich@users.noreply.github.com> Co-authored-by: metplus-bot <97135045+metplus-bot@users.noreply.github.com> Co-authored-by: Tracy Hertneky <39317287+hertneky@users.noreply.github.com> Co-authored-by: Giovanni Rosa Co-authored-by: mrinalbiswas Co-authored-by: Christina Kalb Co-authored-by: jason-english <73247785+jason-english@users.noreply.github.com> fix Contributor's Guide GitHub Workflow page (#1774) fix release (#1790) fix GitHub Actions warnings (#1864) fix #1884 develop PCPCombine {custom} in subtract method (#1887) fix #1939 develop - failure reading obs when zipped file also exists (#1941) Closes https://github.com/dtcenter/METplus/issues/1986 fix develop Fix broken documentation links (#2004) fix #2026 develop StatAnalysis looping (#2028) fix priority of obs_window config variables so that wrapper-specific version is preferred over generic OBS_WINDOW_BEGIN/END (#2062) fix #2070 var list numeric order (#2072) fix #2087 develop docs_pdf (#2091) fix #2096/#2098 develop - fix skip if output exists and do not error if no commands were run (#2099) Fix for Dockerfile smell DL4000 (#2112) fix #2082 develop regrid.convert/censor_thresh/censor_val (#2140) fix #2082 main_v5.0 regrid.convert/censor_thresh/censor_val (#2101) fix #2137 develop PointStat -obs_valid_beg/end (#2141) fix failured introduced by urllib3 (see https://github.com/urllib3/urllib3/issues/2168) fix #2161 develop PCPCombine additional field arguments in -subtract mode (#2162) fix #2168 develop - StatAnalysis time shift (#2169) fix releases. 
(#2183) fix #2189 develop - spaces in complex thresholds (#2191) fix #2179 develop TCPairs fix -diag argument (#2187) fixes (#2200) fix diff tests (#2217) fix automated tests (#2237) fix #2235 rename multivar_itensity to multivar_intensity_flag (#2236) fix #2241 Create directory containing -out_stat file (#2242) fix #2245 use unique run ID to name logger instance (#2247) fix #2244 develop fix diff tests (#2254) --- .github/actions/run_tests/entrypoint.sh | 16 +- .github/jobs/run_diff_docker.py | 4 + .github/parm/pytest_groups.txt | 8 - .github/parm/use_case_groups.json | 2 +- docs/Contributors_Guide/add_use_case.rst | 6 + docs/Contributors_Guide/testing.rst | 132 ++++++++---- docs/Users_Guide/quicksearch.rst | 3 + docs/Users_Guide/release-notes.rst | 116 ++++------ docs/Users_Guide/wrappers.rst | 2 +- .../met_tool_wrapper/TCDiag/TCDiag.py | 52 ++++- .../use_cases/met_tool_wrapper/TCGen/TCGen.py | 1 + .../TCMPRPlotter/TCMPRPlotter.py | 1 + .../TCPairs/TCPairs_extra_tropical.py | 1 + .../TCPairs/TCPairs_tropical.py | 1 + .../use_cases/met_tool_wrapper/TCRMW/TCRMW.py | 1 + .../met_tool_wrapper/TCStat/TCStat.py | 1 + ...GFS_obsGFS_FeatureRelative_SeriesByInit.py | 1 + ...GFS_obsGFS_FeatureRelative_SeriesByLead.py | 1 + ...riesByLead_PyEmbed_Multiple_Diagnostics.py | 1 + .../MODEMultivar_fcstHRRR_obsMRMS_HRRRanl.py | 102 +++------ ...otter_fcstGFS_obsGFS_UserScript_ExtraTC.py | 1 + .../GridStat_fcstHAFS_obsTDR_NetCDF.py | 1 + .../Plotter_fcstGFS_obsGFS_ExtraTC.py | 1 + .../Plotter_fcstGFS_obsGFS_RPlotting.py | 1 + .../TCGen_fcstGFS_obsBDECK_2021season.py | 1 + ...at_fcstADECK_obsBDECK_ATCF_BasicExample.py | 1 + .../TCRMW_fcstGFS_fcstOnly_gonzalo.py | 1 + ...CII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.py | 1 + internal/scripts/docker_env/Dockerfile | 5 + .../scripts/docker_env/Dockerfile.cartopy | 5 + internal/scripts/docker_env/README.md | 10 +- .../scripts/{pytest_env.sh => test_env.sh} | 16 +- internal/tests/pytests/conftest.py | 51 ++--- internal/tests/pytests/minimum_pytest.conf | 7 +- .../tests/pytests/minimum_pytest.dakota.sh | 4 - .../tests/pytests/minimum_pytest.docker.sh | 5 - .../tests/pytests/minimum_pytest.eyewall.sh | 5 - internal/tests/pytests/minimum_pytest.hera.sh | 4 - .../tests/pytests/minimum_pytest.kiowa.sh | 5 - .../tests/pytests/minimum_pytest.seneca.sh | 4 - .../tests/pytests/minimum_pytest.venus.sh | 4 - internal/tests/pytests/pytest.ini | 1 + .../pytests/util/diff_util/test_diff_util.py | 155 +++++++++++++ .../grid_stat/test_grid_stat_wrapper.py | 1 + .../series_analysis/test_series_analysis.py | 1 + .../stat_analysis/test_stat_analysis.py | 20 -- .../wrappers/tc_diag/test_tc_diag_wrapper.py | 23 +- .../wrappers/tc_stat/test_tc_stat_wrapper.py | 120 +++++++---- internal/tests/use_cases/all_use_cases.txt | 2 +- metplus/util/config_metplus.py | 12 +- metplus/util/diff_util.py | 203 ++++++++++++------ metplus/wrappers/tc_diag_wrapper.py | 153 +++++++++---- metplus/wrappers/tc_stat_wrapper.py | 38 +++- metplus/wrappers/tcmpr_plotter_wrapper.py | 7 +- parm/met_config/TCDiagConfig_wrapped | 2 +- .../met_tool_wrapper/TCDiag/TCDiag.conf | 54 +++-- 56 files changed, 866 insertions(+), 511 deletions(-) delete mode 100644 .github/parm/pytest_groups.txt rename internal/scripts/docker_env/scripts/{pytest_env.sh => test_env.sh} (72%) delete mode 100644 internal/tests/pytests/minimum_pytest.dakota.sh delete mode 100644 internal/tests/pytests/minimum_pytest.docker.sh delete mode 100644 internal/tests/pytests/minimum_pytest.eyewall.sh delete mode 100644 
internal/tests/pytests/minimum_pytest.hera.sh delete mode 100644 internal/tests/pytests/minimum_pytest.kiowa.sh delete mode 100644 internal/tests/pytests/minimum_pytest.seneca.sh delete mode 100644 internal/tests/pytests/minimum_pytest.venus.sh create mode 100644 internal/tests/pytests/util/diff_util/test_diff_util.py diff --git a/.github/actions/run_tests/entrypoint.sh b/.github/actions/run_tests/entrypoint.sh index bd2aa579dc..78ce25e086 100644 --- a/.github/actions/run_tests/entrypoint.sh +++ b/.github/actions/run_tests/entrypoint.sh @@ -8,8 +8,6 @@ WS_PATH=$RUNNER_WORKSPACE/$REPO_NAME # set CI jobs directory variable to easily move it CI_JOBS_DIR=.github/jobs -PYTESTS_GROUPS_FILEPATH=.github/parm/pytest_groups.txt - source ${GITHUB_WORKSPACE}/${CI_JOBS_DIR}/bash_functions.sh # get branch name for push or pull request events @@ -34,7 +32,7 @@ fi # running unit tests (pytests) if [[ "$INPUT_CATEGORIES" == pytests* ]]; then - export METPLUS_ENV_TAG="pytest.v5.1" + export METPLUS_ENV_TAG="test.v5.1" export METPLUS_IMG_TAG=${branch_name} echo METPLUS_ENV_TAG=${METPLUS_ENV_TAG} echo METPLUS_IMG_TAG=${METPLUS_IMG_TAG} @@ -56,15 +54,9 @@ if [[ "$INPUT_CATEGORIES" == pytests* ]]; then . echo Running Pytests - command="export METPLUS_PYTEST_HOST=docker; cd internal/tests/pytests;" - command+="status=0;" - for x in `cat $PYTESTS_GROUPS_FILEPATH`; do - marker="${x//_or_/ or }" - marker="${marker//not_/not }" - command+="/usr/local/conda/envs/${METPLUS_ENV_TAG}/bin/pytest -vv --cov=../../../metplus --cov-append -m \"$marker\"" - command+=";if [ \$? != 0 ]; then status=1; fi;" - done - command+="if [ \$status != 0 ]; then echo ERROR: Some pytests failed. Search for FAILED to review; false; fi" + command="export METPLUS_TEST_OUTPUT_BASE=/data/output;" + command+="/usr/local/conda/envs/${METPLUS_ENV_TAG}/bin/pytest internal/tests/pytests -vv --cov=metplus --cov-append --cov-report=term-missing;" + command+="if [ \$? != 0 ]; then echo ERROR: Some pytests failed. Search for FAILED to review; false; fi" time_command docker run -v $WS_PATH:$GITHUB_WORKSPACE --workdir $GITHUB_WORKSPACE $RUN_TAG bash -c "$command" exit $? fi diff --git a/.github/jobs/run_diff_docker.py b/.github/jobs/run_diff_docker.py index 85a3246a6a..2cfa81bd13 100755 --- a/.github/jobs/run_diff_docker.py +++ b/.github/jobs/run_diff_docker.py @@ -24,6 +24,7 @@ OUTPUT_DIR = '/data/output' DIFF_DIR = '/data/diff' + def copy_diff_output(diff_files): """! Loop through difference output and copy files to directory so it can be made available for comparison. @@ -45,6 +46,7 @@ def copy_diff_output(diff_files): copy_to_diff_dir(diff_file, 'diff') + def copy_to_diff_dir(file_path, data_type): """! 
Generate output path based on input file path, adding text based on data_type to the filename, then @@ -85,6 +87,7 @@ def copy_to_diff_dir(file_path, data_type): return True + def main(): print('******************************') print("Comparing output to truth data") @@ -97,5 +100,6 @@ def main(): if diff_files: copy_diff_output(diff_files) + if __name__ == '__main__': main() diff --git a/.github/parm/pytest_groups.txt b/.github/parm/pytest_groups.txt deleted file mode 100644 index a5ca80e665..0000000000 --- a/.github/parm/pytest_groups.txt +++ /dev/null @@ -1,8 +0,0 @@ -run_metplus -util -wrapper -wrapper_a -wrapper_b -wrapper_c -wrapper_d -plotting_or_long diff --git a/.github/parm/use_case_groups.json b/.github/parm/use_case_groups.json index faf63b90ad..e8cc31f58c 100644 --- a/.github/parm/use_case_groups.json +++ b/.github/parm/use_case_groups.json @@ -1,7 +1,7 @@ [ { "category": "met_tool_wrapper", - "index_list": "0-29,59-61", + "index_list": "0-29,59-62", "run": false }, { diff --git a/docs/Contributors_Guide/add_use_case.rst b/docs/Contributors_Guide/add_use_case.rst index 8710145e06..b4ca716bb1 100644 --- a/docs/Contributors_Guide/add_use_case.rst +++ b/docs/Contributors_Guide/add_use_case.rst @@ -327,6 +327,12 @@ file.grib2, run the following command:: wgrib2 file.grib2 | grep TMP | wgrib2 -i file.grib2 -grib_out subset.grib2 +The egrep command can be used for more complex subsetting of grib2 data. +Example: To create a file called subset.grib2 from file.grib2 that contains +PRMSL data and TMP data on 1000, 900, 800, 700, 500, and 100 mb levels:: + + wgrib2 file.grib2 -s | egrep '(:TMP:1000 mb:|:TMP:900 mb:|:TMP:800 mb:|:TMP:700 mb:|:TMP:500 mb:|:TMP:100 mb:|:PRMSL)' | wgrib2 -i file.grib2 -grib subset.grib2 + If the input data is in NetCDF format, the `ncks `_ tool can be used to subset the file(s). diff --git a/docs/Contributors_Guide/testing.rst b/docs/Contributors_Guide/testing.rst index e1e0af8e6a..1229359804 100644 --- a/docs/Contributors_Guide/testing.rst +++ b/docs/Contributors_Guide/testing.rst @@ -9,24 +9,73 @@ directory. Unit Tests ---------- -Unit tests are run with pytest. They are found in the *pytests* directory. +Unit tests are run with pytest. +They are found in the *internal/tests/pytests* directory under the *wrappers* +and *util* directories. Each tool has its own subdirectory containing its test files. -Unit tests can be run by running the 'pytest' command from the -internal/tests/pytests directory of the repository. -The 'pytest' Python package must be available. +Pytest Requirements +^^^^^^^^^^^^^^^^^^^ + +The following Python packages are required to run the tests. + +* **pytest**: Runs the tests +* **python-dateutil**: Required to run METplus wrappers +* **netCDF4**: Required for some METplus wrapper functionality +* **pytest-cov** (optional): Only if generating code coverage stats +* **pillow** (optional): Only used if running diff utility tests +* **pdf2image** (optional): Only used if running diff utility tests +
+Running
+^^^^^^^ + +To run the unit tests, set the environment variable +**METPLUS_TEST_OUTPUT_BASE** to a path where the user running the tests has write +permissions, navigate to the METplus directory, then call pytest:: + + export METPLUS_TEST_OUTPUT_BASE=/d1/personal/${USER}/pytest + cd METplus + pytest internal/tests/pytests + A report will be output showing which pytest categories failed. -When running on a new computer, a **minimum_pytest..sh** file must be created to be able to run the script.
This file contains -information about the local environment so that the tests can run. +To view verbose test output, add the **-vv** argument:: + + pytest internal/tests/pytests -vv + +Code Coverage +^^^^^^^^^^^^^ + +If the *pytest-cov* package is installed, the code coverage report can +be generated from the tests by running:: + + pytest internal/tests/pytests --cov=metplus --cov-report=term-missing + +In addition to the pass/fail report, the code coverage information will be +displayed including line numbers that are not covered by any test. -All unit tests must include one of the custom markers listed in the +Subsetting Tests by Directory +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A subset of the unit tests can be run by adjusting the path. +Be sure to include the *--cov-append* argument so the results of the run +are appended to the full code coverage results. +To run only the GridStat unit tests:: + + pytest internal/tests/pytests/wrappers/grid_stat --cov=metplus --cov-report=term-missing --cov-append + + +Subsetting Tests by Marker +^^^^^^^^^^^^^^^^^^^^^^^^^^ +Unit tests can include one of the custom markers listed in the internal/tests/pytests/pytest.ini file. Some examples include: + * diff + * run_metplus * util * wrapper_a * wrapper_b * wrapper_c + * wrapper_d * wrapper * long * plotting @@ -42,47 +91,52 @@ New pytest markers should be added to the pytest.ini file with a brief description. If they are not added to the markers list, then a warning will be output when running the tests. -There are many unit tests for METplus and false failures can occur if all of -the are attempted to run at once. To run only tests with a given marker, run:: - pytest -m + pytest internal/tests/pytests -m To run all tests that do not have a given marker, run:: - pytest -m "not " + pytest internal/tests/pytests -m "not " + +For example, **if you are running on a system that does not have the additional +dependencies required to run the diff utility tests**, you can run all of the +tests except those by running:: + + pytest internal/tests/pytests -m "not diff" + +Multiple marker groups can be run by using the *or* keyword:: + + pytest internal/tests/pytests -m " or " + +Writing Unit Tests +^^^^^^^^^^^^^^^^^^ + +metplus_config fixture +"""""""""""""""""""""" -Multiple marker groups can be run by using the 'or' keyword:: +Many unit tests utilize a pytest fixture named **metplus_config**. +This is defined in the **conftest.py** file in internal/tests/pytests. +This is used to create a METplusConfig object that contains the minimum +configurations needed to run METplus, like **OUTPUT_BASE**. +Using this fixture in a pytest will initialize the METplusConfig object to use +in the tests. - pytest -m " or " +This also creates a unique output directory for each test where +logs and output files are written. This directory is created under +**$METPLUS_TEST_OUTPUT_BASE**/test_output and is named with the run ID. +If the test passes, then the output directory is automatically removed. +If the test fails, the output directory will not be removed so the content +can be reviewed to debug the issue. +To use it, add **metplus_config** as an argument to the test function:: -Use Case Tests --------------- + def test_something(metplus_config) -Use case tests are run via a Python script called **test_use_cases.py**, -found in the *use_cases* directory. -Eventually the running of these tests will be automated using an external -tool, such as GitHub Actions or Travis CI. 
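As a complement to the marker documentation above, a minimal sketch of how a test declares one of these markers follows. The test itself is illustrative and is not part of the METplus test suite::

    import pytest

    @pytest.mark.wrapper_a
    def test_example():
        # collected by: pytest internal/tests/pytests -m wrapper_a
        assert True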
-The script contains a list of use cases that are found in the repository. -For each computer that will run the use cases, a -**metplus_test_env..sh** file must exist to set local configurations. -All of the use cases can be run by executing the script -**run_test_use_cases.sh**. The use case test script will output the results -into a directory such as */d1//test-use-case-b*, defined in the -environment file. -If */d1//test-use-case-b* already exists, its content will be copied -over to */d1//test-use-case-a*. If data is found in -the */d1//test-use-case-b* directory already exists, its content -will be copied -over to the */d1//test-use-case-a* directory, the script will prompt -the user to remove those files. -Once the tests have finished running, the output found in the two -directories can be compared to see what has changed. Suggested commands -to run to compare the output will be shown on the screen after completion -of the script. +then set a variable called **config** using the fixture name:: -To see which files and directories are only found in one run:: + config = metplus_config - diff -r /d1/mccabe/test-use-case-a /d1/mccabe/test-use-case-b | grep Only +Additional configuration variables can be set by using the set method:: + config.set('config', key, value) diff --git a/docs/Users_Guide/quicksearch.rst b/docs/Users_Guide/quicksearch.rst index b2a3b68fed..48303cdd57 100644 --- a/docs/Users_Guide/quicksearch.rst +++ b/docs/Users_Guide/quicksearch.rst @@ -10,6 +10,7 @@ METplus Quick Search for Use Cases Use Cases by MET Tool: +====================== .. only:: html @@ -163,6 +164,7 @@ Use Cases by METplus Feature: | `Runtime Frequency <../search.html?q=RuntimeFreqUseCase&check_keywords=yes&area=default>`_ | `Series by Initialization <../search.html?q=SeriesByInitUseCase&check_keywords=yes&area=default>`_ | `Series by Forecast Lead <../search.html?q=SeriesByLeadUseCase&check_keywords=yes&area=default>`_ + | `Tropical Cyclone <../search.html?q=TropicalCycloneUseCase&check_keywords=yes&area=default>`_ | `Validation of Models or Analyses <../search.html?q=ValidationUseCase&check_keywords=yes&area=default>`_ | `User Defined Script <../search.html?q=UserScriptUseCase&check_keywords=yes&area=default>`_ @@ -191,6 +193,7 @@ Use Cases by METplus Feature: | **Runtime Frequency**: *RuntimeFreqUseCase* | **Series by Initialization**: *SeriesByInitUseCase* | **Series by Forecast Lead**: *SeriesByLeadUseCase* + | **Tropical Cyclone**: *TropicalCycloneUseCase* | **Validation of Models or Analyses**: *ValidationUseCase* | **User Defined Script**: *UserScriptUseCase* diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst index 6ce013ba02..9c6825a9e4 100644 --- a/docs/Users_Guide/release-notes.rst +++ b/docs/Users_Guide/release-notes.rst @@ -30,24 +30,36 @@ When applicable, release notes are followed by the `GitHub issue `__ number which describes the bugfix, enhancement, or new feature. -METplus Version 5.1.0-rc2 Release Notes (2023-06-29) ----------------------------------------------------- - - .. dropdown:: Bugfix - - * Fix produtil bug for WCOSS - (`#2227 `_) - -METplus Version 5.1.0-rc1 Release Notes (2023-06-07) ----------------------------------------------------- +METplus Version 5.1.0 Release Notes (2023-07-21) +------------------------------------------------ .. 
dropdown:: Enhancements + * Add support for multiple interp widths + (`#2049 `_) + * TCPairs - Add support for setting consensus.write_members + (`#2054 `_) + * Update use cases to use new Python directory structure in MET + (`#2115 `_) * Add support for new multivariate MODE settings (`#2197 `_) .. dropdown:: Bugfix + * StatAnalysis - allow run once for each valid time + (`#2026 `_) + * App specific OBS_WINDOW variables not taking precedence over generic + (`#2006 `_) + * Skip-if-output-exists logic incorrectly skips files + (`#2096 `_) + * PointStat -obs_valid_beg/end arguments not set properly + (`#2137 `_) + * Allow setting of convert, censor_thresh, and censor_val in regrid dictionary + (`#2082 `_) + * METplus run errors if no commands were run + (`#2098 `_) + * TCPairs setting -diag option causes failure + (`#2179 `_) * Define the order of the forecast variables numerically rather than alphabetically (`#2070 `_) * Allow setting of convert, censor_thresh, and censor_val in regrid dictionary @@ -63,91 +75,29 @@ METplus Version 5.1.0-rc1 Release Notes (2023-06-07) * StatAnalysis time shifting failure (`#2168 `_) - .. dropdown:: New Wrappers + .. dropdown:: New Wrappers * TCDiag (beta) (`#1626 `_) .. dropdown:: New Use Cases + * Multi-Variate MODE (`#1516 `_) * Read in Argo profile data netCDF files for use in METplus with python embedding (`#1977 `_) .. dropdown:: Documentation + * Update the METplus Components Python Requirements Documentation + (`#2016 `_) + * Enhance the Release Notes by adding dropdown menus + (`#2076 `_) * Update the METplus Components Python Requirements (`#1978 `_, `#2016 `_) * Add documentation on support for releases to the Release Guide (`#2106 `_) - .. dropdown:: Internal - - * Update Contributor's Guide to use GH Action to update truth data - (`#2068 `_) - * Enhance GitHub Workflow documentation - (`#2147 `_) - * Update the development release guide instructions to remove references to a Coordinated release - (`#2159 `_) - * Refactored code to resolve many SonarQube items - (`#1610 `_) - -METplus Version 5.1.0-beta2 Release Notes (2023-04-26) ------------------------------------------------------- - - .. dropdown:: Enhancements - - * Update use cases to use new Python directory structure in MET - (`#2115 `_) - - .. dropdown:: Bugfix - - * App specific OBS_WINDOW variables not taking precedence over generic - (`#2006 `_) - * Skip-if-output-exists logic incorrectly skips files - (`#2096 `_) - * PointStat -obs_valid_beg/end arguments not set properly - (`#2137 `_) - * Allow setting of convert, censor_thresh, and censor_val in regrid dictionary - (`#2082 `_) - * METplus run errors if no commands were run - (`#2098 `_) - * TCPairs setting -diag option causes failure - (`#2179 `_) - - .. dropdown:: New Use Cases - - * Multi-Variate MODE (`#1516 `_) - - .. dropdown:: Documentation - - * Enhance the Release Notes by adding dropdown menus - (`#2076 `_) - - .. dropdown:: Internal - - * Add 'LICENSE.md' to the METplus repo - (`#2058 `_) - -METplus Version 5.1.0-beta1 Release Notes (2023-02-28) ------------------------------------------------------- - - .. dropdown:: Enhancements - - * Add support for multiple interp widths (`#2049 `_) - * TCPairs - Add support for setting consensus.write_members - (`#2054 `_) - - .. dropdown:: Bugfix - - * StatAnalysis - allow run once for each valid time - (`#2026 `_) - - .. dropdown:: Documentation - - * Update the METplus Components Python Requirements Documentation - (`#2016 `_) - .. 
dropdown:: Internal * Improve use case testing @@ -160,6 +110,16 @@ METplus Version 5.1.0-beta1 Release Notes (2023-02-28) (`#2022 `_) * Add 'License.txt' to the METplus repo (`#2058 `_) + * Add 'LICENSE.md' to the METplus repo + (`#2058 `_) + * Update Contributor's Guide to use GH Action to update truth data + (`#2068 `_) + * Enhance GitHub Workflow documentation + (`#2147 `_) + * Update the development release guide instructions to remove references to a Coordinated release + (`#2159 `_) + * Refactored code to resolve many SonarQube items + (`#1610 `_) METplus Version 5.0.0 Release Notes (2022-12-09) diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index d5e3862641..b90bfcf5e3 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -7692,7 +7692,7 @@ TCDiag Description ----------- -Used to configure the MET tool TC-Diag. +The TC-Diag wrapper encapsulates the behavior of the MET `tc_diag `_ tool. It provides the infrastructure to compute diagnostics from model fields and tracks. It can be configured to run over a single initialization time, all of the initialization times for a given storm, or over many storms. Configuration also allows a user to select which domain(s) of the input model data to use in the diagnostics calculations, set which levels and variables will be used as well as details about the azimuth-range grid used for the calculations, and to control which output files are generated. Future functionality of the tc_diag tool, such as vortex removal, will also be configurable from this wrapper. METplus Configuration --------------------- diff --git a/docs/use_cases/met_tool_wrapper/TCDiag/TCDiag.py b/docs/use_cases/met_tool_wrapper/TCDiag/TCDiag.py index d2a8f0b75d..d525cbc8f8 100644 --- a/docs/use_cases/met_tool_wrapper/TCDiag/TCDiag.py +++ b/docs/use_cases/met_tool_wrapper/TCDiag/TCDiag.py @@ -5,18 +5,48 @@ met_tool_wrapper/TCDiag/TCDiag.conf """ +############################################################################## +# Overview +# -------------------- +# +# This use case illustrates the use of the tc_diag tool, which is currently +# considered a beta-level release that lacks full functionality. +# The use case illustrates running the +# tc_diag tool for a tropical cyclone forecast case and generating +# intermediate NetCDF output files of the input model's data transformed +# onto an azimuth-range grid. When the full functionality of the +# tc_diag tool is released in MET v12.0.0, this use case will also +# output environmental diagnostics computed from callable Python scripts. +# +# The diagnostics are computed on a range-azimuth grid that follows the +# projected storm track. For inputs, it uses 0.25 deg gridded GRIB files from +# a retrospective reforecast of the Global Forecast System (GFS). For the track, it uses the +# GFS's predicted track to ensure that the model's simulated storm doesn't +# contaminate the diagnostics as a result of the model's simulated +# storm being mistaken for environmental factors. (Note: +# a future version of the tc_diag tool will include removal of the model's vortex, +# allowing diagnostics to be computed along any arbitrarily defined track.) +# +# Novel aspects of this use case: +# * This is the first example use case to run the tc_diag tool. +# * Example of running for a single tropical cyclone forecast case from +# Tropical Storm Bret (2023) using GFS data.
+ ############################################################################## # Scientific Objective # -------------------- # -# TODO: Add content here +# Generate intermediate data files, in which the input model's data have been +# transformed to a range-azimuth grid, in preparation for further diagnostic +# calculations using Python-based routines. ############################################################################## # Datasets # -------- # -# **Forecast:** GFS FV3 -# **Track:** A Deck +# **Forecast:** GFS grib files +# +# **Track:** a-deck file (Automated Tropical Cyclone Forecast System format) # # **Location:** All of the input data required for this use case can be found # in the met_test sample data tarball. Click here to the METplus releases page @@ -26,6 +56,11 @@ # This tarball should be unpacked into the directory that you will set the # value of INPUT_BASE. See `Running METplus`_ section for more information. # +# **Data source:** Users may obtain real-time data from the deterministic GFS runs from +# NOAA's NOMADS server: +# https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/gfs.YYYYMMDD/ZZ/atmos/ +# where YYYYMMDD is the date (4-digit year, 2-digit month, 2-digit day), +# ZZ is the initialization hour of the desired model cycle (00, 06, 12, 18). ############################################################################## # METplus Components @@ -42,8 +77,8 @@ # TCDiag is the only tool called in this example. It processes the following # run times: # -# **Init:** 2016-09-29- 00Z -# **Forecast lead:** 141, 143, and 147 hour +# **Init:** 2023-06-20 0000Z +# **Forecast lead:** 0, 6, and 12 hours # ############################################################################## @@ -67,7 +102,7 @@ # If there is a setting in the MET configuration file that is currently not supported by METplus you'd like to control, please refer to: # :ref:`Overriding Unsupported MET config file settings` # -# .. note:: See the :ref:`TCDiag MET Configuration` section of the User's Guide for more information on the environment variables used in the file below: +# .. note:: See the :ref:`TCDiag MET Configuration` section of the User's Guide for more information on the environment variables used in the file below: # # .. highlight:: bash # .. literalinclude:: ../../../../parm/met_config/TCDiagConfig_wrapped @@ -96,7 +131,8 @@ # Output for this use case will be found in met_tool_wrapper/TCDiag (relative to **OUTPUT_BASE**) # and will contain the following files: # -# * tc_diag_aal142016.nc +# * tc_diag_AL032023_GFSO_2023062012_cyl_grid_nest.nc +# * tc_diag_AL032023_GFSO_2023062012_cyl_grid_parent.nc # ############################################################################## @@ -105,8 +141,10 @@ # # .. note:: # +# * DiagnosticsUseCase # * TCDiagToolUseCase # * GRIB2FileUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/met_tool_wrapper/TCGen/TCGen.py b/docs/use_cases/met_tool_wrapper/TCGen/TCGen.py index 1580255aa4..e3feba3fd2 100644 --- a/docs/use_cases/met_tool_wrapper/TCGen/TCGen.py +++ b/docs/use_cases/met_tool_wrapper/TCGen/TCGen.py @@ -124,6 +124,7 @@ # # * TCGenToolUseCase # * DTCOrgUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. 
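As a hedged convenience for the TCDiag use case above, the sketch below builds the documented NOMADS path for one model cycle. The GRIB2 product filename is an assumption based on common 0.25 degree deterministic GFS naming and is not specified by the use case::

    from datetime import datetime

    def nomads_gfs_url(init, fhr):
        # Documented path pattern: .../gfs.YYYYMMDD/ZZ/atmos/
        base = 'https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod'
        # Assumed (hypothetical) product filename for the 0.25 deg GFS
        fname = f'gfs.t{init:%H}z.pgrb2.0p25.f{fhr:03d}'
        return f'{base}/gfs.{init:%Y%m%d}/{init:%H}/atmos/{fname}'

    # Illustrative cycle and lead time similar to those in this use case
    print(nomads_gfs_url(datetime(2023, 6, 20, 12), 6))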
# diff --git a/docs/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.py b/docs/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.py index 34f456e36c..84a9ced20c 100644 --- a/docs/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.py +++ b/docs/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.py @@ -110,6 +110,7 @@ # .. note:: # # * TCMPRPlotterUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_extra_tropical.py b/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_extra_tropical.py index c685439818..0913065d7f 100644 --- a/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_extra_tropical.py +++ b/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_extra_tropical.py @@ -124,6 +124,7 @@ # # * TCPairsToolUseCase # * SBUOrgUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.py b/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.py index 9a9e174b61..e0cb0ab729 100644 --- a/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.py +++ b/docs/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.py @@ -128,6 +128,7 @@ # # * TCPairsToolUseCase # * DTCOrgUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/met_tool_wrapper/TCRMW/TCRMW.py b/docs/use_cases/met_tool_wrapper/TCRMW/TCRMW.py index 8e7e468a2a..d3d3bdf40a 100644 --- a/docs/use_cases/met_tool_wrapper/TCRMW/TCRMW.py +++ b/docs/use_cases/met_tool_wrapper/TCRMW/TCRMW.py @@ -122,6 +122,7 @@ # # * TCRMWToolUseCase # * GRIB2FileUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/met_tool_wrapper/TCStat/TCStat.py b/docs/use_cases/met_tool_wrapper/TCStat/TCStat.py index 4c4d08d037..7386d9ee6f 100644 --- a/docs/use_cases/met_tool_wrapper/TCStat/TCStat.py +++ b/docs/use_cases/met_tool_wrapper/TCStat/TCStat.py @@ -120,6 +120,7 @@ # .. note:: # # * TCStatToolUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.py b/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.py index 4d6266a927..0dce153a7b 100644 --- a/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.py +++ b/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.py @@ -252,6 +252,7 @@ # * SBUOrgUseCase # * DiagnosticsUseCase # * RuntimeFreqUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. 
# diff --git a/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.py b/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.py index e8bd1bbafa..eb93987e42 100644 --- a/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.py +++ b/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.py @@ -244,6 +244,7 @@ # * SBUOrgUseCase # * DiagnosticsUseCase # * RuntimeFreqUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.py b/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.py index f521ae9835..52808805f9 100644 --- a/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.py +++ b/docs/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.py @@ -334,6 +334,7 @@ # * SBUOrgUseCase # * DiagnosticsUseCase # * RuntimeFreqUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/short_range/MODEMultivar_fcstHRRR_obsMRMS_HRRRanl.py b/docs/use_cases/model_applications/short_range/MODEMultivar_fcstHRRR_obsMRMS_HRRRanl.py index db6b138f37..b7f6c8bb42 100644 --- a/docs/use_cases/model_applications/short_range/MODEMultivar_fcstHRRR_obsMRMS_HRRRanl.py +++ b/docs/use_cases/model_applications/short_range/MODEMultivar_fcstHRRR_obsMRMS_HRRRanl.py @@ -12,14 +12,21 @@ # -------------------- # # This use case demonstrates how to run Multivariate MODE to identify complex -# objects from two or more fields, defined by a logical expression. This use -# case identifies blizzard-like objects defined by: 1) the presence of snow -# precipitation type, 2) 10-m winds > 20 mph, and 3) visibility < 1/2 mile. -# The use of multivariate MODE is well-suited to assess the structure and +# objects from two or more fields defined by a logical expression. This use +# case identifies blizzard-like objects defined by the intersection of: 1) the +# presence of snow precipitation type, 2) 10-m winds > 20 mph, and 3) visibility +# < 1/2 mile. The use of multivariate MODE is well-suited to assess the structure and # placement of complex high-impact events such as blizzard conditions and heavy -# snow bands. Output from this use-case consists of the MODE forecast and observation -# super objects and the MODE ASCII, NetCDF, and PostScript files. -# +# snow bands. Output from this use case consists of the MODE ASCII, NetCDF, and +# PostScript files for the MODE forecast and observation super objects. +# +# In this case, MODE super object intensity statistics were output for both 10-m +# wind and visibility. Using the MODE_MULTIVAR_INTENSITY_FLAG, the user can +# control for which variables super object intensity statistics will be output. +# If all are set to False, then no intensity information will be output and only +# statistics relative to the super-object geometry will be available.
In the case of +# no requested intensities, the parameters MODE_FCST/OBS_MULTIVAR_NAME and/or +# MODE_FCST/OBS_MULTIVAR_LEVEL may be used as identifiers for the super-object. ############################################################################## # Datasets # -------- # @@ -43,36 +50,20 @@ # This tarball should be unpacked into the directory that you will set the # value of INPUT_BASE. See :ref:`running-metplus` for more information. - ############################################################################## # METplus Components # ------------------ # -# This use case runs MODE using multiple variables to output the super objects -# based on a user-defined logical expression. Currently, the initial multivariate -# MODE run only outputs the super objects and additional steps are required to -# produce the statistical output. GenVxMask is run on a field(s) of interest -# using the super objects to mask the field(s). Finally, MODE is run a second -# time on the super-object-masked field(s) to output attribute statistics for -# the field(s). -# -# **Note:** The second MODE run can also be run directly on the super objects if -# field-specific statistics, such as intensity, is not desired. +# This use case utilizes the METplus MODE wrapper, ingesting multiple variables +# to output complex super objects based on a user-defined logical expression. # ############################################################################## # METplus Workflow # ---------------- # -# The following tools are used for each run time: -# -# MODE(mv), GenVxMask(fcst_super), GenVxMask(obs_super), MODE(super) -# -# Where the first instance of MODE runs over multiple variables to identify -# super objects for the forecast and observation, GenVxMask masks the raw input -# field(s) using the super objects, and the second instance of MODE is run -# traditionally to compare the masked forecast and observed super objects and -# and provide statistics. +# MODE is the only tool called and ingests multiple fields to create a complex +# super object. # # This example runs a single forecast hour. # @@ -128,50 +119,17 @@ # INFO: METplus has successfully finished running. # # Refer to the value set for **OUTPUT_BASE** to find where the output data was generated.
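The super object logic and intensity statistics described above are controlled from the METplus configuration. A minimal sketch follows; the values are illustrative assumptions for the three fields in this use case and are not copied from the shipped configuration file::

    [config]
    # Fields 1-3 are snow precipitation type, 10-m wind, and visibility;
    # the logical expression intersects them into one super object
    MODE_MULTIVAR_LOGIC = #1 && #2 && #3
    # Request super object intensity statistics for 10-m wind and
    # visibility only (assumed boolean list, one entry per field)
    MODE_MULTIVAR_INTENSITY_FLAG = FALSE, TRUE, TRUE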
-# Output for this use case will be found in OUTPUT_BASE for the various MET tools -# and will contain the following files: -# -# **mode/2021020100/f21** -# -# Multivariate output - first instance -# -# Precipitation type = snow -# -# * 00/mode_210000L_20210201_210000V_000000A_cts.txt -# * 00/mode_210000L_20210201_210000V_000000A_obj.nc -# * 00/mode_210000L_20210201_210000V_000000A_obj.txt -# * 00/mode_210000L_20210201_210000V_000000A.ps -# -# Visibility -# -# * 01/mode_210000L_20210201_210000V_000000A_cts.txt -# * 01/mode_210000L_20210201_210000V_000000A_obj.nc -# * 01/mode_210000L_20210201_210000V_000000A_obj.txt -# * 01/mode_210000L_20210201_210000V_000000A.ps -# -# 10-m Winds -# -# * 02/mode_210000L_20210201_210000V_000000A_cts.txt -# * 02/mode_210000L_20210201_210000V_000000A_obj.nc -# * 02/mode_210000L_20210201_210000V_000000A_obj.txt -# * 02/mode_210000L_20210201_210000V_000000A.ps -# -# Super Objects -# -# * f_super.nc -# * o_super.nc -# -# MODE 10-m wind super object output - second instance -# -# * mode_HRRR_vs_ANALYSIS_WIND_super_Z10_210000L_20210201_210000V_000000A_cts.txt -# * mode_HRRR_vs_ANALYSIS_WIND_super_Z10_210000L_20210201_210000V_000000A_obj.nc -# * mode_HRRR_vs_ANALYSIS_WIND_super_Z10_210000L_20210201_210000V_000000A_obj.txt -# * mode_HRRR_vs_ANALYSIS_WIND_super_Z10_210000L_20210201_210000V_000000A.ps -# -# **gen_vx_mask/2021020100** -# -# * fcst_wind_super_2021020100_f21.nc -# * obs_wind_super_2021020121.nc +# Output for this use case will be found in OUTPUT_BASE and will contain the following +# files in the directory mode/2021020100/f21: +# +# * mode_Fcst_VIS_L0_Obs_VIS_L0_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A_cts.txt +# * mode_Fcst_VIS_L0_Obs_VIS_L0_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A_obj.nc +# * mode_Fcst_VIS_L0_Obs_VIS_L0_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A_obj.txt +# * mode_Fcst_VIS_L0_Obs_VIS_L0_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A.ps +# * mode_Fcst_WIND_Z10_Obs_WIND_Z10_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A_cts.txt +# * mode_Fcst_WIND_Z10_Obs_WIND_Z10_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A_obj.nc +# * mode_Fcst_WIND_Z10_Obs_WIND_Z10_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A_obj.txt +# * mode_Fcst_WIND_Z10_Obs_WIND_Z10_HRRR_vs_ANALYSIS_210000L_20210201_210000V_000000A.ps ############################################################################## # Keywords @@ -180,7 +138,6 @@ # .. note:: # # * MODEToolUseCase -# * GenVxMaskToolUseCase # * ShortRangeAppUseCase # * GRIB2FileUseCase # * RegriddingInToolUseCase @@ -188,7 +145,6 @@ # * NCAROrgUseCase # * DiagnosticsUseCase # -# # Navigate to the :ref:`quick-search` page to discover other similar use cases. # # diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.py b/docs/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.py index ea52376bb1..22edf8e333 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.py @@ -162,6 +162,7 @@ # * TCPairsToolUseCase # * SBUOrgUseCase # * CyclonePlotterUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. 
# diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.py b/docs/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.py index ad6205e929..3a19f8abd0 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.py @@ -152,6 +152,7 @@ # # * TCandExtraTCAppUseCase # * GridStatToolUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.py b/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.py index 8f10fbb482..a0fbb57a1d 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.py @@ -139,6 +139,7 @@ # * NOAAEMCOrgUseCase # * SBUOrgUseCase # * DTCOrgUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.py b/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.py index d2aaaa4e20..f7ddd8c091 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.py @@ -157,6 +157,7 @@ # * MediumRangeAppUseCase # * SBUOrgUseCase # * DTCOrgUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.py b/docs/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.py index 307fb4a6d4..d66e08cb21 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.py @@ -135,6 +135,7 @@ # .. note:: # # * TCGenToolUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/TCPairs_TCStat_fcstADECK_obsBDECK_ATCF_BasicExample.py b/docs/use_cases/model_applications/tc_and_extra_tc/TCPairs_TCStat_fcstADECK_obsBDECK_ATCF_BasicExample.py index 99df0976dc..f60d401898 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/TCPairs_TCStat_fcstADECK_obsBDECK_ATCF_BasicExample.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/TCPairs_TCStat_fcstADECK_obsBDECK_ATCF_BasicExample.py @@ -149,6 +149,7 @@ # # * TCPairsToolUseCase # * TCStatToolUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. 
# diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.py b/docs/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.py index 8bc43a8c78..ccad94908b 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.py @@ -123,6 +123,7 @@ # # * TCRMWToolUseCase # * GRIB2FileUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/docs/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.py b/docs/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.py index 313bd0c8d3..8b24b5a671 100644 --- a/docs/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.py +++ b/docs/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.py @@ -164,6 +164,7 @@ # * UserScriptUseCase # * PointStatToolUseCase # * ASCII2NCToolUseCase +# * TropicalCycloneUseCase # # Navigate to the :ref:`quick-search` page to discover other similar use cases. # diff --git a/internal/scripts/docker_env/Dockerfile b/internal/scripts/docker_env/Dockerfile index bbc7aa6c4d..6e639d1887 100644 --- a/internal/scripts/docker_env/Dockerfile +++ b/internal/scripts/docker_env/Dockerfile @@ -19,3 +19,8 @@ ARG METPLUS_ENV_VERSION ARG ENV_NAME RUN conda list --name ${ENV_NAME}.${METPLUS_ENV_VERSION} > \ /usr/local/conda/envs/${ENV_NAME}.${METPLUS_ENV_VERSION}/environments.yml + +# remove base environment to free up space +ARG METPLUS_ENV_VERSION +ARG BASE_ENV=metplus_base +RUN conda env remove -y --name ${BASE_ENV}.${METPLUS_ENV_VERSION} diff --git a/internal/scripts/docker_env/Dockerfile.cartopy b/internal/scripts/docker_env/Dockerfile.cartopy index c736c2ea73..079259d51b 100644 --- a/internal/scripts/docker_env/Dockerfile.cartopy +++ b/internal/scripts/docker_env/Dockerfile.cartopy @@ -27,3 +27,8 @@ RUN apt update && apt -y upgrade \ && rm -f cartopy_feature_download.py \ && curl https://raw.githubusercontent.com/SciTools/cartopy/master/tools/cartopy_feature_download.py > cartopy_feature_download.py \ && /usr/local/conda/envs/${ENV_NAME}.${METPLUS_ENV_VERSION}/bin/python3 cartopy_feature_download.py cultural physical + +# remove base environment to free up space +ARG METPLUS_ENV_VERSION +ARG BASE_ENV=metplus_base +RUN conda env remove -y --name ${BASE_ENV}.${METPLUS_ENV_VERSION} diff --git a/internal/scripts/docker_env/README.md b/internal/scripts/docker_env/README.md index 80ff1ff7fb..45083f0e57 100644 --- a/internal/scripts/docker_env/README.md +++ b/internal/scripts/docker_env/README.md @@ -426,9 +426,9 @@ export METPLUS_ENV_VERSION=v5.1 -## pytest.v5.1 (from metplus_base.v5.1) +## test.v5.1 (from metplus_base.v5.1) -This environment is used in automation to run the pytests. It requires all of the +This environment is used in automation to run the pytests and diff tests. It requires all of the packages needed to run all of the METplus wrappers, the pytest package and the pytest code coverage package. @@ -436,10 +436,10 @@ code coverage package. ``` export METPLUS_ENV_VERSION=v5.1 -docker build -t dtcenter/metplus-envs:pytest.${METPLUS_ENV_VERSION} \ +docker build -t dtcenter/metplus-envs:test.${METPLUS_ENV_VERSION} \ --build-arg METPLUS_ENV_VERSION \ - --build-arg ENV_NAME=pytest . 
-docker push dtcenter/metplus-envs:pytest.${METPLUS_ENV_VERSION} + --build-arg ENV_NAME=test . +docker push dtcenter/metplus-envs:test.${METPLUS_ENV_VERSION} ``` diff --git a/internal/scripts/docker_env/scripts/pytest_env.sh b/internal/scripts/docker_env/scripts/test_env.sh similarity index 72% rename from internal/scripts/docker_env/scripts/pytest_env.sh rename to internal/scripts/docker_env/scripts/test_env.sh index 94e83772a0..922d1f552d 100755 --- a/internal/scripts/docker_env/scripts/pytest_env.sh +++ b/internal/scripts/docker_env/scripts/test_env.sh @@ -1,16 +1,20 @@ #! /bin/sh ################################################################################ -# Environment: pytest.v5.1 -# Last Updated: 2023-01-31 (mccabe@ucar.edu) +# Environment: test.v5.1 +# Last Updated: 2023-07-14 (mccabe@ucar.edu) # Notes: Adds pytest and pytest coverage packages to run unit tests # Added pandas because plot_util test needs it # Added netcdf4 because SeriesAnalysis test needs it +# Added pillow and pdf2image for diff tests # Python Packages: # TODO: update version numbers # pytest==? # pytest-cov==? # pandas==? +# netcdf4==? +# pillow==? +# pdf2image==? # # Other Content: None ################################################################################ @@ -19,7 +23,7 @@ METPLUS_VERSION=$1 # Conda environment to create -ENV_NAME=pytest.${METPLUS_VERSION} +ENV_NAME=test.${METPLUS_VERSION} # Conda environment to use as base for new environment BASE_ENV=metplus_base.${METPLUS_VERSION} @@ -29,3 +33,9 @@ conda install -y --name ${ENV_NAME} -c conda-forge pytest conda install -y --name ${ENV_NAME} -c conda-forge pytest-cov conda install -y --name ${ENV_NAME} -c conda-forge pandas conda install -y --name ${ENV_NAME} -c conda-forge netcdf4 +conda install -y --name ${ENV_NAME} -c conda-forge pillow + +apt-get update +apt-get install -y poppler-utils + +conda install -y --name ${ENV_NAME} -c conda-forge pdf2image diff --git a/internal/tests/pytests/conftest.py b/internal/tests/pytests/conftest.py index 8056e4cfe4..968f96b736 100644 --- a/internal/tests/pytests/conftest.py +++ b/internal/tests/pytests/conftest.py @@ -13,48 +13,23 @@ from metplus.util import config_metplus -# get host from either METPLUS_PYTEST_HOST or from actual host name -# Look for minimum_pytest..sh script to source -# error and exit if not found -pytest_host = os.environ.get('METPLUS_PYTEST_HOST') -if pytest_host is None: - import socket - pytest_host = socket.gethostname() - print("No hostname provided with METPLUS_PYTEST_HOST, " - f"using {pytest_host}") -else: - print(f"METPLUS_PYTEST_HOST = {pytest_host}") - -minimum_pytest_file = os.path.join(os.path.dirname(__file__), - f'minimum_pytest.{pytest_host}.sh') -if not os.path.exists(minimum_pytest_file): - print(f"ERROR: minimum_pytest.{pytest_host}.sh file must exist in " - "pytests directory. 
Set METPLUS_PYTEST_HOST correctly or " - "create file to run pytests on this host.") - sys.exit(4) - -# source minimum_pytest..sh script -current_user = getpass.getuser() -command = shlex.split(f"env -i bash -c 'export USER={current_user} && " - f"source {minimum_pytest_file} && env'") -proc = subprocess.Popen(command, stdout=subprocess.PIPE) - -for line in proc.stdout: - line = line.decode(encoding='utf-8', errors='strict').strip() - key, value = line.split('=') - os.environ[key] = value - -proc.communicate() - -output_base = os.environ['METPLUS_TEST_OUTPUT_BASE'] +output_base = os.environ.get('METPLUS_TEST_OUTPUT_BASE') if not output_base: print('ERROR: METPLUS_TEST_OUTPUT_BASE must be set to a path to write') sys.exit(1) -test_output_dir = os.path.join(output_base, 'test_output') -if os.path.exists(test_output_dir): - print(f'Removing test output dir: {test_output_dir}') - shutil.rmtree(test_output_dir) +try: + test_output_dir = os.path.join(output_base, 'test_output') + if os.path.exists(test_output_dir): + print(f'Removing test output dir: {test_output_dir}') + shutil.rmtree(test_output_dir) + + if not os.path.exists(test_output_dir): + print(f'Creating test output dir: {test_output_dir}') + os.makedirs(test_output_dir) +except PermissionError: + print(f'ERROR: Cannot write to $METPLUS_TEST_OUTPUT_BASE: {output_base}') + sys.exit(2) @pytest.hookimpl(tryfirst=True, hookwrapper=True) diff --git a/internal/tests/pytests/minimum_pytest.conf b/internal/tests/pytests/minimum_pytest.conf index ae6183fe5e..703338e43c 100644 --- a/internal/tests/pytests/minimum_pytest.conf +++ b/internal/tests/pytests/minimum_pytest.conf @@ -1,8 +1,9 @@ [config] -INPUT_BASE = {ENV[METPLUS_TEST_INPUT_BASE]} +INPUT_BASE = {ENV[METPLUS_TEST_OUTPUT_BASE]}/input OUTPUT_BASE = {ENV[METPLUS_TEST_OUTPUT_BASE]}/test_output/{RUN_ID} -MET_INSTALL_DIR = {ENV[METPLUS_TEST_MET_INSTALL_DIR]} -TMP_DIR = {ENV[METPLUS_TEST_TMP_DIR]} +MET_INSTALL_DIR = {ENV[METPLUS_TEST_OUTPUT_BASE]} + +DO_NOT_RUN_EXE = True LOG_LEVEL = DEBUG LOG_LEVEL_TERMINAL = WARNING diff --git a/internal/tests/pytests/minimum_pytest.dakota.sh b/internal/tests/pytests/minimum_pytest.dakota.sh deleted file mode 100644 index 0b66555fa9..0000000000 --- a/internal/tests/pytests/minimum_pytest.dakota.sh +++ /dev/null @@ -1,4 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=/d3/projects/MET/METplus_Data -export METPLUS_TEST_OUTPUT_BASE=/d3/personal/${USER}/pytest -export METPLUS_TEST_MET_INSTALL_DIR=/d3/projects/MET/MET_releases/met-9.1_beta3 -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp diff --git a/internal/tests/pytests/minimum_pytest.docker.sh b/internal/tests/pytests/minimum_pytest.docker.sh deleted file mode 100644 index 1ab0c44f61..0000000000 --- a/internal/tests/pytests/minimum_pytest.docker.sh +++ /dev/null @@ -1,5 +0,0 @@ -# These are the paths from within the docker container, docker-space -export METPLUS_TEST_INPUT_BASE=/data/input -export METPLUS_TEST_OUTPUT_BASE=/data/output -export METPLUS_TEST_MET_INSTALL_DIR=/usr/local -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp diff --git a/internal/tests/pytests/minimum_pytest.eyewall.sh b/internal/tests/pytests/minimum_pytest.eyewall.sh deleted file mode 100644 index 06a69dd650..0000000000 --- a/internal/tests/pytests/minimum_pytest.eyewall.sh +++ /dev/null @@ -1,5 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=/d1/METplus_Data -export METPLUS_TEST_OUTPUT_BASE=/d1/${USER}/pytest -export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met-9.0 -#export 
METPLUS_TEST_MET_INSTALL_DIR=/d1/CODE/MET/MET_releases/met-9.0_beta4 -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp diff --git a/internal/tests/pytests/minimum_pytest.hera.sh b/internal/tests/pytests/minimum_pytest.hera.sh deleted file mode 100644 index bfb541180d..0000000000 --- a/internal/tests/pytests/minimum_pytest.hera.sh +++ /dev/null @@ -1,4 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=/home/${USER}/metplus_pytests -export METPLUS_TEST_OUTPUT_BASE=/home/${USER}/metplus_pytests/out -export METPLUS_TEST_MET_INSTALL_DIR=/contrib/met/8.1 -export METPLUS_TEST_TMP_DIR=/tmp diff --git a/internal/tests/pytests/minimum_pytest.kiowa.sh b/internal/tests/pytests/minimum_pytest.kiowa.sh deleted file mode 100644 index 33cb80aa93..0000000000 --- a/internal/tests/pytests/minimum_pytest.kiowa.sh +++ /dev/null @@ -1,5 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=/d1/projects/METplus/METplus_Data -export METPLUS_TEST_OUTPUT_BASE=/d1/personal/${USER}/pytest -export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met -#export METPLUS_TEST_MET_INSTALL_DIR=/d1/projects/MET/MET_releases/met-9.0_beta4 -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp diff --git a/internal/tests/pytests/minimum_pytest.seneca.sh b/internal/tests/pytests/minimum_pytest.seneca.sh deleted file mode 100644 index 9fac2a711f..0000000000 --- a/internal/tests/pytests/minimum_pytest.seneca.sh +++ /dev/null @@ -1,4 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=/d1/projects/METplus/METplus_Data -export METPLUS_TEST_OUTPUT_BASE=/d1/personal/${USER}/pytest -export METPLUS_TEST_MET_INSTALL_DIR=/usr/local/met -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp diff --git a/internal/tests/pytests/minimum_pytest.venus.sh b/internal/tests/pytests/minimum_pytest.venus.sh deleted file mode 100644 index 2c4774e348..0000000000 --- a/internal/tests/pytests/minimum_pytest.venus.sh +++ /dev/null @@ -1,4 +0,0 @@ -export METPLUS_TEST_INPUT_BASE=/gpfs/dell2/emc/verification/noscrub/$USER/METplus/METplus-3.0_sample_data -export METPLUS_TEST_OUTPUT_BASE=/gpfs/dell2/emc/verification/noscrub/$USER/metplus_test -export METPLUS_TEST_MET_INSTALL_DIR=/gpfs/dell2/emc/verification/noscrub/$USER/met/9.0_beta4 -export METPLUS_TEST_TMP_DIR=${METPLUS_TEST_OUTPUT_BASE}/tmp diff --git a/internal/tests/pytests/pytest.ini b/internal/tests/pytests/pytest.ini index 1a9aa7a977..2851d20601 100644 --- a/internal/tests/pytests/pytest.ini +++ b/internal/tests/pytests/pytest.ini @@ -9,3 +9,4 @@ markers = wrapper: custom marker for testing metplus/wrapper logic - all others long: custom marker for tests that take a long time to run plotting: custom marker for tests that involve plotting + diff: custom marker for diff util tests that require additional packages diff --git a/internal/tests/pytests/util/diff_util/test_diff_util.py b/internal/tests/pytests/util/diff_util/test_diff_util.py new file mode 100644 index 0000000000..7ac2c837b4 --- /dev/null +++ b/internal/tests/pytests/util/diff_util/test_diff_util.py @@ -0,0 +1,155 @@ +import pytest + +import os +import shutil +import uuid + +from metplus.util.diff_util import dirs_are_equal, ROUNDING_OVERRIDES +from metplus.util import mkdir_p + +test_output_dir = os.path.join(os.environ['METPLUS_TEST_OUTPUT_BASE'], + 'test_output') + +stat_header = 'VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE' +mpr_line_1 = 'V11.1.0 HRRR ALL_1.25 
+mpr_line_2 = 'V11.1.0 HRRR ALL_1.25 120000 20220701_200000 20220701_200000 000000 20220701_200000 20220701_200000 HPBL m L0 HPBL m L0 ADPSFC DENVER BILIN 4 NA NA NA NA MPR 5 4 DENVER 39.78616 -104.41425 0 0 2160.80324 1498.05994 AMDAR NA NA NA'
+file_path_1 = '/some/path/of/fake/file/one'
+file_path_2 = '/some/path/of/fake/file/two'
+file_path_3 = '/some/path/of/fake/file/three'
+csv_header = 'Last Name, First Name, Progress'
+csv_val_1 = 'Mackenzie, Stu, 0.9999'
+csv_val_2 = 'Kenny-Smith, Ambrose, 0.8977'
+
+
+def create_diff_files(files_a, files_b):
+    unique_id = str(uuid.uuid4())[0:8]
+    dir_a = os.path.join(test_output_dir, f'diff_{unique_id}', 'a')
+    dir_b = os.path.join(test_output_dir, f'diff_{unique_id}', 'b')
+    mkdir_p(dir_a)
+    mkdir_p(dir_b)
+    write_test_files(dir_a, files_a)
+    write_test_files(dir_b, files_b)
+    return dir_a, dir_b
+
+
+def write_test_files(dirname, files):
+    for filename, lines in files.items():
+        filepath = os.path.join(dirname, filename)
+        if os.path.sep in filename:
+            parent_dir = os.path.dirname(filepath)
+            mkdir_p(parent_dir)
+
+        with open(filepath, 'w') as file_handle:
+            for line in lines:
+                file_handle.write(f'{line}\n')
+
+
+@pytest.mark.parametrize(
+    'a_files, b_files, rounding_override, expected_is_equal', [
+        # txt both empty dir
+        ({}, {}, None, True),
+        # txt A empty dir
+        ({}, {'filename.txt': ['some', 'text']}, None, False),
+        # txt B empty dir
+        ({'filename.txt': ['some', 'text']}, {}, None, False),
+        # txt both empty file
+        ({'filename.txt': []}, {'filename.txt': []}, None, True),
+        # txt A empty file
+        ({'filename.txt': []}, {'filename.txt': ['some', 'text']}, None, False),
+        # txt B empty file
+        ({'filename.txt': ['some', 'text']}, {'filename.txt': []}, None, False),
+        # stat header columns
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [f'{stat_header} NEW_COLUMN', mpr_line_1]},
+         None, False),
+        # stat number of lines
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [stat_header, mpr_line_1, mpr_line_2]},
+         None, False),
+        # stat number of columns
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [stat_header, f'{mpr_line_1} extra_value']},
+         None, False),
+        # stat string
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [stat_header, mpr_line_1.replace('L0', 'Z0')]},
+         None, False),
+        # stat default precision
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [stat_header, mpr_line_1.replace('39.78616', '39.78615')]},
+         None, False),
+        # stat float override precision
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [stat_header, mpr_line_1.replace('39.78616', '39.78615')]},
+         4, True),
+        # stat out of order
+        ({'filename.stat': [stat_header, mpr_line_1, mpr_line_2]},
+         {'filename.stat': [stat_header, mpr_line_2, mpr_line_1]},
+         4, True),
+        # stat version differs
+        ({'filename.stat': [stat_header, mpr_line_1]},
+         {'filename.stat': [stat_header, mpr_line_1.replace('V11.1.0', 'V12.0.0')]},
+         None, True),
+        # file_list A without file_list line
+        ({'file_list.txt': [file_path_1, file_path_2, file_path_3]},
+         {'file_list.txt': ['file_list', file_path_1, file_path_2, file_path_3]},
+         None, True),
+        # file_list B without file_list line
+        ({'file_list.txt': ['file_list', file_path_1, file_path_2, file_path_3]},
+         {'file_list.txt': [file_path_1, file_path_2, file_path_3]},
+         None, True),
+        # file_list out of order
+        ({'file_list.txt': ['file_list', file_path_1, file_path_2, file_path_3]},
+         {'file_list.txt': ['file_list', file_path_2, file_path_3, file_path_1]},
+         None, True),
+        # csv equal
+        ({'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         {'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         None, True),
+        # csv number of columns A
+        ({'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         {'file_list.csv': [f'{csv_header}, Position', f'{csv_val_1}, flute', f'{csv_val_2}, harmonica']},
+         None, False),
+        # csv number of columns B
+        ({'file_list.csv': [f'{csv_header}, Position', f'{csv_val_1}, flute', f'{csv_val_2}, harmonica']},
+         {'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         None, False),
+        # csv number of lines A
+        ({'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         {'file_list.csv': [csv_header, csv_val_1]},
+         None, False),
+        # csv number of lines B
+        ({'file_list.csv': [csv_header, csv_val_1]},
+         {'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         None, False),
+        # csv diff default precision
+        ({'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         {'file_list.csv': [csv_header, csv_val_1.replace('0.9999', '0.9998'), csv_val_2]},
+         None, False),
+        # csv diff override precision
+        ({'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         {'file_list.csv': [csv_header, csv_val_1.replace('0.9999', '0.9998'), csv_val_2]},
+         3, True),
+        # csv diff first item
+        ({'file_list.csv': [csv_header, csv_val_1, csv_val_2]},
+         {'file_list.csv': [csv_header, csv_val_1.replace('Mackenzie', 'Art'), csv_val_2]},
+         None, False),
+    ]
+)
+@pytest.mark.diff
+def test_diff_dir_text_files(a_files, b_files, rounding_override, expected_is_equal):
+    if rounding_override:
+        for filename in a_files:
+            ROUNDING_OVERRIDES[filename] = rounding_override
+
+    a_dir, b_dir = create_diff_files(a_files, b_files)
+    assert dirs_are_equal(a_dir, b_dir) == expected_is_equal
+
+    # pass individual files instead of entire directory
+    for filename in a_files:
+        if filename in b_files:
+            a_path = os.path.join(a_dir, filename)
+            b_path = os.path.join(b_dir, filename)
+            assert dirs_are_equal(a_path, b_path) == expected_is_equal
+
+    shutil.rmtree(os.path.dirname(a_dir))
diff --git a/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py b/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py
index 71d65503a8..80dfe61170 100644
--- a/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py
+++ b/internal/tests/pytests/wrappers/grid_stat/test_grid_stat_wrapper.py
@@ -765,6 +765,7 @@ def test_grid_stat_single_field(metplus_config, config_overrides,
                         if item not in wrapper.WRAPPER_ENV_VAR_KEYS]
     env_var_keys = wrapper.WRAPPER_ENV_VAR_KEYS + missing_env
 
+    assert len(all_cmds) == len(expected_cmds)
     for (cmd, env_vars), expected_cmd in zip(all_cmds, expected_cmds):
         # ensure commands are generated as expected
         assert cmd == expected_cmd
diff --git a/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py b/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py
index e5815a5b4c..7f0b7e14db 100644
--- a/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py
+++ b/internal/tests/pytests/wrappers/series_analysis/test_series_analysis.py
@@ -901,6 +901,7 @@ def test_get_output_dir(metplus_config, template, storm_id, label, expected_resu
 
 @pytest.mark.wrapper_a
 def test_get_netcdf_min_max(metplus_config):
+    pytest.skip('Rewrite this test to write a NetCDF file and check vals instead of using file in met install dir')
     expected_min = 0.0
     expected_max = 8.0
diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py
index de609612ab..9da87d0faa 100644
--- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py
+++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py
@@ -875,26 +875,6 @@ def test_parse_model_info(metplus_config):
             == expected_out_stat_filename_template)
 
 
-@pytest.mark.wrapper_d
-def test_run_stat_analysis(metplus_config):
-    # Test running of stat_analysis
-    st = stat_analysis_wrapper(metplus_config)
-    # Test 1
-    expected_filename = (st.config.getdir('OUTPUT_BASE')+'/stat_analysis'
-                         '/00Z/MODEL_TEST/MODEL_TEST_20190101.stat')
-    if os.path.exists(expected_filename):
-        os.remove(expected_filename)
-    comparison_filename = (METPLUS_BASE+'/internal/tests/data/stat_data/'
-                           +'test_20190101.stat')
-    st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20190101', '%Y%m%d')
-    st.c_dict['DATE_END'] = datetime.datetime.strptime('20190101', '%Y%m%d')
-    st.c_dict['DATE_TYPE'] = 'VALID'
-    st._run_stat_analysis({})
-    assert os.path.exists(expected_filename)
-    assert (os.path.getsize(expected_filename) ==
-            os.path.getsize(comparison_filename))
-
-
 @pytest.mark.parametrize(
     'data_type, config_list, expected_list', [
         ('FCST', '\"0,*,*\"', ['"0,*,*"']),
diff --git a/internal/tests/pytests/wrappers/tc_diag/test_tc_diag_wrapper.py b/internal/tests/pytests/wrappers/tc_diag/test_tc_diag_wrapper.py
index faddf14e20..d62db8b247 100644
--- a/internal/tests/pytests/wrappers/tc_diag/test_tc_diag_wrapper.py
+++ b/internal/tests/pytests/wrappers/tc_diag/test_tc_diag_wrapper.py
@@ -9,7 +9,7 @@
 
 deck_template = 'aal14{date?fmt=%Y}_short.dat'
 input_template = 'gfs.subset.t00z.pgrb2.0p25.f*'
-output_template = 'tc_diag_aal14{date?fmt=%Y}.nc'
+output_template = '{date?fmt=%Y}'
 
 time_fmt = '%Y%m%d%H'
 run_times = ['2016092900']
@@ -24,6 +24,9 @@
     '{ name="TMP"; level="P100"; }];'
 )
 
+input_domain = 'parent'
+input_tech_id_list = 'GFSO'
+
 
 def get_data_dir(config):
     return os.path.join(config.getdir('METPLUS_BASE'),
@@ -46,9 +49,11 @@ def set_minimum_config_settings(config):
     config.set('config', 'TC_DIAG_CONFIG_FILE',
                '{PARM_BASE}/met_config/TCDiagConfig_wrapped')
     config.set('config', 'TC_DIAG_DECK_TEMPLATE', deck_template)
-    config.set('config', 'TC_DIAG_INPUT_TEMPLATE', input_template)
+    config.set('config', 'TC_DIAG_INPUT1_TEMPLATE', input_template)
+    config.set('config', 'TC_DIAG_INPUT1_DOMAIN', input_domain)
+    config.set('config', 'TC_DIAG_INPUT1_TECH_ID_LIST', input_tech_id_list)
     config.set('config', 'TC_DIAG_OUTPUT_DIR',
-               '{OUTPUT_BASE}/TCDiag/output')
+               '{OUTPUT_BASE}/tc_diag')
     config.set('config', 'TC_DIAG_OUTPUT_TEMPLATE', output_template)
 
     config.set('config', 'BOTH_VAR1_NAME', 'PRMSL')
@@ -231,13 +236,11 @@ def test_tc_diag_run(metplus_config, config_overrides,
     config_file = wrapper.c_dict.get('CONFIG_FILE')
     out_dir = wrapper.c_dict.get('OUTPUT_DIR')
 
-    expected_cmds = [(f"{app_path} "
-                      f"-deck {deck_dir}/aal142016_short.dat "
-                      f"-data {file_list_file} "
-                      f"-config {config_file} "
-                      f"-out {out_dir}/tc_diag_aal142016.nc "
-                      f"{verbosity}"),
-                     ]
+    expected_cmds = [
+        (f"{app_path} -deck {deck_dir}/aal142016_short.dat "
+         f"-data {input_domain} {input_tech_id_list} {file_list_file} "
+         f"-config {config_file} -outdir {out_dir}/2016/ {verbosity}"),
+    ]
 
     all_cmds = wrapper.run_all_times()
     print(f"ALL COMMANDS: {all_cmds}")
diff --git a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py
index 40b66b6c1d..12bcc44db5 100644
--- a/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py
+++ b/internal/tests/pytests/wrappers/tc_stat/test_tc_stat_wrapper.py
@@ -14,6 +14,7 @@
 config_init_beg = '20170705'
 config_init_end = '20170901'
 
+
 def get_config(metplus_config):
     # extra_configs = []
     # extra_configs.append(os.path.join(os.path.dirname(__file__),
@@ -44,16 +45,56 @@ def get_config(metplus_config):
     return config
 
-
-def tc_stat_wrapper(metplus_config):
-    """! Returns a default TCStatWrapper with /path/to entries in the
-    metplus_system.conf and metplus_runtime.conf configuration
-    files. Subsequent tests can customize the final METplus configuration
-    to over-ride these /path/to values."""
-
-    # Default, empty TcStatWrapper with some configuration values set
-    # to /path/to:
+@pytest.mark.parametrize(
+    'config_overrides, expected_dirs, expected_job_string', [
+        # 0: dump_row and out_stat files
+        ({'TC_STAT_JOB_ARGS': ("-job summary -line_type TCMPR -column "
+                               "'ABS(AMAX_WIND-BMAX_WIND)' "
+                               "-dump_row dump_row/summary.tcst, "
+                               "-job summary -line_type TCMPR -column "
+                               "'ABS(AMAX_WIND-BMAX_WIND)' "
+                               "-out_stat out_stat/stat_summary.tcst")},
+         ['dump_row', 'out_stat'],
+         ('jobs = ["-job summary -line_type TCMPR -column \'ABS(AMAX_WIND-BMAX_WIND)\' -dump_row dump_row/summary.tcst",'
+          '"-job summary -line_type TCMPR -column \'ABS(AMAX_WIND-BMAX_WIND)\' -out_stat out_stat/stat_summary.tcst"];')),
+        # 1: dump_row file
+        ({'TC_STAT_JOB_ARGS': ("-job summary -line_type TCMPR -column "
+                               "'ABS(AMAX_WIND-BMAX_WIND)' "
+                               "-dump_row dump_row/summary.tcst")},
+         ['dump_row'],
+         'jobs = ["-job summary -line_type TCMPR -column \'ABS(AMAX_WIND-BMAX_WIND)\' -dump_row dump_row/summary.tcst"];'),
+        # 2: out_stat file
+        ({'TC_STAT_JOB_ARGS': ("-job summary -line_type TCMPR -column "
+                               "'ABS(AMAX_WIND-BMAX_WIND)' "
+                               "-out_stat out_stat/stat_summary.tcst")},
+         ['out_stat'],
+         'jobs = ["-job summary -line_type TCMPR -column \'ABS(AMAX_WIND-BMAX_WIND)\' -out_stat out_stat/stat_summary.tcst"];'),
+    ]
+)
+@pytest.mark.wrapper
+def test_tc_stat_handle_jobs(metplus_config, config_overrides, expected_dirs,
+                             expected_job_string):
     config = get_config(metplus_config)
-    return TCStatWrapper(config)
+
+    # turn off "do not run" setting so directories are created
+    config.set('config', 'DO_NOT_RUN_EXE', False)
+
+    # set config variable overrides
+    for key, value in config_overrides.items():
+        config.set('config', key, value)
+
+    # initialize wrapper and ensure it was initialized properly
+    wrapper = TCStatWrapper(config)
+    assert wrapper.isOK
+
+    # ensure job string is formatted as expected
+    actual_job_string = wrapper.handle_jobs(time_info={})
+    assert actual_job_string == expected_job_string
+    assert wrapper.env_var_dict['METPLUS_JOBS'] == expected_job_string
+
+    # ensure output directories are created properly
+    for expected in expected_dirs:
+        expected_dir = os.path.join(wrapper.c_dict['OUTPUT_DIR'], expected)
+        assert os.path.exists(expected_dir) and os.path.isdir(expected_dir)
 
 
 @pytest.mark.parametrize(
     'config_overrides, env_var_values', [
@@ -197,6 +238,9 @@
         # 46 out_valid_mask
         ({'TC_STAT_OUT_VALID_MASK': 'MET_BASE/poly/EAST.poly', },
          {'METPLUS_OUT_VALID_MASK': 'out_valid_mask = "MET_BASE/poly/EAST.poly";'}),
+        # 47 output template
+        ({'TC_STAT_OUTPUT_TEMPLATE': 'tc_stat.out.nc', },
+         {}),
     ]
 )
@@ -232,7 +276,7 @@ def test_tc_stat_run(metplus_config, config_overrides, env_var_values):
     verbosity = f"-v {wrapper.c_dict['VERBOSITY']}"
     config_file = wrapper.c_dict.get('CONFIG_FILE')
     lookin_dir = wrapper.c_dict.get('LOOKIN_DIR')
-    out_temp = wrapper.c_dict.get('OUTPUT_TEMPLATE')
+    out_temp = wrapper.c_dict.get('JOB_OUTPUT_TEMPLATE')
     out_dir = wrapper.c_dict.get('OUTPUT_DIR')
     out_arg = f' -out {out_dir}/{out_temp}' if out_temp else ''
 
@@ -360,24 +404,23 @@ def test_override_config_in_c_dict(metplus_config, overrides, c_dict):
 @pytest.mark.parametrize(
     'jobs, init_dt, expected_output', [
         # single fake job
-         (['job1'],
-          None,
-          'jobs = ["job1"];'
-          ),
+        (['job1'],
+         None,
+         'jobs = ["job1"];'
+         ),
         # 2 jobs, no time info
-         (['-job filter -dump_row /filt.tcst',
-           '-job rirw -line_type TCMPR '],
-          None,
-          'jobs = ["-job filter -dump_row /filt.tcst",'
-          '"-job rirw -line_type TCMPR"];'
-          ),
-
+        (['-job filter -dump_row /filt.tcst',
+         '-job rirw -line_type TCMPR '],
+         None,
+         'jobs = ["-job filter -dump_row /filt.tcst",'
+         '"-job rirw -line_type TCMPR"];'
+         ),
         # 2 jobs, time info sub
         (['-job filter -dump_row /{init?fmt=%Y%m%d%H}.tcst',
          '-job rirw -line_type TCMPR '],
-        datetime.datetime(2019, 10, 31, 12),
-        'jobs = ["-job filter -dump_row /2019103112.tcst",'
-        '"-job rirw -line_type TCMPR"];'
+         datetime.datetime(2019, 10, 31, 12),
+         'jobs = ["-job filter -dump_row /2019103112.tcst",'
+         '"-job rirw -line_type TCMPR"];'
         ),
     ]
 )
@@ -388,7 +431,8 @@ def test_handle_jobs(metplus_config, jobs, init_dt, expected_output):
     else:
         time_info = None
 
-    wrapper = tc_stat_wrapper(metplus_config)
+    config = get_config(metplus_config)
+    wrapper = TCStatWrapper(config)
     output_base = wrapper.config.getdir('OUTPUT_BASE')
     output_dir = os.path.join(output_base, 'test_handle_jobs')
 
@@ -411,19 +455,19 @@ def cleanup_test_dirs(parent_dirs, output_dir):
 @pytest.mark.parametrize(
     'jobs, init_dt, expected_output, parent_dirs', [
         # single fake job, no parent dir
-         (['job1'],
-          None,
-          'jobs = ["job1"];',
-          None
-          ),
+        (['job1'],
+         None,
+         'jobs = ["job1"];',
+         None
+         ),
        # 2 jobs, no time info, 1 parent dir
-         (['-job filter -dump_row /filt.tcst',
-           '-job rirw -line_type TCMPR '],
-          None,
-          'jobs = ["-job filter -dump_row /filt.tcst",'
-          '"-job rirw -line_type TCMPR"];',
-          [''],
-          ),
+        (['-job filter -dump_row /filt.tcst',
+         '-job rirw -line_type TCMPR '],
+         None,
+         'jobs = ["-job filter -dump_row /filt.tcst",'
+         '"-job rirw -line_type TCMPR"];',
+         [''],
+         ),
 
         # 2 jobs, time info sub, 1 parent dir
         (['-job filter -dump_row /{init?fmt=%Y%m%d%H}.tcst',
@@ -452,7 +496,7 @@ def cleanup_test_dirs(parent_dirs, output_dir):
             'jobs = ["-job filter -dump_row /sub1/2019103112.tcst",'
             '"-job filter -dump_row /sub2/20191031.tcst"];',
             ['/sub1',
-             '/sub2',],
+             '/sub2', ],
         ),
     ]
 )
diff --git a/internal/tests/use_cases/all_use_cases.txt b/internal/tests/use_cases/all_use_cases.txt
index 3ddc50030a..40f8a0b443 100644
--- a/internal/tests/use_cases/all_use_cases.txt
+++ b/internal/tests/use_cases/all_use_cases.txt
@@ -61,7 +61,7 @@ Category: met_tool_wrapper
 59::IODA2NC::met_tool_wrapper/IODA2NC/IODA2NC.conf
 60::PointStat_python_embedding_obs:: met_tool_wrapper/PointStat/PointStat_python_embedding_obs.conf
 61::PlotPointObs:: met_tool_wrapper/PlotPointObs/PlotPointObs.conf
-#62::TCDiag:: met_tool_wrapper/TCDiag/TCDiag.conf
+62::TCDiag:: met_tool_wrapper/TCDiag/TCDiag.conf
 
 Category: air_quality_and_comp
 0::EnsembleStat_fcstICAP_obsMODIS_aod::model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf
diff --git a/metplus/util/config_metplus.py b/metplus/util/config_metplus.py
index f90aeed939..f898039818 100644
--- a/metplus/util/config_metplus.py
+++ b/metplus/util/config_metplus.py
@@ -240,7 +240,7 @@ def launch(config_list):
     config.set('config', 'CONFIG_INPUT', ','.join(config_format_list))
 
     # save unique identifier for the METplus run
-    config.set('config', 'RUN_ID', str(uuid.uuid4())[0:8])
+    config.set('config', 'RUN_ID', config.run_id)
 
     # get OUTPUT_BASE to make sure it is set correctly so the first error
     # that is logged relates to OUTPUT_BASE, not LOG_DIR, which is likely
@@ -450,7 +450,8 @@ def __init__(self, conf=None):
                              interpolation=None) if (conf is None) else conf
         super().__init__(conf)
         self._cycle = None
-        self._logger = logging.getLogger('metplus')
+        self.run_id = str(uuid.uuid4())[0:8]
+        self._logger = logging.getLogger(f'metplus.{self.run_id}')
         # config.logger is called in wrappers, so set this name
         # so the code doesn't break
         self.logger = self._logger
@@ -461,6 +462,13 @@ def __init__(self, conf=None):
         # add section to hold environment variables defined by the user
         self.add_section('user_env_vars')
 
+    def __del__(self):
+        """!When object is deleted, close and remove all log handlers"""
+        handlers = self.logger.handlers[:]
+        for handler in handlers:
+            self.logger.removeHandler(handler)
+            handler.close()
+
     def log(self, sublog=None):
         """! Overrides method in ProdConfig
         If the sublog argument is
diff --git a/metplus/util/diff_util.py b/metplus/util/diff_util.py
index f26ba790f2..4278206c6d 100755
--- a/metplus/util/diff_util.py
+++ b/metplus/util/diff_util.py
@@ -13,16 +13,17 @@
 IMAGE_EXTENSIONS = [
     '.jpg',
     '.jpeg',
+    '.png',
 ]
 
 NETCDF_EXTENSIONS = [
     '.nc',
     '.cdf',
+    '.nc4',
 ]
 
 SKIP_EXTENSIONS = [
     '.zip',
-    '.png',
     '.gif',
     '.ix',
 ]
@@ -38,9 +39,31 @@
 UNSUPPORTED_EXTENSIONS = [
 ]
 
+# keywords to search and skip diff tests if found in file path
+# PBL use case can be removed after dtcenter/METplus#2246 is completed
+SKIP_KEYWORDS = [
+    'PointStat_fcstHRRR_obsAMDAR_PBLH_PyEmbed',
+]
+
+
+###
+# Rounding Constants
+###
+
+# number of decimal places to use for comparing floats by default
+DEFAULT_ROUNDING_PRECISION = 6
+
+# dictionary where key is a keyword to search (e.g. use case name)
+# and the value is the rounding precision to use for files that
+# match the keyword
+ROUNDING_OVERRIDES = {
+    'UserScript_obsCFSR_obsOnly_MJO_ENSO': 5,
+    'UserScript_fcstS2S_obsERAI_CrossSpectra': 4,
+}
+
 # number of decision places to accept float differences
 # Note: Completing METplus issue #1873 could allow this to be set to 6
-ROUNDING_PRECISION = 5
+rounding_precision = DEFAULT_ROUNDING_PRECISION
 
 
 def get_file_type(filepath):
@@ -76,6 +99,12 @@ def get_file_type(filepath):
     return 'unknown'
 
 
+def dirs_are_equal(dir_a, dir_b, debug=False, save_diff=False):
+    if compare_dir(dir_a, dir_b, debug=debug, save_diff=save_diff):
+        return False
+    return True
+
+
 def compare_dir(dir_a, dir_b, debug=False, save_diff=False):
     print('::group::Full diff results:')
     # if input are files and not directories, compare them
@@ -177,6 +206,13 @@ def compare_files(filepath_a, filepath_b, debug=False, dir_a=None, dir_b=None,
     print(f"file_A: {filepath_a}")
     print(f"file_B: {filepath_b}\n")
 
+    for skip in SKIP_KEYWORDS:
+        if skip in filepath_a or skip in filepath_b:
+            print(f'WARNING: Skipping diff that contains keyword: {skip}')
+            return None
+
+    set_rounding_precision(filepath_a)
+
     # if file does not exist in dir_b, report difference
     if not os.path.exists(filepath_b):
         if debug:
@@ -205,19 +241,20 @@ def compare_files(filepath_a, filepath_b, debug=False, dir_a=None, dir_b=None,
         return _handle_image_files(filepath_a, filepath_b, save_diff)
 
     # if not any of the above types, use diff to compare
-    print("Comparing text files")
-    if not filecmp.cmp(filepath_a, filepath_b):
-        # if files differ, open files and handle expected diffs
-        if not compare_txt_files(filepath_a, filepath_b, dir_a, dir_b):
-            print(f"ERROR: File differs: {filepath_b}")
-            return filepath_a, filepath_b, 'Text diff', ''
+    return _handle_text_files(filepath_a, filepath_b, dir_a, dir_b)
 
-        print("No differences in text files")
-        return True
-    else:
-        print("No differences in text files")
-        return True
+
+def set_rounding_precision(filepath):
+    global rounding_precision
+    for keyword, precision in ROUNDING_OVERRIDES.items():
+        if keyword not in filepath:
+            continue
+        print(f'Using rounding precision {precision} for {keyword}')
+        rounding_precision = precision
+        return
+
+    print(f'Using default rounding precision {DEFAULT_ROUNDING_PRECISION}')
+    rounding_precision = DEFAULT_ROUNDING_PRECISION
 
 
 def _handle_csv_files(filepath_a, filepath_b):
@@ -267,6 +304,21 @@ def _handle_image_files(filepath_a, filepath_b, save_diff):
     return filepath_a, filepath_b, 'Image diff', diff_file
 
 
+def _handle_text_files(filepath_a, filepath_b, dir_a, dir_b):
+    print("Comparing text files")
+    if filecmp.cmp(filepath_a, filepath_b, shallow=False):
+        print("No differences found from filecmp.cmp")
+        return True
+
+    # if files differ, open files and handle expected diffs
+    if not compare_txt_files(filepath_a, filepath_b, dir_a, dir_b):
+        print(f"ERROR: File differs: {filepath_b}")
+        return filepath_a, filepath_b, 'Text diff', ''
+
+    print("No differences found from compare_txt_files")
+    return True
+
+
 def compare_pdf_as_images(filepath_a, filepath_b, save_diff=False):
     try:
         from pdf2image import convert_from_path
@@ -315,9 +367,9 @@ def compare_images(image_a, image_b):
     nx, ny = image_diff.size
     for x in range(0, int(nx)):
         for y in range(0, int(ny)):
-            pixel = image_diff.getpixel((x, y))
-            if pixel != 0 and pixel != (0, 0, 0, 0) and pixel != (0, 0, 0):
-                print(f"Difference pixel: {pixel}")
+            diff_pixel = image_diff.getpixel((x, y))
+            if not _is_zero_pixel(diff_pixel):
+                print(f"Difference pixel: {diff_pixel}")
print(f"Difference pixel: {diff_pixel}") diff_count += 1 if diff_count: print(f"ERROR: Found {diff_count} differences between images") @@ -325,6 +377,17 @@ def compare_images(image_a, image_b): return None +def _is_zero_pixel(pixel): + """!Check if difference pixel is 0, which means no differences. + + @param pixel pixel value or tuple if multi-layer image + @returns True if all values are 0 or False if any value is non-zero + """ + if isinstance(pixel, tuple): + return all(val == 0 for val in pixel) + return pixel == 0 + + def save_diff_file(image_diff, filepath_b): rel_path, file_extension = os.path.splitext(filepath_b) diff_file = f'{rel_path}_diff.png' @@ -381,16 +444,16 @@ def _compare_csv_columns(lines_a, lines_b): status = True for num, (line_a, line_b) in enumerate(zip(lines_a, lines_b), start=1): for key in keys_a: - val_a = line_a[key] - val_b = line_b[key] + val_a = line_a[key].strip() + val_b = line_b[key].strip() # prevent error if values are diffs are less than - # ROUNDING_PRECISION decimal places + # rounding_precision decimal places # METplus issue #1873 addresses the real problem try: if _is_equal_rounded(val_a, val_b): continue print(f"ERROR: Line {num} - {key} differs by " - f"less than {ROUNDING_PRECISION} decimals: " + f"less than {rounding_precision} decimals: " f"TRUTH = {val_a}, OUTPUT = {val_b}") status = False except ValueError: @@ -405,6 +468,8 @@ def _compare_csv_columns(lines_a, lines_b): def _is_equal_rounded(value_a, value_b): if value_a == value_b: return True + if not _is_number(value_a) or not _is_number(value_b): + return False if _truncate_float(value_a) == _truncate_float(value_b): return True if _round_float(value_a) == _round_float(value_b): @@ -412,13 +477,17 @@ def _is_equal_rounded(value_a, value_b): return False +def _is_number(value): + return value.replace('.', '1').replace('-', '1').strip().isdigit() + + def _truncate_float(value): - factor = 1 / (10 ** ROUNDING_PRECISION) + factor = 1 / (10 ** rounding_precision) return float(value) // factor * factor def _round_float(value): - return round(float(value), ROUNDING_PRECISION) + return round(float(value), rounding_precision) def compare_txt_files(filepath_a, filepath_b, dir_a=None, dir_b=None): @@ -435,10 +504,9 @@ def compare_txt_files(filepath_a, filepath_b, dir_a=None, dir_b=None): if not len(lines_a): print("Both text files are empty, so they are equal") return True - else: - print(f"Empty file: {filepath_b}\n" - f"Not empty: {filepath_a}") - return False + print(f"Empty file: {filepath_b}\n" + f"Not empty: {filepath_a}") + return False # filepath_b is not empty but filepath_a is empty elif not len(lines_a): print(f"Empty file: {filepath_a}\n" @@ -462,46 +530,38 @@ def compare_txt_files(filepath_a, filepath_b, dir_a=None, dir_b=None): is_stat_file = lines_a[0].startswith('VERSION') # if it is, save the header columns + header_a = None if is_stat_file: - print("Comparing stat file") + print("Comparing stat files") + # pull out header line and skip VERSION to prevent + # diffs from version number changes header_a = lines_a.pop(0).split()[1:] - else: - header_a = None + header_b = lines_b.pop(0).split()[1:] + if len(header_a) != len(header_b): + print('ERROR: Different number of header columns\n' + f' A: {header_a}\n B: {header_b}') + return False if len(lines_a) != len(lines_b): print(f"ERROR: Different number of lines in {filepath_b}") print(f" File_A: {len(lines_a)}\n File_B: {len(lines_b)}") return False - all_good = diff_text_lines(lines_a, - lines_b, - dir_a=dir_a, - dir_b=dir_b, - 
-                               is_file_list=is_file_list,
-                               is_stat_file=is_stat_file,
-                               header_a=header_a)
+    if diff_text_lines(lines_a, lines_b, dir_a=dir_a, dir_b=dir_b,
+                       print_error=False, is_file_list=is_file_list,
+                       is_stat_file=is_stat_file, header_a=header_a):
+        return True
 
     # if differences found in text file, sort and try again
-    if not all_good:
-        lines_a.sort()
-        lines_b.sort()
-        all_good = diff_text_lines(lines_a,
-                                   lines_b,
-                                   dir_a=dir_a,
-                                   dir_b=dir_b,
-                                   print_error=True,
-                                   is_file_list=is_file_list,
-                                   is_stat_file=is_stat_file,
-                                   header_a=header_a)
-
-    return all_good
+    lines_a.sort()
+    lines_b.sort()
+    return diff_text_lines(lines_a, lines_b, dir_a=dir_a, dir_b=dir_b,
+                           print_error=True, is_file_list=is_file_list,
+                           is_stat_file=is_stat_file, header_a=header_a)
 
 
-def diff_text_lines(lines_a, lines_b,
-                    dir_a=None, dir_b=None,
-                    print_error=False,
-                    is_file_list=False, is_stat_file=False,
+def diff_text_lines(lines_a, lines_b, dir_a=None, dir_b=None,
+                    print_error=False, is_file_list=False, is_stat_file=False,
                     header_a=None):
     all_good = True
     for line_a, line_b in zip(lines_a, lines_b):
@@ -523,33 +583,46 @@ def diff_text_lines(lines_a, lines_b,
             continue
 
         if print_error:
-            print(f"ERROR: Line differs\n"
-                  f" A: {compare_a}\n B: {compare_b}")
+            print(f"ERROR: Line differs\n A: {compare_a}\n B: {compare_b}")
         all_good = False
 
     return all_good
 
 
-def _diff_stat_line(compare_a, compare_b, header_a, print_error=False):
+def _diff_stat_line(compare_a, compare_b, header, print_error=False):
     """Compare values in .stat file. Ignore first column which contains
     MET version number
 
     @param compare_a list of values in line A
     @param compare_b list of values in line B
-    @param header_a list of header values in file A excluding MET version
+    @param header list of header values in file A excluding MET version
    @param print_error If True, print an error message if any value differs
     """
     cols_a = compare_a.split()[1:]
     cols_b = compare_b.split()[1:]
+
+    # error message to print if a diff is found
+    message = f"ERROR: Stat line differs\n A: {compare_a}\n B: {compare_b}\n\n"
+
+    # error if different number of columns are found
+    if len(cols_a) != len(cols_b):
+        if print_error:
+            print(f'{message}Different number of columns')
+        return False
+
     all_good = True
-    for col_a, col_b, label in zip(cols_a, cols_b, header_a):
-        if col_a == col_b:
+    for index, (col_a, col_b) in enumerate(zip(cols_a, cols_b), 2):
+        if _is_equal_rounded(col_a, col_b):
             continue
-        if print_error:
-            print(f"ERROR: {label} differs:\n"
-                  f" A: {col_a}\n B: {col_b}")
         all_good = False
+        if not print_error:
+            continue
+
+        label = f'column {index}' if index >= len(header) else header[index]
+        message += f" Diff in {label}:\n A: {col_a}\n B: {col_b}\n"
+    if not all_good and print_error:
+        print(message)
     return all_good
 
 
@@ -696,4 +769,6 @@ def _all_values_are_equal(var_a, var_b):
     dir_a = sys.argv[1]
     dir_b = sys.argv[2]
     save_diff = len(sys.argv) > 3
-    compare_dir(dir_a, dir_b, debug=True, save_diff=save_diff)
+    # if any files were flagged, exit non-zero
+    if compare_dir(dir_a, dir_b, debug=True, save_diff=save_diff):
+        sys.exit(2)
diff --git a/metplus/wrappers/tc_diag_wrapper.py b/metplus/wrappers/tc_diag_wrapper.py
index e2c49a013c..1b8ebd8a75 100755
--- a/metplus/wrappers/tc_diag_wrapper.py
+++ b/metplus/wrappers/tc_diag_wrapper.py
@@ -15,7 +15,8 @@
 from ..util import time_util
 from . import RuntimeFreqWrapper
 from ..util import do_string_sub, skip_time, get_lead_sequence
-from ..util import parse_var_list, sub_var_list
+from ..util import parse_var_list, sub_var_list, getlist
+from ..util import find_indices_in_config_section
 from ..util.met_config import add_met_config_dict_list
 
 '''!@namespace TCDiagWrapper
@@ -85,29 +86,28 @@ def create_c_dict(self):
             self.log_error('Only RUN_ONCE_PER_INIT_OR_VALID is supported for '
                            'TC_DIAG_RUNTIME_FREQ.')
 
-        # get the MET config file path or use default
-        c_dict['CONFIG_FILE'] = self.get_config_file('TCDiagConfig_wrapped')
+        # get command line arguments domain and tech id list for -data
+        self._read_data_inputs(c_dict)
 
-        c_dict['INPUT_DIR'] = self.config.getdir('TC_DIAG_INPUT_DIR', '')
-        c_dict['INPUT_TEMPLATE'] = self.config.getraw('config',
-                                                      'TC_DIAG_INPUT_TEMPLATE')
-        c_dict['INPUT_FILE_LIST'] = self.config.getraw(
-            'config', 'TC_DIAG_INPUT_FILE_LIST'
+        # get -deck argument dir/template
+        c_dict['DECK_INPUT_DIR'] = self.config.getdir('TC_DIAG_DECK_INPUT_DIR',
+                                                      '')
+        c_dict['DECK_INPUT_TEMPLATE'] = (
+            self.config.getraw('config',
+                               'TC_DIAG_DECK_TEMPLATE')
         )
 
+        # get output dir/template
         c_dict['OUTPUT_DIR'] = self.config.getdir('TC_DIAG_OUTPUT_DIR', '')
         c_dict['OUTPUT_TEMPLATE'] = (
             self.config.getraw('config', 'TC_DIAG_OUTPUT_TEMPLATE')
         )
 
-        c_dict['DECK_INPUT_DIR'] = self.config.getdir('TC_DIAG_DECK_INPUT_DIR',
-                                                      '')
-        c_dict['DECK_INPUT_TEMPLATE'] = (
-            self.config.getraw('config',
-                               'TC_DIAG_DECK_TEMPLATE')
-        )
+        # get the MET config file path or use default
+        c_dict['CONFIG_FILE'] = self.get_config_file('TCDiagConfig_wrapped')
 
+        # get variables to set in wrapped MET config file
         self.add_met_config(name='model',
                             data_type='list',
                             metplus_configs=['TC_DIAG_MODEL', 'MODEL'])
@@ -232,23 +232,78 @@ def create_c_dict(self):
 
         return c_dict
 
+    def _read_data_inputs(self, c_dict):
+        """! Parse the -data arguments from the METplusConfig object.
+        Sets c_dict DATA_INPUTS key with a list of dictionaries.
+        Each input should include domain, tech_id_list, and dir/template.
+        Logs error if any required variables are not set.
+
+        @param c_dict dictionary to save values into
+        """
+        # get template indices
+        indices = list(
+            find_indices_in_config_section(r'TC_DIAG_INPUT(\d+)_TEMPLATE$',
+                                           self.config,
+                                           index_index=1).keys()
+        )
+
+        # if no template indices were found, look for file list indices
+        if not indices:
+            indices = list(
+                find_indices_in_config_section(r'TC_DIAG_INPUT(\d+)_FILE_LIST$',
+                                               self.config,
+                                               index_index=1).keys()
+            )
+        # error if no file list or template indices were found
+        if not indices:
+            self.log_error(
+                'Must set TC_DIAG_INPUT_TEMPLATE/DOMAIN/TECH_ID_LIST'
+            )
+            return
+
+        c_dict['DATA_INPUTS'] = []
+        for index in indices:
+            prefix = f'TC_DIAG_INPUT{index}_'
+            directory = self.config.getdir(f'{prefix}DIR')
+            template = self.config.getraw('config', f'{prefix}TEMPLATE')
+
+            # get file list if template is not set
+            if template:
+                file_list = None
+            else:
+                file_list = self.config.getraw('config', f'{prefix}FILE_LIST')
+
+            domain = self.config.getraw('config', f'{prefix}DOMAIN')
+            if not domain:
+                self.log_error(f'Must set {prefix}DOMAIN')
+
+            tech_id_list = getlist(
+                self.config.getraw('config', f'{prefix}TECH_ID_LIST')
+            )
+            if not tech_id_list:
+                self.log_error(f'Must set {prefix}TECH_ID_LIST')
+
+            data_dict = {
+                'template': template,
+                'directory': directory,
+                'file_list': file_list,
+                'domain': domain,
+                'tech_id_list': tech_id_list,
+            }
+            c_dict['DATA_INPUTS'].append(data_dict)
+
     def get_command(self):
         cmd = self.app_path
 
         # add deck
         cmd += ' -deck ' + self.c_dict['DECK_FILE']
 
-        # add input files
-        cmd += ' -data'
-        for infile in self.infiles:
-            cmd += ' ' + infile
-
         # add arguments
         cmd += ' ' + ' '.join(self.args)
 
         # add output path
         out_path = self.get_output_path()
-        cmd += ' -out ' + out_path
+        cmd += ' -outdir ' + out_path
 
         # add verbosity
         cmd += ' -v ' + self.c_dict['VERBOSITY']
@@ -259,13 +314,15 @@ def run_at_time_once(self, time_info):
         Args:
             @param time_info dictionary containing timing information
         """
+        self.clear()
         time_info = time_util.ti_calculate(time_info)
+
         # get input files
-        if self.find_input_files(time_info) is None:
+        if not self.find_input_files(time_info):
             return
 
         # get output path
-        if not self.find_and_check_output_file(time_info):
+        if not self.find_and_check_output_file(time_info, is_directory=True):
             return
 
         # get field information to set in MET config
@@ -281,12 +338,6 @@ def run_at_time_once(self, time_info):
         # set environment variables if using config file
         self.set_environment_variables(time_info)
 
-        # build command and run
-        cmd = self.get_command()
-        if cmd is None:
-            self.log_error("Could not generate command")
-            return
-
         self.build()
 
     def set_data_field(self, time_info):
@@ -327,47 +378,57 @@ def find_input_files(self, time_info):
         # get deck file
         deck_file = self.find_data(time_info, data_type='DECK')
         if not deck_file:
-            return None
-
+            return False
         self.c_dict['DECK_FILE'] = deck_file
 
+        # get files and values for -data arguments
         lead_seq = get_lead_sequence(self.config, time_info)
+        for data_dict in self.c_dict['DATA_INPUTS']:
+            if not self._find_data_inputs(data_dict, lead_seq, time_info,
+                                          deck_file):
+                return False
+        return True
 
-        # get input files
-        if self.c_dict['INPUT_FILE_LIST']:
-            self.logger.debug("Explicit file list file: "
-                              f"{self.c_dict['INPUT_FILE_LIST']}")
-            list_file = do_string_sub(self.c_dict['INPUT_FILE_LIST'],
-                                      **time_info)
+    def _find_data_inputs(self, data_dict, lead_seq, time_info, deck_file):
+        # check if file list file is set and use that instead of template/dir
+        input_file_list = data_dict['file_list']
+        if input_file_list:
+            self.logger.debug(f"Explicit file list file: {input_file_list}")
+            list_file = do_string_sub(input_file_list, **time_info)
             if not os.path.exists(list_file):
                 self.log_error(f'Could not find file list: {list_file}')
-                return None
+                return False
         else:
-            all_input_files = []
+            # set c_dict variables that are used in find_data function
+            self.c_dict['INPUT_DIR'] = data_dict['directory']
+            self.c_dict['INPUT_TEMPLATE'] = data_dict['template']
 
+            all_input_files = []
             for lead in lead_seq:
-                self.clear()
-                time_info['lead'] = lead
-
-                time_info = time_util.ti_calculate(time_info)
+                time_info_lead = time_info.copy()
+                time_info_lead['lead'] = lead
+                time_info_lead = time_util.ti_calculate(time_info_lead)
 
                 # get a list of the input data files,
                 # write to an ascii file if there are more than one
-                input_files = self.find_data(time_info, return_list=True)
+                input_files = self.find_data(time_info_lead, return_list=True)
                 if not input_files:
                     continue
 
                 all_input_files.extend(input_files)
 
             if not all_input_files:
-                return None
+                return False
 
             # create an ascii file with a list of the input files
             list_file = f"{os.path.basename(deck_file)}_data_files.txt"
             list_file = self.write_list_file(list_file, all_input_files)
 
-        self.infiles.append(list_file)
-
-        return self.infiles
+        # build argument with file list file, domain, and tech id list
+        domain = data_dict['domain']
+        tech_ids = ','.join(data_dict['tech_id_list'])
+        self.args.append(f'-data {domain} {tech_ids} {list_file}')
+        return True
 
     def set_lead_list(self, time_info):
         self.env_var_dict['METPLUS_LEAD_LIST'] = ''
diff --git a/metplus/wrappers/tc_stat_wrapper.py b/metplus/wrappers/tc_stat_wrapper.py
index ce09ba9400..9c7b6722b4 100755
--- a/metplus/wrappers/tc_stat_wrapper.py
+++ b/metplus/wrappers/tc_stat_wrapper.py
@@ -365,16 +365,9 @@ def handle_jobs(self, time_info):
             subbed_job = do_string_sub(job, **time_info) if time_info else job
             formatted_jobs.append(subbed_job.strip())
 
-            # check if -dump_row is used
-            # if it is, create parent directory of output file
-            split_job = subbed_job.split(' ')
-            if '-dump_row' in split_job:
-                index = split_job.index('-dump_row') + 1
-                filepath = split_job[index]
-                self.c_dict['OUTPUT_TEMPLATE'] = filepath
-
-                if not self.find_and_check_output_file(time_info):
-                    return None
+            # create parent directory of output file
+            if not self._create_job_out_dirs(subbed_job, time_info):
+                return None
 
         job_list_string = '","'.join(formatted_jobs)
         job_list_string = f'jobs = ["{job_list_string}"];'
@@ -384,6 +377,31 @@ def handle_jobs(self, time_info):
 
         return job_list_string
 
+    def _create_job_out_dirs(self, job_args, time_info):
+        """!Create output directories for output files specified by job args
+        like -dump_row and -out_stat to prevent the command from failing.
+
+        @param job_args list of job arguments to parse
+        @param time_info time dictionary used to fill in filename
+         template tags if used
+        @returns False if something went wrong trying to create directories or
+         True if everything went smoothly.
+        """
+        split_job = job_args.split(' ')
+        for out_type in ('-dump_row', '-out_stat'):
+            # continue if job arg that writes a file is not found in job args
+            if out_type not in split_job:
+                continue
+
+            # if job arg is found, create parent directory of output file
+            index = split_job.index(out_type) + 1
+            filepath = split_job[index]
+            self.c_dict['OUTPUT_TEMPLATE'] = filepath
+            if not self.find_and_check_output_file(time_info):
+                return False
+
+        return True
+
     def handle_out_file(self, time_info):
         """! If output template is set,
         """
diff --git a/metplus/wrappers/tcmpr_plotter_wrapper.py b/metplus/wrappers/tcmpr_plotter_wrapper.py
index e8408586dc..3e7fe3238e 100755
--- a/metplus/wrappers/tcmpr_plotter_wrapper.py
+++ b/metplus/wrappers/tcmpr_plotter_wrapper.py
@@ -85,7 +85,12 @@ def create_c_dict(self):
 
         # check that R script can be found
         if not os.path.exists(c_dict['TCMPR_SCRIPT']):
-            self.log_error('plot_tcmpr.R script could not be found')
+            self.logger.error('plot_tcmpr.R script could not be found')
+
+            # if running script, set isOK to False
+            # this allows tests to run without needing MET_INSTALL_DIR
+            if not c_dict.get('DO_NOT_RUN_EXE', False):
+                self.isOK = False
 
         # get input data
         c_dict['INPUT_DATA'] = (
diff --git a/parm/met_config/TCDiagConfig_wrapped b/parm/met_config/TCDiagConfig_wrapped
index 43d3708c09..d20be1a55b 100644
--- a/parm/met_config/TCDiagConfig_wrapped
+++ b/parm/met_config/TCDiagConfig_wrapped
@@ -182,7 +182,7 @@ tmp_dir = "${MET_TMP_DIR}";
 
 //output_prefix =
 ${METPLUS_OUTPUT_PREFIX}
-//version = "V11.0.0";
+//version = "V11.1.0";
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/parm/use_cases/met_tool_wrapper/TCDiag/TCDiag.conf b/parm/use_cases/met_tool_wrapper/TCDiag/TCDiag.conf
index 7f2bcc3b0e..b489eec5ba 100644
--- a/parm/use_cases/met_tool_wrapper/TCDiag/TCDiag.conf
+++ b/parm/use_cases/met_tool_wrapper/TCDiag/TCDiag.conf
@@ -27,10 +27,10 @@ PROCESS_LIST = TCDiag
 
 LOOP_BY = INIT
 INIT_TIME_FMT = %Y%m%d%H
-INIT_BEG = 2016092900
-INIT_END = 2016092900
+INIT_BEG = 2023062012
+INIT_END = 2023062012
 INIT_INCREMENT = 21600
-
+LEAD_SEQ = 0, 6, 12
 
 ###
 # File I/O
@@ -38,13 +38,21 @@
 ###
 
 TC_DIAG_DECK_INPUT_DIR = {INPUT_BASE}/met_test/new/tc_data/adeck
-TC_DIAG_DECK_TEMPLATE = aal14{date?fmt=%Y}_short.dat
+TC_DIAG_DECK_TEMPLATE = subset.aal03{date?fmt=%Y}.dat
+
+TC_DIAG_INPUT1_DIR = {INPUT_BASE}/met_test/new/model_data/grib2/gfs
+TC_DIAG_INPUT1_TEMPLATE = subset.gfs.t12z.pgrb2.0p50.f*
+TC_DIAG_INPUT1_DOMAIN = parent
+TC_DIAG_INPUT1_TECH_ID_LIST = AVNO
+
 
-TC_DIAG_INPUT_DIR = {INPUT_BASE}/met_test/new/model_data/grib2/gfs_fv3
-TC_DIAG_INPUT_TEMPLATE = gfs.subset.t00z.pgrb2.0p25.f*
+TC_DIAG_INPUT2_DIR = {INPUT_BASE}/met_test/new/model_data/grib2/gfs
+TC_DIAG_INPUT2_TEMPLATE = subset.gfs.t12z.pgrb2.0p50.f*
+TC_DIAG_INPUT2_DOMAIN = nest
+TC_DIAG_INPUT2_TECH_ID_LIST = AVNO
 
-TC_DIAG_OUTPUT_DIR = {OUTPUT_BASE}/met_tool_wrapper/TCDiag
-TC_DIAG_OUTPUT_TEMPLATE = tc_diag_aal14{date?fmt=%Y}.nc
+TC_DIAG_OUTPUT_DIR = {OUTPUT_BASE}/tc_diag
+TC_DIAG_OUTPUT_TEMPLATE = {date?fmt=%Y}
 
 
 ###
@@ -53,10 +61,10 @@ TC_DIAG_OUTPUT_TEMPLATE = tc_diag_aal14{date?fmt=%Y}.nc
 ###
 
 BOTH_VAR1_NAME = PRMSL
-BOTH_VAR1_LEVELS = L0
+BOTH_VAR1_LEVELS = Z0
 
 BOTH_VAR2_NAME = TMP
-BOTH_VAR2_LEVELS = P1000, P900, P800, P700, P500, P100
+BOTH_VAR2_LEVELS = P1000, P925, P850, P700, P500, P400, P300, P250, P200, P150, P100
 
 
 ###
@@ -64,17 +72,17 @@ BOTH_VAR2_LEVELS = P1000, P900, P800, P700, P500, P100
 # https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#tcdiag
 ###
 
-#LOG_TC_DIAG_VERBOSITY = 2
+LOG_TC_DIAG_VERBOSITY = 2
 
 TC_DIAG_CONFIG_FILE = {PARM_BASE}/met_config/TCDiagConfig_wrapped
 
-MODEL = fv3
+MODEL = GFSO
 
-TC_DIAG_STORM_ID = AL142016
+TC_DIAG_STORM_ID = AL032023
 TC_DIAG_BASIN = AL
-TC_DIAG_CYCLONE = 14
+TC_DIAG_CYCLONE = 03
 
-#TC_DIAG_INIT_INCLUDE =
+TC_DIAG_INIT_INCLUDE = {init?fmt=%Y%m%d%H}
 #TC_DIAG_VALID_BEG =
 #TC_DIAG_VALID_END =
 #TC_DIAG_VALID_INCLUDE_LIST =
@@ -84,12 +92,18 @@ TC_DIAG_CYCLONE = 14
 
 #TC_DIAG_DIAG_SCRIPT =
 
-#TC_DIAG_DOMAIN_INFO1_DOMAIN =
-#TC_DIAG_DOMAIN_INFO1_N_RANGE =
-#TC_DIAG_DOMAIN_INFO1_N_AZIMUTH =
-#TC_DIAG_DOMAIN_INFO1_DELTA_RANGE_KM =
+TC_DIAG_DOMAIN_INFO1_DOMAIN = parent
+TC_DIAG_DOMAIN_INFO1_N_RANGE = 150
+TC_DIAG_DOMAIN_INFO1_N_AZIMUTH = 8
+TC_DIAG_DOMAIN_INFO1_DELTA_RANGE_KM = 10.0
 #TC_DIAG_DOMAIN_INFO1_DIAG_SCRIPT =
 
+TC_DIAG_DOMAIN_INFO2_DOMAIN = nest
+TC_DIAG_DOMAIN_INFO2_N_RANGE = 150
+TC_DIAG_DOMAIN_INFO2_N_AZIMUTH = 8
+TC_DIAG_DOMAIN_INFO2_DELTA_RANGE_KM = 2.0
+
+
 #TC_DIAG_CENSOR_THRESH =
 #TC_DIAG_CENSOR_VAL =
 #TC_DIAG_CONVERT =
@@ -117,3 +131,5 @@
 #TC_DIAG_NC_DIAG_FLAG =
 #TC_DIAG_CIRA_DIAG_FLAG =
 #TC_DIAG_OUTPUT_PREFIX =
+
+#LOG_LEVEL=DEBUG
\ No newline at end of file