diff --git a/.github/jobs/get_use_case_commands.py b/.github/jobs/get_use_case_commands.py index 04920faa9c..b4de2094b0 100755 --- a/.github/jobs/get_use_case_commands.py +++ b/.github/jobs/get_use_case_commands.py @@ -14,7 +14,7 @@ sys.path.insert(0, METPLUS_TOP_DIR) from internal.tests.use_cases.metplus_use_case_suite import METplusUseCaseSuite -from metplus.util.met_util import expand_int_string_to_list +from metplus.util.string_manip import expand_int_string_to_list from docker_utils import VERSION_EXT diff --git a/docs/Contributors_Guide/deprecation.rst b/docs/Contributors_Guide/deprecation.rst index 491baef9e6..6c6d63e2f2 100644 --- a/docs/Contributors_Guide/deprecation.rst +++ b/docs/Contributors_Guide/deprecation.rst @@ -26,7 +26,7 @@ wrong variable and it is using WGRIB2 = wgrib2. check_for_deprecated_config() ----------------------------- -In **met_util.py** there is a function called +In **metplus/util/config_metplus.py** there is a function called check_for_deprecated_config. It contains a dictionary of dictionaries called deprecated_dict that specifies the old config name, the section it was found in, and a suggested alternative (None if no alternative diff --git a/docs/Users_Guide/getting_started.rst b/docs/Users_Guide/getting_started.rst index 3aada6c852..7941a97f68 100644 --- a/docs/Users_Guide/getting_started.rst +++ b/docs/Users_Guide/getting_started.rst @@ -327,7 +327,7 @@ user configuration file and The last line of the screen output should match this format:: - 05/04 09:42:52.277 metplus (met_util.py:212) INFO: METplus has successfully finished running. + 05/04 09:42:52.277 metplus INFO: METplus has successfully finished running. If this log message is not shown, there is likely an issue with one or more of the default configuration variable overrides in the @@ -339,7 +339,7 @@ how the :ref:`common_config_variables` control a use case run. If the run was successful, the line above the success message should contain the path to the METplus log file that was generated:: - 05/04 09:44:21.534 metplus (met_util.py:211) INFO: Check the log file for more information: /path/to/output/logs/metplus.log.20210504094421 + 05/04 09:44:21.534 metplus INFO: Check the log file for more information: /path/to/output/logs/metplus.log.20210504094421 * Review the log file and compare it to the Example.conf use case configuration file to see how the settings correspond to the result. diff --git a/docs/Users_Guide/systemconfiguration.rst b/docs/Users_Guide/systemconfiguration.rst index a52b997d3c..17a164b051 100644 --- a/docs/Users_Guide/systemconfiguration.rst +++ b/docs/Users_Guide/systemconfiguration.rst @@ -426,7 +426,7 @@ This defines the format of the ERROR log messages. Setting the value to:: Produces a log file with ERROR lines that match this format:: - 04/29 16:03:34.858 metplus (met_util.py:218) ERROR: METplus has finished running but had 1 error. + 04/29 16:03:34.858 metplus (run_util.py:192) ERROR: METplus has finished running but had 1 error. The format of the timestamp is set by :ref:`LOG_LINE_DATE_FORMAT`. @@ -442,7 +442,7 @@ This defines the format of the DEBUG log messages. Setting the value to:: Produces a log file with DEBUG lines that match this format:: - 04/29 15:54:22.851 metplus (met_util.py:207) DEBUG: METplus took 0:00:00.850983 to run. + 04/29 15:54:22.851 metplus (run_util.py:177) DEBUG: METplus took 0:00:00.850983 to run. The format of the timestamp is set by :ref:`LOG_LINE_DATE_FORMAT`. 
@@ -2648,9 +2648,9 @@ In most cases, there is a simple one-to-one relationship between a deprecated co Example:: - (met_util.py) ERROR: DEPRECATED CONFIG ITEMS WERE FOUND. PLEASE REMOVE/REPLACE THEM FROM CONFIG FILES - (met_util.py) ERROR: [dir] MODEL_DATA_DIR should be replaced with EXTRACT_TILES_GRID_INPUT_DIR - (met_util.py) ERROR: [config] STAT_LIST should be replaced with SERIES_ANALYSIS_STAT_LIST + ERROR: DEPRECATED CONFIG ITEMS WERE FOUND. PLEASE REMOVE/REPLACE THEM FROM CONFIG FILES + ERROR: [dir] MODEL_DATA_DIR should be replaced with EXTRACT_TILES_GRID_INPUT_DIR + ERROR: [config] STAT_LIST should be replaced with SERIES_ANALYSIS_STAT_LIST These cases can be handled automatically by using the :ref:`validate_config`. @@ -2666,10 +2666,10 @@ Starting in METplus 3.0, users are required to either explicitly set both FCST_* Example:: - (met_util.py) ERROR: If FCST_VAR1_NAME is set, the user must either set OBS_VAR1_NAME or change FCST_VAR1_NAME to BOTH_VAR1_NAME - (met_util.py) ERROR: If FCST_VAR2_NAME is set, the user must either set OBS_VAR2_NAME or change FCST_VAR2_NAME to BOTH_VAR2_NAME - (met_util.py) ERROR: If FCST_VAR1_LEVELS is set, the user must either set OBS_VAR1_LEVELS or change FCST_VAR1_LEVELS to BOTH_VAR1_LEVELS - (met_util.py) ERROR: If FCST_VAR2_LEVELS is set, the user must either set OBS_VAR2_LEVELS or change FCST_VAR2_LEVELS to BOTH_VAR2_LEVELS + ERROR: If FCST_VAR1_NAME is set, the user must either set OBS_VAR1_NAME or change FCST_VAR1_NAME to BOTH_VAR1_NAME + ERROR: If FCST_VAR2_NAME is set, the user must either set OBS_VAR2_NAME or change FCST_VAR2_NAME to BOTH_VAR2_NAME + ERROR: If FCST_VAR1_LEVELS is set, the user must either set OBS_VAR1_LEVELS or change FCST_VAR1_LEVELS to BOTH_VAR1_LEVELS + ERROR: If FCST_VAR2_LEVELS is set, the user must either set OBS_VAR2_LEVELS or change FCST_VAR2_LEVELS to BOTH_VAR2_LEVELS These cases can be handled automatically by using the :ref:`validate_config`, but users should review the suggested changes, as they may want to update differently. @@ -2682,7 +2682,7 @@ Instead of only being able to specify FCST_PCP_COMBINE_INPUT_LEVEL, users can no Example:: - (met_util.py) ERROR: [config] OBS_PCP_COMBINE_INPUT_LEVEL should be replaced with OBS_PCP_COMBINE_INPUT_ACCUMS + ERROR: [config] OBS_PCP_COMBINE_INPUT_LEVEL should be replaced with OBS_PCP_COMBINE_INPUT_ACCUMS These cases can be handled automatically by using the :ref:`validate_config`, but users should review the suggested changes, as they may want to include other available input accumulations. @@ -2719,17 +2719,17 @@ Due to these changes, MET configuration files that refer to any of these depreca Example log output:: - (met_util.py) DEBUG: Checking for deprecated environment variables in: DeprecatedConfig - (met_util.py) ERROR: Please remove deprecated environment variable ${GRID_VX} found in MET config file: DeprecatedConfig - (met_util.py) ERROR: MET to_grid variable should reference ${REGRID_TO_GRID} environment variable - (met_util.py) INFO: Be sure to set GRID_STAT_REGRID_TO_GRID to the correct value. + DEBUG: Checking for deprecated environment variables in: DeprecatedConfig + ERROR: Please remove deprecated environment variable ${GRID_VX} found in MET config file: DeprecatedConfig + ERROR: MET to_grid variable should reference ${REGRID_TO_GRID} environment variable + INFO: Be sure to set GRID_STAT_REGRID_TO_GRID to the correct value. 
- (met_util.py) ERROR: Please remove deprecated environment variable ${MET_VALID_HHMM} found in MET config file: DeprecatedConfig - (met_util.py) ERROR: Set GRID_STAT_CLIMO_MEAN_INPUT_[DIR/TEMPLATE] in a METplus config file to set CLIMO_MEAN_FILE in a MET config + ERROR: Please remove deprecated environment variable ${MET_VALID_HHMM} found in MET config file: DeprecatedConfig + ERROR: Set GRID_STAT_CLIMO_MEAN_INPUT_[DIR/TEMPLATE] in a METplus config file to set CLIMO_MEAN_FILE in a MET config - (met_util.py) ERROR: output_prefix variable should reference ${OUTPUT_PREFIX} environment variable - (met_util.py) INFO: GRID_STAT_OUTPUT_PREFIX will need to be added to the METplus config file that sets GRID_STAT_CONFIG_FILE. Set it to: - (met_util.py) INFO: GRID_STAT_OUTPUT_PREFIX = {CURRENT_FCST_NAME}_vs_{CURRENT_OBS_NAME} + ERROR: output_prefix variable should reference ${OUTPUT_PREFIX} environment variable + INFO: GRID_STAT_OUTPUT_PREFIX will need to be added to the METplus config file that sets GRID_STAT_CONFIG_FILE. Set it to: + INFO: GRID_STAT_OUTPUT_PREFIX = {CURRENT_FCST_NAME}_vs_{CURRENT_OBS_NAME} These cases can be handled automatically by using the :ref:`validate_config`, but users should review the suggested changes and make sure they add the appropriate recommended METplus configuration variables to their files to achieve the same behavior. diff --git a/docs/use_cases/met_tool_wrapper/Example/Example.py b/docs/use_cases/met_tool_wrapper/Example/Example.py index b19ebcf781..3c4a11b43a 100644 --- a/docs/use_cases/met_tool_wrapper/Example/Example.py +++ b/docs/use_cases/met_tool_wrapper/Example/Example.py @@ -174,30 +174,30 @@ # # You should also see a series of log output listing init/valid times, forecast lead times, and filenames derived from the filename templates. 
Here is an excerpt:: # -# 12/30 19:44:02.901 metplus (met_util.py:425) INFO: **************************************** -# 12/30 19:44:02.901 metplus (met_util.py:426) INFO: * Running METplus -# 12/30 19:44:02.902 metplus (met_util.py:432) INFO: * at valid time: 201702010000 -# 12/30 19:44:02.902 metplus (met_util.py:435) INFO: **************************************** -# 12/30 19:44:02.902 metplus.Example (example_wrapper.py:58) INFO: Running ExampleWrapper at valid time 20170201000000 -# 12/30 19:44:02.902 metplus.Example (example_wrapper.py:63) INFO: Input directory is /dir/containing/example/data -# 12/30 19:44:02.902 metplus.Example (example_wrapper.py:64) INFO: Input template is {init?fmt=%Y%m%d}/file_{init?fmt=%Y%m%d}_{init?fmt=%2H}_F{lead?fmt=%3H}.ext -# 12/30 19:44:02.902 metplus.Example (example_wrapper.py:79) INFO: Processing forecast lead 3 hours initialized at 2017-01-31 21Z and valid at 2017-02-01 00Z -# 12/30 19:44:02.903 metplus.Example (example_wrapper.py:88) INFO: Looking in input directory for file: 20170131/file_20170131_21_F003.ext -# 12/30 19:44:02.903 metplus.Example (example_wrapper.py:79) INFO: Processing forecast lead 6 hours initialized at 2017-01-31 18Z and valid at 2017-02-01 00Z -# 12/30 19:44:02.903 metplus.Example (example_wrapper.py:88) INFO: Looking in input directory for file: 20170131/file_20170131_18_F006.ext -# 12/30 19:44:02.904 metplus.Example (example_wrapper.py:79) INFO: Processing forecast lead 9 hours initialized at 2017-01-31 15Z and valid at 2017-02-01 00Z -# 12/30 19:44:02.904 metplus.Example (example_wrapper.py:88) INFO: Looking in input directory for file: 20170131/file_20170131_15_F009.ext -# 12/30 19:44:02.904 metplus.Example (example_wrapper.py:79) INFO: Processing forecast lead 12 hours initialized at 2017-01-31 12Z and valid at 2017-02-01 00Z -# 12/30 19:44:02.904 metplus.Example (example_wrapper.py:88) INFO: Looking in input directory for file: 20170131/file_20170131_12_F012.ext -# 12/30 19:44:02.904 metplus (met_util.py:425) INFO: **************************************** -# 12/30 19:44:02.904 metplus (met_util.py:426) INFO: * Running METplus -# 12/30 19:44:02.905 metplus (met_util.py:432) INFO: * at valid time: 201702010600 -# 12/30 19:44:02.905 metplus (met_util.py:435) INFO: **************************************** -# 12/30 19:44:02.905 metplus.Example (example_wrapper.py:58) INFO: Running ExampleWrapper at valid time 20170201060000 -# 12/30 19:44:02.905 metplus.Example (example_wrapper.py:63) INFO: Input directory is /dir/containing/example/data -# 12/30 19:44:02.905 metplus.Example (example_wrapper.py:64) INFO: Input template is {init?fmt=%Y%m%d}/file_{init?fmt=%Y%m%d}_{init?fmt=%2H}_F{lead?fmt=%3H}.ext -# 12/30 19:44:02.905 metplus.Example (example_wrapper.py:79) INFO: Processing forecast lead 3 hours initialized at 2017-02-01 03Z and valid at 2017-02-01 06Z -# 12/30 19:44:02.906 metplus.Example (example_wrapper.py:88) INFO: Looking in input directory for file: 20170201/file_20170201_03_F003.ext +# 12/30 19:44:02.901 metplus INFO: **************************************** +# 12/30 19:44:02.901 metplus INFO: * Running METplus +# 12/30 19:44:02.902 metplus INFO: * at valid time: 201702010000 +# 12/30 19:44:02.902 metplus INFO: **************************************** +# 12/30 19:44:02.902 metplus INFO: Running ExampleWrapper at valid time 20170201000000 +# 12/30 19:44:02.902 metplus INFO: Input directory is /dir/containing/example/data +# 12/30 19:44:02.902 metplus INFO: Input template is 
{init?fmt=%Y%m%d}/file_{init?fmt=%Y%m%d}_{init?fmt=%2H}_F{lead?fmt=%3H}.ext +# 12/30 19:44:02.902 metplus INFO: Processing forecast lead 3 hours initialized at 2017-01-31 21Z and valid at 2017-02-01 00Z +# 12/30 19:44:02.903 metplus INFO: Looking in input directory for file: 20170131/file_20170131_21_F003.ext +# 12/30 19:44:02.903 metplus INFO: Processing forecast lead 6 hours initialized at 2017-01-31 18Z and valid at 2017-02-01 00Z +# 12/30 19:44:02.903 metplus INFO: Looking in input directory for file: 20170131/file_20170131_18_F006.ext +# 12/30 19:44:02.904 metplus INFO: Processing forecast lead 9 hours initialized at 2017-01-31 15Z and valid at 2017-02-01 00Z +# 12/30 19:44:02.904 metplus INFO: Looking in input directory for file: 20170131/file_20170131_15_F009.ext +# 12/30 19:44:02.904 metplus INFO: Processing forecast lead 12 hours initialized at 2017-01-31 12Z and valid at 2017-02-01 00Z +# 12/30 19:44:02.904 metplus INFO: Looking in input directory for file: 20170131/file_20170131_12_F012.ext +# 12/30 19:44:02.904 metplus INFO: **************************************** +# 12/30 19:44:02.904 metplus INFO: * Running METplus +# 12/30 19:44:02.905 metplus INFO: * at valid time: 201702010600 +# 12/30 19:44:02.905 metplus INFO: **************************************** +# 12/30 19:44:02.905 metplus INFO: Running ExampleWrapper at valid time 20170201060000 +# 12/30 19:44:02.905 metplus INFO: Input directory is /dir/containing/example/data +# 12/30 19:44:02.905 metplus INFO: Input template is {init?fmt=%Y%m%d}/file_{init?fmt=%Y%m%d}_{init?fmt=%2H}_F{lead?fmt=%3H}.ext +# 12/30 19:44:02.905 metplus INFO: Processing forecast lead 3 hours initialized at 2017-02-01 03Z and valid at 2017-02-01 06Z +# 12/30 19:44:02.906 metplus INFO: Looking in input directory for file: 20170201/file_20170201_03_F003.ext # ############################################################################## diff --git a/internal/tests/pytests/util/config_metplus/test_config_metplus.py b/internal/tests/pytests/util/config_metplus/test_config_metplus.py index cd9360dcdd..8974d69b9a 100644 --- a/internal/tests/pytests/util/config_metplus/test_config_metplus.py +++ b/internal/tests/pytests/util/config_metplus/test_config_metplus.py @@ -7,7 +7,7 @@ from datetime import datetime from metplus.util import config_metplus - +from metplus.util.time_util import ti_calculate @pytest.mark.util def test_get_default_config_list(): @@ -1103,3 +1103,65 @@ def test_format_var_items_options_semicolon(config_value, var_items = config_metplus._format_var_items(field_configs, time_info) result = var_items.get('extra') assert result == expected_result + + +@pytest.mark.parametrize( + 'input_dict, expected_list', [ + ({'init': datetime(2019, 2, 1, 6), + 'lead': 7200, }, + [ + {'index': '1', + 'fcst_name': 'FNAME_2019', + 'fcst_level': 'Z06', + 'obs_name': 'ONAME_2019', + 'obs_level': 'L06', + }, + {'index': '1', + 'fcst_name': 'FNAME_2019', + 'fcst_level': 'Z08', + 'obs_name': 'ONAME_2019', + 'obs_level': 'L08', + }, + ]), + ({'init': datetime(2021, 4, 13, 9), + 'lead': 10800, }, + [ + {'index': '1', + 'fcst_name': 'FNAME_2021', + 'fcst_level': 'Z09', + 'obs_name': 'ONAME_2021', + 'obs_level': 'L09', + }, + {'index': '1', + 'fcst_name': 'FNAME_2021', + 'fcst_level': 'Z12', + 'obs_name': 'ONAME_2021', + 'obs_level': 'L12', + }, + ]), + ] +) +@pytest.mark.util +def test_sub_var_list(metplus_config, input_dict, expected_list): + config = metplus_config + config.set('config', 'FCST_VAR1_NAME', 'FNAME_{init?fmt=%Y}') + config.set('config', 
'FCST_VAR1_LEVELS', 'Z{init?fmt=%H}, Z{valid?fmt=%H}') + config.set('config', 'OBS_VAR1_NAME', 'ONAME_{init?fmt=%Y}') + config.set('config', 'OBS_VAR1_LEVELS', 'L{init?fmt=%H}, L{valid?fmt=%H}') + + time_info = ti_calculate(input_dict) + + actual_temp = config_metplus.parse_var_list(config) + + pp = pprint.PrettyPrinter() + print(f'Actual var list (before sub):') + pp.pprint(actual_temp) + + actual_list = config_metplus.sub_var_list(actual_temp, time_info) + print(f'Actual var list (after sub):') + pp.pprint(actual_list) + + assert len(actual_list) == len(expected_list) + for actual, expected in zip(actual_list, expected_list): + for key, value in expected.items(): + assert actual.get(key) == value diff --git a/internal/tests/pytests/util/met_util/test_met_util.py b/internal/tests/pytests/util/met_util/test_met_util.py deleted file mode 100644 index e4965bdd09..0000000000 --- a/internal/tests/pytests/util/met_util/test_met_util.py +++ /dev/null @@ -1,323 +0,0 @@ -#!/usr/bin/env python3 - -import pytest - -import datetime -import os -from dateutil.relativedelta import relativedelta -import pprint - -from metplus.util import met_util as util -from metplus.util import time_util -from metplus.util.config_metplus import parse_var_list - - -@pytest.mark.parametrize( - 'filename, ext', [ - ('internal/tests/data/zip/testfile.txt', '.gz'), - ('internal/tests/data/zip/testfile2.txt', '.bz2'), - ('internal/tests/data/zip/testfile3.txt', '.zip'), - ('internal/tests/data/zip/testfile4.txt', ''), - ] -) -@pytest.mark.util -def test_preprocess_file_stage(metplus_config, filename, ext): - conf = metplus_config - metplus_base = conf.getdir('METPLUS_BASE') - stage_dir = conf.getdir('STAGING_DIR', - os.path.join(conf.getdir('OUTPUT_BASE'), - 'stage')) - filepath = os.path.join(metplus_base, - filename+ext) - if ext: - stagepath = stage_dir + os.path.join(metplus_base, - filename) - if os.path.exists(stagepath): - os.remove(stagepath) - else: - stagepath = filepath - - outpath = util.preprocess_file(filepath, None, conf) - assert stagepath == outpath and os.path.exists(outpath) - - -@pytest.mark.parametrize( - 'filename, data_type, allow_dir, expected', [ - # filename is None or empty string - return None - (None, None, False, None), - ('', None, False, None), - # python data types - pass through full filename value - ('some:set:of:words', 'PYTHON_NUMPY', False, 'some:set:of:words'), - ('some:set:of:words', 'PYTHON_XARRAY', False, 'some:set:of:words'), - ('some:set:of:words', 'PYTHON_PANDAS', False, 'some:set:of:words'), - # allow directory - pass through full dir path - ('dir', None, True, 'dir'), - # base filename is python embedding type - return python embed type - ('/some/path/PYTHON_NUMPY', None, False, 'PYTHON_NUMPY'), - ('/some/path/PYTHON_XARRAY', None, False, 'PYTHON_XARRAY'), - ('/some/path/PYTHON_PANDAS', None, False, 'PYTHON_PANDAS'), - ] -) -@pytest.mark.util -def test_preprocess_file_options(metplus_config, - filename, - data_type, - allow_dir, - expected): - config = metplus_config - if filename == 'dir': - filename = config.getdir('METPLUS_BASE') - expected = filename - result = util.preprocess_file(filename, data_type, config, allow_dir) - assert result == expected - - -def test_get_lead_sequence_lead(metplus_config): - input_dict = {'valid': datetime.datetime(2019, 2, 1, 13)} - conf = metplus_config - conf.set('config', 'LEAD_SEQ', "3,6,9,12") - test_seq = util.get_lead_sequence(conf, input_dict) - hour_seq = [] - for test in test_seq: - 
hour_seq.append(time_util.ti_get_hours_from_relativedelta(test)) - lead_seq = [3, 6, 9, 12] - assert hour_seq == lead_seq - - -@pytest.mark.parametrize( - 'key, value', [ - ('begin_end_incr(3,12,3)', [ 3, 6, 9, 12]), - ('begin_end_incr( 3,12 , 3)', [ 3, 6, 9, 12]), - ('begin_end_incr(0,10,2)', [ 0, 2, 4, 6, 8, 10]), - ('begin_end_incr(10,0,-2)', [ 10, 8, 6, 4, 2, 0]), - ('begin_end_incr(2,2,20)', [ 2 ]), - ('begin_end_incr(72,72,6)', [ 72 ]), - ('begin_end_incr(0,12,1), begin_end_incr(15,60,3)', [0,1,2,3,4,5,6,7,8,9,10,11,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57,60]), - ('begin_end_incr(0,10,2), 12', [ 0, 2, 4, 6, 8, 10, 12]), - ('begin_end_incr(0,10,2)H, 12', [ 0, 2, 4, 6, 8, 10, 12]), - ('begin_end_incr(0,10800,3600)S, 4H', [ 0, 1, 2, 3, 4]), - ] -) -@pytest.mark.util -def test_get_lead_sequence_lead_list(metplus_config, key, value): - input_dict = { 'valid' : datetime.datetime(2019, 2, 1, 13) } - conf = metplus_config - conf.set('config', 'LEAD_SEQ', key) - test_seq = util.get_lead_sequence(conf, input_dict) - hour_seq = [] - - for test in test_seq: - hour_seq.append(time_util.ti_get_hours_from_relativedelta(test)) - lead_seq = value - assert hour_seq == lead_seq - - -@pytest.mark.parametrize( - 'config_dict, expected_list', [ - # 1 group - ({'LEAD_SEQ_1': "0, 1, 2, 3", - 'LEAD_SEQ_1_LABEL': 'Day1', - }, [0, 1, 2, 3]), - # 2 groups, no overlap - ({'LEAD_SEQ_1': "0, 1, 2, 3", - 'LEAD_SEQ_1_LABEL': 'Day1', - 'LEAD_SEQ_2': "8, 9, 10, 11", - 'LEAD_SEQ_2_LABEL': 'Day2', - }, [0, 1, 2, 3, 8, 9, 10, 11]), - # 2 groups, overlap - ({'LEAD_SEQ_1': "0, 1, 2, 3", - 'LEAD_SEQ_1_LABEL': 'Day1', - 'LEAD_SEQ_2': "3, 4, 5, 6", - 'LEAD_SEQ_2_LABEL': 'Day2', - }, [0, 1, 2, 3, 4, 5, 6]), - # 2 groups, no overlap, out of order - ({'LEAD_SEQ_1': "8, 9, 10, 11", - 'LEAD_SEQ_1_LABEL': 'Day2', - 'LEAD_SEQ_2': "0, 1, 2, 3", - 'LEAD_SEQ_2_LABEL': 'Day1', - }, [8, 9, 10, 11, 0, 1, 2, 3]), - # 2 groups, overlap, out of order - ({'LEAD_SEQ_1': "3, 4, 5, 6", - 'LEAD_SEQ_1_LABEL': 'Day2', - 'LEAD_SEQ_2': "0, 1, 2, 3", - 'LEAD_SEQ_2_LABEL': 'Day1', - }, [3, 4, 5, 6, 0, 1, 2]), - ] -) -@pytest.mark.util -def test_get_lead_sequence_groups(metplus_config, config_dict, expected_list): - config = metplus_config - for key, value in config_dict.items(): - config.set('config', key, value) - - output_list = util.get_lead_sequence(config) - hour_seq = [] - - for output in output_list: - hour_seq.append( - time_util.ti_get_hours_from_relativedelta(output) - ) - - assert hour_seq == expected_list - - -@pytest.mark.parametrize( - 'current_hour, lead_seq', [ - (0, [0, 12, 24, 36]), - (1, [1, 13, 25]), - (2, [2, 14, 26]), - (3, [3, 15, 27]), - (4, [4, 16, 28]), - (5, [5, 17, 29]), - (6, [6, 18, 30]), - (7, [7, 19, 31]), - (8, [8, 20, 32]), - (9, [9, 21, 33]), - (10, [10, 22, 34]), - (11, [11, 23, 35]), - (12, [0, 12, 24, 36]), - (13, [1, 13, 25]), - (14, [2, 14, 26]), - (15, [3, 15, 27]), - (16, [4, 16, 28]), - (17, [5, 17, 29]), - (18, [6, 18, 30]), - (19, [7, 19, 31]), - (20, [8, 20, 32]), - (21, [9, 21, 33]), - (22, [10, 22, 34]), - (23, [11, 23, 35]) - ] -) -@pytest.mark.util -def test_get_lead_sequence_init(metplus_config, current_hour, lead_seq): - input_dict = {'valid': datetime.datetime(2019, 2, 1, current_hour)} - conf = metplus_config - conf.set('config', 'INIT_SEQ', "0, 12") - conf.set('config', 'LEAD_SEQ_MAX', 36) - test_seq = util.get_lead_sequence(conf, input_dict) - assert test_seq == [relativedelta(hours=lead) for lead in lead_seq] - - -@pytest.mark.util -def test_get_lead_sequence_init_min_10(metplus_config): 
- input_dict = {'valid': datetime.datetime(2019, 2, 1, 12)} - conf = metplus_config - conf.set('config', 'INIT_SEQ', "0, 12") - conf.set('config', 'LEAD_SEQ_MAX', 24) - conf.set('config', 'LEAD_SEQ_MIN', 10) - test_seq = util.get_lead_sequence(conf, input_dict) - lead_seq = [12, 24] - assert test_seq == [relativedelta(hours=lead) for lead in lead_seq] - - -@pytest.mark.parametrize( - 'int_string, expected_result', [ - ('4', [4]), - ('4-12', [4, 5, 6, 7, 8, 9, 10, 11, 12]), - ('5,18-24,29', [5, 18, 19, 20, 21, 22, 23, 24, 29]), - ('7,8,9,13', [7, 8, 9, 13]), - ('4+', [4, '+']), - ('4-12+', [4, 5, 6, 7, 8, 9, 10, 11, 12, '+']), - ('5,18-24,29+', [5, 18, 19, 20, 21, 22, 23, 24, 29, '+']), - ('7,8,9,13+', [7, 8, 9, 13, '+']), - ] -) -@pytest.mark.util -def test_expand_int_string_to_list(int_string, expected_result): - result = util.expand_int_string_to_list(int_string) - assert result == expected_result - - -@pytest.mark.parametrize( - 'subset_definition, expected_result', [ - ([1, 3, 5], ['b', 'd', 'f']), - ([1, 3, 5, '+'], ['b', 'd', 'f', 'g', 'h', 'i', 'j']), - ([1], ['b']), - (1, ['b']), - ([3, '+'], ['d', 'e', 'f', 'g', 'h', 'i', 'j']), - (None, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), - (slice(1,4,1), ['b', 'c', 'd']), - (slice(2,9,2), ['c', 'e', 'g', 'i']), - ] -) -@pytest.mark.util -def test_subset_list(subset_definition, expected_result): - full_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] - result = util.subset_list(full_list, subset_definition) - assert result == expected_result - - -@pytest.mark.parametrize( - 'level, expected_result', [ - ('level', 'level'), - ('P500', 'P500'), - ('*,*', 'all_all'), - ('1,*,*', '1_all_all'), - ] -) -@pytest.mark.util -def test_format_level(level, expected_result): - assert util.format_level(level) == expected_result - - -@pytest.mark.parametrize( - 'input_dict, expected_list', [ - ({'init': datetime.datetime(2019, 2, 1, 6), - 'lead': 7200, }, - [ - {'index': '1', - 'fcst_name': 'FNAME_2019', - 'fcst_level': 'Z06', - 'obs_name': 'ONAME_2019', - 'obs_level': 'L06', - }, - {'index': '1', - 'fcst_name': 'FNAME_2019', - 'fcst_level': 'Z08', - 'obs_name': 'ONAME_2019', - 'obs_level': 'L08', - }, - ]), - ({'init': datetime.datetime(2021, 4, 13, 9), - 'lead': 10800, }, - [ - {'index': '1', - 'fcst_name': 'FNAME_2021', - 'fcst_level': 'Z09', - 'obs_name': 'ONAME_2021', - 'obs_level': 'L09', - }, - {'index': '1', - 'fcst_name': 'FNAME_2021', - 'fcst_level': 'Z12', - 'obs_name': 'ONAME_2021', - 'obs_level': 'L12', - }, - ]), - ] -) -@pytest.mark.util -def test_sub_var_list(metplus_config, input_dict, expected_list): - config = metplus_config - config.set('config', 'FCST_VAR1_NAME', 'FNAME_{init?fmt=%Y}') - config.set('config', 'FCST_VAR1_LEVELS', 'Z{init?fmt=%H}, Z{valid?fmt=%H}') - config.set('config', 'OBS_VAR1_NAME', 'ONAME_{init?fmt=%Y}') - config.set('config', 'OBS_VAR1_LEVELS', 'L{init?fmt=%H}, L{valid?fmt=%H}') - - time_info = time_util.ti_calculate(input_dict) - - actual_temp = parse_var_list(config) - - pp = pprint.PrettyPrinter() - print(f'Actual var list (before sub):') - pp.pprint(actual_temp) - - actual_list = util.sub_var_list(actual_temp, time_info) - print(f'Actual var list (after sub):') - pp.pprint(actual_list) - - assert len(actual_list) == len(expected_list) - for actual, expected in zip(actual_list, expected_list): - for key, value in expected.items(): - assert actual.get(key) == value diff --git a/internal/tests/pytests/util/string_manip/test_util_string_manip.py 
b/internal/tests/pytests/util/string_manip/test_util_string_manip.py index 323d5f4cb9..c5c27b3e56 100644 --- a/internal/tests/pytests/util/string_manip/test_util_string_manip.py +++ b/internal/tests/pytests/util/string_manip/test_util_string_manip.py @@ -8,6 +8,43 @@ from metplus.util.string_manip import _fix_list +@pytest.mark.parametrize( + 'subset_definition, expected_result', [ + ([1, 3, 5], ['b', 'd', 'f']), + ([1, 3, 5, '+'], ['b', 'd', 'f', 'g', 'h', 'i', 'j']), + ([1], ['b']), + (1, ['b']), + ([3, '+'], ['d', 'e', 'f', 'g', 'h', 'i', 'j']), + (None, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']), + (slice(1,4,1), ['b', 'c', 'd']), + (slice(2,9,2), ['c', 'e', 'g', 'i']), + ] +) +@pytest.mark.util +def test_subset_list(subset_definition, expected_result): + full_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'] + result = subset_list(full_list, subset_definition) + assert result == expected_result + + +@pytest.mark.parametrize( + 'int_string, expected_result', [ + ('4', [4]), + ('4-12', [4, 5, 6, 7, 8, 9, 10, 11, 12]), + ('5,18-24,29', [5, 18, 19, 20, 21, 22, 23, 24, 29]), + ('7,8,9,13', [7, 8, 9, 13]), + ('4+', [4, '+']), + ('4-12+', [4, 5, 6, 7, 8, 9, 10, 11, 12, '+']), + ('5,18-24,29+', [5, 18, 19, 20, 21, 22, 23, 24, 29, '+']), + ('7,8,9,13+', [7, 8, 9, 13, '+']), + ] +) +@pytest.mark.util +def test_expand_int_string_to_list(int_string, expected_result): + result = expand_int_string_to_list(int_string) + assert result == expected_result + + @pytest.mark.parametrize( 'value, expected_result', [ (3.3, 3.5), @@ -321,3 +358,16 @@ def test_comparison_to_letter_format(expression, expected_result): @pytest.mark.util def test_format_thresh(expression, expected_result): assert format_thresh(expression) == expected_result + + +@pytest.mark.parametrize( + 'level, expected_result', [ + ('level', 'level'), + ('P500', 'P500'), + ('*,*', 'all_all'), + ('1,*,*', '1_all_all'), + ] +) +@pytest.mark.util +def test_format_level(level, expected_result): + assert format_level(level) == expected_result diff --git a/internal/tests/pytests/util/system_util/test_system_util.py b/internal/tests/pytests/util/system_util/test_system_util.py index 8a8a2e480f..6a829a05a6 100644 --- a/internal/tests/pytests/util/system_util/test_system_util.py +++ b/internal/tests/pytests/util/system_util/test_system_util.py @@ -97,3 +97,63 @@ def test_get_storms_mtd(metplus_config): # ensure header matches expected format if storm_dict: assert storm_dict['header'].split()[index] == sort_column + + +@pytest.mark.parametrize( + 'filename, ext', [ + ('internal/tests/data/zip/testfile.txt', '.gz'), + ('internal/tests/data/zip/testfile2.txt', '.bz2'), + ('internal/tests/data/zip/testfile3.txt', '.zip'), + ('internal/tests/data/zip/testfile4.txt', ''), + ] +) +@pytest.mark.util +def test_preprocess_file_stage(metplus_config, filename, ext): + conf = metplus_config + metplus_base = conf.getdir('METPLUS_BASE') + stage_dir = conf.getdir('STAGING_DIR', + os.path.join(conf.getdir('OUTPUT_BASE'), + 'stage')) + filepath = os.path.join(metplus_base, + filename+ext) + if ext: + stagepath = stage_dir + os.path.join(metplus_base, + filename) + if os.path.exists(stagepath): + os.remove(stagepath) + else: + stagepath = filepath + + outpath = preprocess_file(filepath, None, conf) + assert stagepath == outpath and os.path.exists(outpath) + + +@pytest.mark.parametrize( + 'filename, data_type, allow_dir, expected', [ + # filename is None or empty string - return None + (None, None, False, None), + ('', None, False, None), + # python 
data types - pass through full filename value + ('some:set:of:words', 'PYTHON_NUMPY', False, 'some:set:of:words'), + ('some:set:of:words', 'PYTHON_XARRAY', False, 'some:set:of:words'), + ('some:set:of:words', 'PYTHON_PANDAS', False, 'some:set:of:words'), + # allow directory - pass through full dir path + ('dir', None, True, 'dir'), + # base filename is python embedding type - return python embed type + ('/some/path/PYTHON_NUMPY', None, False, 'PYTHON_NUMPY'), + ('/some/path/PYTHON_XARRAY', None, False, 'PYTHON_XARRAY'), + ('/some/path/PYTHON_PANDAS', None, False, 'PYTHON_PANDAS'), + ] +) +@pytest.mark.util +def test_preprocess_file_options(metplus_config, + filename, + data_type, + allow_dir, + expected): + config = metplus_config + if filename == 'dir': + filename = config.getdir('METPLUS_BASE') + expected = filename + result = preprocess_file(filename, data_type, config, allow_dir) + assert result == expected diff --git a/internal/tests/pytests/util/time_looping/test_time_looping.py b/internal/tests/pytests/util/time_looping/test_time_looping.py index b61ba030d7..bb67cfabbd 100644 --- a/internal/tests/pytests/util/time_looping/test_time_looping.py +++ b/internal/tests/pytests/util/time_looping/test_time_looping.py @@ -4,7 +4,7 @@ from dateutil.relativedelta import relativedelta from metplus.util.time_looping import * -from metplus.util.time_util import ti_calculate +from metplus.util.time_util import ti_calculate, ti_get_hours_from_relativedelta @pytest.mark.parametrize( @@ -307,3 +307,140 @@ def test_time_generator_error_check_beg_end(metplus_config, prefix): # _END time comes before _BEG time config.set('config', f'{prefix}_END', '2020112012') assert next(time_generator(config)) is None + + +def test_get_lead_sequence_lead(metplus_config): + input_dict = {'valid': datetime(2019, 2, 1, 13)} + conf = metplus_config + conf.set('config', 'LEAD_SEQ', "3,6,9,12") + test_seq = get_lead_sequence(conf, input_dict) + hour_seq = [] + for test in test_seq: + hour_seq.append(ti_get_hours_from_relativedelta(test)) + lead_seq = [3, 6, 9, 12] + assert hour_seq == lead_seq + + +@pytest.mark.parametrize( + 'key, value', [ + ('begin_end_incr(3,12,3)', [ 3, 6, 9, 12]), + ('begin_end_incr( 3,12 , 3)', [ 3, 6, 9, 12]), + ('begin_end_incr(0,10,2)', [ 0, 2, 4, 6, 8, 10]), + ('begin_end_incr(10,0,-2)', [ 10, 8, 6, 4, 2, 0]), + ('begin_end_incr(2,2,20)', [ 2 ]), + ('begin_end_incr(72,72,6)', [ 72 ]), + ('begin_end_incr(0,12,1), begin_end_incr(15,60,3)', [0,1,2,3,4,5,6,7,8,9,10,11,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57,60]), + ('begin_end_incr(0,10,2), 12', [ 0, 2, 4, 6, 8, 10, 12]), + ('begin_end_incr(0,10,2)H, 12', [ 0, 2, 4, 6, 8, 10, 12]), + ('begin_end_incr(0,10800,3600)S, 4H', [ 0, 1, 2, 3, 4]), + ] +) +@pytest.mark.util +def test_get_lead_sequence_lead_list(metplus_config, key, value): + input_dict = { 'valid' : datetime(2019, 2, 1, 13) } + conf = metplus_config + conf.set('config', 'LEAD_SEQ', key) + test_seq = get_lead_sequence(conf, input_dict) + hour_seq = [] + + for test in test_seq: + hour_seq.append(ti_get_hours_from_relativedelta(test)) + lead_seq = value + assert hour_seq == lead_seq + + +@pytest.mark.parametrize( + 'config_dict, expected_list', [ + # 1 group + ({'LEAD_SEQ_1': "0, 1, 2, 3", + 'LEAD_SEQ_1_LABEL': 'Day1', + }, [0, 1, 2, 3]), + # 2 groups, no overlap + ({'LEAD_SEQ_1': "0, 1, 2, 3", + 'LEAD_SEQ_1_LABEL': 'Day1', + 'LEAD_SEQ_2': "8, 9, 10, 11", + 'LEAD_SEQ_2_LABEL': 'Day2', + }, [0, 1, 2, 3, 8, 9, 10, 11]), + # 2 groups, overlap + ({'LEAD_SEQ_1': "0, 1, 2, 3", + 
'LEAD_SEQ_1_LABEL': 'Day1', + 'LEAD_SEQ_2': "3, 4, 5, 6", + 'LEAD_SEQ_2_LABEL': 'Day2', + }, [0, 1, 2, 3, 4, 5, 6]), + # 2 groups, no overlap, out of order + ({'LEAD_SEQ_1': "8, 9, 10, 11", + 'LEAD_SEQ_1_LABEL': 'Day2', + 'LEAD_SEQ_2': "0, 1, 2, 3", + 'LEAD_SEQ_2_LABEL': 'Day1', + }, [8, 9, 10, 11, 0, 1, 2, 3]), + # 2 groups, overlap, out of order + ({'LEAD_SEQ_1': "3, 4, 5, 6", + 'LEAD_SEQ_1_LABEL': 'Day2', + 'LEAD_SEQ_2': "0, 1, 2, 3", + 'LEAD_SEQ_2_LABEL': 'Day1', + }, [3, 4, 5, 6, 0, 1, 2]), + ] +) +@pytest.mark.util +def test_get_lead_sequence_groups(metplus_config, config_dict, expected_list): + config = metplus_config + for key, value in config_dict.items(): + config.set('config', key, value) + + output_list = get_lead_sequence(config) + hour_seq = [] + + for output in output_list: + hour_seq.append(ti_get_hours_from_relativedelta(output)) + + assert hour_seq == expected_list + + +@pytest.mark.parametrize( + 'current_hour, lead_seq', [ + (0, [0, 12, 24, 36]), + (1, [1, 13, 25]), + (2, [2, 14, 26]), + (3, [3, 15, 27]), + (4, [4, 16, 28]), + (5, [5, 17, 29]), + (6, [6, 18, 30]), + (7, [7, 19, 31]), + (8, [8, 20, 32]), + (9, [9, 21, 33]), + (10, [10, 22, 34]), + (11, [11, 23, 35]), + (12, [0, 12, 24, 36]), + (13, [1, 13, 25]), + (14, [2, 14, 26]), + (15, [3, 15, 27]), + (16, [4, 16, 28]), + (17, [5, 17, 29]), + (18, [6, 18, 30]), + (19, [7, 19, 31]), + (20, [8, 20, 32]), + (21, [9, 21, 33]), + (22, [10, 22, 34]), + (23, [11, 23, 35]) + ] +) +@pytest.mark.util +def test_get_lead_sequence_init(metplus_config, current_hour, lead_seq): + input_dict = {'valid': datetime(2019, 2, 1, current_hour)} + conf = metplus_config + conf.set('config', 'INIT_SEQ', "0, 12") + conf.set('config', 'LEAD_SEQ_MAX', 36) + test_seq = get_lead_sequence(conf, input_dict) + assert test_seq == [relativedelta(hours=lead) for lead in lead_seq] + + +@pytest.mark.util +def test_get_lead_sequence_init_min_10(metplus_config): + input_dict = {'valid': datetime(2019, 2, 1, 12)} + conf = metplus_config + conf.set('config', 'INIT_SEQ', "0, 12") + conf.set('config', 'LEAD_SEQ_MAX', 24) + conf.set('config', 'LEAD_SEQ_MIN', 10) + test_seq = get_lead_sequence(conf, input_dict) + lead_seq = [12, 24] + assert test_seq == [relativedelta(hours=lead) for lead in lead_seq] diff --git a/internal/tests/pytests/util/time_util/test_time_util.py b/internal/tests/pytests/util/time_util/test_time_util.py index 6d133bd67e..dd7a71adff 100644 --- a/internal/tests/pytests/util/time_util/test_time_util.py +++ b/internal/tests/pytests/util/time_util/test_time_util.py @@ -8,6 +8,16 @@ from metplus.util import time_util +@pytest.mark.parametrize( + 'time_str, shift, expected_output', [ + ('20221101000000', -1, '20221031235959'), + ] +) +@pytest.mark.util +def test_shift_time_seconds(time_str, shift, expected_output): + assert time_util.shift_time_seconds(time_str, shift) == expected_output + + @pytest.mark.parametrize( 'input_str, expected_output', [ ('', []), @@ -22,7 +32,7 @@ '1440000', '1680000', '1920000', '2160000', '2400000']), ] ) -@pytest.mark.wrapper_d +@pytest.mark.util def test_get_met_time_list(input_str, expected_output): assert time_util.get_met_time_list(input_str) == expected_output diff --git a/internal/tests/use_cases/metplus_use_case_suite.py b/internal/tests/use_cases/metplus_use_case_suite.py index d194b4c3ae..b26accc597 100644 --- a/internal/tests/use_cases/metplus_use_case_suite.py +++ b/internal/tests/use_cases/metplus_use_case_suite.py @@ -11,7 +11,7 @@ sys.path.insert(0, os.path.join(os.path.abspath(dirname(__file__)), 
os.pardir, os.pardir)) -from metplus.util.met_util import subset_list +from metplus.util.string_manip import subset_list class METplusUseCase: """! Contains name of use case and a list of configuration command line diff --git a/metplus/util/__init__.py b/metplus/util/__init__.py index b133c8bfb7..58df7821c6 100644 --- a/metplus/util/__init__.py +++ b/metplus/util/__init__.py @@ -7,7 +7,6 @@ from .doc_util import * from .config_metplus import * from .run_util import * -from .met_util import * from .met_config import * from .time_looping import * from .field_util import * diff --git a/metplus/util/config_metplus.py b/metplus/util/config_metplus.py index 0ec9f9c3e1..5e9b02c07c 100644 --- a/metplus/util/config_metplus.py +++ b/metplus/util/config_metplus.py @@ -51,10 +51,12 @@ 'get_custom_string_list', 'find_indices_in_config_section', 'parse_var_list', + 'sub_var_list', 'get_process_list', 'validate_configuration_variables', 'is_loop_by_init', 'handle_tmp_dir', + 'log_runtime_banner', ] '''!@var METPLUS_BASE @@ -2137,3 +2139,57 @@ def write_final_conf(config): config.logger.info('Overwrite final conf here: %s' % (final_conf,)) with open(final_conf, 'wt') as conf_file: config.write(conf_file) + + +def log_runtime_banner(config, time_input, process): + loop_by = time_input['loop_by'] + run_time = time_input[loop_by].strftime("%Y-%m-%d %H:%M") + + process_name = process.__class__.__name__ + if process.instance: + process_name = f"{process_name}({process.instance})" + + config.logger.info("****************************************") + config.logger.info(f"* Running METplus {process_name}") + config.logger.info(f"* at {loop_by} time: {run_time}") + config.logger.info("****************************************") + + +def sub_var_list(var_list, time_info): + """! Perform string substitution on var list values with time info + + @param var_list list of field info to substitute values into + @param time_info dictionary containing time information + @returns var_list with values substituted + """ + if not var_list: + return [] + + out_var_list = [] + for var_info in var_list: + out_var_info = _sub_var_info(var_info, time_info) + out_var_list.append(out_var_info) + + return out_var_list + + +def _sub_var_info(var_info, time_info): + if not var_info: + return {} + + out_var_info = {} + for key, value in var_info.items(): + if isinstance(value, list): + out_value = [] + for item in value: + out_value.append(do_string_sub(item, + skip_missing_tags=True, + **time_info)) + else: + out_value = do_string_sub(value, + skip_missing_tags=True, + **time_info) + + out_var_info[key] = out_value + + return out_var_info diff --git a/metplus/util/met_util.py b/metplus/util/met_util.py deleted file mode 100644 index 71ae0e8382..0000000000 --- a/metplus/util/met_util.py +++ /dev/null @@ -1,778 +0,0 @@ -import os -import shutil -import sys -from datetime import datetime, timedelta, timezone -import re -import gzip -import bz2 -import zipfile -import struct - -from dateutil.relativedelta import relativedelta - -from .string_manip import getlist, getlistint -from .system_util import mkdir_p -from . import time_util as time_util -from .string_template_substitution import do_string_sub -from .string_template_substitution import parse_template -from .time_looping import time_generator - - -"""!@namespace met_util - @brief Provides Utility functions for METplus. -""" - -from .constants import * - - -def loop_over_times_and_call(config, processes, custom=None): - """! 
Loop over all run times and call wrappers listed in config - - @param config METplusConfig object - @param processes list of CommandBuilder subclass objects (Wrappers) to call - @param custom (optional) custom loop string value - @returns list of tuples with all commands run and the environment variables - that were set for each - """ - # keep track of commands that were run - all_commands = [] - for time_input in time_generator(config): - if not isinstance(processes, list): - processes = [processes] - - for process in processes: - # if time could not be read, increment errors for each process - if time_input is None: - process.errors += 1 - continue - - log_runtime_banner(config, time_input, process) - add_to_time_input(time_input, - instance=process.instance, - custom=custom) - - process.clear() - process.run_at_time(time_input) - if process.all_commands: - all_commands.extend(process.all_commands) - process.all_commands.clear() - - return all_commands - -def log_runtime_banner(config, time_input, process): - loop_by = time_input['loop_by'] - run_time = time_input[loop_by].strftime("%Y-%m-%d %H:%M") - - process_name = process.__class__.__name__ - if process.instance: - process_name = f"{process_name}({process.instance})" - - config.logger.info("****************************************") - config.logger.info(f"* Running METplus {process_name}") - config.logger.info(f"* at {loop_by} time: {run_time}") - config.logger.info("****************************************") - -def add_to_time_input(time_input, clock_time=None, instance=None, custom=None): - if clock_time: - clock_dt = datetime.strptime(clock_time, '%Y%m%d%H%M%S') - time_input['now'] = clock_dt - - # if instance is set, use that value, otherwise use empty string - time_input['instance'] = instance if instance else '' - - # if custom is specified, set it - # otherwise leave it unset so it can be set within the wrapper - if custom: - time_input['custom'] = custom - -def get_lead_sequence(config, input_dict=None, wildcard_if_empty=False): - """!Get forecast lead list from LEAD_SEQ or compute it from INIT_SEQ. - Restrict list by LEAD_SEQ_[MIN/MAX] if set. Now returns list of relativedelta objects - Args: - @param config METplusConfig object to query config variable values - @param input_dict time dictionary needed to handle using INIT_SEQ. 
Must contain - valid key if processing INIT_SEQ - @param wildcard_if_empty if no lead sequence was set, return a - list with '*' if this is True, otherwise return a list with 0 - @returns list of relativedelta objects or a list containing 0 if none are found - """ - - out_leads = [] - lead_min, lead_max, no_max = get_lead_min_max(config) - - # check if LEAD_SEQ, INIT_SEQ, or LEAD_SEQ_ are set - # if more than one is set, report an error and exit - lead_seq = getlist(config.getstr('config', 'LEAD_SEQ', '')) - init_seq = getlistint(config.getstr('config', 'INIT_SEQ', '')) - lead_groups = get_lead_sequence_groups(config) - - if not are_lead_configs_ok(lead_seq, - init_seq, - lead_groups, - config, - input_dict, - no_max): - return None - - if lead_seq: - # return lead sequence if wildcard characters are used - if lead_seq == ['*']: - return lead_seq - - out_leads = handle_lead_seq(config, - lead_seq, - lead_min, - lead_max) - - # use INIT_SEQ to build lead list based on the valid time - elif init_seq: - out_leads = handle_init_seq(init_seq, - input_dict, - lead_min, - lead_max) - elif lead_groups: - out_leads = handle_lead_groups(lead_groups) - - if not out_leads: - if wildcard_if_empty: - return ['*'] - - return [0] - - return out_leads - -def are_lead_configs_ok(lead_seq, init_seq, lead_groups, - config, input_dict, no_max): - if lead_groups is None: - return False - - error_message = ('are both listed in the configuration. ' - 'Only one may be used at a time.') - if lead_seq: - if init_seq: - config.logger.error(f'LEAD_SEQ and INIT_SEQ {error_message}') - return False - - if lead_groups: - config.logger.error(f'LEAD_SEQ and LEAD_SEQ_ {error_message}') - return False - - if init_seq and lead_groups: - config.logger.error(f'INIT_SEQ and LEAD_SEQ_ {error_message}') - return False - - if init_seq: - # if input dictionary not passed in, - # cannot compute lead sequence from it, so exit - if input_dict is None: - config.logger.error('Cannot run using INIT_SEQ for this wrapper') - return False - - # if looping by init, fail and exit - if 'valid' not in input_dict.keys(): - log_msg = ('INIT_SEQ specified while looping by init time.' - ' Use LEAD_SEQ or change to loop by valid time') - config.logger.error(log_msg) - return False - - # maximum lead must be specified to run with INIT_SEQ - if no_max: - config.logger.error('LEAD_SEQ_MAX must be set to use INIT_SEQ') - return False - - return True - -def get_lead_min_max(config): - # remove any items that are outside of the range specified - # by LEAD_SEQ_MIN and LEAD_SEQ_MAX - # convert min and max to relativedelta objects, then use current time - # to compare them to each forecast lead - # this is an approximation because relative time offsets depend on - # each runtime - huge_max = '4000Y' - lead_min_str = config.getstr_nocheck('config', 'LEAD_SEQ_MIN', '0') - lead_max_str = config.getstr_nocheck('config', 'LEAD_SEQ_MAX', huge_max) - no_max = lead_max_str == huge_max - lead_min = time_util.get_relativedelta(lead_min_str, 'H') - lead_max = time_util.get_relativedelta(lead_max_str, 'H') - return lead_min, lead_max, no_max - -def handle_lead_seq(config, lead_strings, lead_min=None, lead_max=None): - out_leads = [] - leads = [] - for lead in lead_strings: - relative_delta = time_util.get_relativedelta(lead, 'H') - if relative_delta is not None: - leads.append(relative_delta) - else: - config.logger.error(f'Invalid item {lead} in LEAD_SEQ. 
Exiting.') - return None - - if lead_min is None and lead_max is None: - return leads - - # add current time to leads to approximate month and year length - now_time = datetime.now() - lead_min_approx = now_time + lead_min - lead_max_approx = now_time + lead_max - for lead in leads: - lead_approx = now_time + lead - if lead_approx >= lead_min_approx and lead_approx <= lead_max_approx: - out_leads.append(lead) - - return out_leads - -def handle_init_seq(init_seq, input_dict, lead_min, lead_max): - out_leads = [] - lead_min_hours = time_util.ti_get_hours_from_relativedelta(lead_min) - lead_max_hours = time_util.ti_get_hours_from_relativedelta(lead_max) - - valid_hr = int(input_dict['valid'].strftime('%H')) - for init in init_seq: - if valid_hr >= init: - current_lead = valid_hr - init - else: - current_lead = valid_hr + (24 - init) - - while current_lead <= lead_max_hours: - if current_lead >= lead_min_hours: - out_leads.append(relativedelta(hours=current_lead)) - current_lead += 24 - - out_leads = sorted(out_leads, key=lambda - rd: time_util.ti_get_seconds_from_relativedelta(rd, - input_dict['valid'])) - return out_leads - -def handle_lead_groups(lead_groups): - """! Read groups of forecast leads and create a list with all unique items - - @param lead_group dictionary where the values are lists of forecast - leads stored as relativedelta objects - @returns list of forecast leads stored as relativedelta objects - """ - out_leads = [] - for _, lead_seq in lead_groups.items(): - for lead in lead_seq: - if lead not in out_leads: - out_leads.append(lead) - - return out_leads - -def get_lead_sequence_groups(config): - # output will be a dictionary where the key will be the - # label specified and the value will be the list of forecast leads - lead_seq_dict = {} - # used in plotting - all_conf = config.keys('config') - indices = [] - regex = re.compile(r"LEAD_SEQ_(\d+)") - for conf in all_conf: - result = regex.match(conf) - if result is not None: - indices.append(result.group(1)) - - # loop over all possible variables and add them to list - for index in indices: - if config.has_option('config', f"LEAD_SEQ_{index}_LABEL"): - label = config.getstr('config', f"LEAD_SEQ_{index}_LABEL") - else: - log_msg = (f'Need to set LEAD_SEQ_{index}_LABEL to describe ' - f'LEAD_SEQ_{index}') - config.logger.error(log_msg) - return None - - # get forecast list for n - lead_string_list = getlist(config.getstr('config', f'LEAD_SEQ_{index}')) - lead_seq = handle_lead_seq(config, - lead_string_list, - lead_min=None, - lead_max=None) - # add to output dictionary - lead_seq_dict[label] = lead_seq - - return lead_seq_dict - - -def get_files(filedir, filename_regex, logger=None): - """! Get all the files (with a particular - naming format) by walking - through the directories. - Args: - @param filedir: The topmost directory from which the - search begins. - @param filename_regex: The regular expression that - defines the naming format - of the files of interest. - Returns: - file_paths (string): a list of filenames (with full filepath) - """ - file_paths = [] - - # Walk the tree - for root, _, files in os.walk(filedir): - for filename in files: - # add it to the list only if it is a match - # to the specified format - match = re.match(filename_regex, filename) - if match: - # Join the two strings to form the full - # filepath. 
- filepath = os.path.join(root, filename) - file_paths.append(filepath) - else: - continue - return sorted(file_paths) - - -def shift_time_seconds(time_str, shift): - """ Adjust time by shift seconds. Format is %Y%m%d%H%M%S - Args: - @param time_str: Start time in %Y%m%d%H%M%S - @param shift: Amount to adjust time in seconds - Returns: - New time in format %Y%m%d%H%M%S - """ - return (datetime.strptime(time_str, "%Y%m%d%H%M%S") + - timedelta(seconds=shift)).strftime("%Y%m%d%H%M%S") - - -def sub_var_info(var_info, time_info): - if not var_info: - return {} - - out_var_info = {} - for key, value in var_info.items(): - if isinstance(value, list): - out_value = [] - for item in value: - out_value.append(do_string_sub(item, - skip_missing_tags=True, - **time_info)) - else: - out_value = do_string_sub(value, - skip_missing_tags=True, - **time_info) - - out_var_info[key] = out_value - - return out_var_info - -def sub_var_list(var_list, time_info): - """! Perform string substitution on var list values with time info - - @param var_list list of field info to substitute values into - @param time_info dictionary containing time information - @returns var_list with values substituted - """ - if not var_list: - return [] - - out_var_list = [] - for var_info in var_list: - out_var_info = sub_var_info(var_info, time_info) - out_var_list.append(out_var_info) - - return out_var_list - -def split_level(level): - """! If level value starts with a letter, then separate that letter from - the rest of the string. i.e. 'A03' will be returned as 'A', '03'. If no - level type letter is found and the level value consists of alpha-numeric - characters, return an empty string as the level type and the full level - string as the level value - - @param level input string to parse/split - @returns tuple of level type and level value - """ - if not level: - return '', '' - - match = re.match(r'^([a-zA-Z])(\w+)$', level) - if match: - level_type = match.group(1) - level = match.group(2) - return level_type, level - - match = re.match(r'^[\w]+$', level) - if match: - return '', level - - return '', '' - -def get_filetype(filepath, logger=None): - """!This function determines if the filepath is a NETCDF or GRIB file - based on the first eight bytes of the file. - It returns the string GRIB, NETCDF, or a None object. - - Note: If it is NOT determined to ba a NETCDF file, - it returns GRIB, regardless. - Unless there is an IOError exception, such as filepath refers - to a non-existent file or filepath is only a directory, than - None is returned, without a system exit. - - Args: - @param filepath: path/to/filename - @param logger the logger, optional - Returns: - @returns The string GRIB, NETCDF or a None object - """ - # Developer Note - # Since we have the impending code-freeze, keeping the behavior the same, - # just changing the implementation. - # The previous logic did not test for GRIB it would just return 'GRIB' - # if you couldn't run ncdump on the file. - # Also note: - # As John indicated ... there is the case when a grib file - # may not start with GRIB ... and if you pass the MET command filtetype=GRIB - # MET will handle it ok ... - - # Notes on file format and determining type. 
- # https://www.wmo.int/pages/prog/www/WDM/Guides/Guide-binary-2.html - # https://www.unidata.ucar.edu/software/netcdf/docs/faq.html - # http: // www.hdfgroup.org / HDF5 / doc / H5.format.html - - # Interpreting single byte by byte - so ok to ignore endianess - # od command: - # od -An -c -N8 foo.nc - # od -tx1 -N8 foo.nc - # GRIB - # Octet no. IS Content - # 1-4 'GRIB' (Coded CCITT-ITA No. 5) (ASCII); - # 5-7 Total length, in octets, of GRIB message(including Sections 0 & 5); - # 8 Edition number - currently 1 - # NETCDF .. ie. od -An -c -N4 foo.nc which will output - # C D F 001 - # C D F 002 - # 211 H D F - # HDF5 - # Magic numbers Hex: 89 48 44 46 0d 0a 1a 0a - # ASCII: \211 HDF \r \n \032 \n - - # Below is a reference that may be used in the future to - # determine grib version. - # import struct - # with open ("foo.grb2","rb")as binary_file: - # binary_file.seek(7) - # one_byte = binary_file.read(1) - # - # This would return an integer with value 1 or 2, - # B option is an unsigned char. - # struct.unpack('B',one_byte)[0] - - # if filepath is set to None, return None to avoid crash - if filepath == None: - return None - - try: - # read will return up to 8 bytes, if file is 0 bytes in length, - # than first_eight_bytes will be the empty string ''. - # Don't test the file length, just adds more time overhead. - with open(filepath, "rb") as binary_file: - binary_file.seek(0) - first_eight_bytes = binary_file.read(8) - - # From the first eight bytes of the file, unpack the bytes - # of the known identifier byte locations, in to a string. - # Example, if this was a netcdf file than ONLY name_cdf would - # equal 'CDF' the other variables, name_hdf would be 'DF ' - # name_grid 'CDF ' - name_cdf, name_hdf, name_grib = [None] * 3 - if len(first_eight_bytes) == 8: - name_cdf = struct.unpack('3s', first_eight_bytes[:3])[0] - name_hdf = struct.unpack('3s', first_eight_bytes[1:4])[0] - name_grib = struct.unpack('4s', first_eight_bytes[:4])[0] - - # Why not just use a else, instead of elif else if we are going to - # return GRIB ? It allows for expansion, ie. Maybe we pass in a - # logger and log the cases we can't determine the type. - if name_cdf == 'CDF' or name_hdf == 'HDF': - return "NETCDF" - elif name_grib == 'GRIB': - return "GRIB" - else: - # This mimicks previous behavoir, were we at least will always return GRIB. - # It also handles the case where GRIB was not in the first 4 bytes - # of a legitimate grib file, see John. - # logger.info('Can't determine type, returning GRIB - # as default %s'%filepath) - return "GRIB" - - except IOError: - # Skip the IOError, and keep processing data. - # ie. filepath references a file that does not exist - # or filepath is a directory. 
- return None - - # Previous Logic - # ncdump_exe = config.getexe('NCDUMP') - #try: - # result = subprocess.check_output([ncdump_exe, filepath]) - - #except subprocess.CalledProcessError: - # return "GRIB" - - #regex = re.search("netcdf", result) - #if regex is not None: - # return "NETCDF" - #else: - # return None - -def preprocess_file(filename, data_type, config, allow_dir=False): - """ Decompress gzip, bzip, or zip files or convert Gempak files to NetCDF - Args: - @param filename: Path to file without zip extensions - @param config: Config object - Returns: - Path to staged unzipped file or original file if already unzipped - """ - if not filename: - return None - - if allow_dir and os.path.isdir(filename): - return filename - - # if using python embedding for input, return the keyword - if os.path.basename(filename) in PYTHON_EMBEDDING_TYPES: - return os.path.basename(filename) - - # if filename starts with a python embedding type, return the full value - for py_embed_type in PYTHON_EMBEDDING_TYPES: - if filename.startswith(py_embed_type): - return filename - - # if _INPUT_DATATYPE value contains PYTHON, return the full value - if data_type is not None and 'PYTHON' in data_type: - return filename - - stage_dir = config.getdir('STAGING_DIR') - - if os.path.isfile(filename): - # if filename provided ends with a valid compression extension, - # remove the extension and call function again so the - # file will be uncompressed properly. This is done so that - # the function will handle files passed to it with an - # extension the same way as files passed - # without an extension but the compressed equivalent exists - for ext in COMPRESSION_EXTENSIONS: - if filename.endswith(ext): - return preprocess_file(filename[:-len(ext)], data_type, config) - # if extension is grd (Gempak), then look in staging dir for nc file - if filename.endswith('.grd') or data_type == "GEMPAK": - if filename.endswith('.grd'): - stagefile = stage_dir + filename[:-3]+"nc" - else: - stagefile = stage_dir + filename+".nc" - if os.path.isfile(stagefile): - return stagefile - # if it does not exist, run GempakToCF and return staged nc file - # Create staging area if it does not exist - mkdir_p(os.path.dirname(stagefile)) - - # only import GempakToCF if needed - from ..wrappers import GempakToCFWrapper - - run_g2c = GempakToCFWrapper(config) - run_g2c.infiles.append(filename) - run_g2c.set_output_path(stagefile) - cmd = run_g2c.get_command() - if cmd is None: - config.logger.error("GempakToCF could not generate command") - return None - if config.logger: - config.logger.debug("Converting Gempak file into {}".format(stagefile)) - run_g2c.build() - return stagefile - - return filename - - # nc file requested and the Gempak equivalent exists - if os.path.isfile(filename[:-2]+'grd'): - return preprocess_file(filename[:-2]+'grd', data_type, config) - - # if file exists in the staging area, return that path - outpath = stage_dir + filename - if os.path.isfile(outpath): - return outpath - - # Create staging area directory only if file has compression extension - if any([os.path.isfile(f'{filename}{ext}') - for ext in COMPRESSION_EXTENSIONS]): - mkdir_p(os.path.dirname(outpath)) - - # uncompress gz, bz2, or zip file - if os.path.isfile(filename+".gz"): - if config.logger: - config.logger.debug("Uncompressing gz file to {}".format(outpath)) - with gzip.open(filename+".gz", 'rb') as infile: - with open(outpath, 'wb') as outfile: - outfile.write(infile.read()) - infile.close() - outfile.close() - return outpath - elif 
os.path.isfile(filename+".bz2"): - if config.logger: - config.logger.debug("Uncompressing bz2 file to {}".format(outpath)) - with open(filename+".bz2", 'rb') as infile: - with open(outpath, 'wb') as outfile: - outfile.write(bz2.decompress(infile.read())) - infile.close() - outfile.close() - return outpath - elif os.path.isfile(filename+".zip"): - if config.logger: - config.logger.debug("Uncompressing zip file to {}".format(outpath)) - with zipfile.ZipFile(filename+".zip") as z: - with open(outpath, 'wb') as f: - f.write(z.read(os.path.basename(filename))) - return outpath - - # if input doesn't need to exist, return filename - if not config.getbool('config', 'INPUT_MUST_EXIST', True): - return filename - - return None - - -def expand_int_string_to_list(int_string): - """! Expand string into a list of integer values. Items are separated by - commas. Items that are formatted X-Y will be expanded into each number - from X to Y inclusive. If the string ends with +, then add a str '+' - to the end of the list. Used in .github/jobs/get_use_case_commands.py - - @param int_string String containing a comma-separated list of integers - @returns List of integers and potentially '+' as the last item - """ - subset_list = [] - - # if string ends with +, remove it and add it back at the end - if int_string.strip().endswith('+'): - int_string = int_string.strip(' +') - hasPlus = True - else: - hasPlus = False - - # separate into list by comma - comma_list = int_string.split(',') - for comma_item in comma_list: - dash_list = comma_item.split('-') - # if item contains X-Y, expand it - if len(dash_list) == 2: - for i in range(int(dash_list[0].strip()), - int(dash_list[1].strip())+1, - 1): - subset_list.append(i) - else: - subset_list.append(int(comma_item.strip())) - - if hasPlus: - subset_list.append('+') - - return subset_list - -def subset_list(full_list, subset_definition): - """! Extract subset of items from full_list based on subset_definition - Used in internal/tests/use_cases/metplus_use_case_suite.py - - @param full_list List of all use cases that were requested - @param subset_definition Defines how to subset the full list. If None, - no subsetting occurs. If an integer value, select that index only. - If a slice object, i.e. slice(2,4,1), pass slice object into list. - If list, subset full list by integer index values in list. If - last item in list is '+' then subset list up to 2nd last index, then - get all items from 2nd last item and above - """ - if subset_definition is not None: - subset_list = [] - - # if case slice is a list, use only the indices in the list - if isinstance(subset_definition, list): - # if last slice value is a plus sign, get rest of items - # after 2nd last slice value - if subset_definition[-1] == '+': - plus_value = subset_definition[-2] - # add all values before last index before plus - subset_list.extend([full_list[i] - for i in subset_definition[:-2]]) - # add last index listed + all items above - subset_list.extend(full_list[plus_value:]) - else: - # list of integers, so get items based on indices - subset_list = [full_list[i] for i in subset_definition] - else: - subset_list = full_list[subset_definition] - else: - subset_list = full_list - - # if only 1 item is left, make it a list before returning - if not isinstance(subset_list, list): - subset_list = [subset_list] - - return subset_list - -def is_met_netcdf(file_path): - """! Check if a file is a MET-generated NetCDF file. - If the file is not a NetCDF file, OSError occurs. 
-    If the MET_version attribute doesn't exist, AttributeError occurs.
-    If the netCDF4 package is not available, ImportError should occur.
-    All of these situations result in the file being considered not
-    a MET-generated NetCDF file
-    Args:
-        @param file_path full path to file to check
-        @returns True if file is a MET-generated NetCDF file and False if
-        it is not or it can't be determined.
-    """
-    try:
-        from netCDF4 import Dataset
-        nc_file = Dataset(file_path, 'r')
-        getattr(nc_file, 'MET_version')
-    except (AttributeError, OSError, ImportError):
-        return False
-
-    return True
-
-def netcdf_has_var(file_path, name, level):
-    """! Check if name is a variable in the NetCDF file. If not, check if
-    {name}_{level} (with level prefix letter removed, i.e. 06 from A06)
-    If the file is not a NetCDF file, OSError occurs.
-    If the MET_version attribute doesn't exist, AttributeError occurs.
-    If the netCDF4 package is not available, ImportError should occur.
-    All of these situations result in the file being considered not
-    a MET-generated NetCDF file
-    Args:
-        @param file_path full path to file to check
-        @returns True if file is a MET-generated NetCDF file and False if
-        it is not or it can't be determined.
-    """
-    try:
-        from netCDF4 import Dataset
-
-        nc_file = Dataset(file_path, 'r')
-        variables = nc_file.variables.keys()
-
-        # if name is a variable, return that name
-        if name in variables:
-            return name
-
-
-        # if name_level is a variable, return that
-        name_underscore_level = f"{name}_{split_level(level)[1]}"
-        if name_underscore_level in variables:
-            return name_underscore_level
-
-        # requested variable name is not found in file
-        return None
-
-    except (AttributeError, OSError, ImportError):
-        return False
-
-
-def format_level(level):
-    """! Format level string to prevent NetCDF level values from creating
-    filenames and field names with bad characters. Replaces '*' with 'all'
-    and ',' with '_'
-
-    @param level string of level to format
-    @returns formatted string
-    """
-    return level.replace('*', 'all').replace(',', '_')
diff --git a/metplus/util/run_util.py b/metplus/util/run_util.py
index 353bab0073..fb7b743b35 100644
--- a/metplus/util/run_util.py
+++ b/metplus/util/run_util.py
@@ -10,7 +10,6 @@
 from . import config_metplus
 from . import camel_to_underscore
 
-
 def pre_run_setup(config_inputs):
     version_number = get_metplus_version()
 
@@ -192,4 +191,4 @@ def post_run_cleanup(config, app_name, total_errors):
             error_msg += '.'
         logger.error(error_msg)
     logger.info(log_message)
-    sys.exit(1)
\ No newline at end of file
+    sys.exit(1)
diff --git a/metplus/util/string_manip.py b/metplus/util/string_manip.py
index 40b366b1b7..5ddb62e867 100644
--- a/metplus/util/string_manip.py
+++ b/metplus/util/string_manip.py
@@ -378,3 +378,118 @@ def generate_tmp_filename():
 def template_to_regex(template):
     in_template = re.sub(r'\.', '\\.', template)
     return re.sub(r'{lead.*?}', '.*', in_template)
+
+
+def split_level(level):
+    """! If a level value starts with a letter, separate that letter from
+    the rest of the string, e.g. 'A03' is returned as 'A', '03'. If no
+    level type letter is found and the level value consists of alphanumeric
+    characters, return an empty string as the level type and the full level
+    string as the level value.
+
+    @param level input string to parse/split
+    @returns tuple of level type and level value
+    """
+    if not level:
+        return '', ''
+
+    match = re.match(r'^([a-zA-Z])(\w+)$', level)
+    if match:
+        level_type = match.group(1)
+        level = match.group(2)
+        return level_type, level
+
+    match = re.match(r'^[\w]+$', level)
+    if match:
+        return '', level
+
+    return '', ''
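# Usage sketch (illustrative only, not part of the patch); the expected
# results follow from the two regular expressions above:
#   >>> split_level('A03')
#   ('A', '03')
#   >>> split_level('P500')
#   ('P', '500')
#   >>> split_level('500')
#   ('', '500')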
+
+
+def format_level(level):
+    """! Format level string to prevent NetCDF level values from creating
+    filenames and field names with bad characters. Replaces '*' with 'all'
+    and ',' with '_'.
+
+    @param level string of level to format
+    @returns formatted string
+    """
+    return level.replace('*', 'all').replace(',', '_')
+
+
+def expand_int_string_to_list(int_string):
+    """! Expand string into a list of integer values. Items are separated by
+    commas. Items that are formatted X-Y will be expanded into each number
+    from X to Y inclusive. If the string ends with +, then add a str '+'
+    to the end of the list. Used in .github/jobs/get_use_case_commands.py
+
+    @param int_string String containing a comma-separated list of integers
+    @returns List of integers and potentially '+' as the last item
+    """
+    subset_list = []
+
+    # if string ends with +, remove it and add it back at the end
+    if int_string.strip().endswith('+'):
+        int_string = int_string.strip(' +')
+        has_plus = True
+    else:
+        has_plus = False
+
+    # separate into list by comma
+    comma_list = int_string.split(',')
+    for comma_item in comma_list:
+        dash_list = comma_item.split('-')
+        # if item contains X-Y, expand it
+        if len(dash_list) == 2:
+            for i in range(int(dash_list[0].strip()),
+                           int(dash_list[1].strip())+1,
+                           1):
+                subset_list.append(i)
+        else:
+            subset_list.append(int(comma_item.strip()))
+
+    if has_plus:
+        subset_list.append('+')
+
+    return subset_list
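# Usage sketch (illustrative only, not part of the patch): a dash range is
# expanded inclusively and a trailing '+' is preserved as the last item:
#   >>> expand_int_string_to_list('1, 3-5, 7+')
#   [1, 3, 4, 5, 7, '+']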
If + last item in list is '+' then subset list up to 2nd last index, then + get all items from 2nd last item and above + """ + if subset_definition is not None: + subset_list = [] + + # if case slice is a list, use only the indices in the list + if isinstance(subset_definition, list): + # if last slice value is a plus sign, get rest of items + # after 2nd last slice value + if subset_definition[-1] == '+': + plus_value = subset_definition[-2] + # add all values before last index before plus + subset_list.extend([full_list[i] + for i in subset_definition[:-2]]) + # add last index listed + all items above + subset_list.extend(full_list[plus_value:]) + else: + # list of integers, so get items based on indices + subset_list = [full_list[i] for i in subset_definition] + else: + subset_list = full_list[subset_definition] + else: + subset_list = full_list + + # if only 1 item is left, make it a list before returning + if not isinstance(subset_list, list): + subset_list = [subset_list] + + return subset_list diff --git a/metplus/util/system_util.py b/metplus/util/system_util.py index 8b9be3aeed..f47d9a8936 100644 --- a/metplus/util/system_util.py +++ b/metplus/util/system_util.py @@ -5,8 +5,15 @@ """ import os +import re from pathlib import Path import getpass +import gzip +import bz2 +import zipfile +import struct + +from .constants import PYTHON_EMBEDDING_TYPES, COMPRESSION_EXTENSIONS def mkdir_p(path): @@ -134,3 +141,320 @@ def prune_empty(output_dir, logger): logger.debug("Empty directory: " + full_dir + "...removing") os.rmdir(full_dir) + + +def get_files(filedir, filename_regex, logger=None): + """! Get all the files (with a particular naming format) by walking + through the directories. + + @param filedir The topmost directory from which the search begins. + @param filename_regex The regular expression that defines the naming + format of the files of interest. + @returns list of filenames (with full filepath) + """ + file_paths = [] + + # Walk the tree + for root, _, files in os.walk(filedir): + for filename in files: + # add it to the list only if it is a match + # to the specified format + match = re.match(filename_regex, filename) + if match: + # Join the two strings to form the full + # filepath. + filepath = os.path.join(root, filename) + file_paths.append(filepath) + else: + continue + return sorted(file_paths) + + +def preprocess_file(filename, data_type, config, allow_dir=False): + """ Decompress gzip, bzip, or zip files or convert Gempak files to NetCDF + Args: + @param filename: Path to file without zip extensions + @param config: Config object + Returns: + Path to staged unzipped file or original file if already unzipped + """ + if not filename: + return None + + if allow_dir and os.path.isdir(filename): + return filename + + # if using python embedding for input, return the keyword + if os.path.basename(filename) in PYTHON_EMBEDDING_TYPES: + return os.path.basename(filename) + + # if filename starts with a python embedding type, return the full value + for py_embed_type in PYTHON_EMBEDDING_TYPES: + if filename.startswith(py_embed_type): + return filename + + # if _INPUT_DATATYPE value contains PYTHON, return the full value + if data_type is not None and 'PYTHON' in data_type: + return filename + + stage_dir = config.getdir('STAGING_DIR') + + if os.path.isfile(filename): + # if filename provided ends with a valid compression extension, + # remove the extension and call function again so the + # file will be uncompressed properly. 
+
+
+def preprocess_file(filename, data_type, config, allow_dir=False):
+    """ Decompress gzip, bzip, or zip files or convert Gempak files to NetCDF
+        Args:
+            @param filename: Path to file without zip extensions
+            @param data_type: Type of input data, e.g. GEMPAK or a Python
+              embedding keyword
+            @param config: Config object
+            @param allow_dir: If True, return the path if it is a directory
+        Returns:
+            Path to staged unzipped file or original file if already unzipped
+    """
+    if not filename:
+        return None
+
+    if allow_dir and os.path.isdir(filename):
+        return filename
+
+    # if using python embedding for input, return the keyword
+    if os.path.basename(filename) in PYTHON_EMBEDDING_TYPES:
+        return os.path.basename(filename)
+
+    # if filename starts with a python embedding type, return the full value
+    for py_embed_type in PYTHON_EMBEDDING_TYPES:
+        if filename.startswith(py_embed_type):
+            return filename
+
+    # if _INPUT_DATATYPE value contains PYTHON, return the full value
+    if data_type is not None and 'PYTHON' in data_type:
+        return filename
+
+    stage_dir = config.getdir('STAGING_DIR')
+
+    if os.path.isfile(filename):
+        # if filename provided ends with a valid compression extension,
+        # remove the extension and call function again so the
+        # file will be uncompressed properly. This is done so that
+        # the function will handle files passed to it with an
+        # extension the same way as files passed
+        # without an extension but the compressed equivalent exists
+        for ext in COMPRESSION_EXTENSIONS:
+            if filename.endswith(ext):
+                return preprocess_file(filename[:-len(ext)], data_type,
+                                       config)
+        # if extension is grd (Gempak), then look in staging dir for nc file
+        if filename.endswith('.grd') or data_type == "GEMPAK":
+            if filename.endswith('.grd'):
+                stagefile = stage_dir + filename[:-3]+"nc"
+            else:
+                stagefile = stage_dir + filename+".nc"
+            if os.path.isfile(stagefile):
+                return stagefile
+            # if it does not exist, run GempakToCF and return staged nc file
+            # Create staging area if it does not exist
+            mkdir_p(os.path.dirname(stagefile))
+
+            # only import GempakToCF if needed
+            from ..wrappers import GempakToCFWrapper
+
+            run_g2c = GempakToCFWrapper(config)
+            run_g2c.infiles.append(filename)
+            run_g2c.set_output_path(stagefile)
+            cmd = run_g2c.get_command()
+            if cmd is None:
+                config.logger.error("GempakToCF could not generate command")
+                return None
+            if config.logger:
+                config.logger.debug("Converting Gempak file into {}".format(stagefile))
+            run_g2c.build()
+            return stagefile
+
+        return filename
+
+    # nc file requested and the Gempak equivalent exists
+    if os.path.isfile(filename[:-2]+'grd'):
+        return preprocess_file(filename[:-2]+'grd', data_type, config)
+
+    # if file exists in the staging area, return that path
+    outpath = stage_dir + filename
+    if os.path.isfile(outpath):
+        return outpath
+
+    # Create staging area directory only if file has compression extension
+    if any([os.path.isfile(f'{filename}{ext}')
+            for ext in COMPRESSION_EXTENSIONS]):
+        mkdir_p(os.path.dirname(outpath))
+
+    # uncompress gz, bz2, or zip file
+    if os.path.isfile(filename+".gz"):
+        if config.logger:
+            config.logger.debug("Uncompressing gz file to {}".format(outpath))
+        with gzip.open(filename+".gz", 'rb') as infile:
+            with open(outpath, 'wb') as outfile:
+                outfile.write(infile.read())
+                infile.close()
+                outfile.close()
+        return outpath
+    elif os.path.isfile(filename+".bz2"):
+        if config.logger:
+            config.logger.debug("Uncompressing bz2 file to {}".format(outpath))
+        with open(filename+".bz2", 'rb') as infile:
+            with open(outpath, 'wb') as outfile:
+                outfile.write(bz2.decompress(infile.read()))
+                infile.close()
+                outfile.close()
+        return outpath
+    elif os.path.isfile(filename+".zip"):
+        if config.logger:
+            config.logger.debug("Uncompressing zip file to {}".format(outpath))
+        with zipfile.ZipFile(filename+".zip") as z:
+            with open(outpath, 'wb') as f:
+                f.write(z.read(os.path.basename(filename)))
+        return outpath
+
+    # if input doesn't need to exist, return filename
+    if not config.getbool('config', 'INPUT_MUST_EXIST', True):
+        return filename
+
+    return None
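# Usage sketch (illustrative only, not part of the patch; paths and config
# values are hypothetical): with STAGING_DIR=/stage and only
# /data/precip.nc.gz on disk, the call below decompresses the file into the
# staging area and returns the staged path; a file that already exists
# uncompressed is returned unchanged:
#   >>> preprocess_file('/data/precip.nc', None, config)
#   '/stage/data/precip.nc'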
+ """ + try: + from netCDF4 import Dataset + + nc_file = Dataset(file_path, 'r') + variables = nc_file.variables.keys() + + # if name is a variable, return that name + if name in variables: + return name + + # if name_level is a variable, return that + name_underscore_level = f"{name}_{split_level(level)[1]}" + if name_underscore_level in variables: + return name_underscore_level + + # requested variable name is not found in file + return None + + except (AttributeError, OSError, ImportError): + return False + + +def is_met_netcdf(file_path): + """! Check if a file is a MET-generated NetCDF file. + If the file is not a NetCDF file, OSError occurs. + If the MET_version attribute doesn't exist, AttributeError occurs. + If the netCDF4 package is not available, ImportError should occur. + All of these situations result in the file being considered not + a MET-generated NetCDF file (CURRENTLY NOT USED) + + @param file_path full path to file to check + @returns True if file is a MET-generated NetCDF file and False if + it is not or it can't be determined. + """ + try: + from netCDF4 import Dataset + nc_file = Dataset(file_path, 'r') + getattr(nc_file, 'MET_version') + except (AttributeError, OSError, ImportError): + return False + + return True + + +def get_filetype(filepath, logger=None): + """!This function determines if the filepath is a NETCDF or GRIB file + based on the first eight bytes of the file. + It returns the string GRIB, NETCDF, or a None object. + + Note: If it is NOT determined to ba a NETCDF file, + it returns GRIB, regardless. + Unless there is an IOError exception, such as filepath refers + to a non-existent file or filepath is only a directory, than + None is returned, without a system exit. (CURRENTLY NOT USED) + + @param filepath: path/to/filename + @param logger the logger, optional + @returns The string GRIB, NETCDF or a None object + """ + # Developer Note + # Since we have the impending code-freeze, keeping the behavior the same, + # just changing the implementation. + # The previous logic did not test for GRIB it would just return 'GRIB' + # if you couldn't run ncdump on the file. + # Also note: + # As John indicated ... there is the case when a grib file + # may not start with GRIB ... and if you pass the MET command filtetype=GRIB + # MET will handle it ok ... + + # Notes on file format and determining type. + # https://www.wmo.int/pages/prog/www/WDM/Guides/Guide-binary-2.html + # https://www.unidata.ucar.edu/software/netcdf/docs/faq.html + # http: // www.hdfgroup.org / HDF5 / doc / H5.format.html + + # Interpreting single byte by byte - so ok to ignore endianess + # od command: + # od -An -c -N8 foo.nc + # od -tx1 -N8 foo.nc + # GRIB + # Octet no. IS Content + # 1-4 'GRIB' (Coded CCITT-ITA No. 5) (ASCII); + # 5-7 Total length, in octets, of GRIB message(including Sections 0 & 5); + # 8 Edition number - currently 1 + # NETCDF .. ie. od -An -c -N4 foo.nc which will output + # C D F 001 + # C D F 002 + # 211 H D F + # HDF5 + # Magic numbers Hex: 89 48 44 46 0d 0a 1a 0a + # ASCII: \211 HDF \r \n \032 \n + + # Below is a reference that may be used in the future to + # determine grib version. + # import struct + # with open ("foo.grb2","rb")as binary_file: + # binary_file.seek(7) + # one_byte = binary_file.read(1) + # + # This would return an integer with value 1 or 2, + # B option is an unsigned char. 
+
+
+def get_filetype(filepath, logger=None):
+    """!This function determines if the filepath is a NETCDF or GRIB file
+    based on the first eight bytes of the file.
+    It returns the string GRIB, NETCDF, or a None object.
+
+    Note: If it is NOT determined to be a NETCDF file,
+    it returns GRIB, regardless.
+    Unless there is an IOError exception, such as filepath refers
+    to a non-existent file or filepath is only a directory, then
+    None is returned, without a system exit. (CURRENTLY NOT USED)
+
+    @param filepath: path/to/filename
+    @param logger the logger, optional
+    @returns The string GRIB, NETCDF or a None object
+    """
+    # Developer Note
+    # Since we have the impending code-freeze, keeping the behavior the same,
+    # just changing the implementation.
+    # The previous logic did not test for GRIB; it would just return 'GRIB'
+    # if you couldn't run ncdump on the file.
+    # Also note:
+    # As John indicated ... there is the case when a grib file
+    # may not start with GRIB ... and if you pass the MET command
+    # filetype=GRIB, MET will handle it ok ...
+
+    # Notes on file format and determining type.
+    # https://www.wmo.int/pages/prog/www/WDM/Guides/Guide-binary-2.html
+    # https://www.unidata.ucar.edu/software/netcdf/docs/faq.html
+    # http://www.hdfgroup.org/HDF5/doc/H5.format.html
+
+    # Interpreting a single byte at a time - so ok to ignore endianness
+    # od command:
+    #   od -An -c -N8 foo.nc
+    #   od -tx1 -N8 foo.nc
+    # GRIB
+    # Octet no.  IS Content
+    # 1-4        'GRIB' (Coded CCITT-ITA No. 5) (ASCII);
+    # 5-7        Total length, in octets, of GRIB message (including
+    #            Sections 0 & 5);
+    # 8          Edition number - currently 1
+    # NETCDF ... i.e. od -An -c -N4 foo.nc which will output
+    # C D F 001
+    # C D F 002
+    # 211 H D F
+    # HDF5
+    # Magic numbers Hex: 89 48 44 46 0d 0a 1a 0a
+    #             ASCII: \211 HDF \r \n \032 \n
+
+    # Below is a reference that may be used in the future to
+    # determine grib version.
+    # import struct
+    # with open("foo.grb2", "rb") as binary_file:
+    #     binary_file.seek(7)
+    #     one_byte = binary_file.read(1)
+    #
+    # This would return an integer with value 1 or 2;
+    # B option is an unsigned char.
+    # struct.unpack('B', one_byte)[0]
+
+    # if filepath is set to None, return None to avoid crash
+    if filepath is None:
+        return None
+
+    try:
+        # read will return up to 8 bytes; if the file is 0 bytes in length,
+        # then first_eight_bytes will be the empty byte string b''.
+        # Don't test the file length, it just adds more time overhead.
+        with open(filepath, "rb") as binary_file:
+            binary_file.seek(0)
+            first_eight_bytes = binary_file.read(8)
+
+        # From the first eight bytes of the file, unpack the bytes
+        # of the known identifier byte locations into byte strings.
+        # For example, if this were a netcdf file then ONLY name_cdf would
+        # equal b'CDF'; the other variables would be name_hdf = b'DF\x01'
+        # and name_grib = b'CDF\x01'
+        name_cdf, name_hdf, name_grib = [None] * 3
+        if len(first_eight_bytes) == 8:
+            name_cdf = struct.unpack('3s', first_eight_bytes[:3])[0]
+            name_hdf = struct.unpack('3s', first_eight_bytes[1:4])[0]
+            name_grib = struct.unpack('4s', first_eight_bytes[:4])[0]
+
+        # Why not just use an else instead of elif/else if we are going to
+        # return GRIB? It allows for expansion, i.e. maybe we pass in a
+        # logger and log the cases we can't determine the type.
+        # Note: struct.unpack returns byte strings in Python 3, so the
+        # values are compared against byte literals here.
+        if name_cdf == b'CDF' or name_hdf == b'HDF':
+            return "NETCDF"
+        elif name_grib == b'GRIB':
+            return "GRIB"
+        else:
+            # This mimics the previous behavior, where we will always
+            # return GRIB. It also handles the case where GRIB was not in
+            # the first 4 bytes of a legitimate grib file, see John.
+            # logger.info('Cannot determine type, returning GRIB '
+            #             'as default %s' % filepath)
+            return "GRIB"
+
+    except IOError:
+        # Skip the IOError, and keep processing data.
+        # i.e. filepath references a file that does not exist
+        # or filepath is a directory.
+        return None
+
+    # Previous Logic
+    # ncdump_exe = config.getexe('NCDUMP')
+    # try:
+    #     result = subprocess.check_output([ncdump_exe, filepath])
+    # except subprocess.CalledProcessError:
+    #     return "GRIB"
+    #
+    # regex = re.search("netcdf", result)
+    # if regex is not None:
+    #     return "NETCDF"
+    # else:
+    #     return None
diff --git a/metplus/util/time_looping.py b/metplus/util/time_looping.py
index 4fa40c3fe4..2cd124ff8d 100644
--- a/metplus/util/time_looping.py
+++ b/metplus/util/time_looping.py
@@ -1,9 +1,12 @@
+import re
 from datetime import datetime, timedelta
 
-from .string_manip import getlist
-from .time_util import get_relativedelta
+from .string_manip import getlist, getlistint
+from .time_util import get_relativedelta, add_to_time_input
+from .time_util import ti_get_hours_from_relativedelta
+from .time_util import ti_get_seconds_from_relativedelta
 from .string_template_substitution import do_string_sub
-
+from .config_metplus import log_runtime_banner
 
 def time_generator(config):
     """! Generator used to read METplusConfig variables for time looping
@@ -123,6 +126,41 @@ def get_start_and_end_times(config):
     return start_dt, end_dt
 
 
+def loop_over_times_and_call(config, processes, custom=None):
+    """!
Loop over all run times and call wrappers listed in config + + @param config METplusConfig object + @param processes list of CommandBuilder subclass objects (Wrappers) to call + @param custom (optional) custom loop string value + @returns list of tuples with all commands run and the environment variables + that were set for each + """ + # keep track of commands that were run + all_commands = [] + for time_input in time_generator(config): + if not isinstance(processes, list): + processes = [processes] + + for process in processes: + # if time could not be read, increment errors for each process + if time_input is None: + process.errors += 1 + continue + + log_runtime_banner(config, time_input, process) + add_to_time_input(time_input, + instance=process.instance, + custom=custom) + + process.clear() + process.run_at_time(time_input) + if process.all_commands: + all_commands.extend(process.all_commands) + process.all_commands.clear() + + return all_commands + + def _validate_time_values(start_dt, end_dt, time_interval, prefix, logger): if not start_dt: logger.error(f"Could not read {prefix}_BEG") @@ -287,3 +325,211 @@ def skip_time(time_info, skip_times): # if skip time never matches, return False return False + + +def get_lead_sequence(config, input_dict=None, wildcard_if_empty=False): + """!Get forecast lead list from LEAD_SEQ or compute it from INIT_SEQ. + Restrict list by LEAD_SEQ_[MIN/MAX] if set. Now returns list of relativedelta objects + Args: + @param config METplusConfig object to query config variable values + @param input_dict time dictionary needed to handle using INIT_SEQ. Must contain + valid key if processing INIT_SEQ + @param wildcard_if_empty if no lead sequence was set, return a + list with '*' if this is True, otherwise return a list with 0 + @returns list of relativedelta objects or a list containing 0 if none are found + """ + + out_leads = [] + lead_min, lead_max, no_max = _get_lead_min_max(config) + + # check if LEAD_SEQ, INIT_SEQ, or LEAD_SEQ_ are set + # if more than one is set, report an error and exit + lead_seq = getlist(config.getstr('config', 'LEAD_SEQ', '')) + init_seq = getlistint(config.getstr('config', 'INIT_SEQ', '')) + lead_groups = get_lead_sequence_groups(config) + + if not _are_lead_configs_ok(lead_seq, + init_seq, + lead_groups, + config, + input_dict, + no_max): + return None + + if lead_seq: + # return lead sequence if wildcard characters are used + if lead_seq == ['*']: + return lead_seq + + out_leads = _handle_lead_seq(config, + lead_seq, + lead_min, + lead_max) + + # use INIT_SEQ to build lead list based on the valid time + elif init_seq: + out_leads = _handle_init_seq(init_seq, + input_dict, + lead_min, + lead_max) + elif lead_groups: + out_leads = _handle_lead_groups(lead_groups) + + if not out_leads: + if wildcard_if_empty: + return ['*'] + + return [0] + + return out_leads + +def _are_lead_configs_ok(lead_seq, init_seq, lead_groups, + config, input_dict, no_max): + if lead_groups is None: + return False + + error_message = ('are both listed in the configuration. 
' + 'Only one may be used at a time.') + if lead_seq: + if init_seq: + config.logger.error(f'LEAD_SEQ and INIT_SEQ {error_message}') + return False + + if lead_groups: + config.logger.error(f'LEAD_SEQ and LEAD_SEQ_ {error_message}') + return False + + if init_seq and lead_groups: + config.logger.error(f'INIT_SEQ and LEAD_SEQ_ {error_message}') + return False + + if init_seq: + # if input dictionary not passed in, + # cannot compute lead sequence from it, so exit + if input_dict is None: + config.logger.error('Cannot run using INIT_SEQ for this wrapper') + return False + + # if looping by init, fail and exit + if 'valid' not in input_dict.keys(): + log_msg = ('INIT_SEQ specified while looping by init time.' + ' Use LEAD_SEQ or change to loop by valid time') + config.logger.error(log_msg) + return False + + # maximum lead must be specified to run with INIT_SEQ + if no_max: + config.logger.error('LEAD_SEQ_MAX must be set to use INIT_SEQ') + return False + + return True + +def _get_lead_min_max(config): + # remove any items that are outside of the range specified + # by LEAD_SEQ_MIN and LEAD_SEQ_MAX + # convert min and max to relativedelta objects, then use current time + # to compare them to each forecast lead + # this is an approximation because relative time offsets depend on + # each runtime + huge_max = '4000Y' + lead_min_str = config.getstr_nocheck('config', 'LEAD_SEQ_MIN', '0') + lead_max_str = config.getstr_nocheck('config', 'LEAD_SEQ_MAX', huge_max) + no_max = lead_max_str == huge_max + lead_min = get_relativedelta(lead_min_str, 'H') + lead_max = get_relativedelta(lead_max_str, 'H') + return lead_min, lead_max, no_max + +def _handle_lead_seq(config, lead_strings, lead_min=None, lead_max=None): + out_leads = [] + leads = [] + for lead in lead_strings: + relative_delta = get_relativedelta(lead, 'H') + if relative_delta is not None: + leads.append(relative_delta) + else: + config.logger.error(f'Invalid item {lead} in LEAD_SEQ. Exiting.') + return None + + if lead_min is None and lead_max is None: + return leads + + # add current time to leads to approximate month and year length + now_time = datetime.now() + lead_min_approx = now_time + lead_min + lead_max_approx = now_time + lead_max + for lead in leads: + lead_approx = now_time + lead + if lead_approx >= lead_min_approx and lead_approx <= lead_max_approx: + out_leads.append(lead) + + return out_leads + +def _handle_init_seq(init_seq, input_dict, lead_min, lead_max): + out_leads = [] + lead_min_hours = ti_get_hours_from_relativedelta(lead_min) + lead_max_hours = ti_get_hours_from_relativedelta(lead_max) + + valid_hr = int(input_dict['valid'].strftime('%H')) + for init in init_seq: + if valid_hr >= init: + current_lead = valid_hr - init + else: + current_lead = valid_hr + (24 - init) + + while current_lead <= lead_max_hours: + if current_lead >= lead_min_hours: + out_leads.append(get_relativedelta(current_lead, default_unit='H')) + current_lead += 24 + + out_leads = sorted(out_leads, key=lambda + rd: ti_get_seconds_from_relativedelta(rd, input_dict['valid'])) + return out_leads + +def _handle_lead_groups(lead_groups): + """! 
Read groups of forecast leads and create a list with all unique items + + @param lead_group dictionary where the values are lists of forecast + leads stored as relativedelta objects + @returns list of forecast leads stored as relativedelta objects + """ + out_leads = [] + for _, lead_seq in lead_groups.items(): + for lead in lead_seq: + if lead not in out_leads: + out_leads.append(lead) + + return out_leads + +def get_lead_sequence_groups(config): + # output will be a dictionary where the key will be the + # label specified and the value will be the list of forecast leads + lead_seq_dict = {} + # used in plotting + all_conf = config.keys('config') + indices = [] + regex = re.compile(r"LEAD_SEQ_(\d+)") + for conf in all_conf: + result = regex.match(conf) + if result is not None: + indices.append(result.group(1)) + + # loop over all possible variables and add them to list + for index in indices: + if config.has_option('config', f"LEAD_SEQ_{index}_LABEL"): + label = config.getstr('config', f"LEAD_SEQ_{index}_LABEL") + else: + log_msg = (f'Need to set LEAD_SEQ_{index}_LABEL to describe ' + f'LEAD_SEQ_{index}') + config.logger.error(log_msg) + return None + + # get forecast list for n + lead_string_list = getlist(config.getstr('config', f'LEAD_SEQ_{index}')) + lead_seq = _handle_lead_seq(config, + lead_string_list, + lead_min=None, + lead_max=None) + # add to output dictionary + lead_seq_dict[label] = lead_seq + + return lead_seq_dict diff --git a/metplus/util/time_util.py b/metplus/util/time_util.py index e1bd4b1f93..97e4881567 100755 --- a/metplus/util/time_util.py +++ b/metplus/util/time_util.py @@ -33,6 +33,18 @@ } +def shift_time_seconds(time_str, shift): + """ Adjust time by shift seconds. Format is %Y%m%d%H%M%S + Args: + @param time_str: Start time in %Y%m%d%H%M%S + @param shift: Amount to adjust time in seconds + Returns: + New time in format %Y%m%d%H%M%S + """ + return (datetime.datetime.strptime(time_str, "%Y%m%d%H%M%S") + + datetime.timedelta(seconds=shift)).strftime("%Y%m%d%H%M%S") + + def get_relativedelta(value, default_unit='S'): """!Converts time values ending in Y, m, d, H, M, or S to relativedelta object Args: @@ -483,3 +495,17 @@ def ti_calculate(input_dict_preserve): out_dict['lead_seconds'] = total_seconds return out_dict + + +def add_to_time_input(time_input, clock_time=None, instance=None, custom=None): + if clock_time: + clock_dt = datetime.strptime(clock_time, '%Y%m%d%H%M%S') + time_input['now'] = clock_dt + + # if instance is set, use that value, otherwise use empty string + time_input['instance'] = instance if instance else '' + + # if custom is specified, set it + # otherwise leave it unset so it can be set within the wrapper + if custom: + time_input['custom'] = custom diff --git a/metplus/wrappers/ascii2nc_wrapper.py b/metplus/wrappers/ascii2nc_wrapper.py index 7555c008be..02a06fd65e 100755 --- a/metplus/wrappers/ascii2nc_wrapper.py +++ b/metplus/wrappers/ascii2nc_wrapper.py @@ -12,10 +12,9 @@ import os -from ..util import met_util as util from ..util import time_util from . 
import CommandBuilder -from ..util import do_string_sub, skip_time +from ..util import do_string_sub, skip_time, get_lead_sequence '''!@namespace ASCII2NCWrapper @brief Wraps the ASCII2NC tool to reformat ascii format to NetCDF @@ -242,7 +241,7 @@ def run_at_time(self, input_dict): Args: @param input_dict dictionary containing timing information """ - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: self.clear() input_dict['lead'] = lead diff --git a/metplus/wrappers/command_builder.py b/metplus/wrappers/command_builder.py index 1a6ec50d73..43e424cea7 100755 --- a/metplus/wrappers/command_builder.py +++ b/metplus/wrappers/command_builder.py @@ -19,16 +19,15 @@ from .command_runner import CommandRunner from ..util.constants import PYTHON_EMBEDDING_TYPES -from ..util import getlist -from ..util import met_util as util +from ..util import getlist, preprocess_file, loop_over_times_and_call from ..util import do_string_sub, ti_calculate, get_seconds_from_string -from ..util import get_time_from_file +from ..util import get_time_from_file, shift_time_seconds from ..util import config_metplus from ..util import METConfig from ..util import MISSING_DATA_VALUE from ..util import get_custom_string_list from ..util import get_wrapped_met_config_file, add_met_config_item, format_met_config -from ..util import remove_quotes +from ..util import remove_quotes, split_level from ..util import get_field_info, format_field_info from ..util import get_wrapper_name, is_python_script from ..util.met_config import add_met_config_dict, handle_climo_dict @@ -533,7 +532,7 @@ def find_data(self, time_info, var_info=None, data_type='', mandatory=True, # separate character from beginning of numeric # level value if applicable - level = util.split_level(v_level)[1] + level = split_level(v_level)[1] # set level to 0 character if it is not a number if not level.isdigit(): @@ -660,10 +659,10 @@ def find_exact_file(self, level, data_type, time_info, mandatory=True, # check if file exists input_data_type = self.c_dict.get(data_type + 'INPUT_DATATYPE', '') - processed_path = util.preprocess_file(file_path, - input_data_type, - self.config, - allow_dir=allow_dir) + processed_path = preprocess_file(file_path, + input_data_type, + self.config, + allow_dir=allow_dir) # report error if file path could not be found if not processed_path: @@ -706,9 +705,9 @@ def find_file_in_window(self, level, data_type, time_info, mandatory=True, # get range of times that will be considered valid_range_lower = self.c_dict.get(data_type + 'FILE_WINDOW_BEGIN', 0) valid_range_upper = self.c_dict.get(data_type + 'FILE_WINDOW_END', 0) - lower_limit = int(datetime.strptime(util.shift_time_seconds(valid_time, valid_range_lower), + lower_limit = int(datetime.strptime(shift_time_seconds(valid_time, valid_range_lower), "%Y%m%d%H%M%S").strftime("%s")) - upper_limit = int(datetime.strptime(util.shift_time_seconds(valid_time, valid_range_upper), + upper_limit = int(datetime.strptime(shift_time_seconds(valid_time, valid_range_upper), "%Y%m%d%H%M%S").strftime("%s")) msg = f"Looking for {data_type}INPUT files under {data_dir} within range " +\ @@ -767,16 +766,16 @@ def find_file_in_window(self, level, data_type, time_info, mandatory=True, # check if file(s) needs to be preprocessed before returning the path # if one file was found and return_list if False, return single file if len(closest_files) == 1 and not return_list: - return util.preprocess_file(closest_files[0], - 
self.c_dict.get(data_type + 'INPUT_DATATYPE', ''), - self.config) + return preprocess_file(closest_files[0], + self.c_dict.get(data_type + 'INPUT_DATATYPE', ''), + self.config) # return list if multiple files are found out = [] for close_file in closest_files: - outfile = util.preprocess_file(close_file, - self.c_dict.get(data_type + 'INPUT_DATATYPE', ''), - self.config) + outfile = preprocess_file(close_file, + self.c_dict.get(data_type + 'INPUT_DATATYPE', ''), + self.config) out.append(outfile) return out @@ -1284,7 +1283,7 @@ def run_all_times(self, custom=None): @param custom (optional) custom loop string value """ - return util.loop_over_times_and_call(self.config, self, custom=custom) + return loop_over_times_and_call(self.config, self, custom=custom) @staticmethod def format_met_config_dict(c_dict, name, keys=None): diff --git a/metplus/wrappers/cyclone_plotter_wrapper.py b/metplus/wrappers/cyclone_plotter_wrapper.py index 787d896ab7..e6ceda8be1 100644 --- a/metplus/wrappers/cyclone_plotter_wrapper.py +++ b/metplus/wrappers/cyclone_plotter_wrapper.py @@ -37,10 +37,9 @@ WRAPPER_CANNOT_RUN = True EXCEPTION_ERR = err_msg -from ..util import met_util as util from ..util import do_string_sub from ..util import time_generator, add_to_time_input -from ..util import mkdir_p +from ..util import mkdir_p, get_files from . import CommandBuilder @@ -195,8 +194,7 @@ def retrieve_data(self): self.logger.debug("Get data from all files in the directory " + self.input_data) # Get the list of all files (full file path) in this directory - all_input_files = util.get_files(self.input_data, ".*.tcst", - self.logger) + all_input_files = get_files(self.input_data, ".*.tcst", self.logger) # read each file into pandas then concatenate them together df_list = [pd.read_csv(file, delim_whitespace=True) for file in all_input_files] diff --git a/metplus/wrappers/ensemble_stat_wrapper.py b/metplus/wrappers/ensemble_stat_wrapper.py index e31ff82679..aa392e9b58 100755 --- a/metplus/wrappers/ensemble_stat_wrapper.py +++ b/metplus/wrappers/ensemble_stat_wrapper.py @@ -13,10 +13,9 @@ import os import glob -from ..util import met_util as util +from ..util import sub_var_list +from ..util import do_string_sub, parse_var_list, PYTHON_EMBEDDING_TYPES from . import CompareGriddedWrapper -from ..util import do_string_sub -from ..util import parse_var_list """!@namespace EnsembleStatWrapper @brief Wraps the MET tool ensemble_stat to compare ensemble datasets @@ -136,8 +135,8 @@ def create_c_dict(self): # check if more than 1 obs datatype is set to python embedding, # only one can be used - if (c_dict['OBS_POINT_INPUT_DATATYPE'] in util.PYTHON_EMBEDDING_TYPES and - c_dict['OBS_GRID_INPUT_DATATYPE'] in util.PYTHON_EMBEDDING_TYPES): + if (c_dict['OBS_POINT_INPUT_DATATYPE'] in PYTHON_EMBEDDING_TYPES and + c_dict['OBS_GRID_INPUT_DATATYPE'] in PYTHON_EMBEDDING_TYPES): self.log_error("Both OBS_ENSEMBLE_STAT_INPUT_POINT_DATATYPE and " "OBS_ENSEMBLE_STAT_INPUT_GRID_DATATYPE" " are set to Python Embedding types. 
" @@ -145,9 +144,9 @@ def create_c_dict(self): # if either are set, set OBS_INPUT_DATATYPE to that value so # it can be found by the check_for_python_embedding function - elif c_dict['OBS_POINT_INPUT_DATATYPE'] in util.PYTHON_EMBEDDING_TYPES: + elif c_dict['OBS_POINT_INPUT_DATATYPE'] in PYTHON_EMBEDDING_TYPES: c_dict['OBS_INPUT_DATATYPE'] = c_dict['OBS_POINT_INPUT_DATATYPE'] - elif c_dict['OBS_GRID_INPUT_DATATYPE'] in util.PYTHON_EMBEDDING_TYPES: + elif c_dict['OBS_GRID_INPUT_DATATYPE'] in PYTHON_EMBEDDING_TYPES: c_dict['OBS_INPUT_DATATYPE'] = c_dict['OBS_GRID_INPUT_DATATYPE'] c_dict['N_MEMBERS'] = ( @@ -424,8 +423,7 @@ def run_at_time_all_fields(self, time_info): return # parse optional var list for FCST and/or OBS fields - var_list = util.sub_var_list(self.c_dict['VAR_LIST_TEMP'], - time_info) + var_list = sub_var_list(self.c_dict['VAR_LIST_TEMP'], time_info) # if empty var list for FCST/OBS, use None as first var, # else use first var in list diff --git a/metplus/wrappers/extract_tiles_wrapper.py b/metplus/wrappers/extract_tiles_wrapper.py index 66ea168b77..ed11b38356 100755 --- a/metplus/wrappers/extract_tiles_wrapper.py +++ b/metplus/wrappers/extract_tiles_wrapper.py @@ -13,8 +13,8 @@ from datetime import datetime import re -from ..util import met_util as util from ..util import do_string_sub, ti_calculate, skip_time +from ..util import get_lead_sequence, sub_var_list from ..util import parse_var_list, round_0p5, get_storms, prune_empty from .regrid_data_plane_wrapper import RegridDataPlaneWrapper from . import CommandBuilder @@ -206,7 +206,7 @@ def run_at_time(self, input_dict): """ # loop of forecast leads and process each - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: input_dict['lead'] = lead @@ -383,8 +383,7 @@ def get_object_indices(object_cats): def call_regrid_data_plane(self, time_info, track_data, input_type): # set var list from config using time info - var_list = util.sub_var_list(self.c_dict['VAR_LIST_TEMP'], - time_info) + var_list = sub_var_list(self.c_dict['VAR_LIST_TEMP'], time_info) for data_type in ['FCST', 'OBS']: grid = self.get_grid(data_type, track_data[data_type], diff --git a/metplus/wrappers/gempak_to_cf_wrapper.py b/metplus/wrappers/gempak_to_cf_wrapper.py index 22421ef9c5..53a5a5cb71 100755 --- a/metplus/wrappers/gempak_to_cf_wrapper.py +++ b/metplus/wrappers/gempak_to_cf_wrapper.py @@ -12,8 +12,7 @@ import os -from ..util import met_util as util -from ..util import do_string_sub, skip_time +from ..util import do_string_sub, skip_time, get_lead_sequence from ..util import time_util from . import CommandBuilder @@ -75,7 +74,7 @@ def run_at_time(self, input_dict): Args: @param input_dict dictionary containing timing information """ - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: self.clear() input_dict['lead'] = lead diff --git a/metplus/wrappers/grid_diag_wrapper.py b/metplus/wrappers/grid_diag_wrapper.py index d44026af25..eb1c5e98bc 100755 --- a/metplus/wrappers/grid_diag_wrapper.py +++ b/metplus/wrappers/grid_diag_wrapper.py @@ -12,11 +12,9 @@ import os -from ..util import met_util as util from ..util import time_util from . 
import RuntimeFreqWrapper -from ..util import do_string_sub -from ..util import parse_var_list +from ..util import do_string_sub, parse_var_list, sub_var_list '''!@namespace GridDiagWrapper @brief Wraps the Grid-Diag tool @@ -187,7 +185,7 @@ def set_data_field(self, time_info): @param time_info time dictionary to use for string substitution @returns True if field list could be built, False if not. """ - field_list = util.sub_var_list(self.c_dict['VAR_LIST_TEMP'], time_info) + field_list = sub_var_list(self.c_dict['VAR_LIST_TEMP'], time_info) if not field_list: self.log_error("Could not get field information from config.") return False diff --git a/metplus/wrappers/grid_stat_wrapper.py b/metplus/wrappers/grid_stat_wrapper.py index ad7650c0f9..379e6a6282 100755 --- a/metplus/wrappers/grid_stat_wrapper.py +++ b/metplus/wrappers/grid_stat_wrapper.py @@ -12,7 +12,6 @@ import os -from ..util import met_util as util from . import CompareGriddedWrapper # pylint:disable=pointless-string-statement diff --git a/metplus/wrappers/mode_wrapper.py b/metplus/wrappers/mode_wrapper.py index bec9f67cf2..1a539ea021 100755 --- a/metplus/wrappers/mode_wrapper.py +++ b/metplus/wrappers/mode_wrapper.py @@ -12,7 +12,6 @@ import os -from ..util import met_util as util from . import CompareGriddedWrapper from ..util import do_string_sub diff --git a/metplus/wrappers/mtd_wrapper.py b/metplus/wrappers/mtd_wrapper.py index 1d8d9c9327..217427badc 100755 --- a/metplus/wrappers/mtd_wrapper.py +++ b/metplus/wrappers/mtd_wrapper.py @@ -12,8 +12,8 @@ import os -from ..util import met_util as util -from ..util import time_util +from ..util import get_lead_sequence, sub_var_list +from ..util import ti_calculate from ..util import do_string_sub, skip_time from ..util import parse_var_list from . 
import CompareGriddedWrapper @@ -197,8 +197,7 @@ def run_at_time_loop_string(self, input_dict): Args: @param input_dict dictionary containing timing information """ - var_list = util.sub_var_list(self.c_dict['VAR_LIST_TEMP'], - input_dict) + var_list = sub_var_list(self.c_dict['VAR_LIST_TEMP'], input_dict) # if only processing a single data set (FCST or OBS) then only read # that var list and process @@ -219,7 +218,7 @@ def run_at_time_loop_string(self, input_dict): for var_info in var_list: if self.c_dict.get('EXPLICIT_FILE_LIST', False): - time_info = time_util.ti_calculate(input_dict) + time_info = ti_calculate(input_dict) model_list_path = do_string_sub(self.c_dict['FCST_FILE_LIST'], **time_info) self.logger.debug(f"Explicit FCST file: {model_list_path}") @@ -246,13 +245,13 @@ def run_at_time_loop_string(self, input_dict): obs_list = [] # find files for each forecast lead time - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) tasks = [] for lead in lead_seq: input_dict['lead'] = lead - time_info = time_util.ti_calculate(input_dict) + time_info = ti_calculate(input_dict) tasks.append(time_info) for current_task in tasks: @@ -282,7 +281,7 @@ def run_at_time_loop_string(self, input_dict): # write ascii file with list of files to process input_dict['lead'] = lead_seq[0] - time_info = time_util.ti_calculate(input_dict) + time_info = ti_calculate(input_dict) # if var name is a python embedding script, check type of python # input and name file list file accordingly @@ -313,7 +312,7 @@ def run_single_mode(self, input_dict, var_info): data_src = self.c_dict.get('SINGLE_DATA_SRC') if self.c_dict.get('EXPLICIT_FILE_LIST', False): - time_info = time_util.ti_calculate(input_dict) + time_info = ti_calculate(input_dict) single_list_path = do_string_sub( self.c_dict[f'{data_src}_FILE_LIST'], **time_info @@ -330,10 +329,10 @@ def run_single_mode(self, input_dict, var_info): else: find_method = self.find_model - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: input_dict['lead'] = lead - current_task = time_util.ti_calculate(input_dict) + current_task = ti_calculate(input_dict) single_file = find_method(current_task, var_info) if single_file is None: @@ -346,7 +345,7 @@ def run_single_mode(self, input_dict, var_info): # write ascii file with list of files to process input_dict['lead'] = lead_seq[0] - time_info = time_util.ti_calculate(input_dict) + time_info = ti_calculate(input_dict) file_ext = self.check_for_python_embedding(data_src, var_info) if not file_ext: return diff --git a/metplus/wrappers/pb2nc_wrapper.py b/metplus/wrappers/pb2nc_wrapper.py index f85ea70d76..fff7783f79 100755 --- a/metplus/wrappers/pb2nc_wrapper.py +++ b/metplus/wrappers/pb2nc_wrapper.py @@ -13,9 +13,8 @@ import os import re -from ..util import getlistint, skip_time -from ..util import met_util as util -from ..util import time_util +from ..util import getlistint, skip_time, get_lead_sequence +from ..util import ti_calculate from ..util import do_string_sub from . import CommandBuilder @@ -258,11 +257,11 @@ def set_valid_window_variables(self, time_info): def run_at_time(self, input_dict): """! 
Loop over each forecast lead and build pb2nc command """ # loop of forecast leads and process each - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: input_dict['lead'] = lead - lead_string = time_util.ti_calculate(input_dict)['lead_string'] + lead_string = ti_calculate(input_dict)['lead_string'] self.logger.info("Processing forecast lead {}".format(lead_string)) for custom_string in self.c_dict['CUSTOM_LOOP_LIST']: diff --git a/metplus/wrappers/pcp_combine_wrapper.py b/metplus/wrappers/pcp_combine_wrapper.py index f8b8a56c2d..f87b07fadf 100755 --- a/metplus/wrappers/pcp_combine_wrapper.py +++ b/metplus/wrappers/pcp_combine_wrapper.py @@ -7,12 +7,11 @@ import os from datetime import timedelta -from ..util import met_util as util -from ..util import do_string_sub, getlist +from ..util import do_string_sub, getlist, preprocess_file from ..util import get_seconds_from_string, ti_get_lead_string, ti_calculate from ..util import get_relativedelta, ti_get_seconds_from_relativedelta from ..util import time_string_to_met_time, seconds_to_met_time -from ..util import parse_var_list, template_to_regex +from ..util import parse_var_list, template_to_regex, split_level from . import ReformatGriddedWrapper '''!@namespace PCPCombineWrapper @@ -348,9 +347,9 @@ def setup_subtract_method(self, time_info, accum, data_src): # get first file filepath1 = do_string_sub(full_template, **time_info) - file1 = util.preprocess_file(filepath1, - self.c_dict[data_src+'_INPUT_DATATYPE'], - self.config) + file1 = preprocess_file(filepath1, + self.c_dict[data_src+'_INPUT_DATATYPE'], + self.config) if file1 is None: self.log_error(f'Could not find {data_src} file {filepath1} ' @@ -394,9 +393,9 @@ def setup_subtract_method(self, time_info, accum, data_src): time_info2['custom'] = time_info.get('custom', '') filepath2 = do_string_sub(full_template, **time_info2) - file2 = util.preprocess_file(filepath2, - self.c_dict[data_src+'_INPUT_DATATYPE'], - self.config) + file2 = preprocess_file(filepath2, + self.c_dict[data_src+'_INPUT_DATATYPE'], + self.config) if file2 is None: self.log_error(f'Could not find {data_src} file {filepath2} ' @@ -611,7 +610,7 @@ def _get_lookback_seconds(self, time_info, var_info, data_src): else: lookback = '0' - _, lookback = util.split_level(lookback) + _, lookback = split_level(lookback) lookback_seconds = get_seconds_from_string( lookback, @@ -791,7 +790,7 @@ def get_lowest_fcst_file(self, valid_time, data_src): search_file = do_string_sub(search_file, **time_info) self.logger.debug(f"Looking for {search_file}") - search_file = util.preprocess_file( + search_file = preprocess_file( search_file, self.c_dict[data_src+'_INPUT_DATATYPE'], self.config) @@ -847,9 +846,9 @@ def find_input_file(self, init_time, valid_time, search_accum, data_src): in_template) input_path = do_string_sub(input_path, **time_info) - return util.preprocess_file(input_path, - self.c_dict[f'{data_src}_INPUT_DATATYPE'], - self.config), lead + return preprocess_file(input_path, + self.c_dict[f'{data_src}_INPUT_DATATYPE'], + self.config), lead def get_template_accum(self, accum_dict, search_time, lead, data_src): # apply string substitution to accum amount diff --git a/metplus/wrappers/plot_data_plane_wrapper.py b/metplus/wrappers/plot_data_plane_wrapper.py index ed2e59eb03..4f874c42f1 100755 --- a/metplus/wrappers/plot_data_plane_wrapper.py +++ b/metplus/wrappers/plot_data_plane_wrapper.py @@ -12,10 +12,9 @@ import os -from ..util import 
met_util as util from ..util import time_util from . import CommandBuilder -from ..util import do_string_sub, remove_quotes, skip_time +from ..util import do_string_sub, remove_quotes, skip_time, get_lead_sequence '''!@namespace PlotDataPlaneWrapper @brief Wraps the PlotDataPlane tool to plot data @@ -115,7 +114,7 @@ def run_at_time(self, input_dict): Args: @param input_dict dictionary containing timing information """ - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: self.clear() input_dict['lead'] = lead diff --git a/metplus/wrappers/point_stat_wrapper.py b/metplus/wrappers/point_stat_wrapper.py index 9f5a1645c2..3115b0c80e 100755 --- a/metplus/wrappers/point_stat_wrapper.py +++ b/metplus/wrappers/point_stat_wrapper.py @@ -13,7 +13,6 @@ import os from ..util import getlistint -from ..util import met_util as util from ..util import time_util from ..util import do_string_sub from . import CompareGriddedWrapper diff --git a/metplus/wrappers/py_embed_ingest_wrapper.py b/metplus/wrappers/py_embed_ingest_wrapper.py index c59847deda..accd8ad1fa 100755 --- a/metplus/wrappers/py_embed_ingest_wrapper.py +++ b/metplus/wrappers/py_embed_ingest_wrapper.py @@ -13,11 +13,10 @@ import os import re -from ..util import met_util as util from ..util import time_util from . import CommandBuilder from . import RegridDataPlaneWrapper -from ..util import do_string_sub +from ..util import do_string_sub, get_lead_sequence VALID_PYTHON_EMBED_TYPES = ['NUMPY', 'XARRAY', 'PANDAS'] @@ -132,7 +131,7 @@ def run_at_time(self, input_dict): generally contains 'now' (current) time and 'init' or 'valid' time """ # get forecast leads to loop over - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) for lead in lead_seq: # set forecast lead time in hours diff --git a/metplus/wrappers/reformat_gridded_wrapper.py b/metplus/wrappers/reformat_gridded_wrapper.py index 182d342685..9acb458595 100755 --- a/metplus/wrappers/reformat_gridded_wrapper.py +++ b/metplus/wrappers/reformat_gridded_wrapper.py @@ -12,7 +12,7 @@ import os -from ..util import met_util as util +from ..util import get_lead_sequence, sub_var_list from ..util import time_util, skip_time from . import CommandBuilder @@ -52,7 +52,7 @@ def run_at_time(self, input_dict): """ app_name_caps = self.app_name.upper() class_name = self.__class__.__name__[0: -7] - lead_seq = util.get_lead_sequence(self.config, input_dict) + lead_seq = get_lead_sequence(self.config, input_dict) run_list = [] if self.config.getbool('config', 'FCST_'+app_name_caps+'_RUN', False): @@ -93,8 +93,8 @@ def run_at_time(self, input_dict): self.c_dict['CUSTOM_STRING'] = custom_string var_list_name = f'VAR_LIST_{to_run}' var_list = ( - util.sub_var_list(self.c_dict.get(var_list_name, ''), - time_info) + sub_var_list(self.c_dict.get(var_list_name, ''), + time_info) ) if not var_list: var_list = None diff --git a/metplus/wrappers/regrid_data_plane_wrapper.py b/metplus/wrappers/regrid_data_plane_wrapper.py index 1cf8de3142..c58ebfc7c2 100755 --- a/metplus/wrappers/regrid_data_plane_wrapper.py +++ b/metplus/wrappers/regrid_data_plane_wrapper.py @@ -12,12 +12,11 @@ import os -from ..util import met_util as util from ..util import time_util from ..util import do_string_sub from ..util import parse_var_list from ..util import get_process_list -from ..util import remove_quotes +from ..util import remove_quotes, split_level, format_level from . 
import ReformatGriddedWrapper # pylint:disable=pointless-string-statement @@ -173,7 +172,7 @@ def handle_output_file(self, time_info, field_info, data_type): @returns True if command should be run, False if it should not be run """ - _, level = util.split_level(field_info[f'{data_type.lower()}_level']) + _, level = split_level(field_info[f'{data_type.lower()}_level']) time_info['level'] = time_util.get_seconds_from_string(level, 'H') return self.find_and_check_output_file(time_info) @@ -255,7 +254,7 @@ def get_output_names(self, var_list, data_type): for field_info in var_list: input_name = field_info[f'{data_type.lower()}_name'] input_level = field_info[f'{data_type.lower()}_level'] - input_level = util.format_level(input_level) + input_level = format_level(input_level) output_name = f"{input_name}_{input_level}" output_names.append(output_name) diff --git a/metplus/wrappers/tc_gen_wrapper.py b/metplus/wrappers/tc_gen_wrapper.py index 7d66b626d9..bec1e1a567 100755 --- a/metplus/wrappers/tc_gen_wrapper.py +++ b/metplus/wrappers/tc_gen_wrapper.py @@ -14,9 +14,8 @@ import datetime import re -from ..util import met_util as util from ..util import time_util -from ..util import do_string_sub, skip_time +from ..util import do_string_sub, skip_time, get_lead_sequence from ..util import time_generator from . import CommandBuilder @@ -426,7 +425,7 @@ def find_input_files(self, time_info): ) # set METPLUS_LEAD_LIST to list of forecast leads used - lead_seq = util.get_lead_sequence(self.config, time_info) + lead_seq = get_lead_sequence(self.config, time_info) if lead_seq != [0]: lead_list = [] for lead in lead_seq: diff --git a/metplus/wrappers/tcrmw_wrapper.py b/metplus/wrappers/tcrmw_wrapper.py index a86da67083..12a4cad6a2 100755 --- a/metplus/wrappers/tcrmw_wrapper.py +++ b/metplus/wrappers/tcrmw_wrapper.py @@ -12,11 +12,10 @@ import os -from ..util import met_util as util from ..util import time_util from . import CommandBuilder -from ..util import do_string_sub, skip_time -from ..util import parse_var_list +from ..util import do_string_sub, skip_time, get_lead_sequence +from ..util import parse_var_list, sub_var_list '''!@namespace TCRMWWrapper @brief Wraps the TC-RMW tool @@ -258,8 +257,7 @@ def set_data_field(self, time_info): @param time_info time dictionary to use for string substitution @returns True if field list could be built, False if not. 
""" - field_list = util.sub_var_list(self.c_dict['VAR_LIST_TEMP'], - time_info) + field_list = sub_var_list(self.c_dict['VAR_LIST_TEMP'], time_info) if not field_list: self.log_error("Could not get field information from config.") return False @@ -293,7 +291,7 @@ def find_input_files(self, time_info): self.c_dict['DECK_FILE'] = deck_file - lead_seq = util.get_lead_sequence(self.config, time_info) + lead_seq = get_lead_sequence(self.config, time_info) # get input files if self.c_dict['INPUT_FILE_LIST']: diff --git a/metplus/wrappers/usage_wrapper.py b/metplus/wrappers/usage_wrapper.py index d3fb8cf852..77c26b5758 100644 --- a/metplus/wrappers/usage_wrapper.py +++ b/metplus/wrappers/usage_wrapper.py @@ -13,7 +13,7 @@ class UsageWrapper(CommandBuilder): def __init__(self, config, instance=None): self.app_name = 'Usage' super().__init__(config, instance=instance) - # get unique list of processes from met_util + # get unique list of processes self.available_processes = list(set(val for val in LOWER_TO_WRAPPER_NAME.values())) self.available_processes.sort() diff --git a/metplus/wrappers/user_script_wrapper.py b/metplus/wrappers/user_script_wrapper.py index 50384c0190..32e50ac385 100755 --- a/metplus/wrappers/user_script_wrapper.py +++ b/metplus/wrappers/user_script_wrapper.py @@ -13,7 +13,6 @@ import os from datetime import datetime -from ..util import met_util as util from ..util import time_util from . import RuntimeFreqWrapper from ..util import do_string_sub