From b24b9386fa21261457aaa45081af6c4176c12f28 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Tue, 30 Jul 2024 18:06:16 -0600
Subject: [PATCH] Update develop-ref after dtcenter/MET#2939 (#2941)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* #2673 Moved variable declaration after include
* #2673 Moved namespace down below include
* Feature #2395 wdir (#2820)
* Per #2395, add new columns to VL1L2, VAL1L2, and VCNT line types for wind direction statistics. Work still in progress.
* Per #2395, write the new VCNT columns to the output and document the additions to the VL1L2, VAL1L2, and VCNT columns.
* Per #2395, add the definition of new statistics to Appendix G.
* Per #2395, update file version history.
* Per #2395, tweak warning message about zero wind vectors and update grid-stat and point-stat to log calls to the do_vl1l2() function.
* Per #2395, refine the weights for wind direction stats, ignoring the undefined directions.
* Update src/tools/core/stat_analysis/aggr_stat_line.cc
* Update src/tools/core/stat_analysis/parse_stat_line.cc
* Update src/tools/core/stat_analysis/aggr_stat_line.cc
* Recent changes to branch protection rules for the develop branch have broken the logic of the update_truth.yml GHA workflow. Instead of submitting a PR to merge develop into develop-ref directly, use an intermediate update_truth_for_develop branch.
* Feature #2280 ens_prob (#2823)
* Per #2280, update to support probability threshold strings like ==8, where 8 is the number of ensemble members, to create probability bins centered on n/8 for n = 0 ... 8.
* Per #2280, update docs about probability threshold settings.
* Per #2280, use a loose tolerance when checking for consistent bin widths.
* Per #2280, add a new unit test for grid_stat to demonstrate processing the output from gen_ens_prod.
* Per #2280, when verifying NMEP probability forecasts, smooth the obs data first.
* Per #2280, only request STAT output for the PCT line type to match unit_grid_stat.xml and minimize the new output files.
* Per #2280, update config option docs.
* Per #2280, update config option docs.
* #2673 Change 0 to nullptr
* #2673 Change 0 to nullptr
* #2673 Change 0 to nullptr
* #2673 Change 0 to nullptr
* #2673 Change 0 to nullptr
* #2673 Removed the redundant parentheses with return
* #2673 Removed the redundant parentheses with return
* #2673 Removed the redundant parentheses with return
* #2673 Removed the redundant parentheses with return
* #2673 Removed the redundant parentheses with return
* #2673 Restored return statement
* #2673 Added std namespace
* #2673 Moved down 'using namespace' statement. Removed trailing spaces
* #2673 Moved down 'using namespace' statement.
* #2673 Moved down 'using namespace' statement.
* #2673 Moved down 'using namespace' statement.
* #2673 Moved down 'using namespace' statement.
* #2673 Added std namespace
* #2673 Added std namespace
* #2673 Added std namespace
* #2673 Changed literal 1 to boolean value, true
* Feature #2673 enum_to_string (#2835)
* Feature #2583 ecnt (#2825)
* Unrelated to #2583, fix typo in code comments.
* Per #2583, add hooks to write 3 new ECNT columns for observation error data.
* Per #2583, make error messages about mismatched array lengths more informative.
* Per #2583, switch to more concise variable naming conventions of ign_oerr_cnv, ign_oerr_cor, and dawid_seb.
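For context on the "==8" probability threshold handling described above for #2280, here is a minimal Python sketch of how such a threshold string could expand into N+1 probability bins centered on n/N. The function name and structure are illustrative assumptions, not the MET implementation.

    def ensemble_prob_bins(n_members):
        """Illustrative only: expand an '==N' probability threshold into
        N+1 bins centered on n/N for n = 0 ... N, clipped to [0, 1]."""
        half = 0.5 / n_members
        bins = []
        for n in range(n_members + 1):
            center = n / n_members
            bins.append((max(0.0, center - half), min(1.0, center + half)))
        return bins

    # Example: '==8' for an 8-member ensemble yields bins centered on 0/8 ... 8/8.
    print(ensemble_prob_bins(8))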
* Per #2583, fix typo to enable compilation
* Per #2583, define the 5 new ECNT column names.
* Per #2583, add 5 new columns to the ECNT table in the Ensemble-Stat chapter
* Per #2583, update stat_columns.cc to write these 5 new ECNT columns
* Per #2583, update ECNTInfo class to compute the 5 new ECNT statistics.
* Per #2583, update stat-analysis to parse the 5 new ECNT columns.
* Per #2583, update aggregate_stat logic for 5 new ECNT columns.
* Per #2583, update PairDataEnsemble logic for 5 new ECNT columns
* Per #2583, update vx_statistics library with obs_error handling logic for the 5 new ECNT columns
* Per #2583, changes to make it compile
* Per #2583, changes to make it compile
* Per #2583, switch to a consistent ECNT column naming convention with OERR at the end. Using IGN_CONV_OERR and IGN_CORR_OERR.
* Per #2583, define ObsErrorEntry::variance() with a call to the dist_var() utility function.
* Per #2583, update PairDataEnsemble::compute_pair_vals() to compute the 5 new stats with the correct inputs.
* Per #2583, add DEBUG(10) log messages about computing these new stats.
* Per #2583, update Stat-Analysis to compute these 5 new stats from the ORANK line type.
* Per #2583, whitespace and comments.
* Per #2583, update the User's Guide.
* Per #2583, remove the DS_ADD_OERR and DS_MULT_OERR ECNT columns and rename DS_OERR as DSS, since observation error is not actually involved in its computation.
* Per #2583, minor update to Appendix C
* Per #2583, rename ECNT line type statistic DSS to IDSS.
* Per #2583, fix a couple of typos
* Per #2583, more error checking.
* Per #2583, remove the ECNT IDSS column since it's just 2*pi*IGN, the existing ignorance score, and only provides meaningful information when combined with the other Dawid-Sebastiani statistics that have already been removed.
* Per #2583, add Eric's documentation of these new stats to Appendix C. Along the way, update the DOI links in the references based on this APA style guide: https://apastyle.apa.org/style-grammar-guidelines/references/dois-urls#:~:text=Include%20a%20DOI%20for%20all,URL%2C%20include%20only%20the%20DOI.
* Per #2583, fix new equations with embedded underscores for PDF by defining both html and pdf formatting options.
* Per #2583, update the ign_conv_oerr equation to include a 2*pi multiplier for consistency with the existing ignorance score. Also, fix the documented equations.
* Per #2583, remove log file that was inadvertently added on this branch.
* Per #2583, simplify ObsErrorEntry::variance() implementation. For the distribution type of NONE, return a variance of 0.0 rather than bad data, as discussed with @michelleharrold and @JeffBeck-NOAA on 3/8/2024.
---------
Co-authored-by: MET Tools Test Account
* Revert #2825 since more documentation and testing is needed (#2837) This reverts commit 108a8958b206d6712197823a083666ab039bf818.
* Feature #2583 ecnt fix IGN_OERR_CORR (#2838)
* Unrelated to #2583, fix typo in code comments.
* Per #2583, add hooks to write 3 new ECNT columns for observation error data.
* Per #2583, make error messages about mismatched array lengths more informative.
* Per #2583, switch to more concise variable naming conventions of ign_oerr_cnv, ign_oerr_cor, and dawid_seb.
* Per #2583, fix typo to enable compilation
* Per #2583, define the 5 new ECNT column names.
* Per #2583, add 5 new columns to the ECNT table in the Ensemble-Stat chapter
* Per #2583, update stat_columns.cc to write these 5 new ECNT columns
* Per #2583, update ECNTInfo class to compute the 5 new ECNT statistics.
* Per #2583, update stat-analysis to parse the 5 new ECNT columns.
* Per #2583, update aggregate_stat logic for 5 new ECNT columns.
* Per #2583, update PairDataEnsemble logic for 5 new ECNT columns
* Per #2583, update vx_statistics library with obs_error handling logic for the 5 new ECNT columns
* Per #2583, changes to make it compile
* Per #2583, changes to make it compile
* Per #2583, switch to a consistent ECNT column naming convention with OERR at the end. Using IGN_CONV_OERR and IGN_CORR_OERR.
* Per #2583, define ObsErrorEntry::variance() with a call to the dist_var() utility function.
* Per #2583, update PairDataEnsemble::compute_pair_vals() to compute the 5 new stats with the correct inputs.
* Per #2583, add DEBUG(10) log messages about computing these new stats.
* Per #2583, update Stat-Analysis to compute these 5 new stats from the ORANK line type.
* Per #2583, whitespace and comments.
* Per #2583, update the User's Guide.
* Per #2583, remove the DS_ADD_OERR and DS_MULT_OERR ECNT columns and rename DS_OERR as DSS, since observation error is not actually involved in its computation.
* Per #2583, minor update to Appendix C
* Per #2583, rename ECNT line type statistic DSS to IDSS.
* Per #2583, fix a couple of typos
* Per #2583, more error checking.
* Per #2583, remove the ECNT IDSS column since it's just 2*pi*IGN, the existing ignorance score, and only provides meaningful information when combined with the other Dawid-Sebastiani statistics that have already been removed.
* Per #2583, add Eric's documentation of these new stats to Appendix C. Along the way, update the DOI links in the references based on this APA style guide: https://apastyle.apa.org/style-grammar-guidelines/references/dois-urls#:~:text=Include%20a%20DOI%20for%20all,URL%2C%20include%20only%20the%20DOI.
* Per #2583, fix new equations with embedded underscores for PDF by defining both html and pdf formatting options.
* Per #2583, update the ign_conv_oerr equation to include a 2*pi multiplier for consistency with the existing ignorance score. Also, fix the documented equations.
* Per #2583, remove log file that was inadvertently added on this branch.
* Per #2583, simplify ObsErrorEntry::variance() implementation. For the distribution type of NONE, return a variance of 0.0 rather than bad data, as discussed with @michelleharrold and @JeffBeck-NOAA on 3/8/2024.
* Per #2583, updates to ensemble-stat.rst recommended by @michelleharrold and @JeffBeck-NOAA.
* Per #2583, implement changes to the IGN_CORR_OERR correction as directed by @ericgilleland.
---------
Co-authored-by: MET Tools Test Account
* Update the pull request template to include a question about expected impacts to existing METplus Use Cases.
* #2830 Changed enum Builtin to enum class
* #2830 Converted enum to enum class at config_constants.h
* Feature #2830 bootstrap enum (#2843)
* Bugfix #2833 develop azimuth (#2840)
* Per #2833, fix n-1 bug when defining the azimuth delta for range/azimuth grids.
* Per #2833, when defining TcrmwData::range_max_km, divide by n_range - 1 since the range values start at 0.
* Per #2833, remove max_range_km from the TC-RMW config file. Set the default rmw_scale to NA so that it's not used by default. And update the documentation. Still actually need to make the logic of the code work as it should.
* Per #2833, update tc_rmw to define the range as either a function of rmw or using explicit spacing in km.
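As background for the #2833 range/azimuth grid fix above (dividing by n_range - 1 for the range spacing because the range values start at 0, while the azimuth wraps the full circle), here is a small Python sketch. The variable names are assumptions for illustration, not the actual TC-RMW code.

    def grid_spacing(range_max_km, n_range, n_azimuth):
        """Sketch of range/azimuth spacing for a polar grid (illustrative only).
        Ranges run 0, delta_r, ..., range_max_km, so n_range points span only
        n_range - 1 intervals; azimuth covers the full 360 degrees."""
        delta_range_km = range_max_km / (n_range - 1)
        delta_azimuth_deg = 360.0 / n_azimuth
        return delta_range_km, delta_azimuth_deg

    # Example: 100 range points out to 990 km -> 10 km spacing, not 9.9 km.
    print(grid_spacing(990.0, 100, 180))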
* Per #2833, update the TCRMW Config files to remove the max_range_km entry, and update the unit test for one call to use RMW ranges and the other to use ranges defined in kilometers.
* Per #2833, just correct code comments.
* Per #2833, divide by n - 1 when computing the range delta, rather than n.
* Per #2833, correct the handling of the maximum range in the tc-rmw tool. For fixed delta km, need to define the max range when setting up the grid at the beginning.
---------
Co-authored-by: MET Tools Test Account
* #2830 Changed enum PadSize to enum class
* #2830 Removed redundant parentheses
* #2830 Removed commented-out code
* #2830 Use auto
* #2830 Changed enum to enum class for DistType, InterpMthd, GridTemplates, and NormalizeType
* #2830 Moved enum_class_as_integer from header file to cc files
* #2830 Added enum_as_int.hpp
* #2830 Added enum_as_int.hpp
* Deleted enum_class_as_integer and renamed it to enum_class_as_int
* Removed redundant parentheses
* #2830 Changed enum to enum class
* #2830 Changed enum_class_as_integer to enum_class_as_int
* Feature #2379 sonarqube gha (#2847)
* Per #2379, testing initial GHA SonarQube setup.
* Per #2379, switch to only analyzing the src directory.
* Per #2379, move more config logic from sonar-project.properties into the workflow. #ci-skip-all
* Per #2379, try removing + symbols
* Per #2379, move projectKey into xml workflow and remove sonar-project.properties.
* Per #2379, try following the instructions at https://github.com/sonarsource-cfamily-examples/linux-autotools-gh-actions-sq/blob/main/.github/workflows/build.yml ci-skip-all
* Per #2379, see details of progress described in this issue comment: https://github.com/dtcenter/MET/issues/2379#issuecomment-2000242425
* Unrelated to #2379, just removing spurious space that gets flagged as a diff when re-running enum_to_string on seneca.
* Per #2379, try running SonarQube through GitHub.
* Per #2379, remove empty env section and also disable the testing workflow temporarily during sonarqube development.
* Per #2379, fix docker image name.
* Per #2379, delete unneeded script.
* Per #2379, update GHA to scan Python code and push to the correct SonarQube projects.
* Per #2379, update GHA SonarQube project names
* Per #2379, update the build job name
* Per #2379, update the compile step name
* Per #2379, switch to consistent SONAR variable names.
* Per #2379, fix typo in sed expressions.
* Per #2379, just rename the log artifact
* Per #2379, use time_command wrapper instead of run_command.
* Per #2379, fix bad env var name
* Per #2379, switch from egrep to grep.
* Per #2379, just try cat-ting the logfile
* Per #2379, test whether cat-ting the log file actually works.
* Per #2379, revert back
* Per #2379, mention SonarQube in the PR template. Make workflow name more succinct.
* Per #2379, add SONAR_REFERENCE_BRANCH setting to define the sonar.newCode.referenceBranch property. The goal is to define the comparison reference branch for each SonarQube scan.
* Per #2379, have the sonarqube.yml job print the reference branch it's using
* Per #2379, intentionally introduce a new code smell to see if SonarQube correctly flags it as appearing in new code.
* Per #2379, try adding the SonarQube quality gate check.
* Per #2379, add logic for using the report-task.txt output files to check the quality gate status for both the python and cxx scans.
* Per #2379, must use unique GHA IDs
* Per #2379, working on syntax for quality gate checks
* Per #2379, try again.
* Per #2379, try again
* Per #2379, try again
* Per #2379, try again
* Per #2379, try again
* Per #2379, try again
* Per #2379, try yet again
* Per #2379
* Per #2379, add more debug
* Per #2379, remove -it option from docker run commands
* Per #2379, again
* Per #2379, now that the scan works as expected, remove the intentional SonarQube code smell as well as debug logging.
* Hotfix related to #2379. The sonar.newCode.referenceBranch and sonar.branch.name cannot be set to the same string! Only add the newCode definition when they differ.
* #2830 Changed enum STATJobType to enum class
* #2830 Changed STATLineType to enum class
* #2830 Changed Action to enum class
* #2830 Changed ModeDataType to enum class
* #2830 Changed StepCase to enum class
* #2830 Changed enum to enum class
* #2830 Changed GenesisPairCategory to enum class
* #2830 Removed redundant parentheses
* #2830 Reduced same if checking
* #2830 Cleanup
* #2830 Use empty() instead of length checking
* #2830 Adjusted indentations
* Feature #2379 develop sonarqube updates (#2850)
* Per #2379, move rgb2ctable.py into the python utility scripts directory for better organization and to enable convenient SonarQube scanning.
* Per #2379, remove point.py from the vx_python3_utils directory which clearly was inadvertently added during development 4 years ago. As far as I can tell it isn't being called by any other code and doesn't belong in the repository. Note that scripts/python/met/point.py has the same name but is entirely different.
* Per #2379, update the GHA SonarQube scan to do a single one with Python and C++ combined. The nightly build script is still doing 2 separate scans for now. If this all works well, they could also be combined into a single one.
* Per #2379, eliminate MET_CONFIG_OPTIONS from the SonarQube workflow since it doesn't need to be and probably shouldn't be configurable.
* Per #2379, trying to copy report-task.txt out of the image
* Per #2379, update build_met_sonarqube.sh to check the scan return status
* Per #2379, fix bash assignment syntax
* Per #2379, remove unused SCRIPT_DIR envvar
* Per #2379, switch to a single SonarQube scan for MET's nightly build as well
* Feature 2654 ascii2nc polar buoy support (#2846)
* Added iabp data type, and modified file_handler to filter based on time range, which was added as a command line option
* handle time using input year, hour, min, and doy
* cleanup and switch to position day of year for time computations
* Added an ascii2nc unit test for iabp data
* Added utility scripts to pull iabp data from the web and find files in a time range
* Modified iabp_handler to always output a placeholder 'location' observation with value 1
* added description of IABP data python utility scripts
* Fixed syntax error
* Fixed another syntax error.
* Slight reformat of documentation
* Per #2654, update the Makefiles in scripts/python/utility to include all the python scripts that should be installed.
* Per #2654, remove unused code from get_iabp_from_web.py that is getting flagged as a bug by SonarQube.
* Per #2654, fix typo in docs
---------
Co-authored-by: John Halley Gotway
Co-authored-by: MET Tools Test Account
* Feature #2786 rpss_from_prob (#2861)
* Per #2786, small change to an error message unrelated to this development.
* Per #2786, add RPSInfo::set_climo_prob() function to derive the RPS line type from climatology probability bins. And update Ensemble-Stat to call it.
* Per #2786, minor change to clarify error log message.
* Per #2786, for is_prob = TRUE input, the RPS line type is the only output option. Still need to update docs!
* Per #2786, add new call to Ensemble-Stat to test computing RPS from climo probabilities
* Per #2786, use name rps_climo_bin_prob to be very explicit.
* Per #2786, redefine logic of RPSInfo::set_climo_bin_prob() to match the CPC definition. Note that reliability, resolution, uncertainty, and RPSS based on the sample climatology are all set to bad data. Need to investigate whether they can be computed using these inputs.
* Per #2786, remove the requirement that any fcst.prob_cat_thresh thresholds must be defined. If they are defined, pass them through to the FCST_THRESH output column. If not, write NA. Add check to make sure the event occurs in exactly 1 category.
* Per #2786, don't enforce fcst.prob_cat_thresh == obs.prob_cat_thresh for probabilistic inputs. And add more is_prob checks so that only the RPS line type can be written when given probabilistic inputs.
* updated documentation
* Per #2786, call rescale_probability() function to convert from 0-100 probs to 0-1 probs.
---------
Co-authored-by: j-opatz
* Feature #2862 v12.0.0-beta4 (#2864)
* Feature #2379 develop single_sq_project (#2865)
* Hotfix to the documentation in the develop branch. Issue #2858 was closed as a duplicate of #2857. I had included it in the MET-12.0.0-beta4 release notes, but the work is not yet actually complete.
* Feature 2842 ugrid config (#2852)
* #2842 Removed UGrid related setting
* #2842 Corrected vertical level for data_plane_array
* #2842 Do not allow the time range
* #2842 The UGridConfig file can be passed as ugrid_dataset
* #2842 Changed -config option to -ugrid_config
* #2842 Deleted UGrid configurations
* #2842 Fix a compile error when UGrid is disabled
* #2842 Cleanup
* #2842 Added a unit test point_stat_ugrid_mpas_config
* #2842 Added a PointStatConfig without UGrid dataset.
* #2842 Corrected typo in the variable name
* Switched from time_centered to time_instant. I think time_centered is the center of the forecast lead window and time_instant is the time the forecast is valid (end of forecast window).
* #2842 Removed ugrid_max_distance_km and unused metadata names
* #2842 Restored time variable time_instant for LFric
* #2842 Adjust lon between -180 and 180
* #2842 Adjust lon between -180 and 180
* #2842 Adjust lon between -180 and 180
* #2842 Adjusted lon to between -180 and 180
* #2842 Changed variable names
* Per #2842, switch from degrees east to west right when the longitudes are read.
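For background on the #2786 work above, which derives the RPS line type from climatology probability bins, here is a minimal Python sketch of a common form of the ranked probability score for one forecast/observation pair. This is the textbook formula with a 1/(K-1) normalization, not necessarily the exact CPC-style computation added in RPSInfo::set_climo_bin_prob().

    def rps(fcst_probs, obs_category):
        """Ranked probability score for one pair (illustrative only).
        fcst_probs: forecast probability for each of K categories (sums to 1).
        obs_category: index of the observed category."""
        k = len(fcst_probs)
        cum_f = 0.0
        total = 0.0
        for i in range(k - 1):          # the final cumulative term is always 1 - 1 = 0
            cum_f += fcst_probs[i]
            cum_o = 1.0 if i >= obs_category else 0.0
            total += (cum_f - cum_o) ** 2
        return total / (k - 1)

    # Example: 3 climatology bins with probabilities 0.3/0.4/0.3, event observed in bin 0.
    print(rps([0.3, 0.4, 0.3], 0))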
* #2842, switch from degrees east to west right when the longitudes are read
* #2842 Cleanup debug messages
---------
Co-authored-by: Howard Soh
Co-authored-by: Daniel Adriaansen
Co-authored-by: John Halley Gotway
* Feature 2753 comp script config (#2868)
* set dynamic library file extension to .dylib if running on MacOS and .so otherwise
* Added disabling of jasper documentation for compilation on Hera
* Updated
* remove extra export of compiler env vars
* include full path to log file so it is easier to find the log file to examine when a command fails
* send cmake output to a log file
* remove redundant semi-colon
* use full path to log file so it is easier to examine on failure
* use run_cmd to catch if rm command fails
* Modifications for compilation on hera, gaea, and orion
* Updating
* fixed variable name
* clean up if/else statements
* set TIFF_LIBRARY_RELEASE argument to use full path to dynamic library file to prevent failure installing proj library
* set LDFLAGS so that LDFLAGS value set in the user's environment will also be used
* Updated based on gaea, orion, and hera installs
* Updated
* change extension of dynamic library files only if architecture is arm64 because older Macs still use .so
* added netcdf library to args to prevent error installing NetCDF-CXX when PROJ has been installed in the same run of the script -- PATH is set in the COMPILE_PROJ if block, which prevents this flag from being added automatically
* clean up how rpath and -L are added to LDFLAGS so that each entry is separate -- prevents errors installing on Mac arm64 because multiple rpath values aren't read using :. Also use MET_PROJLIB
* Updated
* removed -ltiff from MET libs
* only add path to rpath and -L arguments if they are not already included in LDFLAGS
* changed from using LIB_TIFF (full path to tiff lib file) to use TIFF_LIB_DIR (dir containing tiff lib file). Added TIFF_INCLUDE_DIR to proj compilation and -DJAS_ENABLE_DOC to jasper compilation taken from @jprestop branch
* update comments
* ensure all MET_* and MET_*LIB variables are added to the rpath for consistency
* remove unnecessary if block and only export LDFLAGS at the end of setting locally
* Updated
* Added section for adding /lib64 and rearranged placement of ADDTL_DIR
* Commenting out the running of the Jasper lib tests
* Updating and/or removing files
* Updating and/or removing files
* Latest updates which include the addition of the tiff library for proj
* Remove commented out line.
Co-authored-by: John Halley Gotway
* Make indentation consistent.
Co-authored-by: John Halley Gotway
* Make indentation consistent.
Co-authored-by: John Halley Gotway
* Make indentation consistent.
Co-authored-by: John Halley Gotway
* Per #2753, added -lm to configure_lib_args for NetCDF-CXX
* Per #2753 updating acorn files
* Per #2753, update wcoss2 files
* Per #2753, updating acorn file to include MET_PYTHON_EXE
* Per #2753, updated files for 12.0.0 for derecho
* Per #2753, updated derecho file adding MET_PYTHON_EXE and made corrections
* Updating config files
* Updating orion files
* Updates for gaea's files
* Updating gaea modulefile
* Removing modulefile for cheyenne
* Added MET_PYTHON_EXE
* Added MET_PYTHON_EXE to hera too
* Adding file for hercules
* Removing equals sign from setenv
* Adding file for hercules
* Updated script to add libjpeg installation for grib2c
* Per #2753, Adding file for casper
---------
Co-authored-by: George McCabe <23407799+georgemccabe@users.noreply.github.com>
Co-authored-by: John Halley Gotway
* Feature #2795 level_mismatch_warning (#2873)
* Per #2795, move the warning message about level mismatch from the config validation step to when the forecast files are being processed. Only check this when the number of forecast fields is greater than 1, but no longer limit the check to pressure levels only.
* Per #2795, add comments
* Whitespace
* Per #2795, port level mismatch fix over to Ensemble-Stat. Check it for each verification task, but only print it once for each task, rather than once for each task * ensemble member.
* Feature #2870 removing_MISSING_warning (#2872)
* Per #2870, define utility functions for parsing the file type from a file list and for logging missing files, checking for the MISSING keyword. Also, update Ensemble-Stat and Gen-Ens-Prod to call these functions.
* Per #2870, update the gen_ens_prod tests to demonstrate the use of the MISSING keyword for missing files. METplus uses this keyword for Ensemble-Stat and Gen-Ens-Prod.
* Feature 2842 ugrid config (#2875)
* #2842 Removed UGrid related setting
* #2842 Corrected vertical level for data_plane_array
* #2842 Do not allow the time range
* #2842 The UGridConfig file can be passed as ugrid_dataset
* #2842 Changed -config option to -ugrid_config
* #2842 Deleted UGrid configurations
* #2842 Fix a compile error when UGrid is disabled
* #2842 Cleanup
* #2842 Added a unit test point_stat_ugrid_mpas_config
* #2842 Added a PointStatConfig without UGrid dataset.
* #2842 Corrected typo in the variable name
* Switched from time_centered to time_instant. I think time_centered is the center of the forecast lead window and time_instant is the time the forecast is valid (end of forecast window).
* #2842 Removed ugrid_max_distance_km and unused metadata names
* #2842 Restored time variable time_instant for LFric
* #2842 Adjust lon between -180 and 180
* #2842 Adjust lon between -180 and 180
* #2842 Adjust lon between -180 and 180
* #2842 Adjusted lon to between -180 and 180
* #2842 Changed variable names
* Per #2842, switch from degrees east to west right when the longitudes are read.
* #2842, switch from degrees east to west right when the longitudes are read
* #2842 Cleanup debug messages
* #2842 Disabled output types except STAT for sl1l2
* #2842 Disabled output types except STAT for sl1l2 and MPR
* #2842 Reduced output files for UGrid
---------
Co-authored-by: Howard Soh
Co-authored-by: Daniel Adriaansen
Co-authored-by: John Halley Gotway
* Hotfix to develop branch to remove duplicate test named 'point_stat_ugrid_mpas_config'. That was causing unit_ugrid.xml to fail because it was still looking for .txt output files that are no longer being generated.
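Several of the UGRID commits above adjust longitudes into the -180 to 180 range when they are read. A minimal Python sketch of that kind of normalization, purely illustrative and not the MET code:

    def normalize_lon(lon_deg):
        """Wrap a longitude in degrees east (0..360 or beyond) into [-180, 180)."""
        return ((lon_deg + 180.0) % 360.0) - 180.0

    # Example: 270 degrees east becomes -90 degrees.
    print([normalize_lon(x) for x in (0.0, 90.0, 180.0, 270.0, 359.5)])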
* Feature 2748 document ugrid (#2869)
* Initial documentation of the UGRID capability.
* Fixes error in references, adds appendix to index, and adds sub-section for configuration entries and a table for metadata map items.
* Corrects LFRic, rewords section on UGRID conventions, updates description of using GridStat, and removes mention of nodes.
* Forgot one more mention of UGRID conventions.
* Incorporates more suggestions from @willmayfield.
* Switches to numerical table reference.
* Feature #2781 Convert MET NetCDF point obs to Pandas DataFrame (#2877)
* Per #2781, added function to convert MET NetCDF point observation data to pandas so it can be read and modified in a python embedding script. Added example python embedding script
* ignore python cache files
* fixed function call
* reduce cognitive complexity to satisfy SonarQube and add boolean return value to catch if function fails to read data
* clean up script and add comments
* replace call to object function that doesn't exist, handle exception when file passed to script cannot be read by the NetCDF library
* rename example script
* add new example script to makefiles
* fix logic to build pandas DataFrame to properly get header information from observation header IDs
* Per #2781, add unit test to demonstrate python embedding script that reads MET NetCDF point observation file and converts it to a pandas DataFrame
* Per #2781, added init function for nc_point_obs to take an input filename. Also raise TypeError exception from nc_point_obs.read_data() if input file cannot be read
* call parent class init function to properly initialize nc_point_obs
* Feature #2883 pcp_combine_missing (#2886)
* Per #2883, add -input_thresh command line option to configure allowable missing input files.
* Per #2883, update pcp_combine usage statement.
* Per #2883, update existing pcp_combine -derive unit test example by adding 3 new missing file inputs at the beginning, middle, and end of the file list. The first two are ignored since they include the MISSING keyword, but the third without that keyword triggers a warning message as desired. The -input_thresh option is added to only require 70% of the input files be present. This should produce the exact same output data.
* Per #2883, update the pcp_combine logic for the sum command to allow missing data files based on the -input_thresh threshold. Add a test in unit_pcp_combine.xml to demonstrate.
* Update docs/Users_Guide/reformat_grid.rst
Co-authored-by: George McCabe <23407799+georgemccabe@users.noreply.github.com>
* Per #2883, update pcp_combine usage statement in the code to be more similar to the User's Guide.
* Per #2883, switch to using derive_file_list_missing as the one containing missing files and recreate derive_file_list as it had existed for the test named pcp_combine_derive_VLD_THRESH.
* Per #2883, move initialization inside the same loop to resolve SonarQube issues.
* Per #2883, update sum_data_files() to switch from allocating memory to using STL vectors to satisfy SonarQube.
* Per #2883, changes to declarations of variables to satisfy SonarQube.
* Per #2883, address more SonarQube issues
* Per #2883, backing out an unintended change I made to tcrmw_grid.cc. This change belongs on a different branch.
* Per #2883, update logic of parse_file_list_type() function to handle python input strings. Also update pcp_combine to parse the type of input files being read and log non-missing python input files expected.
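For the #2883 pcp_combine changes above, the -input_thresh option allows some missing inputs as long as a sufficient fraction of the files are present, with entries carrying the MISSING keyword expected to be absent. A hedged Python sketch of that acceptance check; the names and structure are assumptions, not the actual pcp_combine logic:

    def enough_inputs(file_list, input_thresh):
        """Return True if the fraction of non-missing inputs meets input_thresh.
        Entries prefixed with the MISSING keyword are treated as expected to be
        absent and are not counted as valid (illustrative sketch only)."""
        n_total = len(file_list)
        n_valid = sum(1 for f in file_list if not f.startswith("MISSING"))
        return n_total > 0 and (n_valid / n_total) >= input_thresh

    files = ["MISSING/path/a.grb", "b.grb", "c.grb", "d.grb"]
    print(enough_inputs(files, 0.70))   # 3 of 4 present -> 0.75 >= 0.70 -> True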
---------
Co-authored-by: George McCabe <23407799+georgemccabe@users.noreply.github.com>
* Per #2888, update STATAnalysisJob::dump_stat_line() to support dumping stat line types VCNT, RPS, DMAP, and SSIDX. (#2891)
* Per #2659, making updates as proposed at the 20240516 MET Eng. Mtg. (#2895)
* Feature #2395 TOTAL_DIR (#2892)
* Per #2395, remove the n_dir_undef and n_dira_undef variables that are superseded by the new dcount and dacount VL1L2Info members to keep track of the number of valid wind direction vectors.
* Per #2395, add TOTAL_DIR columns to the VL1L2, VAL1L2, and VCNT line types and update the header column tables.
* Per #2395, update the User's Guide to list the new TOTAL_DIR columns in the VL1L2, VAL1L2, and VCNT line types.
* Per #2395, update stat_analysis to parse the new TOTAL_DIR columns and use the values to aggregate results when needed.
* Per #2395, for SonarQube change 'const char *' to 'const char * const' to satisfy the finding that 'Global variables should be const.' Should probably switch from 'const char *' to strings eventually. But for now, I'm just making up for some SonarQube technical debt.
* Per #2395, fix typo in placement of the DIR_ME column name in the met_header_columns_V12.0.txt file
* Per #2395, add 2 new Stat-Analysis jobs to demonstrate the processing of VL1L2 lines.
* Per #2395, update logic of is_vector_dir_stat(). Instead of just checking 'DIR_', check 'DIR_ME', 'DIR_MAE', and 'DIR_MSE' to avoid a false positive match for the 'DIR_ERR' column which is computed from the vector partial sums rather than the individual direction differences.
* Bugfix #2897 develop python_valid_time (#2899)
* Per #2897, fix typos in 2 log messages. Also fix the bug in storing the valid time strings. The time string in vld_array should exactly correspond to the numeric unixtime values in vld_num_array. Therefore they need to be updated inside the same if block. The bug is that we were storing only the unique unixtime values but storing ALL of the valid time strings, not just the unique ones.
* Per #2897, minor change to formatting of log message
* MET #2897, don’t waste time searching, just set the index to n - 1
* Per #2897, remove unused add_prec_point_obs(...) function
* Per #2897, update add_point_obs(...) logic for DEBUG(9) to print very detailed log messages about what obs are being rejected and which are being used for each verification task.
* Per #2897, refine the 'using' log message to make the wording consistent with the summary rejection reason counts log message
* Per #2897, update the User's Guide about -v 9 for Point-Stat
---------
Co-authored-by: j-opatz
Co-authored-by: MET Tools Test Account
* Bugfix 2867 point2grid qc flag (#2890)
* #2867 Added compute_adp_qc_flag and adjusted ADP QC flags
* #2867 Added point2grid_GOES_16_ADP_Enterprise_high. Changed AOD QC flags to 0,1,2 (was 1,2,3)
* #2867 Added get_nc_att_values_
* #2867 Added get_nc_att_values. Added the argument allow_conversion to get_nc_data(netCDF::NcVar *, uchar *data)
* #2867 Read the ADP QC flag values and meanings attributes from DQF variable and set the QC high, medium, low values to support Enterprise algorithm.
  Adjusted the ADP QC values by using AOD QC values
* #2867 Cleanup
* #2867 Corrected indent
* #2867 Changed log message
* #2867 Removed unused argument
* #2867 Removed unused argument
* Cleanup
* #2867 Fix SonarQube findings
* #2867 Deleted protected section with no members
* #2867 Cleanup
* #2867 Fixed SonarQube findings: unused local variables, declare as const, etc.
* #2867 Moved include directives to top
* #2867 Changed some arguments to references to avoid copying objects
* #2867 Do not filter by QC flag if -qc is not given
* #2867 Use enum class for GOES QC: HIGH, MEDIUM, and LOW
* #2867 Added log messages back which were deleted accidentally
* #2867 Changed static const to constexpr
* #2867 Initial release. Separated from nc_utils.h
* #2867 Added nc_utils_core.h
* #2867 Moved some blocks to nc_utils_core.h
* #2867 Include nc_utils_core.h
* #2867 Added const references
* Per #2867, fixing typo in comments.
---------
Co-authored-by: Howard Soh
Co-authored-by: j-opatz
* Hotfix to develop to fix the update_truth.yml workflow logic. This testing workflow run failed (https://github.com/dtcenter/MET/actions/runs/9209471209). Here we switch to a unique update truth branch name to avoid conflicts. * Avoid pushing directly to the develop or main_vX.Y branches since that is not necessary for the automation logic in MET.
* #2904 Changed R path to R-4.4.0 (#2905)
Co-authored-by: Howard Soh
* Feature #2912 pb2nc error (#2914)
* Feature 2717 convert unit.pl to unit.py (#2871)
* created unit.py module in new internal/test_unit/python directory
* added xml parsing to unit.py
* added repl_env function
* added reading of the remaining xml tags in build_tests function
* progress on main function (putting together test commands)
* a few more lines in the main function
* minor updates
* fixed how the test command was being run
* added if name/main and command line parsing
* fixed handling of no 'env' in cmd_only mode
* handle params from xml that have \ after filename without space in between
* added logging
* added some more pieces to unit
* more updates to unit.py, including running checks on output files
* bug fixes, improved handling of output file names, improved handling of env vars, improved logging output
* fixed how shell commands are run, and other minor fixes
* added last bits from the perl script, fixed some bugs
* created unit.py module in new internal/test_unit/python directory
* added xml parsing to unit.py
* added repl_env function
* added reading of the remaining xml tags in build_tests function
* progress on main function (putting together test commands)
* a few more lines in the main function
* minor updates
* update scripts to call python unit test script instead of the old perl script
* fix shebang line to allow script to be run without python3 before it
* add missing test_dir and exit_on_fail tags that are found in the rest of the unit test xml files
* fix call to logger.warning
* change tags named 'exists' to 'exist' to match the rest of the xml files
* added logger to function
* removed tab at end of line that was causing output file path to be excluded from the command
* fix broken checks for output files
* incorporated George's recommended changes
* changed default to overwrite logs; allow for more than one xml file to be passed in command
---------
Co-authored-by: Natalie babij
Co-authored-by: Natalie babij
Co-authored-by: Natalie babij
Co-authored-by: Natalie Babij
Co-authored-by: John Halley Gotway
Co-authored-by: George McCabe <23407799+georgemccabe@users.noreply.github.com>
Co-authored-by: j-opatz
* Bugfix 2867 point2grid qc unittest (#2913)
* #2867 Added compute_adp_qc_flag and adjusted ADP QC flags
* #2867 Added point2grid_GOES_16_ADP_Enterprise_high. Changed AOD QC flags to 0,1,2 (was 1,2,3)
* #2867 Added get_nc_att_values_
* #2867 Added get_nc_att_values. Added the argument allow_conversion to get_nc_data(netCDF::NcVar *, uchar *data)
* #2867 Read the ADP QC flag values and meanings attributes from DQF variable and set the QC high, medium, low values to support Enterprise algorithm. Adjusted the ADP QC values by using AOD QC values
* #2867 Cleanup
* #2867 Corrected indent
* #2867 Changed log message
* #2867 Removed unused argument
* #2867 Removed unused argument
* Cleanup
* #2867 Fix SonarQube findings
* #2867 Deleted protected section with no members
* #2867 Cleanup
* #2867 Fixed SonarQube findings: unused local variables, declare as const, etc.
* #2867 Moved include directives to top
* #2867 Changed some arguments to references to avoid copying objects
* #2867 Do not filter by QC flag if -qc is not given
* #2867 Use enum class for GOES QC: HIGH, MEDIUM, and LOW
* #2867 Added log messages back which were deleted accidentally
* #2867 Changed static const to constexpr
* #2867 Initial release. Separated from nc_utils.h
* #2867 Added nc_utils_core.h
* #2867 Moved some blocks to nc_utils_core.h
* #2867 Include nc_utils_core.h
* #2867 Added const references
* #2867 Some 'static const' were changed to constexpr
* #2867 Changed -qc options (1,2,3 to 0,1 - high & medium) for AOD
* #2867 Merged develop branch
* #2867 Corrected the unit test name
---------
Co-authored-by: Howard Soh
* Feature #2911 tc_stat_set_hdr (#2916)
* Per #2911, no real changes for Stat-Analysis. Just changing order of variables for consistency.
* Per #2911, add StatHdrColumns::apply_set_hdr_opts(...) function to be used by TC-Stat.
* Per #2911, move ByColumn to the TCStatJob base class and add HdrName and HdrValue to support the -set_hdr job command.
* Per #2911, update GSI tools to call the newly added StatHdrColumns::apply_set_hdr_opts(...) function.
* Per #2911, update logic of Stat-Analysis for consistency to make use of common apply_set_hdr_opts() function.
* Per #2911, add DataLine::set_item() function to support -set_hdr options.
* Per #2911, just update contents of error message
* Per #2911, add TCStatLine member functions for has() and get_offset().
* Per #2911, update tc_stat to support applying -set_hdr to TC-Stat filter jobs.
* Per #2911, revise TC-Stat config files to exercise the -set_hdr job command option
* Per #2911, update TC-Stat documentation to mention the -set_hdr job command option
* Per #2911, add note
* Per #2911, as recommended by SonarQube, make some of these member functions const.
* Bugfix #2856 develop ens_climo (#2918)
* Per #2856, port over fixes from main_v11.1 to develop.
* Per #2856, correct conditionals in set_job_controls.sh and tweak existing Ensemble-Stat configuration file to exercise the logic that's being impacted here.
* Bugfix #2841 develop tang_rad_winds (#2921)
* Per #2841, port over fixes from bugfix_2841_main_v11.1_tang_rad_winds for the develop branch
* Per #2841, clarify in the docs that azimuths are defined in degrees counter-clockwise from due east.
* Per #2841, just updating with output from enum_to_string.
* Per #2841, tweak the documentation.
* Per #2841, correct the location of using namespace lines.
* Per #2841, update compute_tc_diag.py to no longer skip writing the radial and tangential wind diagnostics.
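The #2841 work above concerns radial and tangential wind diagnostics, with azimuth defined in degrees counter-clockwise from due east. As background, here is a small Python sketch of the standard decomposition of an east/north (u, v) wind into radial and tangential components about a storm center; this is the textbook formula, not the TC-Diag or TC-RMW code.

    import math

    def radial_tangential(u, v, azimuth_deg):
        """Decompose an east/north wind (u, v) at a point whose azimuth from the
        storm center is measured in degrees counter-clockwise from due east.
        Positive radial wind points away from the center; positive tangential
        wind is counter-clockwise. Illustrative sketch only."""
        az = math.radians(azimuth_deg)
        v_radial = u * math.cos(az) + v * math.sin(az)
        v_tangential = -u * math.sin(az) + v * math.cos(az)
        return v_radial, v_tangential

    # Example: a westerly wind (u=10, v=0) due north of the center (azimuth 90)
    # is purely tangential and clockwise, so negative under this convention.
    print(radial_tangential(10.0, 0.0, 90.0))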
* Per #2841, update compute_tc_diag.py to no longer skip writing radial and tangential wind diagnostics.
* Revert "Per #2841, update compute_tc_diag.py to no longer skip writing radial and tangential wind diagnostics." This reverts commit f097345bedcfcca663e8fb4322eed5b5e00e19fd.
* Revert "Per #2841, update compute_tc_diag.py to no longer skip writing the radial and tangential wind diagnostics." This reverts commit c0402151b038c59efab99c060cc5c390edf002f6.
* Per #2841, update comp_dir.sh logic to include .dat in the files that are diffed
* Replace tab with spaces
* Per #2841, correct the units for the azimuth netcdf output variable
* Per #2841, reverse the x dimension of the rotated latlon grid to effectively switch from counterclockwise rotation to clockwise.
---------
Co-authored-by: MET Tools Test Account
* Feature #2601 seeps climo config (#2927)
* #2601 Added seeps_grid_climo_name and seeps_point_climo_name
* #2601 Added seeps_grid_climo_name
* #2601 Removed SEEPS settings
* #2601 Initial release
* #2601 Changed to set the SEEPS climo by using the configuration
* #2601 Removed SEEPS settings at PointStatConfig_APCP and use PointStatConfig_SEEPS for SEEPS testing
* #2601 Updated description for seeps_grid_climo_name
* #2601 Added an argument for the SEEPS climo file
* #2601 Added conf_key_seeps_grid_climo_name and conf_key_seeps_point_climo_name
* #2601 Support the climo filename from the configuration
* #2601 Corrected key for climo name
* Removing duplicate word
---------
Co-authored-by: Howard Soh
Co-authored-by: Julie Prestopnik
* Feature 2673 sonarqube beta5 redundant parentheses (#2930)
* #2673 Removed redundant_parentheses
* #2673 Removed redundant_parentheses
* #2673 Removed redundant parentheses
* #2673 Removed redundant parentheses
---------
Co-authored-by: Howard Soh
* Fix release checksum action (#2929)
* Feature 2857 tripolar coordinates (#2928)
* #2857 Added MetNcCFDataFile::build_grid_from_lat_lon_vars
* #2857 Added NcCfFile::build_grid_from_lat_lon_vars
* #2857 Check the coordinates attribute to find latitude, longitude, and time variables
* #2857 Get the lat/lon variables from the coordinates attribute if it exists
* #2857 Added two constants
* #2857 Deleted debug messages
* #2857 Added lat_vname and lon_vname for var_name_map
* #2857 Added two unit tests: point2grid_sea_ice_tripolar and point2grid_sea_ice_tripolar_config
* #2857 Initial release
* #2857 Correct dictionary to get file_type
* #2857 Do not check the time variable for point2grid
* #2857 Added point2grid_tripolar_rtofs
---------
Co-authored-by: Howard Soh
* Feature 2932 v12.0.0-beta5 (#2933)
* Per #2932, updating version and release notes
* Per #2932, updating date on release notes
* Per #2932, fixed formatting and links
* Update release-notes.rst
* Update release-notes.rst Removing inline backticks since they do not format the way I expected, especially when put inside bolded release notes.
---------
Co-authored-by: John Halley Gotway
* Feature fix release notes (#2934)
* Fixing up release notes
* Update release-notes.rst
---------
Co-authored-by: John Halley Gotway
* Per dtcenter/METplus#2643 discussion, add more detail about the budget interpolation method.
* Feature #2924 fcst climo, PR 1 of 2 (#2939)
* Per #2924, update the MPR and ORANK output line types to just write duplicate existing climo values, update the header tables and MPR/ORANK documentation tables.
* Per #2924, update get_n_orank_columns() logic
* Per #2924, update the Stat-Analysis parsing logic to parse the new MPR and ORANK climatology columns.
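For the #2857 tripolar-coordinate commits above, the key idea is locating the latitude and longitude variables through each data variable's CF "coordinates" attribute rather than assuming fixed names. A hedged Python/netCDF4 sketch of that lookup; the file and variable names in the example are hypothetical, and this is not the MetNcCFDataFile code.

    import netCDF4  # assumes the netCDF4-python package is available

    def find_lat_lon_vars(path, var_name):
        """Return (lat_var, lon_var) names referenced by a variable's CF
        'coordinates' attribute, identified by their units (sketch only)."""
        with netCDF4.Dataset(path) as nc:
            coord_names = getattr(nc.variables[var_name], "coordinates", "").split()
            lat_name = lon_name = None
            for name in coord_names:
                units = getattr(nc.variables[name], "units", "")
                if units.startswith("degrees_north"):
                    lat_name = name
                elif units.startswith("degrees_east"):
                    lon_name = name
            return lat_name, lon_name

    # Example (hypothetical file and variable names):
    # print(find_lat_lon_vars("rtofs_tripolar.nc", "sea_ice_fraction"))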
* Per #2924, making some changes to the vx_statistics library to store climo data... but more work to come. Committing this first set of changes that are incomplete but do compile.
* Per #2924, this big set of changes does compile but make test produces a segfault for ensemble-stat
* Per #2924, fix return value for is_keeper_obs()
* Per #2924, move fcst_info/obs_info into the VxPairBase pointer.
* Per #2924, update Ensemble-Stat to set the VxPairBase::fcst_info pointer
* Per #2924, update handling of fcst_info and obs_info pointers in Ensemble-Stat
* Per #2924, update the GSI tools to handle the new fcst climo columns.
* Per #2924, add backward compatibility logic so that when old climo column names are requested, the new ones are used.
* Per #2924, print a DEBUG(2) log message if old column names are used.
* Per #2924, switch the unit tests to reference the updated MPR column names rather than the old ones.
* Per #2924, work in progress. Not fully compiling yet
* Per #2924, another round of changes. Removing MPR:FCST_CLIMO_CDF output column. This compiles but not sure if it actually runs yet
* Per #2924, work in progress
* Per #2924, work in progress. Almost compiling again.
* Per #2924, get it compiling
* Per #2924, add back in support for SCP and CDP which are interpreted as SOCP and OCDP, respectively
* Per #2924, update docs about SCP and CDP threshold types
* Per #2924, minor whitespace changes
* Per #2924, fix an uninitialized pointer bug by defining/calling SeepsClimoGrid::init_from_scratch() member function. The constructor had been calling clear() to delete pointers that weren't properly initialized to nullptr. Also, simplify some map processing logic.
* Per #2924, rename SeepsAggScore from seeps to seeps_agg for clarity and to avoid conflicts in member function implementations.
* Per #2924, fix seeps compilation error in Point-Stat
* Per #2924, fix bug in the boolean logic for handling the do_climo_cdp NetCDF output option.
* Per #2924, add missing exit statement.
* Per #2924, tweak threshold.h
* Per #2924, define one perc_thresh_info entry for each enumerated PercThreshType value
* Per #2924, simplify the logic for handling percentile threshold types and print a log message once when the old versions are still used.
* Per #2924, update the string comparison return value logic
* Per #2924, fix the perc thresh string parsing logic by calling ConcatString::startswith()
* Per #2924, switch all instances of CDP to OCDP. Gen-Ens-Prod was writing NetCDF files with OCDP in the output variable names, but Grid-Stat was requesting that the wrong variable name be read. So the unit tests failed.
* Per #2924, add more doc details
* Per #2924, update default config file to indicate when climo_mean and climo_stdev can be set separately in the fcst and obs dictionaries.
* Per #2924, update the MET tools to parse climo_mean and climo_stdev separately from the fcst and obs dictionaries.
* Per #2924, backing out new/modified columns to minimize reg test diffs
* Per #2924, one more section to be commented out later.
* Per #2924, replace several calls to strncmp() with ConcatString::startswith() to simplify the code
* Per #2924, strip out some more references to OBS_CLIMO_... in the unit tests.
* Per #2924, delete accidental file
* Per #2924 fix broken XML comments
* Per #2924, fix comments
* Per #2924, address SonarQube findings
* Per #2924, tweak a Point-Stat and Grid-Stat unit test config file to make the output more comparable to develop.
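The #2924 commits above add backward compatibility so the deprecated SCP and CDP percentile threshold types are interpreted as SOCP and OCDP, with a log message when the old names are used. A tiny Python sketch of that kind of aliasing; the names and log text are illustrative, not the MET parser.

    # Map deprecated percentile threshold type names to their replacements,
    # as described for #2924 (sketch only).
    DEPRECATED_PERC_THRESH = {"SCP": "SOCP", "CDP": "OCDP"}

    def resolve_perc_thresh_type(type_name):
        """Return the current threshold type name, logging when an old name is used."""
        if type_name in DEPRECATED_PERC_THRESH:
            new_name = DEPRECATED_PERC_THRESH[type_name]
            print(f"DEBUG: threshold type '{type_name}' is deprecated; using '{new_name}'")
            return new_name
        return type_name

    print(resolve_perc_thresh_type("CDP"))   # -> OCDP
    print(resolve_perc_thresh_type("SOCP"))  # unchanged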
* Per #2924, fix bug in the logic of PairDataPoint and PairDataEnsemble, when looping over the 3-dim array do not return when checking the climo and fcst values. Instead we need to continue to the next loop iteration. * Per #2924, address more SonarQube code smells to reduce the overall number in MET for this PR. * Per #2924, correct the logic for parsing climo data from MPR lines. * Per #2924, cleanup grid_stat.cc source code by making calls to DataPlane::is_empty() and Grid::nxy(). * Per #2924, remove unneeded ==0 * Hotfix to the develop branch for a copy/paste bug introduced by PR #2939 --------- Co-authored-by: Howard Soh Co-authored-by: John Halley Gotway Co-authored-by: Howard Soh Co-authored-by: MET Tools Test Account Co-authored-by: davidalbo Co-authored-by: j-opatz Co-authored-by: Daniel Adriaansen Co-authored-by: Julie Prestopnik Co-authored-by: George McCabe <23407799+georgemccabe@users.noreply.github.com> Co-authored-by: natalieb-noaa <146213121+natalieb-noaa@users.noreply.github.com> Co-authored-by: Natalie babij Co-authored-by: Natalie babij Co-authored-by: Natalie babij Co-authored-by: Natalie Babij Co-authored-by: metplus-bot <97135045+metplus-bot@users.noreply.github.com> --- data/config/EnsembleStatConfig_default | 8 +- data/config/GenEnsProdConfig_default | 5 +- data/config/GridStatConfig_default | 8 +- data/config/PointStatConfig_default | 8 +- data/config/SeriesAnalysisConfig_default | 11 +- docs/Users_Guide/config_options.rst | 162 +- docs/Users_Guide/ensemble-stat.rst | 28 +- docs/Users_Guide/gen-ens-prod.rst | 6 +- docs/Users_Guide/grid-stat.rst | 2 +- docs/Users_Guide/point-stat.rst | 18 +- docs/Users_Guide/release-notes.rst | 34 + docs/conf.py | 4 +- internal/test_unit/config/GenEnsProdConfig | 6 +- .../test_unit/config/GridStatConfig_climo_WMO | 18 +- .../config/GridStatConfig_gen_ens_prod | 8 +- .../config/GridStatConfig_mpr_thresh | 14 + .../config/PointStatConfig_climo_WMO | 18 +- .../config/PointStatConfig_mpr_thresh | 12 + .../config/SeriesAnalysisConfig_climo | 4 +- internal/test_unit/hdr/met_12_0.hdr | 2 +- .../test_unit/xml/unit_climatology_1.5deg.xml | 10 +- src/basic/vx_config/config.tab.cc | 22 +- src/basic/vx_config/config.tab.yy | 22 +- src/basic/vx_config/config_constants.h | 14 +- src/basic/vx_config/my_config_scanner.cc | 51 +- src/basic/vx_config/threshold.cc | 364 ++-- src/basic/vx_config/threshold.h | 235 +- src/basic/vx_log/logger.cc | 5 +- src/basic/vx_util/CircularTemplate.cc | 38 +- src/basic/vx_util/GridPoint.cc | 1 - src/basic/vx_util/data_plane.cc | 2 +- src/basic/vx_util/data_plane_util.cc | 74 +- src/basic/vx_util/data_plane_util.h | 4 +- src/basic/vx_util/interp_util.cc | 12 +- src/basic/vx_util/interp_util.h | 28 +- src/basic/vx_util/memory.cc | 4 +- src/basic/vx_util/polyline.cc | 4 +- src/basic/vx_util/stat_column_defs.h | 54 +- src/basic/vx_util/thresh_array.cc | 89 +- src/basic/vx_util/thresh_array.h | 14 +- src/libcode/vx_analysis_util/stat_job.cc | 21 +- src/libcode/vx_analysis_util/stat_job.h | 1 + src/libcode/vx_analysis_util/stat_line.cc | 50 + src/libcode/vx_data2d/var_info.h | 2 +- .../vx_data2d_nc_wrf/var_info_nc_wrf.cc | 2 +- .../vx_data2d_python/grid_from_python_dict.cc | 2 +- src/libcode/vx_grid/goes_grid.cc | 4 +- src/libcode/vx_grid/laea_grid.cc | 2 +- src/libcode/vx_seeps/seeps.cc | 52 +- src/libcode/vx_seeps/seeps.h | 2 + src/libcode/vx_shapedata/mode_conf_info.cc | 29 +- src/libcode/vx_shapedata/node.cc | 6 +- src/libcode/vx_stat_out/stat_columns.cc | 60 +- src/libcode/vx_statistics/compute_stats.cc | 166 +- 
src/libcode/vx_statistics/ens_stats.cc | 18 +- src/libcode/vx_statistics/met_stats.cc | 86 +- src/libcode/vx_statistics/met_stats.h | 6 +- src/libcode/vx_statistics/pair_base.cc | 1912 ++++++++++++++--- src/libcode/vx_statistics/pair_base.h | 293 ++- .../vx_statistics/pair_data_ensemble.cc | 1067 +++------ .../vx_statistics/pair_data_ensemble.h | 112 +- src/libcode/vx_statistics/pair_data_point.cc | 1663 +++----------- src/libcode/vx_statistics/pair_data_point.h | 177 +- .../vx_summary/summary_calc_percentile.cc | 8 +- src/libcode/vx_tc_util/atcf_prob_line.cc | 2 +- src/tools/core/ensemble_stat/ensemble_stat.cc | 291 +-- .../ensemble_stat/ensemble_stat_conf_info.cc | 43 +- src/tools/core/grid_stat/grid_stat.cc | 537 +++-- .../core/grid_stat/grid_stat_conf_info.cc | 22 +- src/tools/core/point_stat/point_stat.cc | 435 ++-- src/tools/core/point_stat/point_stat.h | 5 +- .../core/point_stat/point_stat_conf_info.cc | 28 +- .../core/series_analysis/series_analysis.cc | 81 +- .../core/stat_analysis/aggr_stat_line.cc | 170 +- .../core/stat_analysis/parse_stat_line.cc | 54 +- .../core/stat_analysis/parse_stat_line.h | 8 +- .../core/stat_analysis/stat_analysis_job.cc | 3 +- .../wavelet_stat/wavelet_stat_conf_info.cc | 4 +- src/tools/dev_utils/met_nc_file.cc | 2 +- src/tools/other/ascii2nc/airnow_locations.cc | 18 +- .../other/ascii2nc/ascii2nc_conf_info.cc | 2 +- src/tools/other/ascii2nc/little_r_handler.cc | 20 +- src/tools/other/ascii2nc/met_handler.cc | 26 +- src/tools/other/ascii2nc/ndbc_locations.cc | 14 +- src/tools/other/gen_ens_prod/gen_ens_prod.cc | 25 +- src/tools/other/gen_vx_mask/gen_vx_mask.cc | 7 +- src/tools/other/grid_diag/grid_diag.cc | 24 +- src/tools/other/gsi_tools/gsid2mpr.cc | 26 +- src/tools/other/gsi_tools/gsidens2orank.cc | 33 +- .../other/madis2nc/madis2nc_conf_info.cc | 2 +- .../modis_regrid/data_plane_to_netcdf.cc | 30 +- src/tools/tc_utils/tc_diag/python_tc_diag.cc | 4 +- 92 files changed, 4758 insertions(+), 4322 deletions(-) diff --git a/data/config/EnsembleStatConfig_default b/data/config/EnsembleStatConfig_default index 57f1984807..c496dd3507 100644 --- a/data/config/EnsembleStatConfig_default +++ b/data/config/EnsembleStatConfig_default @@ -130,7 +130,8 @@ ens_phist_bin_size = 0.05; //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -149,12 +150,17 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } // +// Climatology distribution settings // May be set separately in each "obs.field" entry // climo_cdf = { diff --git a/data/config/GenEnsProdConfig_default b/data/config/GenEnsProdConfig_default index e2cb994bba..c650ec8b24 100644 --- a/data/config/GenEnsProdConfig_default +++ b/data/config/GenEnsProdConfig_default @@ -95,7 +95,7 @@ nmep_smooth = { //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data // climo_mean = { @@ -114,6 +114,9 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// climo_stdev = climo_mean; climo_stdev = { file_name = []; diff --git a/data/config/GridStatConfig_default b/data/config/GridStatConfig_default index 4bec5ecf4b..4a0c7dce44 100644 --- a/data/config/GridStatConfig_default +++ b/data/config/GridStatConfig_default 
@@ -75,7 +75,8 @@ obs = fcst; //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -94,12 +95,17 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } // +// Climatology distribution settings // May be set separately in each "obs.field" entry // climo_cdf = { diff --git a/data/config/PointStatConfig_default b/data/config/PointStatConfig_default index d4b277c6a3..266c251e59 100644 --- a/data/config/PointStatConfig_default +++ b/data/config/PointStatConfig_default @@ -118,7 +118,8 @@ message_type_group_map = [ //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -137,12 +138,17 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } // +// Climatology distribution settings // May be set separately in each "obs.field" entry // climo_cdf = { diff --git a/data/config/SeriesAnalysisConfig_default b/data/config/SeriesAnalysisConfig_default index 2b0cdfa53e..8f76139011 100644 --- a/data/config/SeriesAnalysisConfig_default +++ b/data/config/SeriesAnalysisConfig_default @@ -61,7 +61,8 @@ obs = fcst; //////////////////////////////////////////////////////////////////////////////// // -// Climatology data +// Climatology mean data +// May be set separately in the "fcst" and "obs" dictionaries // climo_mean = { @@ -80,11 +81,19 @@ climo_mean = { hour_interval = 6; } +// +// Climatology standard deviation data +// May be set separately in the "fcst" and "obs" dictionaries +// climo_stdev = climo_mean; climo_stdev = { file_name = []; } +// +// Climatology distribution settings +// May be set separately in each "obs.field" entry +// climo_cdf = { cdf_bins = 1; center_bins = FALSE; diff --git a/docs/Users_Guide/config_options.rst b/docs/Users_Guide/config_options.rst index 5a92571886..de538bd7cb 100644 --- a/docs/Users_Guide/config_options.rst +++ b/docs/Users_Guide/config_options.rst @@ -60,8 +60,8 @@ The configuration file language supports the following data types: * Percentile Thresholds: * A threshold type (<, <=, ==, !=, >=, or >), followed by a percentile - type description (SFP, SOP, SCP, USP, CDP, or FBIAS), followed by a - numeric value, typically between 0 and 100. + type description (SFP, SOP, SFCP, SOCP, USP, FCDP, OCDP, or FBIAS), + followed by a numeric value, typically between 0 and 100. * Note that the two letter threshold type abbreviations (lt, le, eq, ne, ge, gt) are not supported for percentile thresholds. @@ -93,8 +93,14 @@ The configuration file language supports the following data types: * "SOP" for a percentile of the sample observation values. e.g. ">SOP75" means greater than the 75-th observation percentile. - * "SCP" for a percentile of the sample climatology values. - e.g. ">SCP90" means greater than the 90-th climatology percentile. + * "SFCP" for a percentile of the sample forecast climatology values. + e.g. ">SFCP90" means greater than the 90-th forecast climatology + percentile. + + * "SOCP" for a percentile of the sample observation climatology values. 
+ e.g. ">SOCP90" means greater than the 90-th observation climatology + percentile. For backward compatibility, the "SCP" threshold type + is processed the same as "SOCP". * "USP" for a user-specified percentile threshold. e.g. " 0.0. - * "CDP" for climatological distribution percentile thresholds. - These thresholds require that the climatological mean and standard - deviation be defined using the climo_mean and climo_stdev config file - options, respectively. The categorical (cat_thresh), conditional - (cnt_thresh), or wind speed (wind_thresh) thresholds are defined - relative to the climatological distribution at each point. Therefore, - the actual numeric threshold applied can change for each point. - e.g. ">CDP50" means greater than the 50-th percentile of the + * "FCDP" for forecast climatological distribution percentile thresholds. + These thresholds require that the forecast climatological mean and + standard deviation be defined using the "climo_mean" and "climo_stdev" + config file options, respectively. The categorical (cat_thresh), + conditional (cnt_thresh), or wind speed (wind_thresh) thresholds can + be defined relative to the climatological distribution at each point. + Therefore, the actual numeric threshold applied can change for each point. + e.g. ">FCDP50" means greater than the 50-th percentile of the climatological distribution for each point. - - * When percentile thresholds of type SFP, SOP, SCP, or CDP are requested - for continuous filtering thresholds (cnt_thresh), wind speed thresholds - (wind_thresh), or observation filtering thresholds (obs_thresh in - ensemble_stat), the following special logic is applied. Percentile + + * "OCDP" for observation climatological distribution percentile thresholds. + The "OCDP" threshold logic matches the "FCDP" logic described above. + However these thresholds are defined using the observation climatological + mean and standard deviation rather than the forecast climatological data. + For backward compatibility, the "CDP" threshold type is processed the + same as "OCDP". + + * When percentile thresholds of type SFP, SOP, SFCP, SOCP, FCDP, or OCDP are + requested for continuous filtering thresholds (cnt_thresh), wind speed + thresholds (wind_thresh), or observation filtering thresholds (obs_thresh + in ensemble_stat), the following special logic is applied. Percentile thresholds of type equality are automatically converted to percentile bins which span the values from 0 to 100. - For example, "==CDP25" is automatically expanded to 4 percentile bins: - >=CDP0&&=CDP25&&=CDP50&&=CDP75&&<=CDP100 + For example, "==OCDP25" is automatically expanded to 4 percentile bins: + >=OCDP0&&=OCDP25&&=OCDP50&&=OCDP75&&<=OCDP100 - * When sample percentile thresholds of type SFP, SOP, SCP, or FBIAS are - requested, MET recomputes the actual percentile that the threshold + * When sample percentile thresholds of type SFP, SOP, SFCP, SOCP, or FBIAS + are requested, MET recomputes the actual percentile that the threshold represents. If the requested percentile and actual percentile differ by more than 5%, a warning message is printed. This may occur when the sample size is small or the data values are not truly continuous. - * When percentile thresholds of type SFP, SOP, SCP, or USP are used, the - actual threshold value is appended to the FCST_THRESH and OBS_THRESH + * When percentile thresholds of type SFP, SOP, SFCP, SOCP, or USP are used, + the actual threshold value is appended to the FCST_THRESH and OBS_THRESH output columns. 
For example, if the 90-th percentile of the current set of forecast values is 3.5, then the requested threshold "<=SFP90" is written to the output as "<=SFP90(3.5)". - + * When parsing FCST_THRESH and OBS_THRESH columns, the Stat-Analysis tool ignores the actual percentile values listed in parentheses. - + +.. note:: + + Prior to MET version 12.0.0, forecast climatological inputs were not + supported. The observation climatological inputs were used to process + threshold types named "SCP" and "CDP". + + For backward compatibility, the "SCP" threshold type is processed the same + as "SOCP" and "CDP" the same as "OCDP". + + Users are encouraged to replace the deprecated "SCP" and "CDP" threshold + types with the updated "SOCP" and "OCDP" types, respectively. + * Piecewise-Linear Function (currently used only by MODE): * A list of (x, y) points enclosed in parenthesis (). @@ -765,7 +790,18 @@ using the following entries: * NEAREST for the nearest grid point (width = 1) * BUDGET for the mass-conserving budget interpolation - + + * The budget interpolation method is often used for precipitation + in order to roughly conserve global averages. However, it is + computationally intensive and relatively slow. To compute the + interpolated value for each point of the target grid, a higher + resolution 5x5 mesh with 0.2 grid box spacing is centered on + the point and bilinear interpolation is performed for each + of those 25 lat/lon locations. The budget interpolation value + is computed as the average of those 25 bilinear interpolation + values, assuming enough valid data is present to meet the + "vld_thresh" threshold. + * FORCE to compare gridded data directly with no interpolation as long as the grid x and y dimensions match. @@ -1437,8 +1473,11 @@ climo_mean ---------- The "climo_mean" dictionary specifies climatology mean data to be read by the -Grid-Stat, Point-Stat, Ensemble-Stat, and Series-Analysis tools. It consists -of several entires defining the climatology file names and fields to be used. +Grid-Stat, Point-Stat, Ensemble-Stat, and Series-Analysis tools. It can be +set inside the "fcst" and "obs" dictionaries to specify separate forecast and +observation climatology data, or once at the top-level configuration file +context to use the same data for both. It consists of several entries defining +the climatology file names and fields to be used. * The "file_names" entry specifies one or more file names containing the gridded climatology data to be used. @@ -1495,19 +1534,22 @@ climo_stdev The "climo_stdev" dictionary specifies climatology standard deviation data to be read by the Grid-Stat, Point-Stat, Ensemble-Stat, and Series-Analysis -tools. The "climo_mean" and "climo_stdev" data define the climatological -distribution for each grid point, assuming normality. These climatological -distributions are used in two ways: +tools. It can be set inside the "fcst" and "obs" dictionaries to specify +separate forecast and observation climatology data, or once at the top-level +configuration file context to use the same data for both. The "climo_mean" and +"climo_stdev" data define the climatological distribution for each grid point, +assuming normality. These climatological distributions are used in two ways: (1) - To define climatological distribution percentile (CDP) thresholds which - can be used as categorical (cat_thresh), continuous (cnt_thresh), or wind - speed (wind_thresh) thresholds. 
+ To define climatological distribution percentile thresholds (FCDP and + OCDP) which can be used as categorical (cat_thresh), continuous (cnt_thresh), + or wind speed (wind_thresh) thresholds. (2) To subset matched pairs into climatological bins based on where the - observation value falls within the climatological distribution. See the - "climo_cdf" dictionary. + observation value falls within the observation climatological distribution. + See the "climo_cdf" dictionary. Note that only the observation climatology + data is used for this purpose, not the forecast climatology data. This dictionary is identical to the "climo_mean" dictionary described above but points to files containing climatological standard deviation values @@ -1524,11 +1566,12 @@ over the "climo_mean" setting and then updating the "file_name" entry. climo_cdf --------- -The "climo_cdf" dictionary specifies how the the climatological mean -("climo_mean") and standard deviation ("climo_stdev") data are used to +The "climo_cdf" dictionary specifies how the observation climatological +mean ("climo_mean") and standard deviation ("climo_stdev") data are used to evaluate model performance relative to where the observation value falls -within the climatological distribution. This dictionary consists of the -following entries: +within the observation climatological distribution. It can be set inside the +"obs" dictionary or at the top-level configuration file context. This +dictionary consists of the following entries: (1) The "cdf_bins" entry defines the climatological bins either as an integer @@ -1542,11 +1585,11 @@ following entries: (4) The "direct_prob" entry may be set to TRUE or FALSE. -MET uses the climatological mean and standard deviation to construct a normal -PDF at each observation location. The total area under the PDF is 1, and the -climatological CDF value is computed as the area of the PDF to the left of -the observation value. Since the CDF is a value between 0 and 1, the CDF -bins must span that same range. +MET uses the observation climatological mean and standard deviation to +construct a normal PDF at each observation location. The total area under the +PDF is 1, and the climatological CDF value is computed as the area of the PDF +to the left of the observation value. Since the CDF is a value between 0 and 1, +the CDF bins must span that same range. When "cdf_bins" is set to an array of floats, they explicitly define the climatological bins. The array must begin with 0.0 and end with 1.0. @@ -1590,20 +1633,21 @@ all pairs into a single climatological bin. climate_data ------------ - -When specifying climatology data for probability forecasts, either supply a -probabilistic "climo_mean" field or non-probabilistic "climo_mean" and -"climo_stdev" fields from which a normal approximation of the climatological -probabilities should be derived. -When "climo_mean" is set to a probability field with a range of [0, 1] and -"climo_stdev" is unset, the MET tools use the "climo_mean" probability values -directly to compute Brier Skill Score (BSS). +When specifying observation climatology data to evaluate probability +forecasts, either supply a probabilistic observation "climo_mean" field or +non-probabilistic "climo_mean" and "climo_stdev" fields from which a normal +approximation of the observation climatological probabilities should be +derived. 
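As an illustration of that normal approximation (a sketch only, not the MET implementation; the function names and numeric values here are hypothetical), the observation climatological probability of exceeding an event threshold can be computed from the climatological mean and standard deviation as the upper tail of a normal distribution. This derived probability then serves as the climatological reference when computing the Brier Skill Score (BSS) described next.

.. code-block:: cpp

   // Sketch: climatological probability that the observed value exceeds
   // "thresh", assuming a normal distribution N(climo_mean, climo_stdev).
   #include <cmath>

   // Normal CDF at x for mean mu and standard deviation sigma
   double normal_cdf(double x, double mu, double sigma) {
      return 0.5 * std::erfc(-(x - mu) / (sigma * std::sqrt(2.0)));
   }

   // Probability of the event "value > thresh" under the climatology
   double climo_exceedance_prob(double climo_mean, double climo_stdev, double thresh) {
      return 1.0 - normal_cdf(thresh, climo_mean, climo_stdev);
   }

   // Example: climo mean 280 K, stdev 5 K, and threshold ">285" yield ~0.159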
+ +When the observation "climo_mean" is set to a probability field with a range +of [0, 1] and "climo_stdev" is unset, the MET tools use the "climo_mean" +probability values directly to compute Brier Skill Score (BSS). -When "climo_mean" and "climo_stdev" are both set to non-probability fields, -the MET tools use the mean, standard deviation, and observation event -threshold to derive a normal approximation of the climatological -probabilities. +When the observation "climo_mean" and "climo_stdev" are both set to +non-probability fields, the MET tools use the mean, standard deviation, and +observation event threshold to derive a normal approximation of the +observation climatological probabilities. The "direct_prob" option controls the derivation logic. When "direct_prob" is true, the climatological probability is computed directly from the @@ -1686,7 +1730,7 @@ Point-Stat and Ensemble-Stat, the reference time is the forecast valid time. mask --- - + The "mask" entry is a dictionary that specifies the verification masking regions to be used when computing statistics. Each mask defines a geographic extent, and any matched pairs falling inside that area will be @@ -3748,7 +3792,7 @@ obs_prepbufr_map Default mapping for PREPBUFR. Replace input BUFR variable names with GRIB abbreviations in the output. This default map is appended to obs_bufr_map. This should not typically be overridden. This default mapping provides -backward-compatibility for earlier versions of MET which wrote GRIB +backward compatibility for earlier versions of MET which wrote GRIB abbreviations to the output. .. code-block:: none diff --git a/docs/Users_Guide/ensemble-stat.rst b/docs/Users_Guide/ensemble-stat.rst index dd221dd1d2..dc0fe8c248 100644 --- a/docs/Users_Guide/ensemble-stat.rst +++ b/docs/Users_Guide/ensemble-stat.rst @@ -856,30 +856,36 @@ The format of the STAT and ASCII output of the Ensemble-Stat tool are described * - 37 - ENS_i - Value of the ith ensemble member (repeated) - * - Last-7 + * - Last-9 - OBS_QC - Quality control string for the observation - * - Last-6 + * - Last-8 - ENS_MEAN - The unperturbed ensemble mean value - * - Last-5 - - CLIMO_MEAN - - Climatological mean value (named CLIMO prior to met-10.0.0) - * - Last-4 + * - Last-7 + - OBS_CLIMO_MEAN + - Observation climatological mean value (named CLIMO_MEAN prior to met-12.0.0) + * - Last-6 - SPREAD - The spread (standard deviation) of the unperturbed ensemble member values - * - Last-3 + * - Last-5 - ENS_MEAN _OERR - The PERTURBED ensemble mean (e.g. with Observation Error). - * - Last-2 + * - Last-4 - SPREAD_OERR - The spread (standard deviation) of the PERTURBED ensemble member values (e.g. with Observation Error). - * - Last-1 + * - Last-3 - SPREAD_PLUS_OERR - The square root of the sum of the unperturbed ensemble variance and the observation error variance. + * - Last-2 + - OBS_CLIMO_STDEV + - Observation climatological standard deviation value (named CLIMO_STDEV prior to met-12.0.0) + * - Last-1 + - FCST_CLIMO_MEAN + - Forecast climatological mean value * - Last - - CLIMO_STDEV - - Climatological standard deviation value + - FCST_CLIMO_STDEV + - Forecast climatological standard deviation value .. 
role:: raw-html(raw) :format: html diff --git a/docs/Users_Guide/gen-ens-prod.rst b/docs/Users_Guide/gen-ens-prod.rst index 57da0849cb..7f6d68c6c7 100644 --- a/docs/Users_Guide/gen-ens-prod.rst +++ b/docs/Users_Guide/gen-ens-prod.rst @@ -30,7 +30,9 @@ The Gen-Ens-Prod tool writes the gridded relative frequencies, NEP, and NMEP fie Climatology Data ---------------- -The ensemble relative frequencies derived by Gen-Ens-Prod are computed by applying threshold(s) to the input ensemble member data. Those thresholds can be simple and remain constant over the entire domain (e.g. >0) or can be defined relative to the climatological distribution at each grid point (e.g. >CDP90, for exceeding the 90-th percentile of climatology). When using climatological distribution percentile (CDP) thresholds, the climatological mean and standard deviation must be provided in the configuration file. +The ensemble relative frequencies derived by Gen-Ens-Prod are computed by applying threshold(s) to the input ensemble member data. Those thresholds can be simple and remain constant over the entire domain (e.g. >0) or can be defined relative to the climatological distribution at each grid point (e.g. >OCDP90, for exceeding the 90-th percentile of the observation climatology data provided). + +To use climatological distribution percentile thresholds, users must specify the climatological mean ("climo_mean") and standard deviation ("climo_stdev") entries in the configuration file. With forecast climatology inputs, use forecast climatological distribution percentile thresholds (e.g. >FCDP90). With observation climatology inputs, use observation climatological distribution percentile thresholds (e.g. >OCDP90). Note, however, that Gen-Ens-Prod cannot determine the source of the input climatology data, so the "FCDP" and "OCDP" threshold types work interchangeably. Practical Information ===================== @@ -295,7 +297,7 @@ The **ensemble_flag** specifies which derived ensemble fields should be calculat 13. Climatology mean (**climo_mean**) and standard deviation (**climo_stdev**) data regridded to the model domain -14. Climatological Distribution Percentile field for each CDP threshold specified +14. Climatological Distribution Percentile field for each FCDP or OCDP threshold specified gen_ens_prod Output ------------------- diff --git a/docs/Users_Guide/grid-stat.rst b/docs/Users_Guide/grid-stat.rst index 051178cfb1..cac539e497 100644 --- a/docs/Users_Guide/grid-stat.rst +++ b/docs/Users_Guide/grid-stat.rst @@ -451,7 +451,7 @@ _____________________ The **nc_pairs_flag** entry may either be set to a boolean value or a dictionary specifying which fields should be written. Setting it to TRUE indicates the output NetCDF matched pairs file should be created with all available output fields, while setting all to FALSE disables its creation. This is done regardless of if **output_flag** dictionary indicates any statistics should be computed. The **latlon, raw**, and **diff** entries control the creation of output variables for the latitude and longitude, the forecast and observed fields after they have been modified by any user-defined regridding, censoring, and conversion, and the forecast minus observation difference fields, respectively. The **climo, weight**, and **nbrhd** entries control the creation of output variables for the climatological mean and standard deviation fields, the grid area weights applied, and the fractional coverage fields computed for neighborhood verification methods. 
Setting these entries to TRUE indicates that they should be written, while setting them to FALSE disables their creation. -Setting the **climo_cdp** entry to TRUE enables the creation of an output variable for each climatological distribution percentile (CDP) threshold requested in the configuration file. Note that enabling **nbrhd** output may lead to very large output files. The **gradient** entry controls the creation of output variables for the FCST and OBS gradients in the grid-x and grid-y directions. The **distance_map** entry controls the creation of output variables for the FCST and OBS distance maps for each categorical threshold. The **apply_mask** entry controls whether to create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to TRUE will create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to FALSE will create the FCST, OBS, and DIFF output variables for only the FULL verification domain. +Setting the **climo_cdp** entry to TRUE enables the creation of an output variable for each climatological distribution percentile (FCDP or OCDP) threshold requested in the configuration file. Note that enabling **nbrhd** output may lead to very large output files. The **gradient** entry controls the creation of output variables for the FCST and OBS gradients in the grid-x and grid-y directions. The **distance_map** entry controls the creation of output variables for the FCST and OBS distance maps for each categorical threshold. The **apply_mask** entry controls whether to create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to TRUE will create the FCST, OBS, and DIFF output variables for all defined masking regions. Setting this to FALSE will create the FCST, OBS, and DIFF output variables for only the FULL verification domain. ______________________ diff --git a/docs/Users_Guide/point-stat.rst b/docs/Users_Guide/point-stat.rst index 0f42d54129..6c9849511e 100644 --- a/docs/Users_Guide/point-stat.rst +++ b/docs/Users_Guide/point-stat.rst @@ -1499,14 +1499,20 @@ The first set of header columns are common to all of the output files generated - OBS_QC - Quality control flag for observation * - 35 - - CLIMO_MEAN - - Climatological mean value + - OBS_CLIMO_MEAN + - Observation climatological mean value (named CLIMO_MEAN prior to met-12.0.0) * - 36 - - CLIMO_STDEV - - Climatological standard deviation value + - OBS_CLIMO_STDEV + - Observation climatological standard deviation value (named CLIMO_STDEV prior to met-12.0.0) * - 37 - - CLIMO_CDF - - Climatological cumulative distribution function value + - OBS_CLIMO_CDF + - Observation climatological cumulative distribution function value (named CLIMO_CDF prior to met-12.0.0) + * - 38 + - FCST_CLIMO_MEAN + - Forecast climatological mean value + * - 39 + - FCST_CLIMO_STDEV + - Forecast climatological standard deviation value .. _table_PS_format_info_SEEPS_MPR: diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst index c8f2fb0f2c..95c6c7233b 100644 --- a/docs/Users_Guide/release-notes.rst +++ b/docs/Users_Guide/release-notes.rst @@ -9,6 +9,40 @@ When applicable, release notes are followed by the GitHub issue number which des enhancement, or new feature (`MET GitHub issues `_). Important issues are listed **in bold** for emphasis. +MET Version 12.0.0-beta5 Release Notes (20240710) +------------------------------------------------- + + .. 
dropdown:: Repository, build, and test + + * Reimplement and enhance the Perl-based (unit.pl) unit test control script in Python (`#2717 `_). + * Update compilation script and configuration files as needed for supported platforms (`#2753 `_). + * Update tag used for the release checksum action (`#2929 `_). + + .. dropdown:: Bugfixes + + * Bugfix (METbaseimage): Fix the environment to correct the ncdump runtime linker error (`METbaseimage#24 `_). + * Bugfix: Fix the Grid-Stat configuration file to support the MET_SEEPS_GRID_CLIMO_NAME option (`#2601 `_). + * **Bugfix: Fix TC-RMW to correct the tangential and radial wind computations** (`#2841 `_). + * Bugfix: Fix Ensemble-Stat's handling of climo data when verifying ensemble-derived probabilities (`#2856 `_). + * **Bugfix: Fix Point2Grid's handling of the -qc option for ADP input files** (`#2867 `_). + * Bugfix: Fix Stat-Analysis errors for jobs using the -dump_row option and the -line_type option with VCNT, RPS, DMAP, or SSIDX (`#2888 `_). + * Bugfix: Fix inconsistent handling of point observation valid times processed through Python embedding (`#2897 `_). + + .. dropdown:: Enhancements + + * **Add new wind direction verification statistics for RMSE, Bias, and MAE** (`#2395 `_). + * Document UGRID configuration options added to Point-Stat and Grid-Stat (`#2748 `_ + * Refine Point-Stat Warning message about fcst/obs level mismatch (`#2795 `_). + * **Add new -ugrid_config command line option for unstructured grid inputs to Grid-Stat and Point-Stat** (`#2842 `_). + * Enhance Point2Grid to support modified quality control settings for smoke/dust AOD data in GOES-16/17 as of April 16, 2024 (`#2853 `_). + * **Enhance Point2Grid to support a wider variety of input tripolar datasets** (`#2857 `_). + * Test NOAA Unstructured grids in MET-12.0.0 (`#2860 `_). + * Enhance Ensemble-Stat and Gen-Ens-Prod to omit warning messages for the MISSING keyword (`#2870 `_). + * Add new Python functionality to convert MET NetCDF observation data to a Pandas DataFrame (`#2781 `_). + * Enhance PCP-Combine to allow missing data (`#2883 `_). + * Enhance TC-Stat to support the -set_hdr job command option (`#2911 `_). + * Refine ERROR messages written by PB2NC (`#2912 `_). + MET Version 12.0.0-beta4 Release Notes (20240417) ------------------------------------------------- diff --git a/docs/conf.py b/docs/conf.py index 3a135308ab..f6ffb456ad 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,11 +20,11 @@ project = 'MET' author = 'UCAR/NCAR, NOAA, CSU/CIRA, and CU/CIRES' author_list = 'Prestopnik, J., H. Soh, L. Goodrich, B. Brown, R. Bullock, J. Halley Gotway, K. Newman, J. Opatz, T. 
Jensen' -version = '12.0.0-beta4' +version = '12.0.0-beta5' verinfo = version release = f'{version}' release_year = '2024' -release_date = f'{release_year}-04-17' +release_date = f'{release_year}-07-10' copyright = f'{release_year}, {author}' # -- General configuration --------------------------------------------------- diff --git a/internal/test_unit/config/GenEnsProdConfig b/internal/test_unit/config/GenEnsProdConfig index 9a565c2fb4..813272dc14 100644 --- a/internal/test_unit/config/GenEnsProdConfig +++ b/internal/test_unit/config/GenEnsProdConfig @@ -60,17 +60,17 @@ ens = { { name = "UGRD"; level = [ "Z10" ]; - cat_thresh = [ CDP75 ]; + cat_thresh = [ >OCDP75 ]; }, { name = "WIND"; level = [ "Z10" ]; - cat_thresh = [ >=CDP25&&<=CDP75 ]; + cat_thresh = [ >=OCDP25&&<=OCDP75 ]; } ]; } diff --git a/internal/test_unit/config/GridStatConfig_climo_WMO b/internal/test_unit/config/GridStatConfig_climo_WMO index 58f184cde0..739d122295 100644 --- a/internal/test_unit/config/GridStatConfig_climo_WMO +++ b/internal/test_unit/config/GridStatConfig_climo_WMO @@ -64,25 +64,25 @@ fcst = { { name = "UGRD"; level = [ "P500" ]; }, { name = "VGRD"; level = [ "P500" ]; }, { name = "WIND"; level = [ "P500" ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } obs = { - cnt_thresh = [ NA, =CDP50, ==CDP25 ]; + cnt_thresh = [ NA, =OCDP50, ==OCDP25 ]; field = [ { name = "TMP"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "UGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "VGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "WIND"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } diff --git a/internal/test_unit/config/GridStatConfig_gen_ens_prod b/internal/test_unit/config/GridStatConfig_gen_ens_prod index 8893c2d5ff..015ef3e946 100644 --- a/internal/test_unit/config/GridStatConfig_gen_ens_prod +++ b/internal/test_unit/config/GridStatConfig_gen_ens_prod @@ -66,9 +66,9 @@ fcst = { prob = TRUE; field = [ - { name = "UGRD_Z10_ENS_FREQ_ltCDP25"; }, - { name = "UGRD_Z10_ENS_NEP_ltCDP25_NBRHD25"; }, - { name = "UGRD_Z10_ENS_NMEP_ltCDP25_NBRHD25_GAUSSIAN1"; } + { name = "UGRD_Z10_ENS_FREQ_ltOCDP25"; }, + { name = "UGRD_Z10_ENS_NEP_ltOCDP25_NBRHD25"; }, + { name = "UGRD_Z10_ENS_NMEP_ltOCDP25_NBRHD25_GAUSSIAN1"; } ]; } @@ -76,7 +76,7 @@ obs = { name = "UGRD"; level = "Z10"; - cat_thresh = =0.25&&<=0.75 ]; +// desc = "OBS_CLIMO_CDF_IQR"; +// nc_pairs_var_suffix = desc; +// } { mpr_column = [ 
"ABS(OBS-CLIMO_MEAN)" ]; mpr_thresh = [ <=5 ]; @@ -91,6 +104,7 @@ fcst = { desc = "CLIMO_CDF_IQR"; nc_pairs_var_suffix = desc; } +// MET #2924 End replace ]; } obs = fcst; diff --git a/internal/test_unit/config/PointStatConfig_climo_WMO b/internal/test_unit/config/PointStatConfig_climo_WMO index 1ddc8deb1e..01c3f9b3de 100644 --- a/internal/test_unit/config/PointStatConfig_climo_WMO +++ b/internal/test_unit/config/PointStatConfig_climo_WMO @@ -55,25 +55,25 @@ fcst = { { name = "UGRD"; level = [ "P500" ]; }, { name = "VGRD"; level = [ "P500" ]; }, { name = "WIND"; level = [ "P500" ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } obs = { - cnt_thresh = [ NA, =CDP50, ==CDP25 ]; + cnt_thresh = [ NA, =OCDP50, ==OCDP25 ]; field = [ { name = "TMP"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "UGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "VGRD"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, { name = "WIND"; level = [ "P500" ]; cnt_thresh = [ NA ]; wind_thresh = [ NA ]; }, - { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; }, - { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >CDP90 ]; } + { name = "TMP"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "UGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "VGRD"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; }, + { name = "WIND"; level = [ "P850" ]; cat_thresh = [ >OCDP90 ]; } ]; } diff --git a/internal/test_unit/config/PointStatConfig_mpr_thresh b/internal/test_unit/config/PointStatConfig_mpr_thresh index 5740d4907a..e0d54b9219 100644 --- a/internal/test_unit/config/PointStatConfig_mpr_thresh +++ b/internal/test_unit/config/PointStatConfig_mpr_thresh @@ -68,6 +68,17 @@ fcst = { mpr_thresh = [ <=5 ]; desc = "ABS_OBS_FCST_DIFF"; }, +// MET #2924 Replace this section +// { +// mpr_column = [ "ABS(OBS-OBS_CLIMO_MEAN)" ]; +// mpr_thresh = [ <=5 ]; +// desc = "ABS_OBS_CLIMO_MEAN_DIFF"; +// }, +// { +// mpr_column = [ "OBS_CLIMO_CDF" ]; +// mpr_thresh = [ >=0.25&&<=0.75 ]; +// desc = "OBS_CLIMO_CDF_IQR"; +// } { mpr_column = [ "ABS(OBS-CLIMO_MEAN)" ]; mpr_thresh = [ <=5 ]; @@ -78,6 +89,7 @@ fcst = { mpr_thresh = [ >=0.25&&<=0.75 ]; desc = "CLIMO_CDF_IQR"; } +// MET #2924 End replace ]; } obs = fcst; diff --git a/internal/test_unit/config/SeriesAnalysisConfig_climo b/internal/test_unit/config/SeriesAnalysisConfig_climo index 196c38dafa..3728482541 100644 --- a/internal/test_unit/config/SeriesAnalysisConfig_climo +++ b/internal/test_unit/config/SeriesAnalysisConfig_climo @@ -37,7 +37,7 @@ regrid = { censor_thresh = []; censor_val = []; -cat_thresh = [ >CDP25, >CDP50, >CDP75 ]; +cat_thresh = [ >OCDP25, >OCDP50, >OCDP75 ]; cnt_thresh = [ NA ]; cnt_logic = UNION; @@ -50,7 +50,7 @@ fcst = { ]; } obs = { - cnt_thresh = [ NA, >CDP25&&OCDP25&& \ -lookin 
&OUTPUT_DIR;/climatology_1.5deg/point_stat_WMO_CLIMO_1.5DEG_120000L_20120409_120000V.stat \ - -job aggregate_stat -line_type MPR -out_line_type CTC -fcst_lev P850 -interp_mthd NEAREST -by FCST_VAR -out_thresh '>CDP90' \ + -job aggregate_stat -line_type MPR -out_line_type CTC -fcst_lev P850 -interp_mthd NEAREST -by FCST_VAR -out_thresh '>OCDP90' \ -out_stat &OUTPUT_DIR;/climatology_1.5deg/stat_analysis_WMO_1.5DEG_MPR_to_CTC_out.stat @@ -82,11 +82,19 @@ OUTPUT_DIR &OUTPUT_DIR;/climatology_1.5deg + \ -lookin &OUTPUT_DIR;/climatology_1.5deg/point_stat_WMO_CLIMO_1.5DEG_120000L_20120409_120000V.stat \ -job filter -line_type MPR -column_thresh CLIMO_CDF 'lt0.1||gt0.9' \ -dump_row &OUTPUT_DIR;/climatology_1.5deg/stat_analysis_WMO_1.5DEG_FILTER_CDF_dump.stat + &OUTPUT_DIR;/climatology_1.5deg/stat_analysis_WMO_1.5DEG_FILTER_CDF_dump.stat diff --git a/src/basic/vx_config/config.tab.cc b/src/basic/vx_config/config.tab.cc index 61513c670b..69427413b0 100644 --- a/src/basic/vx_config/config.tab.cc +++ b/src/basic/vx_config/config.tab.cc @@ -3229,11 +3229,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_simple_perc_thresh() -> bad perc_index ... " - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_simple_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -3243,7 +3242,7 @@ s->T = bad_data_double; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -3274,7 +3273,7 @@ if ( s->Ptype == perc_thresh_freq_bias && s->PT <= 0 ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); @@ -3303,11 +3302,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_compound_perc_thresh() -> bad perc_index ... " - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_compound_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -3318,7 +3316,7 @@ else s->T = num.d; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -3349,7 +3347,7 @@ if ( s->Ptype == perc_thresh_freq_bias && !is_eq(s->PT, 1.0) ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); cs << "(" << number_string << ")"; diff --git a/src/basic/vx_config/config.tab.yy b/src/basic/vx_config/config.tab.yy index dabe56d786..cab56d64af 100644 --- a/src/basic/vx_config/config.tab.yy +++ b/src/basic/vx_config/config.tab.yy @@ -1623,11 +1623,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_simple_perc_thresh() -> bad perc_index ... 
" - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_simple_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -1637,7 +1636,7 @@ s->T = bad_data_double; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -1668,7 +1667,7 @@ if ( s->Ptype == perc_thresh_freq_bias && s->PT <= 0 ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); @@ -1697,11 +1696,10 @@ Simple_Node * s = new Simple_Node; s->op = op; -if ( (info.perc_index < 0) || (info.perc_index >= n_perc_thresh_infos) ) { +if ( info.ptype == no_perc_thresh_type ) { - mlog << Error - << "\ndo_compound_perc_thresh() -> bad perc_index ... " - << (info.perc_index) << "\n\n"; + mlog << Error << "\ndo_compound_perc_thresh() -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -1712,7 +1710,7 @@ else s->T = num.d; s->PT = info.value; -s->Ptype = perc_thresh_info[info.perc_index].type; +s->Ptype = info.ptype; // // sanity check @@ -1743,7 +1741,7 @@ if ( s->Ptype == perc_thresh_freq_bias && !is_eq(s->PT, 1.0) ) { if ( op >= 0 ) { ConcatString cs; - cs << perc_thresh_info[info.perc_index].short_name; + cs << perc_thresh_info_map.at(info.ptype).short_name; cs << info.value; fix_float(cs); cs << "(" << number_string << ")"; diff --git a/src/basic/vx_config/config_constants.h b/src/basic/vx_config/config_constants.h index d7f2453689..e1a18aeb1a 100644 --- a/src/basic/vx_config/config_constants.h +++ b/src/basic/vx_config/config_constants.h @@ -723,10 +723,18 @@ static const char conf_key_is_wind_direction[] = "is_wind_direction"; static const char conf_key_is_prob[] = "is_prob"; // -// Climatology parameter key names +// Climatology data parameter key names +// +static const char conf_key_climo_mean_field[] = "climo_mean.field"; +static const char conf_key_fcst_climo_mean_field[] = "fcst.climo_mean.field"; +static const char conf_key_obs_climo_mean_field[] = "obs.climo_mean.field"; +static const char conf_key_climo_stdev_field[] = "climo_stdev.field"; +static const char conf_key_fcst_climo_stdev_field[] = "fcst.climo_stdev.field"; +static const char conf_key_obs_climo_stdev_field[] = "obs.climo_stdev.field"; + +// +// Climatology distribution parameter key names // -static const char conf_key_climo_mean_field[] = "climo_mean.field"; -static const char conf_key_climo_stdev_field[] = "climo_stdev.field"; static const char conf_key_climo_cdf[] = "climo_cdf"; static const char conf_key_cdf_bins[] = "cdf_bins"; static const char conf_key_center_bins[] = "center_bins"; diff --git a/src/basic/vx_config/my_config_scanner.cc b/src/basic/vx_config/my_config_scanner.cc index 13e41a196c..25a5cec234 100644 --- a/src/basic/vx_config/my_config_scanner.cc +++ b/src/basic/vx_config/my_config_scanner.cc @@ -514,7 +514,7 @@ int do_id() { -int j, k; +int j; const char *method_name = "do_id() -> "; Column += m_strlen(configtext); @@ -618,15 +618,7 @@ for (j=0; j " << "unable to parse string \"" << configtext << "\"\n\n"; @@ -1475,10 +1436,6 @@ if ( index < 0 ) { } -configlval.pc_info.perc_index = index; -configlval.pc_info.value = value; - - return SIMPLE_PERC_THRESH; } diff --git a/src/basic/vx_config/threshold.cc b/src/basic/vx_config/threshold.cc index 2bf216e084..bfff7a0089 100644 --- a/src/basic/vx_config/threshold.cc +++ b/src/basic/vx_config/threshold.cc @@ -36,6 +36,12 @@ extern ThreshNode * result; extern bool test_mode; +extern 
const std::string scp_perc_thresh_type_str("SCP"); + +extern const std::string cdp_perc_thresh_type_str("CDP"); + +static bool print_climo_perc_thresh_log_message = true; + //////////////////////////////////////////////////////////////////////// @@ -49,6 +55,92 @@ return ( t == thresh_le || t == thresh_ge || t == thresh_eq ); } +//////////////////////////////////////////////////////////////////////// + + +bool is_climo_dist_type(PercThreshType t) + +{ + +return ( t == perc_thresh_fcst_climo_dist || + t == perc_thresh_obs_climo_dist ); + +} + + +//////////////////////////////////////////////////////////////////////// + + +bool parse_perc_thresh(const char *str, PC_info *info) + +{ + +bool match = false; + +if ( perc_thresh_info_map.empty() ) return false; + +ConcatString search_cs(str); + +for (auto const& x : perc_thresh_info_map) { + + if ( search_cs.startswith(x.second.short_name.c_str()) && + is_number(str + x.second.short_name.size()) ) { + + if ( info ) { + + info->ptype = x.first; + + info->value = atof(str + x.second.short_name.size()); + + } + + match = true; + + break; + + } + +} + + // + // MET #2924: For backward compatibility support SCP and CDP + // threshold types + // + +if ( !match && + (search_cs.startswith(scp_perc_thresh_type_str.c_str()) || + search_cs.startswith(cdp_perc_thresh_type_str.c_str())) ) { + + if ( print_climo_perc_thresh_log_message ) { + + mlog << Debug(2) << R"(Please replace the deprecated "SCP" and "CDP" )" + << R"(threshold types with "SOCP" and "OCDP", respectively, in the ")" + << str << R"(" threshold string.\n)"; + + print_climo_perc_thresh_log_message = false; + + } + + ConcatString cs; + + if ( search_cs.startswith(scp_perc_thresh_type_str.c_str()) ) { + cs << perc_thresh_info_map.at(perc_thresh_sample_obs_climo).short_name; + cs << str + scp_perc_thresh_type_str.size(); + } + else { + cs << perc_thresh_info_map.at(perc_thresh_obs_climo_dist).short_name; + cs << str + cdp_perc_thresh_type_str.size(); + } + + return parse_perc_thresh(cs.c_str(), info); + +} + +return match; + +} + + //////////////////////////////////////////////////////////////////////// @@ -119,27 +211,15 @@ if ( right_child ) { delete right_child; right_child = nullptr; } //////////////////////////////////////////////////////////////////////// -bool Or_Node::check(double x) const +bool Or_Node::check(double x, const ClimoPntInfo *cpi) const { -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool Or_Node::check(double x, double cmn, double csd) const - -{ - -const bool tf_left = left_child->check(x, cmn, csd); +const bool tf_left = left_child->check(x, cpi); if ( tf_left ) return true; -const bool tf_right = right_child->check(x, cmn, csd); +const bool tf_right = right_child->check(x, cpi); return tf_right; @@ -168,13 +248,13 @@ return n; //////////////////////////////////////////////////////////////////////// -double Or_Node::climo_prob() const +double Or_Node::obs_climo_prob() const { if ( !left_child || !right_child ) { - mlog << Error << "\nOr_Node::climo_prob() -> " + mlog << Error << "\nOr_Node::obs_climo_prob() -> " << "node not populated!\n\n"; exit ( 1 ); @@ -182,8 +262,8 @@ if ( !left_child || !right_child ) { } double prob = bad_data_double; -double prob_left = left_child->climo_prob(); -double prob_right = right_child->climo_prob(); +double prob_left = left_child->obs_climo_prob(); +double prob_right = right_child->obs_climo_prob(); if ( !is_bad_data(prob_left) && 
!is_bad_data(prob_right) ) { @@ -220,21 +300,8 @@ return ( left_child->need_perc() || right_child->need_perc() ); //////////////////////////////////////////////////////////////////////// -void Or_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void Or_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void Or_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) { @@ -248,8 +315,8 @@ if ( !left_child || !right_child ) { } - left_child->set_perc(fptr, optr, cptr, fthr, othr); -right_child->set_perc(fptr, optr, cptr, fthr, othr); + left_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); +right_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); return; @@ -340,27 +407,15 @@ if ( right_child ) { delete right_child; right_child = nullptr; } //////////////////////////////////////////////////////////////////////// -bool And_Node::check(double x) const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool And_Node::check(double x, double cmn, double csd) const +bool And_Node::check(double x, const ClimoPntInfo *cpi) const { -const bool tf_left = left_child->check(x, cmn, csd); +const bool tf_left = left_child->check(x, cpi); if ( ! tf_left ) return false; -const bool tf_right = right_child->check(x, cmn, csd); +const bool tf_right = right_child->check(x, cpi); return ( tf_left && tf_right ); @@ -389,13 +444,13 @@ return n; //////////////////////////////////////////////////////////////////////// -double And_Node::climo_prob() const +double And_Node::obs_climo_prob() const { if ( !left_child || !right_child ) { - mlog << Error << "\nAnd_Node::climo_prob() -> " + mlog << Error << "\nAnd_Node::obs_climo_prob() -> " << "node not populated!\n\n"; exit ( 1 ); @@ -403,8 +458,8 @@ if ( !left_child || !right_child ) { } double prob = bad_data_double; -double prob_left = left_child->climo_prob(); -double prob_right = right_child->climo_prob(); +double prob_left = left_child->obs_climo_prob(); +double prob_right = right_child->obs_climo_prob(); // // For opposing inequalities, compute the difference in percentiles @@ -459,21 +514,8 @@ return ( left_child->need_perc() || right_child->need_perc() ); //////////////////////////////////////////////////////////////////////// -void And_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void And_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void And_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) { @@ -487,8 +529,8 @@ if ( !left_child || !right_child ) { } - left_child->set_perc(fptr, optr, cptr, fthr, othr); -right_child->set_perc(fptr, optr, cptr, fthr, othr); + left_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); +right_child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); return; @@ -578,23 +620,11 @@ if ( child ) { delete child; child = nullptr; } //////////////////////////////////////////////////////////////////////// -bool 
Not_Node::check(double x) const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool Not_Node::check(double x, double cmn, double csd) const +bool Not_Node::check(double x, const ClimoPntInfo *cpi) const { -const bool tf = child->check(x, cmn, csd); +const bool tf = child->check(x, cpi); return !tf; @@ -622,12 +652,12 @@ return n; //////////////////////////////////////////////////////////////////////// -double Not_Node::climo_prob() const +double Not_Node::obs_climo_prob() const { double prob = bad_data_double; -double prob_child = child->climo_prob(); +double prob_child = child->obs_climo_prob(); if ( !is_bad_data(prob_child) ) prob = 1.0 - prob_child; @@ -660,23 +690,11 @@ return child->need_perc(); //////////////////////////////////////////////////////////////////////// -void Not_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void Not_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void Not_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) + { if ( !child ) { @@ -688,7 +706,7 @@ if ( !child ) { } -child->set_perc(fptr, optr, cptr, fthr, othr); +child->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); return; @@ -780,35 +798,44 @@ Simple_Node::~Simple_Node() //////////////////////////////////////////////////////////////////////// -bool Simple_Node::check(double x) const +bool Simple_Node::check(double x, const ClimoPntInfo *cpi) const { -return check(x, bad_data_double, bad_data_double); +if ( op == thresh_na ) return true; -} +double tval; + // + // check climo distribution percentile thresholds + // -//////////////////////////////////////////////////////////////////////// +if ( is_climo_dist_type(Ptype) ) { + + // + // check the pointer + // + if(!cpi) { -bool Simple_Node::check(double x, double cmn, double csd) const + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " + << "climatological distribution percentile threshold type requested " + << "with no ClimoPntInfo provided!\n\n"; -{ + exit ( 1 ); -if ( op == thresh_na ) return true; + } -double tval; + double cmn = (Ptype == perc_thresh_fcst_climo_dist ? cpi->fcmn : cpi->ocmn); + double csd = (Ptype == perc_thresh_fcst_climo_dist ? 
cpi->fcsd : cpi->ocsd); // - // check climo distribution percentile thresholds + // check the climo data // -if ( Ptype == perc_thresh_climo_dist ) { - if(is_bad_data(cmn) || is_bad_data(csd)) { - mlog << Error << "\nSimple_Node::check(double, double, double) const -> " + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " << "climatological distribution percentile threshold \"" << s << "\" requested with invalid mean (" << cmn << ") or standard deviation (" << csd << ").\n\n"; @@ -832,7 +859,7 @@ else { if ( Ptype != no_perc_thresh_type && is_bad_data(tval) ) { - mlog << Error << "\nSimple_Node::check(double, double, double) const -> " + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " << "percentile threshold \"" << s << "\" used before it was set.\n\n"; @@ -856,7 +883,7 @@ switch ( op ) { case thresh_ne: tf = !eq; break; default: - mlog << Error << "\nSimple_Node::check(double, double, double) const -> " + mlog << Error << "\nSimple_Node::check(double, const ClimoPntInfo *) const -> " << "bad op ... " << op << "\n\n"; exit ( 1 ); @@ -914,24 +941,12 @@ return; } -//////////////////////////////////////////////////////////////////////// - - -void Simple_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - //////////////////////////////////////////////////////////////////////// -void Simple_Node::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void Simple_Node::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) { @@ -946,9 +961,10 @@ bool fbias_fcst = false; // handle sample percentile types // - if ( Ptype == perc_thresh_sample_fcst ) ptr = fptr; -else if ( Ptype == perc_thresh_sample_obs ) ptr = optr; -else if ( Ptype == perc_thresh_sample_climo ) ptr = cptr; + if ( Ptype == perc_thresh_sample_fcst ) ptr = fptr; +else if ( Ptype == perc_thresh_sample_obs ) ptr = optr; +else if ( Ptype == perc_thresh_sample_fcst_climo ) ptr = fcptr; +else if ( Ptype == perc_thresh_sample_obs_climo ) ptr = ocptr; // // handle bias-correction type @@ -960,7 +976,7 @@ else if ( Ptype == perc_thresh_freq_bias ) { mlog << Error << "\nSimple_Node::set_perc() -> " << "not enough information provided to define the " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\".\n\n"; exit ( 1 ); @@ -1013,7 +1029,7 @@ else if ( Ptype == perc_thresh_freq_bias ) { mlog << Error << "\nSimple_Node::set_perc() -> " << "unsupported options for computing the " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\".\n\n"; exit ( 1 ); @@ -1028,7 +1044,7 @@ else if ( Ptype == perc_thresh_freq_bias ) { mlog << Error << "\nSimple_Node::set_perc() -> " << "unable to compute the percentile for the " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\".\n\n"; exit ( 1 ); @@ -1051,7 +1067,7 @@ else { if ( !ptr ) { mlog << Error << "\nSimple_Node::set_perc() -> " - << perc_thresh_info[Ptype].long_name + << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\" requested but no data provided.\n\n"; @@ -1086,7 +1102,7 @@ else { if ( data.n() == 0 ) { mlog << Error << "\nSimple_Node::set_perc() -> " - << "can't compute " << perc_thresh_info[Ptype].long_name + << "can't 
compute " << perc_thresh_info_map.at(Ptype).long_name << " threshold \"" << s << "\" because no valid data was provided.\n\n"; @@ -1216,15 +1232,15 @@ return; //////////////////////////////////////////////////////////////////////// -double Simple_Node::climo_prob() const +double Simple_Node::obs_climo_prob() const { double prob = bad_data_double; -if ( Ptype == perc_thresh_climo_dist ) { +if ( Ptype == perc_thresh_obs_climo_dist ) { - // Climo probability varies based on the threshold type + // Observation climo probability varies based on the threshold type switch ( op ) { case thresh_lt: @@ -1251,9 +1267,9 @@ if ( Ptype == perc_thresh_climo_dist ) { default: - mlog << Error << "\nSimple_Node::climo_prob() -> " - << "cannot convert climatological distribution percentile " - << "threshold to a probability!\n\n"; + mlog << Error << "\nSimple_Node::obs_climo_prob() -> " + << "cannot convert observation climatological distribution " + << "percentile threshold to a probability!\n\n"; exit ( 1 ); @@ -1272,9 +1288,10 @@ bool Simple_Node::need_perc() const { -return ( Ptype == perc_thresh_sample_fcst || - Ptype == perc_thresh_sample_obs || - Ptype == perc_thresh_sample_climo || +return ( Ptype == perc_thresh_sample_fcst || + Ptype == perc_thresh_sample_obs || + Ptype == perc_thresh_sample_fcst_climo || + Ptype == perc_thresh_sample_obs_climo || Ptype == perc_thresh_freq_bias ); } @@ -1496,17 +1513,16 @@ return; //////////////////////////////////////////////////////////////////////// -void SingleThresh::set(double pt, ThreshType ind, int perc_index, double t) +void SingleThresh::set(double pt, ThreshType ind, PercThreshType ptype, double t) { clear(); -if ( (perc_index < 0) || (perc_index >= n_perc_thresh_infos) ) { +if ( ptype == no_perc_thresh_type ) { - mlog << Error - << "\nSingleThresh::set(double pt, ThreshType ind, int perc_index, double t) -> " - << "bad perc_index ... 
" << perc_index << "\n\n"; + mlog << Error << "\nSingleThresh::set(double, ThreshType, PercThreshType, double) -> " + << "bad percentile threshold type\n\n"; exit ( 1 ); @@ -1515,12 +1531,12 @@ if ( (perc_index < 0) || (perc_index >= n_perc_thresh_infos) ) { Simple_Node * a = new Simple_Node; ConcatString cs; -cs << perc_thresh_info[perc_index].short_name << pt; +cs << perc_thresh_info_map.at(ptype).short_name << pt; if( !is_bad_data(t) ) cs << "(" << t << ")"; a->T = t; a->op = ind; -a->Ptype = perc_thresh_info[perc_index].type; +a->Ptype = ptype; a->PT = pt; a->s << thresh_type_str[ind] << cs; a->abbr_s << thresh_abbr_str[ind] << cs; @@ -1656,27 +1672,15 @@ return false; //////////////////////////////////////////////////////////////////////// -void SingleThresh::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr) - -{ - -set_perc(fptr, optr, cptr, 0, 0); - -return; - -} - - -//////////////////////////////////////////////////////////////////////// - - -void SingleThresh::set_perc(const NumArray *fptr, const NumArray *optr, const NumArray *cptr, +void SingleThresh::set_perc(const NumArray *fptr, const NumArray *optr, + const NumArray *fcptr, const NumArray *ocptr, const SingleThresh *fthr, const SingleThresh *othr) + { if ( node ) { - node->set_perc(fptr, optr, cptr, fthr, othr); + node->set_perc(fptr, optr, fcptr, ocptr, fthr, othr); } @@ -1796,23 +1800,11 @@ return; //////////////////////////////////////////////////////////////////////// -bool SingleThresh::check(double x) const - -{ - -return check(x, bad_data_double, bad_data_double); - -} - - -//////////////////////////////////////////////////////////////////////// - - -bool SingleThresh::check(double x, double cmn, double csd) const +bool SingleThresh::check(double x, const ClimoPntInfo *cpi) const { -return ( node ? node->check(x, cmn, csd) : true ); +return ( node ? 
node->check(x, cpi) : true ); } diff --git a/src/basic/vx_config/threshold.h b/src/basic/vx_config/threshold.h index 0f39bd3413..c879cfe3c7 100644 --- a/src/basic/vx_config/threshold.h +++ b/src/basic/vx_config/threshold.h @@ -13,6 +13,7 @@ //////////////////////////////////////////////////////////////////////// #include +#include #include #include "concat_string.h" @@ -22,7 +23,6 @@ //////////////////////////////////////////////////////////////////////// - // // Enumeration of thresholding operations // @@ -39,7 +39,6 @@ enum ThreshType { thresh_complex = -2, no_thresh_type = -1 - }; static const int n_thresh_type = 7; @@ -55,82 +54,71 @@ static const char thresh_default_sep[] = ","; extern bool is_inclusive(ThreshType); - //////////////////////////////////////////////////////////////////////// - // // Enumeration of percentile threshold types // enum PercThreshType { - - perc_thresh_user_specified = 0, - perc_thresh_sample_fcst = 1, - perc_thresh_sample_obs = 2, - perc_thresh_sample_climo = 3, - perc_thresh_climo_dist = 4, - perc_thresh_freq_bias = 5, + perc_thresh_user_specified = 0, + perc_thresh_sample_fcst = 1, + perc_thresh_sample_obs = 2, + perc_thresh_sample_fcst_climo = 3, + perc_thresh_sample_obs_climo = 4, + perc_thresh_fcst_climo_dist = 5, + perc_thresh_obs_climo_dist = 6, + perc_thresh_freq_bias = 7, no_perc_thresh_type = -1 - }; - -static const int n_perc_thresh_type = 7; - +extern bool is_climo_dist_type(PercThreshType); struct PercThreshInfo { - - const PercThreshType type; - - const char * const short_name; - - const int short_name_length; - - const char * const long_name; - + const std::string short_name; + const std::string long_name; }; - -static const PercThreshInfo perc_thresh_info [] = { - - { perc_thresh_user_specified, "USP", 3, "USER_SPECIFIED_PERC" }, - - { perc_thresh_sample_fcst, "SFP", 3, "SAMPLE_FCST_PERC" }, - - { perc_thresh_sample_obs, "SOP", 3, "SAMPLE_OBS_PERC" }, - - { perc_thresh_sample_climo, "SCP", 3, "SAMPLE_CLIMO_PERC" }, - - { perc_thresh_climo_dist, "CDP", 3, "CLIMO_DIST_PERC" }, - - { perc_thresh_freq_bias, "FBIAS", 5, "FREQ_BIAS_PERC" }, - +static const std::map perc_thresh_info_map = { + { perc_thresh_user_specified, { "USP", "USER_SPECIFIED_PERC" } }, + { perc_thresh_sample_fcst, { "SFP", "SAMPLE_FCST_PERC" } }, + { perc_thresh_sample_obs, { "SOP", "SAMPLE_OBS_PERC" } }, + { perc_thresh_sample_fcst_climo, { "SFCP", "SAMPLE_FCST_CLIMO_PERC" } }, + { perc_thresh_sample_obs_climo, { "SOCP", "SAMPLE_OBS_CLIMO_PERC" } }, + { perc_thresh_fcst_climo_dist, { "FCDP", "CLIMO_FCST_DIST_PERC" } }, + { perc_thresh_obs_climo_dist, { "OCDP", "CLIMO_OBS_DIST_PERC" } }, + { perc_thresh_freq_bias, { "FBIAS", "FREQ_BIAS_PERC" } }, }; -static const int n_perc_thresh_infos = sizeof(perc_thresh_info)/sizeof(*perc_thresh_info); - static const int perc_thresh_default_precision = 0; static const double perc_thresh_default_tol = 0.05; - struct PC_info { + PercThreshType ptype; + double value; +}; - int perc_index; +extern bool parse_perc_thresh(const char *str, PC_info *info = nullptr); - double value; +struct ClimoPntInfo { + ClimoPntInfo() { clear(); } + ClimoPntInfo(double a, double b, double c, double d) : + fcmn(a), fcsd(b), ocmn(c), ocsd(d) {} + void set(double a, double b, double c, double d) { fcmn = a; fcsd = b; ocmn = c; ocsd = d; } + void clear() { fcmn = fcsd = ocmn = ocsd = bad_data_double; } + double fcmn; + double fcsd; + double ocmn; + double ocsd; }; - //////////////////////////////////////////////////////////////////////// - class SingleThresh; 
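The new "ClimoPntInfo" structure bundles the forecast and observation climatological mean and standard deviation for a single point, and the reworked check() interface selects the appropriate pair based on the percentile threshold type. A minimal usage sketch follows (hypothetical calling code, not taken from the MET sources; it assumes SingleThresh::set(const char *) accepts the new "OCDP" type, as the parse_perc_thresh() logic above suggests):

.. code-block:: cpp

   // Sketch: evaluate an observation climatological distribution percentile
   // threshold (">OCDP90") at one point using the per-point climo values.
   #include "threshold.h"   // SingleThresh, ClimoPntInfo

   bool obs_exceeds_ocdp90(double obs_value,
                           double fcmn, double fcsd,   // forecast climo mean/stdev
                           double ocmn, double ocsd) { // observation climo mean/stdev

      SingleThresh st;
      st.set(">OCDP90");   // Ptype becomes perc_thresh_obs_climo_dist

      // Constructor argument order: fcmn, fcsd, ocmn, ocsd
      ClimoPntInfo cpi(fcmn, fcsd, ocmn, ocsd);

      // For OCDP thresholds, check() uses the observation climo pair (ocmn, ocsd)
      // to convert the 90th percentile into a numeric threshold at this point.
      return st.check(obs_value, &cpi);
   }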
class Simple_Node; - //////////////////////////////////////////////////////////////////////// class ThreshNode { @@ -144,8 +132,7 @@ class ThreshNode { ThreshNode(); virtual ~ThreshNode(); - virtual bool check(double) const = 0; - virtual bool check(double, double, double) const = 0; + virtual bool check(double, const ClimoPntInfo *cpi = nullptr) const = 0; virtual ThreshNode * copy() const = 0; @@ -157,14 +144,14 @@ class ThreshNode { virtual double pvalue() const = 0; - virtual double climo_prob() const = 0; + virtual double obs_climo_prob() const = 0; virtual bool need_perc() const = 0; - virtual void set_perc(const NumArray *, const NumArray *, const NumArray *) = 0; - - virtual void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *) = 0; + virtual void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) = 0; virtual void multiply_by(const double) = 0; @@ -186,31 +173,30 @@ class Or_Node : public ThreshNode { Or_Node(); ~Or_Node(); - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - ThreshNode * copy() const; + ThreshNode * copy() const override; - ThreshType type() const; + ThreshType type() const override; - double value() const; + double value() const override; - PercThreshType ptype() const; + PercThreshType ptype() const override; - double pvalue() const; + double pvalue() const override; - double climo_prob() const; + double obs_climo_prob() const override; - bool need_perc() const; + bool need_perc() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) override; - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void multiply_by(const double) override; - void multiply_by(const double); - - void get_simple_nodes(std::vector &) const; + void get_simple_nodes(std::vector &) const override; ThreshNode * left_child; ThreshNode * right_child; @@ -237,31 +223,30 @@ class And_Node : public ThreshNode { And_Node(); ~And_Node(); - bool check(double) const; - bool check(double, double, double) const; - - ThreshType type() const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - double value() const; + ThreshType type() const override; - PercThreshType ptype() const; + double value() const override; - double pvalue() const; + PercThreshType ptype() const override; - double climo_prob() const; + double pvalue() const override; - bool need_perc() const; + double obs_climo_prob() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *); + bool need_perc() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) override; - void multiply_by(const double); + void multiply_by(const double) override; - void get_simple_nodes(std::vector &) const; + void get_simple_nodes(std::vector &) const override; - ThreshNode * copy() const; + ThreshNode * copy() const override; ThreshNode * left_child; 
ThreshNode * right_child; @@ -288,31 +273,30 @@ class Not_Node : public ThreshNode { Not_Node(); ~Not_Node(); - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - ThreshType type() const; + ThreshType type() const override; - double value() const; + double value() const override; - PercThreshType ptype() const; + PercThreshType ptype() const override; - double pvalue() const; + double pvalue() const override; - double climo_prob() const; + double obs_climo_prob() const override; - bool need_perc() const; + bool need_perc() const override; - void set_perc(const NumArray *, const NumArray *, const NumArray *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) override; - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void multiply_by(const double) override; - void multiply_by(const double); + void get_simple_nodes(std::vector &) const override; - void get_simple_nodes(std::vector &) const; - - ThreshNode * copy() const; + ThreshNode * copy() const override; ThreshNode * child; @@ -354,39 +338,38 @@ class Simple_Node : public ThreshNode { void set_na(); - void set_perc(const NumArray *, const NumArray *, const NumArray *); - - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr) override; // // get stuff // - ThreshType type() const; + ThreshType type() const override; - double value() const; + double value() const override; - PercThreshType ptype() const; + PercThreshType ptype() const override; - double pvalue() const; + double pvalue() const override; - double climo_prob() const; + double obs_climo_prob() const override; - bool need_perc() const; + bool need_perc() const override; - void get_simple_nodes(std::vector &) const; + void get_simple_nodes(std::vector &) const override; // // do stuff // - ThreshNode * copy() const; + ThreshNode * copy() const override; - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const override; - void multiply_by(const double); + void multiply_by(const double) override; }; @@ -430,14 +413,15 @@ class SingleThresh { void clear(); void set(double t, ThreshType); - void set(double pt, ThreshType, int perc_index, double t = bad_data_double); + void set(double pt, ThreshType, PercThreshType, double t = bad_data_double); void set(const ThreshNode *); void set(const char *); bool need_perc() const; - void set_perc(const NumArray *, const NumArray *, const NumArray *); - void set_perc(const NumArray *, const NumArray *, const NumArray *, - const SingleThresh *, const SingleThresh *); + void set_perc(const NumArray *, const NumArray *, + const NumArray *, const NumArray *, + const SingleThresh *fthr = nullptr, + const SingleThresh *othr = nullptr); void set_na(); @@ -445,7 +429,7 @@ class SingleThresh { double get_value() const; PercThreshType get_ptype() const; double get_pvalue() const; - double get_climo_prob() const; + double get_obs_climo_prob() const; void get_simple_nodes(std::vector &) const; void multiply_by(const double); @@ -453,8 +437,7 @@ class SingleThresh { ConcatString 
get_str(int precision = thresh_default_precision) const; ConcatString get_abbr_str(int precision = thresh_default_precision) const; - bool check(double) const; - bool check(double, double, double) const; + bool check(double, const ClimoPntInfo *cpi = nullptr) const; }; @@ -462,11 +445,11 @@ class SingleThresh { //////////////////////////////////////////////////////////////////////// -inline ThreshType SingleThresh::get_type() const { return ( node ? node->type() : thresh_na ); } -inline double SingleThresh::get_value() const { return ( node ? node->value() : bad_data_double ); } -inline PercThreshType SingleThresh::get_ptype() const { return ( node ? node->ptype() : no_perc_thresh_type ); } -inline double SingleThresh::get_pvalue() const { return ( node ? node->pvalue() : bad_data_double ); } -inline double SingleThresh::get_climo_prob() const { return ( node ? node->climo_prob() : bad_data_double ); } +inline ThreshType SingleThresh::get_type() const { return ( node ? node->type() : thresh_na ); } +inline double SingleThresh::get_value() const { return ( node ? node->value() : bad_data_double ); } +inline PercThreshType SingleThresh::get_ptype() const { return ( node ? node->ptype() : no_perc_thresh_type ); } +inline double SingleThresh::get_pvalue() const { return ( node ? node->pvalue() : bad_data_double ); } +inline double SingleThresh::get_obs_climo_prob() const { return ( node ? node->obs_climo_prob() : bad_data_double ); } //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_log/logger.cc b/src/basic/vx_log/logger.cc index 8535cca6e4..c3d6ee1b48 100644 --- a/src/basic/vx_log/logger.cc +++ b/src/basic/vx_log/logger.cc @@ -627,7 +627,6 @@ Logger & Logger::operator<<(const string s) // // put the next character into the ConcatString msg // - // tmp[0] = s[i]; msg.add(s[i]); if (s[i] == '\n') @@ -636,7 +635,7 @@ Logger & Logger::operator<<(const string s) // this was a newline, so // put msg into the StringArray messages // - messages.add((string)msg); + messages.add((string)msg); // // clear msg, and continue checking s @@ -655,7 +654,7 @@ Logger & Logger::operator<<(const string s) { if (s[s.length() - 1] != '\n') { - messages.add((string)msg); + messages.add((string)msg); msg.clear(); } diff --git a/src/basic/vx_util/CircularTemplate.cc b/src/basic/vx_util/CircularTemplate.cc index 593b1eedec..4fb91c1f85 100644 --- a/src/basic/vx_util/CircularTemplate.cc +++ b/src/basic/vx_util/CircularTemplate.cc @@ -41,12 +41,12 @@ CircularTemplate::CircularTemplate(const int width, bool wrap_lon) : _wrapLon = wrap_lon; - // width of 2 is not supported - if (width == 2) { + // width of 2 is not supported + if (width == 2) { mlog << Error << "\nCircularTemplate::CircularTemplate() -> " << "unsupported width of " << width << " for circles.\n\n"; - exit(1); - } + exit(1); + } bool evenWidth = ((width % 2) == 0); @@ -59,9 +59,9 @@ CircularTemplate::CircularTemplate(const int width, bool wrap_lon) : // offset is within the circle. double radius = (width-1)/2.0; - + // Create the offsets list. 
- + // Need to increase the area we look at if the width is even, because // some valid offset points will actually be farther from the reference point // than the radius, because the reference point is offset from the true @@ -71,31 +71,31 @@ CircularTemplate::CircularTemplate(const int width, bool wrap_lon) : if(evenWidth) maxOffset++; int minOffset = static_cast(floor(-1 * radius)); - + for(int y = minOffset; y <= maxOffset; y++) { for(int x = minOffset; x <= maxOffset; x++) { double double_x = (double)x; double double_y = (double)y; - if(evenWidth) { + if(evenWidth) { // if width is even, the reference point is actually shifted 1/2 a grid spacing down and to the left, // from the true center of the circle. - // - // so when we calculate distance, we need to subtract .5 so that the distance reflects the distance from the center - // of the circle, instead of the distance from the reference. - // - // for example - a circle with width == 4. The reference point is the lower left corner of the center square. - // the point directly below that is at (0,-1), but it's actually (-.5, -1.5) from the center of the circle. - // - // another example - same circle. The point directly to the right of the reference point is (1,0), but it's - // actually (.5,-.5) from the center. - + // + // so when we calculate distance, we need to subtract .5 so that the distance reflects the distance from the center + // of the circle, instead of the distance from the reference. + // + // for example - a circle with width == 4. The reference point is the lower left corner of the center square. + // the point directly below that is at (0,-1), but it's actually (-.5, -1.5) from the center of the circle. + // + // another example - same circle. The point directly to the right of the reference point is (1,0), but it's + // actually (.5,-.5) from the center. 
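// Worked check of the even-width shift described in the comments above
// (illustrative, derived from this hunk): for width == 4, radius = (4-1)/2.0
// = 1.5. The raw offset (1,0) shifts to (0.5,-0.5), giving distance
// sqrt(0.25 + 0.25) ~= 0.71 <= 1.5, so it is kept; the raw offset (2,2)
// shifts to (1.5,1.5), distance ~= 2.12 > 1.5, so it is dropped.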
+ double_x -= 0.5; double_y -= 0.5; } double distance= sqrt((double_x * double_x) + (double_y * double_y)); - if(distance <= radius) _addOffset(x, y); + if(distance <= radius) _addOffset(x, y); } // end for x } // end for y diff --git a/src/basic/vx_util/GridPoint.cc b/src/basic/vx_util/GridPoint.cc index 8ddd5f31b9..009cf86bf4 100644 --- a/src/basic/vx_util/GridPoint.cc +++ b/src/basic/vx_util/GridPoint.cc @@ -64,7 +64,6 @@ GridPoint::~GridPoint(void) void GridPoint::rotate(const double angle) { - //const double M_PI = 3.14159265358979323846; double angle_rad = angle * M_PI / 180.0; double cosa = cos(angle_rad); double sina = sin(angle_rad); diff --git a/src/basic/vx_util/data_plane.cc b/src/basic/vx_util/data_plane.cc index 2aa218b6a0..e3edf3d0cf 100644 --- a/src/basic/vx_util/data_plane.cc +++ b/src/basic/vx_util/data_plane.cc @@ -518,7 +518,7 @@ void DataPlane::censor(const ThreshArray &censor_thresh, for(i=0; iis_empty() && !csd->is_empty()) use_climo = true; - } + if(fcmn && !fcmn->is_empty() && + fcsd && !fcsd->is_empty() && + ocmn && !ocmn->is_empty() && + ocsd && !ocsd->is_empty()) use_climo = true; // Check climatology dimensions if(use_climo) { // Check dimensions - if(cmn->nx() != dp.nx() || cmn->ny() != dp.ny()) { + if(fcmn->nx() != dp.nx() || fcmn->ny() != dp.ny()) { + mlog << Error << "\nfractional_coverage() -> " + << "forecast climatology mean dimension (" + << fcmn->nx() << ", " << fcmn->ny() + << ") does not match the data dimenion (" + << dp.nx() << ", " << dp.ny() << ")!\n\n"; + exit(1); + } + if(fcsd->nx() != dp.nx() || fcsd->ny() != dp.ny()) { + mlog << Error << "\nfractional_coverage() -> " + << "forecast climatology standard deviation dimension (" + << fcsd->nx() << ", " << fcsd->ny() + << ") does not match the data dimenion (" + << dp.nx() << ", " << dp.ny() << ")!\n\n"; + exit(1); + } + if(ocmn->nx() != dp.nx() || ocmn->ny() != dp.ny()) { mlog << Error << "\nfractional_coverage() -> " - << "climatology mean dimension (" - << cmn->nx() << ", " << cmn->ny() + << "observation climatology mean dimension (" + << ocmn->nx() << ", " << ocmn->ny() << ") does not match the data dimenion (" << dp.nx() << ", " << dp.ny() << ")!\n\n"; exit(1); } - if(csd->nx() != dp.nx() || csd->ny() != dp.ny()) { + if(ocsd->nx() != dp.nx() || ocsd->ny() != dp.ny()) { mlog << Error << "\nfractional_coverage() -> " - << "climatology standard deviation dimension (" - << csd->nx() << ", " << csd->ny() + << "observation climatology standard deviation dimension (" + << ocsd->nx() << ", " << ocsd->ny() << ") does not match the data dimenion (" << dp.nx() << ", " << dp.ny() << ")!\n\n"; exit(1); @@ -255,7 +274,7 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, #pragma omp parallel default(none) \ shared(mlog, dp, frac_dp, shape, width, wrap_lon, t) \ - shared(use_climo, cmn, csd, vld_t, bad) \ + shared(use_climo, fcmn, fcsd, ocmn, ocsd, vld_t, bad)\ private(x, y, n_vld, n_thr, gp, v) { @@ -293,9 +312,14 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, gp = gt->getNextInGrid()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld++; - if(t.check(v, - (use_climo ? cmn->get(gp->x, gp->y) : bad), - (use_climo ? csd->get(gp->x, gp->y) : bad))) n_thr++; + ClimoPntInfo cpi; + if(use_climo) { + cpi.set(fcmn->get(gp->x, gp->y), + fcsd->get(gp->x, gp->y), + ocmn->get(gp->x, gp->y), + ocsd->get(gp->x, gp->y)); + } + if(t.check(v, &cpi)) n_thr++; } } // Subtract off the bottom edge, shift up, and add the top. 
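// The same replacement pattern recurs in the fractional_coverage() hunks above
// and below: the old pair of (climo mean, climo stdev) arguments to
// SingleThresh::check() becomes a ClimoPntInfo built from the four climatology
// planes. Condensed form of that pattern (names taken from the hunks, shown
// here only to summarize the change):
//
//    ClimoPntInfo cpi;
//    if(use_climo) {
//       cpi.set(fcmn->get(gp->x, gp->y), fcsd->get(gp->x, gp->y),
//               ocmn->get(gp->x, gp->y), ocsd->get(gp->x, gp->y));
//    }
//    if(t.check(v, &cpi)) n_thr++;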
@@ -307,9 +331,14 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, gp = gt->getNextInBotEdge()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld--; - if(t.check(v, - (use_climo ? cmn->get(gp->x, gp->y) : bad), - (use_climo ? csd->get(gp->x, gp->y) : bad))) n_thr--; + ClimoPntInfo cpi; + if(use_climo) { + cpi.set(fcmn->get(gp->x, gp->y), + fcsd->get(gp->x, gp->y), + ocmn->get(gp->x, gp->y), + ocsd->get(gp->x, gp->y)); + } + if(t.check(v, &cpi)) n_thr--; } // Increment Y @@ -321,9 +350,14 @@ void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, gp = gt->getNextInTopEdge()) { if(is_bad_data(v = dp.get(gp->x, gp->y))) continue; n_vld++; - if(t.check(v, - (use_climo ? cmn->get(gp->x, gp->y) : bad), - (use_climo ? csd->get(gp->x, gp->y) : bad))) n_thr++; + ClimoPntInfo cpi; + if(use_climo) { + cpi.set(fcmn->get(gp->x, gp->y), + fcsd->get(gp->x, gp->y), + ocmn->get(gp->x, gp->y), + ocsd->get(gp->x, gp->y)); + } + if(t.check(v, &cpi)) n_thr++; } } diff --git a/src/basic/vx_util/data_plane_util.h b/src/basic/vx_util/data_plane_util.h index 7f0b9b27a7..0cb74224ad 100644 --- a/src/basic/vx_util/data_plane_util.h +++ b/src/basic/vx_util/data_plane_util.h @@ -53,7 +53,9 @@ extern DataPlane smooth_field(const DataPlane &dp, extern void fractional_coverage(const DataPlane &dp, DataPlane &frac_dp, int width, GridTemplateFactory::GridTemplates shape, bool wrap_lon, SingleThresh t, - const DataPlane *cmn, const DataPlane *csd, double vld_t); + const DataPlane *fcmn, const DataPlane *fcsd, + const DataPlane *ocmn, const DataPlane *ocsd, + double vld_t); extern void apply_mask(const DataPlane &, const MaskPlane &, NumArray &); extern void apply_mask(DataPlane &, const MaskPlane &); diff --git a/src/basic/vx_util/interp_util.cc b/src/basic/vx_util/interp_util.cc index b5cb88c412..4bf797c2c6 100644 --- a/src/basic/vx_util/interp_util.cc +++ b/src/basic/vx_util/interp_util.cc @@ -704,7 +704,7 @@ double interp_geog_match(const DataPlane &dp, const GridTemplate >, //////////////////////////////////////////////////////////////////////// double interp_nbrhd(const DataPlane &dp, const GridTemplate >, int x, int y, - double t, const SingleThresh *st, double cmn, double csd, + double t, const SingleThresh *st, const ClimoPntInfo *cpi, const MaskPlane *mp) { int count, count_thr; @@ -723,7 +723,7 @@ double interp_nbrhd(const DataPlane &dp, const GridTemplate >, int x, int y, if(is_bad_data(data)) continue; count++; - if(st->check(data, cmn, csd)) count_thr++; + if(st->check(data, cpi)) count_thr++; } // Check whether enough valid grid points were found @@ -1099,8 +1099,8 @@ double compute_horz_interp(const DataPlane &dp, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, const SingleThresh *cat_thresh) { - return compute_horz_interp(dp, obs_x, obs_y, obs_v, bad_data_double, - bad_data_double, mthd, width, shape, wrap_lon, + return compute_horz_interp(dp, obs_x, obs_y, obs_v, nullptr, + mthd, width, shape, wrap_lon, interp_thresh, cat_thresh); } @@ -1108,7 +1108,7 @@ double compute_horz_interp(const DataPlane &dp, double compute_horz_interp(const DataPlane &dp, double obs_x, double obs_y, - double obs_v, double cmn, double csd, + double obs_v, const ClimoPntInfo *cpi, const InterpMthd mthd, const int width, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, @@ -1157,7 +1157,7 @@ double compute_horz_interp(const DataPlane &dp, case InterpMthd::Nbrhd: // Neighborhood fractional coverage v = interp_nbrhd(dp, *gt, 
x, y, - interp_thresh, cat_thresh, cmn, csd); + interp_thresh, cat_thresh, cpi); break; case InterpMthd::Bilin: // Bilinear interpolation diff --git a/src/basic/vx_util/interp_util.h b/src/basic/vx_util/interp_util.h index e8f30975de..9e5561eb63 100644 --- a/src/basic/vx_util/interp_util.h +++ b/src/basic/vx_util/interp_util.h @@ -64,10 +64,10 @@ extern NumArray interp_points (const DataPlane &dp, const GridTemplate >, dou // GridTemplate version takes center x/y extern NumArray interp_points (const DataPlane &dp, const GridTemplate >, int x, int y); -extern double interp_min (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); -extern double interp_max (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); -extern double interp_median (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); -extern double interp_uw_mean (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = 0); +extern double interp_min (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); +extern double interp_max (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); +extern double interp_median (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); +extern double interp_uw_mean (const DataPlane &dp, const GridTemplate >, int x, int y, double t, const MaskPlane *mp = nullptr); // Non-GridTemplate version takes lower-left corner x/y extern double interp_min_ll (const DataPlane &dp, int x_ll, int y_ll, int w, double t); @@ -76,19 +76,19 @@ extern double interp_median_ll (const DataPlane &dp, int x_ll, int y_ll, int extern double interp_uw_mean_ll (const DataPlane &dp, int x_ll, int y_ll, int w, double t); // GridTemplate version takes center x/y -extern double interp_dw_mean (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, int i_pow, double t, const MaskPlane *mp = 0); -extern double interp_ls_fit (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double t, const MaskPlane *mp = 0); +extern double interp_dw_mean (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, int i_pow, double t, const MaskPlane *mp = nullptr); +extern double interp_ls_fit (const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double t, const MaskPlane *mp = nullptr); extern void interp_gaussian_dp(DataPlane &, const GaussianInfo &, double t); extern double interp_gaussian (const DataPlane &, const DataPlane &, double obs_x, double obs_y, int max_r, double t); -extern double interp_geog_match(const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double obs_v, const MaskPlane *mp = 0); +extern double interp_geog_match(const DataPlane &, const GridTemplate >, double obs_x, double obs_y, double obs_v, const MaskPlane *mp = nullptr); extern double interp_nbrhd (const DataPlane &, const GridTemplate >, int x, int y, double t, const SingleThresh *, - double cmn, double csd, const MaskPlane *mp = 0); -extern double interp_bilin (const DataPlane &, bool wrap_lon, double obs_x, double obs_y, const MaskPlane *mp = 0); -extern double interp_xy (const DataPlane &, bool wrap_lon, int x, int y, const MaskPlane *mp = 0); + const ClimoPntInfo *, const MaskPlane *mp = nullptr); +extern double interp_bilin (const DataPlane &, bool wrap_lon, double obs_x, double obs_y, const MaskPlane *mp = nullptr); 
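// Callers that have no climatology now pass a null ClimoPntInfo pointer
// instead of the old pair of bad_data_double arguments, as the updated
// compute_horz_interp() wrapper in the interp_util.cc hunk above does.
// Illustrative call against the new signature (argument values hypothetical):
//
//    double v = compute_horz_interp(dp, obs_x, obs_y, obs_v,
//                                   nullptr,              // no climo point info
//                                   InterpMthd::Nbrhd, width, shape,
//                                   wrap_lon, interp_thresh, &cat_thresh);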
+extern double interp_xy (const DataPlane &, bool wrap_lon, int x, int y, const MaskPlane *mp = nullptr); -extern double interp_best (const DataPlane &dp, const GridTemplate >, int x, int y, double obs_v, double t, const MaskPlane *mp = 0); +extern double interp_best (const DataPlane &dp, const GridTemplate >, int x, int y, double obs_v, double t, const MaskPlane *mp = nullptr); extern void get_xy_ll (double x, double y, int w, int h, int &x_ll, int &y_ll); @@ -115,15 +115,15 @@ extern double compute_horz_interp(const DataPlane &dp, const InterpMthd mthd, const int width, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, - const SingleThresh *cat_thresh = 0); + const SingleThresh *cat_thresh = nullptr); extern double compute_horz_interp(const DataPlane &dp, double obs_x, double obs_y, - double obs_v, double cmn, double csd, + double obs_v, const ClimoPntInfo *, const InterpMthd mthd, const int width, const GridTemplateFactory::GridTemplates shape, bool wrap_lon, double interp_thresh, - const SingleThresh *cat_thresh = 0); + const SingleThresh *cat_thresh = nullptr); extern double compute_vert_pinterp(double, double, double, double, double); extern double compute_vert_zinterp(double, double, double, double, double); diff --git a/src/basic/vx_util/memory.cc b/src/basic/vx_util/memory.cc index 621b3be0bb..297902685b 100644 --- a/src/basic/vx_util/memory.cc +++ b/src/basic/vx_util/memory.cc @@ -1,4 +1,4 @@ -// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +/// *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* // ** Copyright UCAR (c) 1992 - 2024 // ** University Corporation for Atmospheric Research (UCAR) // ** National Center for Atmospheric Research (NCAR) @@ -33,7 +33,7 @@ void oom() { void oom_grib2() { mlog << Error << "\nOut of memory reading GRIB2 data! 
Exiting!\n" - << "Check that MET and the GRIB2C library were compiled " + << "Check that MET and the GRIB2C library were compiled " << "consistently, either with or without the -D__64BIT__ " << "flag.\n\n"; exit(1); diff --git a/src/basic/vx_util/polyline.cc b/src/basic/vx_util/polyline.cc index 5d96776544..03c38ae00e 100644 --- a/src/basic/vx_util/polyline.cc +++ b/src/basic/vx_util/polyline.cc @@ -190,7 +190,7 @@ void Polyline::extend_points(int n) { if(!u || !v) { mlog << Error << "\nPolyline::extend_points(int) -> " - << "memory allocation error 1" << "\n\n"; + << "memory allocation error 1" << "\n\n"; exit(1); } @@ -303,7 +303,7 @@ double Polyline::angle() const { if(n_points < 3) { mlog << Error << "\nPolyline::angle() -> " - << "not enough points!\n\n"; + << "not enough points!\n\n"; exit(1); } diff --git a/src/basic/vx_util/stat_column_defs.h b/src/basic/vx_util/stat_column_defs.h index 9a92cf0970..5ae3a64b1a 100644 --- a/src/basic/vx_util/stat_column_defs.h +++ b/src/basic/vx_util/stat_column_defs.h @@ -197,13 +197,26 @@ static const char * const eclv_columns [] = { "N_PNT", "CL_", "VALUE_" }; +/* MET #2924 Replace this change static const char * const mpr_columns [] = { - "TOTAL", "INDEX", "OBS_SID", - "OBS_LAT", "OBS_LON", "OBS_LVL", - "OBS_ELV", "FCST", "OBS", - "OBS_QC", "CLIMO_MEAN", "CLIMO_STDEV", - "CLIMO_CDF" + "TOTAL", "INDEX", "OBS_SID", + "OBS_LAT", "OBS_LON", "OBS_LVL", + "OBS_ELV", "FCST", "OBS", + "OBS_QC", + "OBS_CLIMO_MEAN", "OBS_CLIMO_STDEV", "OBS_CLIMO_CDF", + "FCST_CLIMO_MEAN", "FCST_CLIMO_STDEV" }; +*/ + +static const char * const mpr_columns [] = { + "TOTAL", "INDEX", "OBS_SID", + "OBS_LAT", "OBS_LON", "OBS_LVL", + "OBS_ELV", "FCST", "OBS", + "OBS_QC", + "CLIMO_MEAN", "CLIMO_STDEV", "CLIMO_CDF" +}; + +// MET #2924 End replace static const char * const nbrctc_columns [] = { "TOTAL", "FY_OY", "FY_ON", @@ -295,15 +308,30 @@ static const char * const phist_columns [] = { "BIN_" }; +/* MET #2924 Replace this section static const char * const orank_columns [] = { - "TOTAL", "INDEX", "OBS_SID", - "OBS_LAT", "OBS_LON", "OBS_LVL", - "OBS_ELV", "OBS", "PIT", - "RANK", "N_ENS_VLD", "N_ENS", - "ENS_", "OBS_QC", "ENS_MEAN", - "CLIMO_MEAN", "SPREAD", "ENS_MEAN_OERR", - "SPREAD_OERR", "SPREAD_PLUS_OERR", "CLIMO_STDEV" + "TOTAL", "INDEX", "OBS_SID", + "OBS_LAT", "OBS_LON", "OBS_LVL", + "OBS_ELV", "OBS", "PIT", + "RANK", "N_ENS_VLD", "N_ENS", + "ENS_", "OBS_QC", "ENS_MEAN", + "OBS_CLIMO_MEAN", "SPREAD", "ENS_MEAN_OERR", + "SPREAD_OERR", "SPREAD_PLUS_OERR", "OBS_CLIMO_STDEV", + "FCST_CLIMO_MEAN", "FCST_CLIMO_STDEV" }; +*/ + +static const char * const orank_columns [] = { + "TOTAL", "INDEX", "OBS_SID", + "OBS_LAT", "OBS_LON", "OBS_LVL", + "OBS_ELV", "OBS", "PIT", + "RANK", "N_ENS_VLD", "N_ENS", + "ENS_", "OBS_QC", "ENS_MEAN", + "CLIMO_MEAN", "SPREAD", "ENS_MEAN_OERR", + "SPREAD_OERR", "SPREAD_PLUS_OERR", "CLIMO_STDEV" +}; + +// MET #2924 End replace static const char * const ssvar_columns [] = { "TOTAL", "N_BIN", "BIN_i", @@ -456,7 +484,7 @@ inline int get_n_eclv_columns (int n) { return(4 + 2*n); } // inline int get_n_rhist_columns (int n) { return(2 + n); } // n = N_RANK inline int get_n_phist_columns (int n) { return(3 + n); } // n = N_BINS inline int get_n_relp_columns (int n) { return(2 + n); } // n = N_ENS -inline int get_n_orank_columns (int n) { return(20 + n); } // n = N_ENS +inline int get_n_orank_columns (int n) { return(22 + n); } // n = N_ENS //////////////////////////////////////////////////////////////////////// diff --git a/src/basic/vx_util/thresh_array.cc 
b/src/basic/vx_util/thresh_array.cc index 4a0b421ae1..993857678f 100644 --- a/src/basic/vx_util/thresh_array.cc +++ b/src/basic/vx_util/thresh_array.cc @@ -291,9 +291,9 @@ void ThreshArray::parse_thresh_str(const char *thresh_str) { //////////////////////////////////////////////////////////////////////// int ThreshArray::has(const SingleThresh &st) const { - int index, status; + int index; - status = has(st, index); + int status = has(st, index); return status; } @@ -301,13 +301,12 @@ int ThreshArray::has(const SingleThresh &st) const { //////////////////////////////////////////////////////////////////////// int ThreshArray::has(const SingleThresh &st, int & index) const { - int j; index = -1; if(Nelements == 0) return 0; - for(j=0; j t[i+1].get_value() || t[i].get_type() != t[i+1].get_type() || @@ -386,34 +382,29 @@ void ThreshArray::check_bin_thresh() const { //////////////////////////////////////////////////////////////////////// -int ThreshArray::check_bins(double v) const { - return check_bins(v, bad_data_double, bad_data_double); -} - -//////////////////////////////////////////////////////////////////////// - -int ThreshArray::check_bins(double v, double mn, double sd) const { +int ThreshArray::check_bins(double v, const ClimoPntInfo *cpi) const { int i, bin; // Check for bad data or no thresholds if(is_bad_data(v) || Nelements == 0) return bad_data_int; - // For < and <=, check thresholds left to right. - if(t[0].get_type() == thresh_lt || t[0].get_type() == thresh_le) { + // For < and <=, check thresholds left to right + if(t[0].get_type() == thresh_lt || + t[0].get_type() == thresh_le) { for(i=0, bin=-1; i and >=, check thresholds right to left. + // For > and >=, check thresholds right to left else { for(i=Nelements-1, bin=-1; i>=0; i--) { - if(t[i].check(v, mn, sd)) { + if(t[i].check(v, cpi)) { bin = i+1; break; } @@ -421,26 +412,19 @@ int ThreshArray::check_bins(double v, double mn, double sd) const { if(bin == -1) bin = 0; } - // The bin value returned is 1-based, not 0-based. 
+ // The bin value returned is 1-based, not 0-based return bin; } //////////////////////////////////////////////////////////////////////// -bool ThreshArray::check_dbl(double v) const { - return check_dbl(v, bad_data_double, bad_data_double); -} - -//////////////////////////////////////////////////////////////////////// - -bool ThreshArray::check_dbl(double v, double mn, double sd) const { - int i; +bool ThreshArray::check_dbl(double v, const ClimoPntInfo *cpi) const { // // Check if the value satisifes all the thresholds in the array // - for(i=0; ithresh()[i]), &(oarr->thresh()[i])); + t[i].set_perc(fptr, optr, fcptr, ocptr, + &(farr->thresh()[i]), + &(oarr->thresh()[i])); } return; @@ -755,11 +741,13 @@ ThreshArray process_perc_thresh_bins(const ThreshArray &ta_in) { for(i=0; i=100) { @@ -810,11 +798,11 @@ ThreshArray process_rps_cdp_thresh(const ThreshArray &ta) { SingleThresh st; ThreshArray ta_out; - // Check for evenly-spaced CDP thresholds + // Check for evenly-spaced OCDP thresholds for(int i=0; i #include #include +#include #include #include #include @@ -34,6 +35,32 @@ using namespace std; +//////////////////////////////////////////////////////////////////////// + + + // + // MET #2924 Rename climatology column names + // + +static const map mpr_rename_map = { + { "CLIMO_MEAN", "OBS_CLIMO_MEAN" }, + { "CLIMO_STDEV", "OBS_CLIMO_STDEV" }, + { "CLIMO_CDF", "OBS_CLIMO_CDF" } +}; + +static const map orank_rename_map = { + { "CLIMO_MEAN", "OBS_CLIMO_MEAN" }, + { "CLIMO_STDEV", "OBS_CLIMO_STDEV" } +}; + +static const map< STATLineType, map > stat_lty_rename_map = { + { STATLineType::mpr, mpr_rename_map }, + { STATLineType::orank, orank_rename_map } +}; + +static StringArray print_stat_rename_message; + + //////////////////////////////////////////////////////////////////////// @@ -444,6 +471,29 @@ if ( is_bad_data(offset) ) { if ( !get_file()->header().has(col_str, offset) ) offset = bad_data_int; } + // + // If not found, check renamed columns for backward compatibility + // + +if ( is_bad_data(offset) ) { + + string s(col_str); + + if ( stat_lty_rename_map.count(Type) && + stat_lty_rename_map.at(Type).count(s) ) { + if ( !print_stat_rename_message.has(s) ) { + mlog << Debug(2) << "The \"" << s << "\" column in the " + << statlinetype_to_string(Type) + << " line type has been renamed as \"" + << (stat_lty_rename_map.at(Type)).at(s) + << "\". 
Please switch to using MET" + << met_version << " column names.\n"; + print_stat_rename_message.add(s); + } + return ( get_item((stat_lty_rename_map.at(Type)).at(s).c_str()) ); + } +} + // // Return bad data string for no match // diff --git a/src/libcode/vx_data2d/var_info.h b/src/libcode/vx_data2d/var_info.h index 0d8717f416..3271376816 100644 --- a/src/libcode/vx_data2d/var_info.h +++ b/src/libcode/vx_data2d/var_info.h @@ -278,7 +278,7 @@ class EnsVarInfo { private: std::vector inputs; // Vector of InputInfo - VarInfo * ctrl_info; // Field info for control member + VarInfo * ctrl_info; // Field info for control member public: EnsVarInfo(); diff --git a/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc b/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc index 6beba9a08d..edb45079e6 100644 --- a/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc +++ b/src/libcode/vx_data2d_nc_wrf/var_info_nc_wrf.cc @@ -239,7 +239,7 @@ void VarInfoNcWrf::set_magic(const ConcatString &nstr, const ConcatString &lstr) level_value = unix_time; as_offset = false; } - else if (is_number(ptr2)) { + else if (is_number(ptr2)) { if (as_offset) level = atoi(ptr2); else { level = vx_data2d_dim_by_value; diff --git a/src/libcode/vx_data2d_python/grid_from_python_dict.cc b/src/libcode/vx_data2d_python/grid_from_python_dict.cc index c1b07c779c..fa69bd9d8e 100644 --- a/src/libcode/vx_data2d_python/grid_from_python_dict.cc +++ b/src/libcode/vx_data2d_python/grid_from_python_dict.cc @@ -527,7 +527,7 @@ else { if ( ! pointdata_from_np_array(np, &vals) ) { mlog << Error << "\nlookup_python_num_array() -> " - << "can't parse values for \"" << key << "\" from python \"" + << "can't parse values for \"" << key << "\" from python \"" << Py_TYPE(obj)->tp_name << "\" object.\n"; } diff --git a/src/libcode/vx_grid/goes_grid.cc b/src/libcode/vx_grid/goes_grid.cc index c00babc717..296dc27a11 100644 --- a/src/libcode/vx_grid/goes_grid.cc +++ b/src/libcode/vx_grid/goes_grid.cc @@ -431,13 +431,13 @@ void GoesImagerData::compute_lat_lon() mlog << Error << method_name << " index=" << index << " too big than " << buf_len << "\n"; else { - if (std::isnan(lat_rad)) lat = bad_data_float; + if (std::isnan(lat_rad)) lat = bad_data_float; else { lat = lat_rad * deg_per_rad; if (lat > lat_max) {lat_max = lat; idx_lat_max = index; } if (lat < lat_min) {lat_min = lat; idx_lat_min = index; } } - if (std::isnan(lon_rad)) lon = bad_data_float; + if (std::isnan(lon_rad)) lon = bad_data_float; else { lon = lon_of_projection_origin - (lon_rad * deg_per_rad); if (lon > lon_max) {lon_max = lon; idx_lon_max = index; } diff --git a/src/libcode/vx_grid/laea_grid.cc b/src/libcode/vx_grid/laea_grid.cc index 6d59f25f6e..b14ddf159a 100644 --- a/src/libcode/vx_grid/laea_grid.cc +++ b/src/libcode/vx_grid/laea_grid.cc @@ -455,7 +455,7 @@ v = new double [n]; if ( !u || !v ) { mlog << Error << "\nLaeaGrid::xy_closedpolyline_area() -> " - << "memory allocation error\n\n"; + << "memory allocation error\n\n"; exit ( 1 ); diff --git a/src/libcode/vx_seeps/seeps.cc b/src/libcode/vx_seeps/seeps.cc index c3114acffc..4204f59370 100644 --- a/src/libcode/vx_seeps/seeps.cc +++ b/src/libcode/vx_seeps/seeps.cc @@ -64,34 +64,23 @@ void release_seeps_climo() { //////////////////////////////////////////////////////////////////////// SeepsClimoGrid *get_seeps_climo_grid(int month, ConcatString seeps_grid_climo_name, int hour) { - bool not_found = true; - SeepsClimoGrid *seeps_climo_grid = nullptr; - for (map::iterator it=seeps_climo_grid_map_00.begin(); - it!=seeps_climo_grid_map_00.end(); 
++it) { - if (it->first == month) { - not_found = false; - seeps_climo_grid = (SeepsClimoGrid *)it->second; - break; - } - } - if (not_found) { - seeps_climo_grid = new SeepsClimoGrid(month, hour, seeps_grid_climo_name); - seeps_climo_grid_map_00[month] = seeps_climo_grid; + if (seeps_climo_grid_map_00.count(month) == 0) { + seeps_climo_grid_map_00[month] = nullptr; + seeps_climo_grid_map_00[month] = new SeepsClimoGrid(month, hour, seeps_grid_climo_name); } - return seeps_climo_grid; + + return seeps_climo_grid_map_00[month]; } //////////////////////////////////////////////////////////////////////// void release_seeps_climo_grid(int month, int hour) { - for (map::iterator it=seeps_climo_grid_map_00.begin(); - it!=seeps_climo_grid_map_00.end(); ++it) { - if (it->first == month) { - delete it->second; - seeps_climo_grid_map_00.erase(it); - break; - } + + if (seeps_climo_grid_map_00.count(month) > 0) { + delete seeps_climo_grid_map_00[month]; + seeps_climo_grid_map_00[month] = nullptr; + seeps_climo_grid_map_00.erase(month); } } @@ -103,10 +92,8 @@ double weighted_average(double v1, double w1, double v2, double w2) { v1 * w1 + v2 * w2); } - //////////////////////////////////////////////////////////////////////// - void SeepsAggScore::clear() { n_obs = 0; @@ -166,7 +153,6 @@ SeepsAggScore & SeepsAggScore::operator+=(const SeepsAggScore &c) { return *this; } - //////////////////////////////////////////////////////////////////////// SeepsClimoBase::SeepsClimoBase(ConcatString seeps_climo_name) : climo_file_name{seeps_climo_name} { @@ -657,22 +643,14 @@ void SeepsClimo::read_seeps_scores(ConcatString filename) { } - - //////////////////////////////////////////////////////////////////////// - SeepsClimoGrid::SeepsClimoGrid(int month, int hour, ConcatString seeps_climo_name) : month{month}, hour{hour}, SeepsClimoBase{seeps_climo_name} { - - clear(); - p1_buf = p2_buf = t1_buf = t2_buf = nullptr; - s12_buf = s13_buf = s21_buf = s23_buf = s31_buf = s32_buf = nullptr; - + init_from_scratch(); ConcatString seeps_name = get_climo_filename(); if (file_exists(seeps_name.c_str())) read_seeps_scores(seeps_name); - } //////////////////////////////////////////////////////////////////////// @@ -683,6 +661,14 @@ SeepsClimoGrid::~SeepsClimoGrid() { //////////////////////////////////////////////////////////////////////// +void SeepsClimoGrid::init_from_scratch() { + p1_buf = p2_buf = t1_buf = t2_buf = nullptr; + s12_buf = s13_buf = s21_buf = s23_buf = s31_buf = s32_buf = nullptr; + clear(); +} + +//////////////////////////////////////////////////////////////////////// + void SeepsClimoGrid::clear() { SeepsClimoBase::clear(); if (nullptr != p1_buf) { delete [] p1_buf; p1_buf = nullptr; } diff --git a/src/libcode/vx_seeps/seeps.h b/src/libcode/vx_seeps/seeps.h index 208275e7cd..808acefa3e 100644 --- a/src/libcode/vx_seeps/seeps.h +++ b/src/libcode/vx_seeps/seeps.h @@ -230,6 +230,8 @@ class SeepsClimoGrid : public SeepsClimoBase { double *s31_buf; double *s32_buf; + void init_from_scratch(); + protected: void clear() override; ConcatString get_env_climo_name() override { return MET_ENV_SEEPS_GRID_CLIMO_NAME; }; diff --git a/src/libcode/vx_shapedata/mode_conf_info.cc b/src/libcode/vx_shapedata/mode_conf_info.cc index 642db14034..080365c757 100644 --- a/src/libcode/vx_shapedata/mode_conf_info.cc +++ b/src/libcode/vx_shapedata/mode_conf_info.cc @@ -1192,19 +1192,23 @@ void ModeConfInfo::set_perc_thresh(const DataPlane &f_dp, // // Compute percentiles // - Fcst->conv_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, 
+ Fcst->conv_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->conv_thresh_array), &(Obs->conv_thresh_array)); - Obs->conv_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Obs->conv_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->conv_thresh_array), &(Obs->conv_thresh_array)); - Fcst->merge_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Fcst->merge_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->merge_thresh_array), &(Obs->merge_thresh_array)); - Obs->merge_thresh_array.set_perc(&fsort, &osort, (NumArray *) 0, + Obs->merge_thresh_array.set_perc(&fsort, &osort, + nullptr, nullptr, &(Fcst->merge_thresh_array), &(Obs->merge_thresh_array)); @@ -1253,10 +1257,12 @@ void ModeConfInfo::set_perc_thresh(const DataPlane &dp) // // Compute percentiles by hacking in the same input as if its two // - F->conv_thresh_array.set_perc(&sort, &sort, (NumArray *) 0, + F->conv_thresh_array.set_perc(&sort, &sort, + nullptr, nullptr, &(F->conv_thresh_array), &(F->conv_thresh_array)); - F->merge_thresh_array.set_perc(&sort, &sort, (NumArray *) 0, + F->merge_thresh_array.set_perc(&sort, &sort, + nullptr, nullptr, &(F->merge_thresh_array), &(F->merge_thresh_array)); return; @@ -1789,14 +1795,17 @@ PercThreshType ModeConfInfo::perctype(const Mode_Field_Info &f) const if (f.conv_thresh_array.n() > 0) { pc = f.conv_thresh_array[0].get_ptype(); } - if (pm == perc_thresh_sample_climo || pc == perc_thresh_sample_climo) { + if (pm == perc_thresh_sample_fcst_climo || pm == perc_thresh_sample_obs_climo || + pc == perc_thresh_sample_fcst_climo || pc == perc_thresh_sample_obs_climo) { mlog << Error << "\nModeConfInfo::perctype()\n" - << " Thresholding with 'SCP' in an input not implemented for multivariate mode\n\n"; + << " Thresholding with 'SFCP' or 'SOCP' in an input not implemented " + << "for multivariate mode\n\n"; exit ( 1 ); } - if (pm == perc_thresh_climo_dist || pc == perc_thresh_climo_dist) { + if (is_climo_dist_type(pm) || is_climo_dist_type(pc)) { mlog << Error << "\nModeConfInfo::perctype()\n" - << " Thresholding with 'CDP' in an input not implemented for multivariate mode\n\n"; + << " Thresholding with 'CDP', 'FCDP', or 'OCDP' in an " + << "input not implemented for multivariate mode\n\n"; exit ( 1 ); } if (pm == perc_thresh_freq_bias || diff --git a/src/libcode/vx_shapedata/node.cc b/src/libcode/vx_shapedata/node.cc index 329bd9f961..a0ac3397e3 100644 --- a/src/libcode/vx_shapedata/node.cc +++ b/src/libcode/vx_shapedata/node.cc @@ -213,8 +213,8 @@ Node *Node::get_child(int n) const { if( n >= (children_count = n_children()) ) { mlog << Error << "\nNode::get_child(int) -> " - << "attempting to access child number " << n << " when only " - << children_count << " exist\n\n"; + << "attempting to access child number " << n << " when only " + << children_count << " exist\n\n"; exit(1); } @@ -295,7 +295,7 @@ double Node::angle() const { if(p.n_points < 3 && n_children() == 0) { mlog << Error << "\nNode::angle() -> " - << "not enough points!\n\n"; + << "not enough points!\n\n"; exit(1); } diff --git a/src/libcode/vx_stat_out/stat_columns.cc b/src/libcode/vx_stat_out/stat_columns.cc index 0bd1c9393a..330a74d827 100644 --- a/src/libcode/vx_stat_out/stat_columns.cc +++ b/src/libcode/vx_stat_out/stat_columns.cc @@ -488,6 +488,10 @@ void write_orank_header_row(int hdr_flag, int n_ens, AsciiTable &at, at.set_entry(r, c+17+n_ens, (string)orank_columns[18]); at.set_entry(r, c+18+n_ens, (string)orank_columns[19]); at.set_entry(r, c+19+n_ens, 
(string)orank_columns[20]); + /* MET #2924 Uncomment this section + at.set_entry(r, c+20+n_ens, (string)orank_columns[21]); + at.set_entry(r, c+21+n_ens, (string)orank_columns[22]); + */ return; } @@ -4076,11 +4080,12 @@ void write_mpr_cols(const PairDataPoint *pd_ptr, int i, // // Matched Pairs (MPR) // Dump out the MPR line: - // TOTAL, INDEX, OBS_SID, - // OBS_LAT, OBS_LON, OBS_LVL, - // OBS_ELV, FCST, OBS, - // OBS_QC, CLIMO_MEAN, CLIMO_STDEV, - // CLIMO_CDF + // TOTAL, INDEX, OBS_SID, + // OBS_LAT, OBS_LON, OBS_LVL, + // OBS_ELV, FCST, OBS, + // OBS_QC, + // OBS_CLIMO_MEAN, OBS_CLIMO_STDEV, OBS_CLIMO_CDF, + // FCST_CLIMO_MEAN, FCST_CLIMO_STDEV // at.set_entry(r, c+0, // Total Number of Pairs pd_ptr->n_obs); @@ -4112,14 +4117,22 @@ void write_mpr_cols(const PairDataPoint *pd_ptr, int i, at.set_entry(r, c+9, // Observation Quality Control (string)pd_ptr->o_qc_sa[i]); - at.set_entry(r, c+10, // Climatological Mean Value - pd_ptr->cmn_na[i]); + at.set_entry(r, c+10, // Observation Climatological Mean Value + pd_ptr->ocmn_na[i]); - at.set_entry(r, c+11, // Climatological Standard Deviation Value - pd_ptr->csd_na[i]); + at.set_entry(r, c+11, // Observation Climatological Standard Deviation Value + pd_ptr->ocsd_na[i]); - at.set_entry(r, c+12, // Climatological CDF Value - pd_ptr->cdf_na[i]); + at.set_entry(r, c+12, // Observation Climatological CDF Value + pd_ptr->ocdf_na[i]); + +/* MET #2924 Uncomment this section + at.set_entry(r, c+13, // Forecast Climatological Mean Value + pd_ptr->fcmn_na[i]); + + at.set_entry(r, c+14, // Forecast Climatological Standard Deviation Value + pd_ptr->fcsd_na[i]); +*/ return; } @@ -4493,9 +4506,10 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, // OBS_ELV, OBS, PIT, // RANK, N_ENS_VLD, N_ENS, // [ENS_] (for each ensemble member) - // OBS_QC, ENS_MEAN, CLIMO_MEAN, - // SPREAD, ENS_MEAN_OERR, SPREAD_OERR, - // SPREAD_PLUS_OERR, CLIMO_STDEV + // OBS_QC, ENS_MEAN, OBS_CLIMO_MEAN, + // SPREAD, ENS_MEAN_OERR, SPREAD_OERR, + // SPREAD_PLUS_OERR, OBS_CLIMO_STDEV, FCST_CLIMO_MEAN, + // FCST_CLIMO_STDEV // at.set_entry(r, c+0, // Total Number of Pairs pd_ptr->n_obs); // Use n_obs instead of n_pair to include missing data @@ -4551,9 +4565,9 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, at.set_entry(r, c+13+pd_ptr->n_ens, pd_ptr->mn_na[i]); - // Climatology mean values + // Observation climatology mean values at.set_entry(r, c+14+pd_ptr->n_ens, - pd_ptr->cmn_na[i]); + pd_ptr->ocmn_na[i]); // Unperturbed ensemble spread values at.set_entry(r, c+15+pd_ptr->n_ens, @@ -4571,9 +4585,19 @@ void write_orank_cols(const PairDataEnsemble *pd_ptr, int i, at.set_entry(r, c+18+pd_ptr->n_ens, square_root(pd_ptr->var_plus_oerr_na[i])); - // Climatology standard deviation values + // Observation climatology standard deviation values at.set_entry(r, c+19+pd_ptr->n_ens, - pd_ptr->csd_na[i]); + pd_ptr->ocsd_na[i]); + +/* MET #2924 Uncomment this section + // Forecast climatology mean values + at.set_entry(r, c+20+pd_ptr->n_ens, + pd_ptr->fcmn_na[i]); + + // Forecast climatology standard deviation values + at.set_entry(r, c+21+pd_ptr->n_ens, + pd_ptr->fcsd_na[i]); +*/ return; } diff --git a/src/libcode/vx_statistics/compute_stats.cc b/src/libcode/vx_statistics/compute_stats.cc index d775637dd3..40c4e82589 100644 --- a/src/libcode/vx_statistics/compute_stats.cc +++ b/src/libcode/vx_statistics/compute_stats.cc @@ -150,11 +150,10 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, bool precip_flag, bool rank_flag, bool normal_ci_flag, 
CNTInfo &cnt_info) { int i, j, n; - double f, o, c, wgt, wgt_sum; + double f, o, fc, oc, wgt, wgt_sum; double f_bar, o_bar, ff_bar, oo_bar, fo_bar; double fa_bar, oa_bar, ffa_bar, ooa_bar, foa_bar; double err, err_bar, abs_err_bar, err_sq_bar, den; - bool cmn_flag; // // Allocate memory to store the differences @@ -176,7 +175,8 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, // // Flag to process climo // - cmn_flag = set_climo_flag(pd.f_na, pd.cmn_na); + bool cmn_flag = set_climo_flag(pd.f_na, pd.fcmn_na) && + set_climo_flag(pd.f_na, pd.ocmn_na); // // Get the sum of the weights @@ -199,7 +199,8 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, f = pd.f_na[j]; o = pd.o_na[j]; - c = (cmn_flag ? pd.cmn_na[j] : bad_data_double); + fc = (cmn_flag ? pd.fcmn_na[j] : bad_data_double); + oc = (cmn_flag ? pd.ocmn_na[j] : bad_data_double); wgt = pd.wgt_na[i]/wgt_sum; // @@ -207,7 +208,8 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, // if(is_bad_data(f) || is_bad_data(o) || - (cmn_flag && is_bad_data(c))) continue; + (cmn_flag && is_bad_data(fc)) || + (cmn_flag && is_bad_data(oc))) continue; // // Compute the error @@ -226,11 +228,11 @@ void compute_cntinfo(const PairDataPoint &pd, const NumArray &i_na, n++; if(cmn_flag) { - fa_bar += wgt*(f-c); - oa_bar += wgt*(o-c); - foa_bar += wgt*(f-c)*(o-c); - ffa_bar += wgt*(f-c)*(f-c); - ooa_bar += wgt*(o-c)*(o-c); + fa_bar += wgt*(f-fc); + oa_bar += wgt*(o-oc); + foa_bar += wgt*(f-fc)*(o-oc); + ffa_bar += wgt*(f-fc)*(f-fc); + ooa_bar += wgt*(o-oc)*(o-oc); } } // end for i @@ -587,7 +589,9 @@ void compute_ctsinfo(const PairDataPoint &pd, const NumArray &i_na, // // Add this pair to the contingency table // - cts_info.add(pd.f_na[j], pd.o_na[j], pd.cmn_na[j], pd.csd_na[j]); + ClimoPntInfo cpi(pd.fcmn_na[j], pd.fcsd_na[j], + pd.ocmn_na[j], pd.ocsd_na[j]); + cts_info.add(pd.f_na[j], pd.o_na[j], &cpi); } // end for i @@ -684,7 +688,9 @@ void compute_mctsinfo(const PairDataPoint &pd, const NumArray &i_na, // // Add this pair to the contingency table // - mcts_info.add(pd.f_na[j], pd.o_na[j], pd.cmn_na[j], pd.csd_na[j]); + ClimoPntInfo cpi(pd.fcmn_na[j], pd.fcsd_na[j], + pd.ocmn_na[j], pd.ocsd_na[j]); + mcts_info.add(pd.f_na[j], pd.o_na[j], &cpi); } // end for i @@ -761,14 +767,14 @@ void compute_pctinfo(const PairDataPoint &pd, bool pstd_flag, n_pair = pd.f_na.n(); // Flag to process climo - cmn_flag = (set_climo_flag(pd.f_na, pd.cmn_na) || - (cprob_in && cprob_in->n() > 0)); + cmn_flag = (set_climo_flag(pd.f_na, pd.ocmn_na) || + (cprob_in && cprob_in->n() > 0)); // Use input climatological probabilities or derive them if(cmn_flag) { if(cprob_in) climo_prob = *cprob_in; else climo_prob = derive_climo_prob(pd.cdf_info_ptr, - pd.cmn_na, pd.csd_na, + pd.ocmn_na, pd.ocsd_na, pct_info.othresh); } @@ -797,10 +803,16 @@ void compute_pctinfo(const PairDataPoint &pd, bool pstd_flag, // for(i=0; in_obs = count; - seeps->mean_fcst = fcst_sum / count; - seeps->mean_obs = obs_sum / count; - seeps->score = score_sum / count; + seeps_agg->n_obs = count; + seeps_agg->mean_fcst = fcst_sum / count; + seeps_agg->mean_obs = obs_sum / count; + seeps_agg->score = score_sum / count; weighted_score = 0.; for (int i=0; i count) density_cnt = count; @@ -1502,35 +1514,35 @@ void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { seeps_mprs.clear(); // The weight for s12 to s32 should come from climo file, but not available yet - seeps->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs - 
seeps->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs - seeps->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs - seeps->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast - seeps->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast - seeps->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast - seeps->s12 = c12 * seeps->pf1 * seeps->pv2; - seeps->s13 = c13 * seeps->pf1 * seeps->pv3; - seeps->s21 = c21 * seeps->pf2 * seeps->pv1; - seeps->s23 = c23 * seeps->pf2 * seeps->pv3; - seeps->s31 = c31 * seeps->pf3 * seeps->pv1; - seeps->s32 = c32 * seeps->pf3 * seeps->pv2; - seeps->weighted_score = weighted_score; + seeps_agg->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs + seeps_agg->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs + seeps_agg->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs + seeps_agg->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast + seeps_agg->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast + seeps_agg->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast + seeps_agg->s12 = c12 * seeps_agg->pf1 * seeps_agg->pv2; + seeps_agg->s13 = c13 * seeps_agg->pf1 * seeps_agg->pv3; + seeps_agg->s21 = c21 * seeps_agg->pf2 * seeps_agg->pv1; + seeps_agg->s23 = c23 * seeps_agg->pf2 * seeps_agg->pv3; + seeps_agg->s31 = c31 * seeps_agg->pf3 * seeps_agg->pv1; + seeps_agg->s32 = c32 * seeps_agg->pf3 * seeps_agg->pv2; + seeps_agg->weighted_score = weighted_score; mlog << Debug(7) << method_name - << "SEEPS score=" << seeps->score << " weighted_score=" << weighted_score - << " pv1=" << seeps->pv1 << " pv2=" << seeps->pv2 << " pv3=" << seeps->pv3 - << " pf1=" << seeps->pf1 << " pf2=" << seeps->pf2 << " pf3=" << seeps->pf3 << "\n"; + << "SEEPS score=" << seeps_agg->score << " weighted_score=" << weighted_score + << " pv1=" << seeps_agg->pv1 << " pv2=" << seeps_agg->pv2 << " pv3=" << seeps_agg->pv3 + << " pf1=" << seeps_agg->pf1 << " pf2=" << seeps_agg->pf2 << " pf3=" << seeps_agg->pf3 << "\n"; } else { mlog << Debug(5) << method_name << "no SEEPS_MPR available\n"; } - seeps->c12 = c12; - seeps->c13 = c13; - seeps->c21 = c21; - seeps->c23 = c23; - seeps->c31 = c31; - seeps->c32 = c32; + seeps_agg->c12 = c12; + seeps_agg->c13 = c13; + seeps_agg->c21 = c21; + seeps_agg->c23 = c23; + seeps_agg->c31 = c31; + seeps_agg->c32 = c32; if (count != (c12+c13+c21+c23+c31+c32+count_diagonal)){ mlog << Debug(6) << method_name @@ -1546,7 +1558,7 @@ void compute_aggregated_seeps(const PairDataPoint *pd, SeepsAggScore *seeps) { void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &obs_dp, DataPlane &seeps_dp, DataPlane &seeps_dp_fcat, - DataPlane &seeps_dp_ocat,SeepsAggScore *seeps, + DataPlane &seeps_dp_ocat, SeepsAggScore *seeps_agg, int month, int hour, const SingleThresh &seeps_p1_thresh, const ConcatString &seeps_climo_name) { int fcst_cat, obs_cat; @@ -1559,7 +1571,6 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob int c12, c13, c21, c23, c31, c32; double obs_sum, fcst_sum; double seeps_score, seeps_score_sum, seeps_score_partial_sum; - SeepsScore *seeps_mpr; static const char *method_name = "compute_aggregated_seeps_grid() -> "; seeps_dp.set_size(nx, ny); @@ -1568,8 +1579,7 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob obs_sum = fcst_sum = seeps_score_sum = 0.; seeps_count = count_diagonal = nan_count = bad_count = 0; c12 = c13 = c21 = c23 = c31 = c32 = 0; - - seeps->clear(); + seeps_agg->clear(); SeepsClimoGrid 
*seeps_climo = get_seeps_climo_grid(month, seeps_climo_name); seeps_climo->set_p1_thresh(seeps_p1_thresh); for (int i=0; iget_record(ix, iy, fcst_value, obs_value); + SeepsScore *seeps_mpr = seeps_climo->get_record(ix, iy, fcst_value, obs_value); if (seeps_mpr != nullptr) { fcst_cat = seeps_mpr->fcst_cat; obs_cat = seeps_mpr->obs_cat; @@ -1621,7 +1631,7 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob pvf_cnt[seeps_mpr->s_idx] += 1; } - delete seeps_mpr; + if(seeps_mpr) { delete seeps_mpr; seeps_mpr = nullptr; } } } seeps_dp.set(seeps_score, ix, iy); @@ -1632,41 +1642,41 @@ void compute_aggregated_seeps_grid(const DataPlane &fcst_dp, const DataPlane &ob } int cell_count = dp_size - nan_count - bad_count; if (cell_count > 0) { - seeps->weighted_score = seeps_score_sum/cell_count; + seeps_agg->weighted_score = seeps_score_sum/cell_count; for (int i=0; in_obs = seeps_count; - seeps->c12 = c12; - seeps->c13 = c13; - seeps->c21 = c21; - seeps->c23 = c23; - seeps->c31 = c31; - seeps->c32 = c32; + seeps_agg->n_obs = seeps_count; + seeps_agg->c12 = c12; + seeps_agg->c13 = c13; + seeps_agg->c21 = c21; + seeps_agg->c23 = c23; + seeps_agg->c31 = c31; + seeps_agg->c32 = c32; if (seeps_count > 0) { - seeps->mean_fcst = fcst_sum / seeps_count; - seeps->mean_obs = obs_sum / seeps_count; - - seeps->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs - seeps->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs - seeps->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs - seeps->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast - seeps->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast - seeps->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast - seeps->s12 = c12 * seeps->pf1 * seeps->pv2; - seeps->s13 = c13 * seeps->pf1 * seeps->pv3; - seeps->s21 = c21 * seeps->pf2 * seeps->pv1; - seeps->s23 = c23 * seeps->pf2 * seeps->pv3; - seeps->s31 = c31 * seeps->pf3 * seeps->pv1; - seeps->s32 = c32 * seeps->pf3 * seeps->pv2; - seeps->score = seeps_score_sum / seeps_count; + seeps_agg->mean_fcst = fcst_sum / seeps_count; + seeps_agg->mean_obs = obs_sum / seeps_count; + + seeps_agg->pv1 = pvf[0] + pvf[3] + pvf[6]; // sum by column for obs + seeps_agg->pv2 = pvf[1] + pvf[4] + pvf[7]; // sum by column for obs + seeps_agg->pv3 = pvf[2] + pvf[5] + pvf[8]; // sum by column for obs + seeps_agg->pf1 = pvf[0] + pvf[1] + pvf[2]; // sum by row for forecast + seeps_agg->pf2 = pvf[3] + pvf[4] + pvf[5]; // sum by row for forecast + seeps_agg->pf3 = pvf[6] + pvf[7] + pvf[8]; // sum by row for forecast + seeps_agg->s12 = c12 * seeps_agg->pf1 * seeps_agg->pv2; + seeps_agg->s13 = c13 * seeps_agg->pf1 * seeps_agg->pv3; + seeps_agg->s21 = c21 * seeps_agg->pf2 * seeps_agg->pv1; + seeps_agg->s23 = c23 * seeps_agg->pf2 * seeps_agg->pv3; + seeps_agg->s31 = c31 * seeps_agg->pf3 * seeps_agg->pv1; + seeps_agg->s32 = c32 * seeps_agg->pf3 * seeps_agg->pv2; + seeps_agg->score = seeps_score_sum / seeps_count; } mlog << Debug(6) << method_name - << "SEEPS score=" << seeps->score << " weighted_score=" << seeps->weighted_score - << " pv1=" << seeps->pv1 << " pv2=" << seeps->pv2 << " pv3=" << seeps->pv3 - << " pf1=" << seeps->pf1 << " pf2=" << seeps->pf2 << " pf3=" << seeps->pf3 << "\n"; + << "SEEPS score=" << seeps_agg->score << " weighted_score=" << seeps_agg->weighted_score + << " pv1=" << seeps_agg->pv1 << " pv2=" << seeps_agg->pv2 << " pv3=" << seeps_agg->pv3 + << " pf1=" << seeps_agg->pf1 << " pf2=" << seeps_agg->pf2 << " pf3=" << seeps_agg->pf3 << "\n"; 
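// Index layout assumed by the pv*/pf* sums above (inferred from the code,
// shown for clarity): pvf[] holds the 3x3 joint frequencies of forecast
// category (rows) by observed category (columns) in row-major order,
//
//               obs cat 1   obs cat 2   obs cat 3
//    fcst 1      pvf[0]      pvf[1]      pvf[2]
//    fcst 2      pvf[3]      pvf[4]      pvf[5]
//    fcst 3      pvf[6]      pvf[7]      pvf[8]
//
// so pv1..pv3 are the column sums (observed marginals) and pf1..pf3 are the
// row sums (forecast marginals).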
if(mlog.verbosity_level() >= detailed_debug_level) { char buffer[100]; ConcatString log_message; diff --git a/src/libcode/vx_statistics/ens_stats.cc b/src/libcode/vx_statistics/ens_stats.cc index 6483e68045..a9b475acff 100644 --- a/src/libcode/vx_statistics/ens_stats.cc +++ b/src/libcode/vx_statistics/ens_stats.cc @@ -518,7 +518,7 @@ void RPSInfo::set_prob_cat_thresh(const ThreshArray &ta) { //////////////////////////////////////////////////////////////////////// void RPSInfo::set_cdp_thresh(const ThreshArray &ta) { - fthresh = derive_cdp_thresh(ta); + fthresh = derive_ocdp_thresh(ta); } //////////////////////////////////////////////////////////////////////// @@ -546,8 +546,8 @@ void RPSInfo::set(const PairDataEnsemble &pd) { // Check RPS threshold formatting: monotonically increasing fthresh.check_bin_thresh(); - // Flag to process climo - cmn_flag = set_climo_flag(pd.o_na, pd.cmn_na); + // Flag to process observation climatology data + cmn_flag = set_climo_flag(pd.o_na, pd.ocmn_na); // Setup probability thresholds, equally spaced by ensemble size for(i=0; i<=n_prob; i++) p_thresh.add((double) i/n_prob); @@ -575,20 +575,24 @@ void RPSInfo::set(const PairDataEnsemble &pd) { // Derive climatological probabilities if(cmn_flag) climo_prob = derive_climo_prob(pd.cdf_info_ptr, - pd.cmn_na, pd.csd_na, + pd.ocmn_na, pd.ocsd_na, fthresh[i]); // Loop over the observations for(j=0; j " + // The o_na, ocmn_na, and ocsd_na have already been populated + if(o_na.n() != ocmn_na.n() || o_na.n() != ocsd_na.n()) { + mlog << Error << "\nPairBase::compute_climo_cdf() -> " << "the observation, climo mean, and climo stdev arrays " << "must all have the same length (" << o_na.n() << ").\n\n"; exit(1); } - cdf_na.extend(o_na.n()); + ocdf_na.extend(o_na.n()); for(i=0; i= n_obs) { mlog << Error << "\nPairBase::set_point_obs() -> " @@ -512,7 +545,7 @@ void PairBase::set_point_obs(int i_obs, const char *sid, elv_na.set(i_obs, elv); o_na.set(i_obs, o); o_qc_sa.set(i_obs, qc); - set_climo(i_obs, o, cmn, csd); + set_climo(i_obs, o, cpi); return; } @@ -626,7 +659,7 @@ ob_val_t PairBase::compute_percentile(string obs_key, int perc) { //////////////////////////////////////////////////////////////////////// -void PairBase::print_obs_summary(){ +void PairBase::print_obs_summary() const { if(!IsPointVx) return; @@ -637,7 +670,7 @@ void PairBase::print_obs_summary(){ // iterate over ordered list map keys in the station id map for(int i=0; i= 0 && fabs(dist) < dist_abv) { - dist_abv = fabs(dist); - i_abv = i; - } - } + init_from_scratch(); - // Check if the observation is above the forecast range - if(is_eq(dist_blw, 1.0e30) && !is_eq(dist_abv, 1.0e30)) { + assign(vx_pb); +} - // Set the index below to the index above and perform no vertical - // interpolation - i_blw = i_abv; - } - // Check if the observation is below the forecast range - else if(!is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { +//////////////////////////////////////////////////////////////////////// - // Set the index above to the index below and perform no vertical - // interpolation - i_abv = i_blw; - } - // Check if an error occurred - else if(is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { +VxPairBase & VxPairBase::operator=(const VxPairBase &vx_pb) { - mlog << Error << "\nfind_vert_lvl() -> " - << "could not find a level above and/or below the " - << "observation level of " << obs_lvl << ".\n\n"; - exit(1); - } + if(this == &vx_pb) return *this; + + assign(vx_pb); + + return *this; +} + 
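The new VxPairBase class follows the copy-control idiom used by the existing MET pair classes: the copy constructor calls init_from_scratch() before assign(), operator= guards against self-assignment and then calls assign(), and assign() begins with clear() so any previously allocated members (here the deep-copied VarInfo pointers) are released first. A compact sketch of that idiom on a hypothetical class, for illustration only (the class and member names are not part of MET):

#include <string>

// Hypothetical class illustrating the init_from_scratch()/clear()/assign() idiom.
class Widget {
  public:
   Widget()                { init_from_scratch(); }
   Widget(const Widget &w) { init_from_scratch(); assign(w); }
   ~Widget()               { clear(); }
   Widget & operator=(const Widget &w) {
      if(this == &w) return *this;   // self-assignment guard
      assign(w);
      return *this;
   }

  private:
   std::string *name;                // owned pointer, deep-copied on assign

   void init_from_scratch() { name = nullptr; clear(); }
   void clear()             { if(name) { delete name; name = nullptr; } }
   void assign(const Widget &w) {
      clear();                       // release any existing allocation first
      if(w.name) name = new std::string(*w.name);
   }
};

int main() { Widget a; Widget b(a); a = b; return 0; }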
+//////////////////////////////////////////////////////////////////////// + +void VxPairBase::init_from_scratch() { + + fcst_info = (VarInfo *) nullptr; + obs_info = (VarInfo *) nullptr; + + fclm_info = (VarInfo *) nullptr; + oclm_info = (VarInfo *) nullptr; + + clear(); return; } //////////////////////////////////////////////////////////////////////// -double compute_interp(const DataPlaneArray &dpa, - const double obs_x, const double obs_y, - const double obs_v, const double cmn, const double csd, - const InterpMthd method, const int width, - const GridTemplateFactory::GridTemplates shape, - const bool wrap_lon, - const double thresh, - const bool spfh_flag, const LevelType lvl_typ, - const double to_lvl, const int i_blw, const int i_abv, - const SingleThresh *cat_thresh) { - double v, v_blw, v_abv, t; +void VxPairBase::clear() { - // Check for no data - if(dpa.n_planes() == 0) return bad_data_double; + if(fcst_info) { delete fcst_info; fcst_info = (VarInfo *) nullptr; } + if(obs_info) { delete obs_info; obs_info = (VarInfo *) nullptr; } - v_blw = compute_horz_interp(dpa[i_blw], obs_x, obs_y, obs_v, cmn, csd, - method, width, shape, wrap_lon, - thresh, cat_thresh); + if(fclm_info) { delete fclm_info; fclm_info = (VarInfo *) nullptr; } + if(oclm_info) { delete oclm_info; oclm_info = (VarInfo *) nullptr; } - if(i_blw == i_abv) { - v = v_blw; - } - else { - v_abv = compute_horz_interp(dpa[i_abv], obs_x, obs_y, obs_v, cmn, csd, - method, width, shape, wrap_lon, - thresh, cat_thresh); + desc.clear(); - // Check for bad data prior to vertical interpolation - if(is_bad_data(v_blw) || is_bad_data(v_abv)) { - return bad_data_double; - } + interp_thresh = 0; - // If verifying specific humidity, do vertical interpolation in - // the natural log of q - if(spfh_flag) { - t = compute_vert_pinterp(log(v_blw), dpa.lower(i_blw), - log(v_abv), dpa.lower(i_abv), - to_lvl); - v = exp(t); - } - // Vertically interpolate to the observation pressure level - else if(lvl_typ == LevelType_Pres) { - v = compute_vert_pinterp(v_blw, dpa.lower(i_blw), - v_abv, dpa.lower(i_abv), - to_lvl); - } - // Vertically interpolate to the observation height - else { - v = compute_vert_zinterp(v_blw, dpa.lower(i_blw), - v_abv, dpa.lower(i_abv), - to_lvl); - } - } + fcst_dpa.clear(); + fcmn_dpa.clear(); + fcsd_dpa.clear(); + ocmn_dpa.clear(); + ocsd_dpa.clear(); - return v; -} + sid_inc_filt.clear(); + sid_exc_filt.clear(); + obs_qty_inc_filt.clear(); + obs_qty_exc_filt.clear(); + + mpr_column.clear(); + mpr_thresh.clear(); + + fcst_ut = (unixtime) 0; + beg_ut = (unixtime) 0; + end_ut = (unixtime) 0; + + msg_typ_sfc.clear(); + msg_typ_lnd.clear(); + msg_typ_wtr.clear(); + + sfc_info.clear(); + + n_msg_typ = 0; + n_mask = 0; + n_interp = 0; + n_vx = 0; + + pb_ptr.clear(); + + n_try = 0; + rej_sid = 0; + rej_var = 0; + rej_vld = 0; + rej_obs = 0; + rej_grd = 0; + rej_lvl = 0; + rej_topo = 0; + rej_qty = 0; + + rej_typ.clear(); + rej_mask.clear(); + rej_fcst.clear(); + rej_cmn.clear(); + rej_csd.clear(); + rej_mpr.clear(); + rej_dup.clear(); + return; +} //////////////////////////////////////////////////////////////////////// -void get_interp_points(const DataPlaneArray &dpa, - const double obs_x, const double obs_y, - const InterpMthd method, const int width, - const GridTemplateFactory::GridTemplates shape, - const bool wrap_lon, - const double thresh, const bool spfh_flag, - const LevelType lvl_typ, const double to_lvl, - const int i_blw, const int i_abv, - NumArray &interp_pnts) { +void VxPairBase::assign(const VxPairBase &vx_pb) 
{ - // Initialize - interp_pnts.erase(); + clear(); - // Check for no data - if(dpa.n_planes() == 0) return; + set_fcst_info(vx_pb.fcst_info); + set_obs_info(vx_pb.obs_info); - double v; - int i, n_vld; - NumArray pts_blw, pts_abv; - GridTemplateFactory gtf; - const GridTemplate* gt = gtf.buildGT(shape, width, wrap_lon); + set_fcst_climo_info(vx_pb.fclm_info); + set_obs_climo_info(vx_pb.oclm_info); - // Get interpolation points below the observation - pts_blw = interp_points(dpa[i_blw], *gt, obs_x, obs_y); + desc = vx_pb.desc; - // For multiple levels, get interpolation points above - if(i_blw != i_abv) { - pts_abv = interp_points(dpa[i_abv], *gt, obs_x, obs_y); + interp_thresh = vx_pb.interp_thresh; - if(pts_abv.n() != pts_blw.n()) { - mlog << Error << "\nget_interp_points() -> " - << "the number of interpolation points above (" - << pts_abv.n() << ") and below (" << pts_blw.n() - << ") should match!\n\n"; - exit(1); - } - } + fcst_dpa = vx_pb.fcst_dpa; + fcmn_dpa = vx_pb.fcmn_dpa; + fcsd_dpa = vx_pb.fcsd_dpa; + ocmn_dpa = vx_pb.ocmn_dpa; + ocsd_dpa = vx_pb.ocsd_dpa; - // Interpolate each point vertically - for(i=0, n_vld=0; isize()) < thresh) { - interp_pnts.erase(); - } + sfc_info = vx_pb.sfc_info; - if ( gt ) { delete gt; gt = (const GridTemplate *) nullptr; } + set_size(vx_pb.n_msg_typ, vx_pb.n_mask, vx_pb.n_interp); + + pb_ptr = vx_pb.pb_ptr; + + n_try = vx_pb.n_try; + rej_typ = vx_pb.rej_typ; + rej_mask = vx_pb.rej_mask; + rej_fcst = vx_pb.rej_fcst; + rej_cmn = vx_pb.rej_cmn; + rej_csd = vx_pb.rej_csd; + rej_mpr = vx_pb.rej_mpr; + rej_dup = vx_pb.rej_dup; + rej_typ = vx_pb.rej_typ; + rej_mask = vx_pb.rej_mask; + rej_fcst = vx_pb.rej_fcst; + rej_cmn = vx_pb.rej_cmn; + rej_csd = vx_pb.rej_csd; + rej_mpr = vx_pb.rej_mpr; + rej_dup = vx_pb.rej_dup; return; } //////////////////////////////////////////////////////////////////////// -bool set_climo_flag(const NumArray &f_na, const NumArray &c_na) { +void VxPairBase::copy_var_info(const VarInfo *info, VarInfo *©) { + VarInfoFactory f; - // The climo values must have non-zero, consistent length and - // cannot all be bad data - if(c_na.n() != f_na.n() || c_na.n() < 1 || is_bad_data(c_na.max())) { - return false; - } + // Deallocate, if necessary + if(copy) { delete copy; copy = (VarInfo *) nullptr; } - return true; + // Perform a deep copy + copy = f.new_var_info(info->file_type()); + *copy = *info; + + return; } //////////////////////////////////////////////////////////////////////// -void derive_climo_vals(const ClimoCDFInfo *cdf_info_ptr, - double m, double s, - NumArray &climo_vals) { +int VxPairBase::three_to_one(int i_msg_typ, int i_mask, int i_interp) const { - // Initialize - climo_vals.erase(); + int n = (i_interp * n_mask + i_mask)*n_msg_typ + i_msg_typ; - // Check for no work to do - if(!cdf_info_ptr) return; + if(n < 0 || n >= n_vx) { + mlog << Error << "\nVxPairBase::three_to_one() -> " + << "range check error for n (" << n << " < 0 or n >= " << n_vx + << ") for i_msg_typ (" << i_msg_typ << "), i_mask (" + << i_mask << "), i_interp (" << i_interp << "), and n_msg_typ (" + << n_msg_typ << "), n_mask (" << n_mask << "), n_interp (" + << n_interp << ")!\n\n"; + exit(1); + } - // cdf_info_ptr->cdf_ta starts with >=0.0 and ends with >=1.0. - // The number of bins is the number of thresholds minus 1. 
+ return n; +} - // Check for bad mean value - if(is_bad_data(m) || cdf_info_ptr->cdf_ta.n() < 2) { - return; - } - // Single climo bin - else if(cdf_info_ptr->cdf_ta.n() == 2) { - climo_vals.add(m); - } - // Check for bad standard deviation value - else if(is_bad_data(s)) { - return; - } - // Extract climo distribution values - else { +//////////////////////////////////////////////////////////////////////// - // Skip the first and last thresholds - for(int i=1; icdf_ta.n()-1; i++) { - climo_vals.add( - normal_cdf_inv(cdf_info_ptr->cdf_ta[i].get_value(), m, s)); - } - } +void VxPairBase::set_fcst_info(const VarInfo *info) { + + copy_var_info(info, fcst_info); return; } //////////////////////////////////////////////////////////////////////// -NumArray derive_climo_prob(const ClimoCDFInfo *cdf_info_ptr, - const NumArray &mn_na, const NumArray &sd_na, - const SingleThresh &othresh) { - int i, n_mn, n_sd; - NumArray climo_prob, climo_vals; - double prob; +void VxPairBase::set_obs_info(const VarInfo *info) { - // Number of valid climo mean and standard deviation - n_mn = mn_na.n_valid(); - n_sd = sd_na.n_valid(); + copy_var_info(info, obs_info); - // Check for constant climo probability - if(!is_bad_data(prob = othresh.get_climo_prob())) { + return; +} - mlog << Debug(4) - << "For threshold " << othresh.get_str() - << ", using a constant climatological probability value of " - << prob << ".\n"; +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_climo_info(const VarInfo *info) { + + copy_var_info(info, fclm_info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_climo_info(const VarInfo *info) { + + copy_var_info(info, oclm_info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_desc(const char *s) { + + desc = s; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_interp_thresh(double t) { + + interp_thresh = t; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_dpa(const DataPlaneArray &dpa) { + + fcst_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_climo_mn_dpa(const DataPlaneArray &dpa) { + + fcmn_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_climo_sd_dpa(const DataPlaneArray &dpa) { + + fcsd_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_climo_mn_dpa(const DataPlaneArray &dpa) { + + ocmn_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_climo_sd_dpa(const DataPlaneArray &dpa) { + + ocsd_dpa = dpa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_fcst_ut(const unixtime ut) { + + fcst_ut = ut; + + // Set for all PairBase instances, used for duplicate logic + for(auto &x : pb_ptr) x->set_fcst_ut(ut); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_beg_ut(const unixtime ut) { + + beg_ut = ut; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_end_ut(const 
unixtime ut) { + + end_ut = ut; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_sid_inc_filt(const StringArray &sa) { + + sid_inc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_sid_exc_filt(const StringArray &sa) { + + sid_exc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_qty_inc_filt(const StringArray &sa) { + + obs_qty_inc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_qty_exc_filt(const StringArray &sa) { + + obs_qty_exc_filt = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_size(int types, int masks, int interps) { + + // Store the dimensions for the PairBase array + n_msg_typ = types; + n_mask = masks; + n_interp = interps; + n_vx = types * masks * interps; + + // Resize the PairBase pointer vector + pb_ptr.resize(n_vx); + + // Initialize 3-D rejection count vectors + vector rej_counts(n_vx, 0); + rej_typ = rej_counts; + rej_mask = rej_counts; + rej_fcst = rej_counts; + rej_cmn = rej_counts; + rej_csd = rej_counts; + rej_mpr = rej_counts; + rej_dup = rej_counts; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ(int i_msg_typ, const char *name) { + + for(int i_mask=0; i_maskset_msg_typ(name); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_vals(int i_msg_typ, const StringArray &sa) { + + for(int i_mask=0; i_maskset_msg_typ_vals(sa); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mask_area(int i_mask, const char *name, + MaskPlane *mp_ptr) { + + for(int i_msg_typ=0; i_msg_typset_mask_name(name); + pb_ptr[n]->set_mask_area_ptr(mp_ptr); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mask_sid(int i_mask, const char *name, + StringArray *sid_ptr) { + + for(int i_msg_typ=0; i_msg_typset_mask_name(name); + pb_ptr[n]->set_mask_sid_ptr(sid_ptr); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mask_llpnt(int i_mask, const char *name, + MaskLatLon *llpnt_ptr) { + + for(int i_msg_typ=0; i_msg_typset_mask_name(name); + pb_ptr[n]->set_mask_llpnt_ptr(llpnt_ptr); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_interp(int i_interp, + const char *interp_mthd_str, int width, + GridTemplateFactory::GridTemplates shape) { + + for(int i_msg_typ=0; i_msg_typset_interp_mthd(interp_mthd_str); + pb_ptr[n]->set_interp_wdth(width); + pb_ptr[n]->set_interp_shape(shape); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_interp(int i_interp, + InterpMthd mthd, int width, + GridTemplateFactory::GridTemplates shape) { + + for(int i_msg_typ=0; i_msg_typset_interp_mthd(mthd); + pb_ptr[n]->set_interp_wdth(width); + pb_ptr[n]->set_interp_shape(shape); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_mpr_thresh(const StringArray &sa, const ThreshArray 
&ta) { + + // Check for constant length + if(sa.n() != ta.n()) { + mlog << Error << "\nVxPairBase::set_mpr_thresh() -> " + << "the \"" << conf_key_mpr_column << "\" (" + << write_css(sa) << ") and \"" << conf_key_mpr_thresh + << "\" (" << write_css(ta) + << ") config file entries must have the same length!\n\n"; + exit(1); + } + + mpr_column = sa; + mpr_thresh = ta; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_climo_cdf_info_ptr(const ClimoCDFInfo *info) { + + for(auto &x : pb_ptr) x->set_climo_cdf_info_ptr(info); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_sfc(const StringArray &sa) { + + msg_typ_sfc = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_lnd(const StringArray &sa) { + + msg_typ_lnd = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_msg_typ_wtr(const StringArray &sa) { + + msg_typ_wtr = sa; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_sfc_info(const SurfaceInfo &si) { + + sfc_info = si; + + return; +} + +//////////////////////////////////////////////////////////////////////// + +int VxPairBase::get_n_pair() const { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::get_n_pair() -> " + << "set_size() has not been called yet!\n\n"; + } + + int n = 0; + + for(auto &x : pb_ptr) n += x->n_obs; + + return n; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_duplicate_flag(DuplicateType duplicate_flag) { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::set_duplicate_flag() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_check_unique(duplicate_flag == DuplicateType::Unique); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_summary(ObsSummary s) { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::set_obs_summary() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_obs_summary(s); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::set_obs_perc_value(int percentile) { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::set_obs_perc_value() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->set_obs_perc_value(percentile); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::print_obs_summary() const { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::print_obs_summary() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->print_obs_summary(); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::calc_obs_summary() { + + if(n_vx == 0) { + mlog << Warning << "\nVxPairBase::calc_obs_summary() -> " + << "set_size() has not been called yet!\n\n"; + } + + for(auto &x : pb_ptr) x->calc_obs_summary(); + + return; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_sid( + const char *pnt_obs_str, const char *hdr_sid_str) { + bool keep = true; + + // Check the station ID inclusion and exclusion lists + 
if((sid_inc_filt.n() && !sid_inc_filt.has(hdr_sid_str)) || + (sid_exc_filt.n() && sid_exc_filt.has(hdr_sid_str))) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "station id:\n" + << pnt_obs_str << "\n"; + } + + rej_sid++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_var( + const char *pnt_obs_str, const char *var_name, int grib_code) { + bool keep = true; + + const auto obs_info_grib = (VarInfoGrib *) obs_info; + + // Check for matching variable name or GRIB code + if((var_name != nullptr) && (m_strlen(var_name) > 0)) { + + if(var_name != obs_info->name()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "variable name:\n" + << pnt_obs_str << "\n"; + } + + rej_var++; + keep = false; + } + } + else if(obs_info_grib && obs_info_grib->code() != nint(grib_code)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "GRIB code:\n" + << pnt_obs_str << "\n"; + } + + rej_var++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_qty( + const char *pnt_obs_str, const char *obs_qty) { + bool keep = true; + + // Check the observation quality include and exclude options + if((obs_qty_inc_filt.n() > 0 && !obs_qty_inc_filt.has(obs_qty)) || + (obs_qty_exc_filt.n() > 0 && obs_qty_exc_filt.has(obs_qty))) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "quality control string:\n" + << pnt_obs_str << "\n"; + } + + rej_qty++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_vld( + const char *pnt_obs_str, unixtime hdr_ut) { + bool keep = true; + + // Check the observation valid time + if(hdr_ut < beg_ut || hdr_ut > end_ut) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "valid time:\n" + << pnt_obs_str << "\n"; + } + + rej_vld++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_obs( + const char *pnt_obs_str, double &obs_v) { + bool keep = true; + + // Apply observation processing logic + obs_v = pb_ptr[0]->process_obs(obs_info, obs_v); + + // Check whether the observation value contains valid data + if(is_bad_data(obs_v)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "with bad data value:\n" + << pnt_obs_str << "\n"; + } + + rej_obs++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_grd( + const char 
*pnt_obs_str, const Grid &gr, + double hdr_lat, double hdr_lon, + double &obs_x, double &obs_y) { + bool keep = true; + + // Convert the lat/lon value to x/y + gr.latlon_to_xy(hdr_lat, -1.0*hdr_lon, obs_x, obs_y); + int x = nint(obs_x); + int y = nint(obs_y); + + // Check if the observation's lat/lon is on the grid + if(((x < 0 || x >= gr.nx()) && !gr.wrap_lon()) || + y < 0 || y >= gr.ny()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "off the grid where (x, y) = (" << x << ", " << y + << ") and grid (nx, ny) = (" << gr.nx() << ", " << gr.ny() << "):\n" + << pnt_obs_str << "\n"; + } + + rej_grd++; + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_topo( + const char *pnt_obs_str, const Grid &gr, + double obs_x, double obs_y, + const char *hdr_typ_str, double hdr_elv) { + bool keep = true; + + // Check for a large topography difference + if(sfc_info.topo_ptr && msg_typ_sfc.reg_exp_match(hdr_typ_str)) { + + // Interpolate model topography to observation location + double topo = compute_horz_interp( + *sfc_info.topo_ptr, obs_x, obs_y, hdr_elv, + InterpMthd::Bilin, 2, + GridTemplateFactory::GridTemplates::Square, + gr.wrap_lon(), 1.0); + + // Skip bad topography values + if(is_bad_data(hdr_elv) || is_bad_data(topo)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "due to bad topography values where observation elevation = " + << hdr_elv << " and model topography = " << topo << ":\n" + << pnt_obs_str << "\n"; + } + + rej_topo++; + keep = false; + } + + // Check the topography difference threshold + else if(!sfc_info.topo_use_obs_thresh.check(topo - hdr_elv)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "due to topography difference where observation elevation (" + << hdr_elv << ") minus model topography (" << topo << ") = " + << topo - hdr_elv << " is not " + << sfc_info.topo_use_obs_thresh.get_str() << ":\n" + << pnt_obs_str << "\n"; + } + + rej_topo++; + keep = false; + } + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_lvl( + const char *pnt_obs_str, const char *hdr_typ_str, + double obs_lvl, double obs_hgt) { + bool keep = true; + + // For pressure levels, check if the observation pressure level + // falls in the requested range. + if(obs_info->level().type() == LevelType_Pres) { + + if(obs_lvl < obs_info->level().lower() || + obs_lvl > obs_info->level().upper()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "pressure level value:\n" + << pnt_obs_str << "\n"; + } + + rej_lvl++; + keep = false; + } + } + // For accumulations, check if the observation accumulation interval + // matches the requested interval. 
+ else if(obs_info->level().type() == LevelType_Accum) { + + if(obs_lvl < obs_info->level().lower() || + obs_lvl > obs_info->level().upper()) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "accumulation interval:\n" + << pnt_obs_str << "\n"; + } + + rej_lvl++; + keep = false; + } + } + // For all other level types (VertLevel, RecNumber, NoLevel), + // check for a surface message type or if the observation height + // falls within the requested range. + else { + + if(!msg_typ_sfc.reg_exp_match(hdr_typ_str) && + (obs_hgt < obs_info->level().lower() || + obs_hgt > obs_info->level().upper())) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "level value:\n" + << pnt_obs_str << "\n"; + } + + rej_lvl++; + keep = false; + } + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_typ( + const char *pnt_obs_str, int i_msg_typ, + const char *hdr_typ_str) { + bool keep = true; + + int n = three_to_one(i_msg_typ, 0, 0); + + // Check for a matching message type + if(!pb_ptr[n]->msg_typ_vals.has(hdr_typ_str)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "message type:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_typ, i_msg_typ); + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_mask( + const char *pnt_obs_str, int i_msg_typ, int i_mask, int x, int y, + const char *hdr_sid_str, double hdr_lat, double hdr_lon) { + bool keep = true; + + int n = three_to_one(i_msg_typ, i_mask, 0); + + // Check for the obs falling within the masking region + if( pb_ptr[n]->mask_area_ptr != nullptr && + !pb_ptr[n]->mask_area_ptr->s_is_on(x, y)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on spatial masking region:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_mask, i_msg_typ, i_mask); + keep = false; + } + // Otherwise, check for the masking SID list + else if( pb_ptr[n]->mask_sid_ptr != nullptr && + !pb_ptr[n]->mask_sid_ptr->has(hdr_sid_str)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on masking station id list:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_mask, i_msg_typ, i_mask); + keep = false; + } + // Otherwise, check observation lat/lon thresholds + else if( pb_ptr[n]->mask_llpnt_ptr != nullptr && + (!pb_ptr[n]->mask_llpnt_ptr->lat_thresh.check(hdr_lat) || + !pb_ptr[n]->mask_llpnt_ptr->lon_thresh.check(hdr_lon))) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on latitude/longitude thesholds:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_mask, i_msg_typ, i_mask); + 
keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_climo( + const char *pnt_obs_str, + int i_msg_typ, int i_mask, int i_interp, + const Grid &gr, double obs_x, double obs_y, + double obs_v, double obs_lvl, double obs_hgt, + ClimoPntInfo &cpi) { + bool keep = true; + + int n = three_to_one(i_msg_typ, i_mask, i_interp); + + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); + + // Compute the interpolated forecast value using the + // observation pressure level or height + double to_lvl = (fcst_info->level().type() == LevelType_Pres ? + obs_lvl : obs_hgt); + int lvl_blw, lvl_abv; + + // Initialize + cpi.clear(); + + // Forecast climatology mean + if(keep && fcmn_dpa.n_planes() > 0) { + + find_vert_lvl(fcmn_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.fcmn = compute_interp(fcmn_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.fcmn)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad forecast climatological mean value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_cmn, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + // Observation climatology mean + if(keep && ocmn_dpa.n_planes() > 0) { + + find_vert_lvl(ocmn_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.ocmn = compute_interp(ocmn_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.ocmn)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad observation climatological mean value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_cmn, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + // Check for valid interpolation options + if((fcsd_dpa.n_planes() > 0 || + ocsd_dpa.n_planes() > 0) && + (pb_ptr[n]->interp_mthd == InterpMthd::Min || + pb_ptr[n]->interp_mthd == InterpMthd::Max || + pb_ptr[n]->interp_mthd == InterpMthd::Median || + pb_ptr[n]->interp_mthd == InterpMthd::Best)) { + mlog << Warning << "\nVxPairBase::add_point_obs() -> " + << "applying the " << interpmthd_to_string(pb_ptr[n]->interp_mthd) + << " interpolation method to climatological spread " + << "may cause unexpected results.\n\n"; + } + + // Forecast climatology spread + if(keep && fcsd_dpa.n_planes() > 0) { + + find_vert_lvl(fcsd_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.fcsd = compute_interp(fcsd_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.fcsd)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad forecast climatological 
spread value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_csd, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + // Observation climatology spread + if(keep && ocsd_dpa.n_planes() > 0) { + + find_vert_lvl(ocsd_dpa, to_lvl, lvl_blw, lvl_abv); + + cpi.ocsd = compute_interp(ocsd_dpa, obs_x, obs_y, obs_v, nullptr, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + + // Check for bad data + if(is_bad_data(cpi.ocsd)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad observation climatological spread value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_csd, i_msg_typ, i_mask, i_interp); + keep = false; + } + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +bool VxPairBase::is_keeper_fcst( + const char *pnt_obs_str, + int i_msg_typ, int i_mask, int i_interp, + const char *hdr_typ_str, const Grid &gr, + double obs_x, double obs_y, double hdr_elv, + double obs_v, double obs_lvl, double obs_hgt, + const ClimoPntInfo &cpi, double &fcst_v) { + bool keep = true; + + int n = three_to_one(i_msg_typ, i_mask, i_interp); + + // For surface verification, apply land/sea and topo masks + if((sfc_info.land_ptr || sfc_info.topo_ptr) && + (msg_typ_sfc.reg_exp_match(hdr_typ_str))) { + + bool is_land = msg_typ_lnd.has(hdr_typ_str); + + // Check for a single forecast DataPlane + if(fcst_dpa.n_planes() != 1) { + mlog << Error << "\nVxPairBase::add_point_obs() -> " + << "unexpected number of forecast levels (" + << fcst_dpa.n_planes() + << ") for surface verification! Set \"land_mask.flag\" and " + << "\"topo_mask.flag\" to false to disable this check.\n\n"; + exit(1); + } + + fcst_v = compute_sfc_interp(fcst_dpa[0], obs_x, obs_y, hdr_elv, obs_v, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, sfc_info, is_land); + } + // Otherwise, compute interpolated value + else { + + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); + + // Compute the interpolated forecast value using the + // observation pressure level or height + double to_lvl = (fcst_info->level().type() == LevelType_Pres ? 
+ obs_lvl : obs_hgt); + int lvl_blw, lvl_abv; + + find_vert_lvl(fcst_dpa, to_lvl, lvl_blw, lvl_abv); + + fcst_v = compute_interp(fcst_dpa, obs_x, obs_y, obs_v, &cpi, + pb_ptr[n]->interp_mthd, pb_ptr[n]->interp_wdth, + pb_ptr[n]->interp_shape, gr.wrap_lon(), + interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv); + } + + // Check for bad data + if(is_bad_data(fcst_v)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() << " versus " + << obs_info->magic_str() << ", skipping observation " + << "based on bad data in the " + << interpmthd_to_string(pb_ptr[n]->interp_mthd) << "(" + << pb_ptr[n]->interp_wdth * pb_ptr[n]->interp_wdth + << ") interpolated forecast value:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_fcst, i_msg_typ, i_mask, i_interp); + keep = false; + } + + return keep; +} + +//////////////////////////////////////////////////////////////////////// + +void VxPairBase::inc_count(vector &rej, int i_msg_typ) { + + for(int i_mask=0; i_mask &rej, int i_msg_typ, int i_mask) { + + for(int i_interp=0; i_interp &rej, int i_msg_typ, int i_mask, int i_interp) { + + rej[three_to_one(i_msg_typ, i_mask, i_interp)]++; + + return; +} + +//////////////////////////////////////////////////////////////////////// +// +// Begin miscellaneous utility functions +// +//////////////////////////////////////////////////////////////////////// + +void find_vert_lvl(const DataPlaneArray &dpa, const double obs_lvl, + int &i_blw, int &i_abv) { + int i; + double dist, dist_blw, dist_abv; + + // Initialize + i_blw = i_abv = bad_data_int; + + // Check for no data + if(dpa.n_planes() == 0) return; + + // Find the closest levels above and below the observation + dist_blw = dist_abv = 1.0e30; + for(i=0; i= 0 && fabs(dist) < dist_abv) { + dist_abv = fabs(dist); + i_abv = i; + } + } + + // Check if the observation is above the forecast range + if(is_eq(dist_blw, 1.0e30) && !is_eq(dist_abv, 1.0e30)) { + + // Set the index below to the index above and perform no vertical + // interpolation + i_blw = i_abv; + } + // Check if the observation is below the forecast range + else if(!is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { + + // Set the index above to the index below and perform no vertical + // interpolation + i_abv = i_blw; + } + // Check if an error occurred + else if(is_eq(dist_blw, 1.0e30) && is_eq(dist_abv, 1.0e30)) { + + mlog << Error << "\nfind_vert_lvl() -> " + << "could not find a level above and/or below the " + << "observation level of " << obs_lvl << ".\n\n"; + exit(1); + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +double compute_interp(const DataPlaneArray &dpa, + const double obs_x, const double obs_y, + const double obs_v, const ClimoPntInfo *cpi, + const InterpMthd method, const int width, + const GridTemplateFactory::GridTemplates shape, + const bool wrap_lon, + const double thresh, + const bool spfh_flag, const LevelType lvl_typ, + const double to_lvl, const int i_blw, const int i_abv, + const SingleThresh *cat_thresh) { + double v, v_blw, v_abv, t; + + // Check for no data + if(dpa.n_planes() == 0) return bad_data_double; + + v_blw = compute_horz_interp(dpa[i_blw], obs_x, obs_y, obs_v, cpi, + method, width, shape, wrap_lon, + thresh, cat_thresh); + + if(i_blw == i_abv) { + v = v_blw; + } + else { + v_abv = compute_horz_interp(dpa[i_abv], obs_x, obs_y, obs_v, cpi, + method, width, shape, wrap_lon, + thresh, cat_thresh); + + // Check 
for bad data prior to vertical interpolation + if(is_bad_data(v_blw) || is_bad_data(v_abv)) { + return bad_data_double; + } + + // If verifying specific humidity, do vertical interpolation in + // the natural log of q + if(spfh_flag) { + t = compute_vert_pinterp(log(v_blw), dpa.lower(i_blw), + log(v_abv), dpa.lower(i_abv), + to_lvl); + v = exp(t); + } + // Vertically interpolate to the observation pressure level + else if(lvl_typ == LevelType_Pres) { + v = compute_vert_pinterp(v_blw, dpa.lower(i_blw), + v_abv, dpa.lower(i_abv), + to_lvl); + } + // Vertically interpolate to the observation height + else { + v = compute_vert_zinterp(v_blw, dpa.lower(i_blw), + v_abv, dpa.lower(i_abv), + to_lvl); + } + } + + return v; +} + + +//////////////////////////////////////////////////////////////////////// + +void get_interp_points(const DataPlaneArray &dpa, + const double obs_x, const double obs_y, + const InterpMthd method, const int width, + const GridTemplateFactory::GridTemplates shape, + const bool wrap_lon, + const double thresh, const bool spfh_flag, + const LevelType lvl_typ, const double to_lvl, + const int i_blw, const int i_abv, + NumArray &interp_pnts) { + + // Initialize + interp_pnts.erase(); + + // Check for no data + if(dpa.n_planes() == 0) return; + + double v; + int i, n_vld; + NumArray pts_blw, pts_abv; + GridTemplateFactory gtf; + const GridTemplate* gt = gtf.buildGT(shape, width, wrap_lon); + + // Get interpolation points below the observation + pts_blw = interp_points(dpa[i_blw], *gt, obs_x, obs_y); + + // For multiple levels, get interpolation points above + if(i_blw != i_abv) { + pts_abv = interp_points(dpa[i_abv], *gt, obs_x, obs_y); + + if(pts_abv.n() != pts_blw.n()) { + mlog << Error << "\nget_interp_points() -> " + << "the number of interpolation points above (" + << pts_abv.n() << ") and below (" << pts_blw.n() + << ") should match!\n\n"; + exit(1); + } + } + + // Interpolate each point vertically + for(i=0, n_vld=0; isize()) < thresh) { + interp_pnts.erase(); + } + + if ( gt ) { delete gt; gt = (const GridTemplate *) nullptr; } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +bool set_climo_flag(const NumArray &f_na, const NumArray &c_na) { + + // The climo values must have non-zero, consistent length and + // cannot all be bad data + if(c_na.n() != f_na.n() || c_na.n() < 1 || is_bad_data(c_na.max())) { + return false; + } + + return true; +} + +//////////////////////////////////////////////////////////////////////// + +void derive_climo_vals(const ClimoCDFInfo *cdf_info_ptr, + double m, double s, + NumArray &climo_vals) { + + // Initialize + climo_vals.erase(); + + // Check for no work to do + if(!cdf_info_ptr) return; + + // cdf_info_ptr->cdf_ta starts with >=0.0 and ends with >=1.0. + // The number of bins is the number of thresholds minus 1. 
+ + // Check for bad mean value + if(is_bad_data(m) || cdf_info_ptr->cdf_ta.n() < 2) { + return; + } + // Single climo bin + else if(cdf_info_ptr->cdf_ta.n() == 2) { + climo_vals.add(m); + } + // Check for bad standard deviation value + else if(is_bad_data(s)) { + return; + } + // Extract climo distribution values + else { + + // Skip the first and last thresholds + for(int i=1; icdf_ta.n()-1; i++) { + climo_vals.add( + normal_cdf_inv(cdf_info_ptr->cdf_ta[i].get_value(), m, s)); + } + } + + return; +} + +//////////////////////////////////////////////////////////////////////// + +NumArray derive_climo_prob(const ClimoCDFInfo *cdf_info_ptr, + const NumArray &mn_na, const NumArray &sd_na, + const SingleThresh &othresh) { + int i, n_mn, n_sd; + NumArray climo_prob, climo_vals; + double prob; + + // Number of valid climo mean and standard deviation + n_mn = mn_na.n_valid(); + n_sd = sd_na.n_valid(); + + // Check for constant climo probability + prob = othresh.get_obs_climo_prob(); + if(!is_bad_data(prob)) { + + mlog << Debug(4) + << "For threshold " << othresh.get_str() + << ", using a constant climatological probability value of " + << prob << ".\n"; climo_prob.add_const(prob, n_mn); } @@ -1206,3 +2558,33 @@ double derive_prob(const NumArray &na, const SingleThresh &st) { } //////////////////////////////////////////////////////////////////////// + +// Write the point observation in the MET point format for logging +ConcatString point_obs_to_string(const float *hdr_arr, const char *hdr_typ_str, + const char *hdr_sid_str, unixtime hdr_ut, + const char *obs_qty, const float *obs_arr, + const char *var_name) { + ConcatString obs_cs, name; + + if((var_name != nullptr) && (0 < m_strlen(var_name))) name << var_name; + else name << nint(obs_arr[1]); + + // + // Write the 11-column MET point format: + // Message_Type Station_ID Valid_Time(YYYYMMDD_HHMMSS) + // Lat(Deg North) Lon(Deg East) Elevation(msl) + // Var_Name(or GRIB_Code) Level Height(msl or agl) + // QC_String Observation_Value + // + obs_cs << " " + << hdr_typ_str << " " << hdr_sid_str << " " + << unix_to_yyyymmdd_hhmmss(hdr_ut) << " " + << hdr_arr[0] << " " << -1.0*hdr_arr[1] << " " + << hdr_arr[2] << " " << name << " " + << obs_arr[2] << " " << obs_arr[3] << " " + << obs_qty << " " << obs_arr[4]; + + return obs_cs; +} + +//////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_base.h b/src/libcode/vx_statistics/pair_base.h index b0881ae34e..af92385f5c 100644 --- a/src/libcode/vx_statistics/pair_base.h +++ b/src/libcode/vx_statistics/pair_base.h @@ -18,27 +18,39 @@ #include "vx_util.h" #include "vx_data2d.h" +#include "vx_data2d_grib.h" + +//////////////////////////////////////////////////////////////////////// + +static const int REJECT_DEBUG_LEVEL = 9; + +//////////////////////////////////////////////////////////////////////// struct ob_val_t { - unixtime ut; - double val; - std::string qc; + unixtime ut; + double val; + std::string qc; }; struct station_values_t { - std::string sid; - double lat; - double lon; - double x; - double y; - double wgt; - unixtime ut; - double lvl; - double elv; - double cmn; - double csd; - double summary_val; - std::vector obs; + + void clear(); + + std::string sid; + double lat; + double lon; + double x; + double y; + double wgt; + unixtime ut; + double lvl; + double elv; + double fcmn; + double fcsd; + double ocmn; + double ocsd; + double summary_val; + std::vector obs; }; //////////////////////////////////////////////////////////////////////// @@ 
-91,9 +103,11 @@ class PairBase { NumArray wgt_na; // Weight [n_obs] // Point and Grid Climatology Information - NumArray cmn_na; // Climatology mean [n_obs] - NumArray csd_na; // Climatology standard deviation [n_obs] - NumArray cdf_na; // Climatology cumulative distribution function [n_obs] + NumArray fcmn_na; // Forecast climatology mean [n_obs] + NumArray fcsd_na; // Forecast climatology standard deviation [n_obs] + NumArray ocmn_na; // Observation climatology mean [n_obs] + NumArray ocsd_na; // Observation climatology standard deviation [n_obs] + NumArray ocdf_na; // Observation climatology cumulative distribution function [n_obs] // Point Observation Information StringArray sid_sa; // Station ID [n_obs] @@ -156,24 +170,25 @@ class PairBase { bool add_point_obs(const char *, double, double, double, double, unixtime, double, double, double, const char *, - double, double, double); + const ClimoPntInfo &, double); void set_point_obs(int, const char *, double, double, double, double, unixtime, double, double, double, - const char *, double, double, double); + const char *, const ClimoPntInfo &, double); - void add_grid_obs(double, double, double, double); + void add_grid_obs(double, const ClimoPntInfo &, double); - void add_grid_obs(double, double, double, - double, double, double); - - void add_climo(double, double, double); - void set_climo(int, double, double, double); - void add_climo_cdf(); + void add_grid_obs(double, double, double, const ClimoPntInfo &, double); - double process_obs(VarInfo *, double); + void add_climo(double, const ClimoPntInfo &); - void print_obs_summary(); + void set_climo(int, double, const ClimoPntInfo &); + + void compute_climo_cdf(); + + double process_obs(const VarInfo *, double) const; + + void print_obs_summary() const; void calc_obs_summary(); @@ -183,6 +198,215 @@ class PairBase { inline bool PairBase::is_point_vx() const { return IsPointVx; } +//////////////////////////////////////////////////////////////////////// +// +// Base class for verification tasks +// +//////////////////////////////////////////////////////////////////////// + +class VxPairBase { + + protected: + + void init_from_scratch(); + void assign(const VxPairBase &); + void copy_var_info(const VarInfo *info, VarInfo *©); + + public: + + VxPairBase(); + ~VxPairBase(); + VxPairBase(const VxPairBase &); + VxPairBase & operator=(const VxPairBase &); + + ////////////////////////////////////////////////////////////////// + // + // Information about the fields to be compared + // + ////////////////////////////////////////////////////////////////// + + VarInfo *fcst_info; // Forecast field, allocated by VarInfoFactory + VarInfo *obs_info; // Observation field, allocated by VarInfoFactory + + VarInfo *fclm_info; // Forecast climatology field, allocated by VarInfoFactory + VarInfo *oclm_info; // Observation climatology field, allocated by VarInfoFactory + + ConcatString desc; // User description from config file + + double interp_thresh; // Threshold between 0 and 1 used when + // interpolating the forecasts to the + // observation location. + + ////////////////////////////////////////////////////////////////// + // + // Forecast and climatology fields falling between the requested + // levels. Store the fields in a data plane array. 
+ // + ////////////////////////////////////////////////////////////////// + + DataPlaneArray fcst_dpa; // Forecast data plane array + DataPlaneArray fcmn_dpa; // Forecast climatology mean data plane array + DataPlaneArray fcsd_dpa; // Forecast climatology standard deviation data plane array + DataPlaneArray ocmn_dpa; // Observation climatology mean data plane array + DataPlaneArray ocsd_dpa; // Observation climatology standard deviation data plane array + + ////////////////////////////////////////////////////////////////// + + unixtime fcst_ut; // Forecast valid time + unixtime beg_ut; // Beginning of valid time window + unixtime end_ut; // End of valid time window + + ////////////////////////////////////////////////////////////////// + + StringArray sid_inc_filt; // Station ID inclusion list + StringArray sid_exc_filt; // Station ID exclusion list + StringArray obs_qty_inc_filt; // Observation quality include markers + StringArray obs_qty_exc_filt; // Observation quality exclude markers + + ////////////////////////////////////////////////////////////////// + + StringArray mpr_column; // Names of MPR columns or diffs of columns + ThreshArray mpr_thresh; // Filtering thresholds for the MPR columns + + ////////////////////////////////////////////////////////////////// + + StringArray msg_typ_sfc; // List of surface message types + StringArray msg_typ_lnd; // List of surface land message types + StringArray msg_typ_wtr; // List of surface water message types + + SurfaceInfo sfc_info; // Land/sea mask and topography info + + ////////////////////////////////////////////////////////////////// + + int n_msg_typ; // Number of verifying message types + + int n_mask; // Total number of masking regions + // of masking DataPlane fields or SIDs + + int n_interp; // Number of interpolation techniques + + int n_vx; // n_msg_typ * n_mask * n_interp + + ////////////////////////////////////////////////////////////////// + + // 3-Dim vector of PairBase pointers [n_msg_typ][n_mask][n_interp] + std::vector pb_ptr; + + // Counts for observation rejection reason codes + int n_try; // Number of observations processed + int rej_sid; // Reject based on SID inclusion and exclusion lists + int rej_var; // Reject based on observation variable name + int rej_vld; // Reject based on valid time + int rej_obs; // Reject observation bad data + int rej_grd; // Reject based on location + int rej_topo; // Reject based on topography + int rej_lvl; // Reject based on vertical level + int rej_qty; // Reject based on obs quality + + // 3-Dim vectors for observation rejection reason codes [n_msg_typ][n_mask][n_interp] + std::vector rej_typ; // Reject based on message type + std::vector rej_mask; // Reject based on masking region + std::vector rej_fcst; // Reject forecast bad data + std::vector rej_cmn; // Reject fcst or obs climo mean bad data + std::vector rej_csd; // Reject fcst or obs climo stdev bad data + std::vector rej_mpr; // Reject based on MPR filtering logic + std::vector rej_dup; // Reject based on duplicates logic + + ////////////////////////////////////////////////////////////////// + + void clear(); + + int three_to_one(int, int, int) const; + + void set_fcst_info(const VarInfo *); + void set_obs_info(const VarInfo *); + + void set_fcst_climo_info(const VarInfo *); + void set_obs_climo_info(const VarInfo *); + + void set_desc(const char *); + + void set_interp_thresh(double); + + void set_fcst_dpa(const DataPlaneArray &); + void set_fcst_climo_mn_dpa(const DataPlaneArray &); + void set_fcst_climo_sd_dpa(const 
DataPlaneArray &); + void set_obs_climo_mn_dpa(const DataPlaneArray &); + void set_obs_climo_sd_dpa(const DataPlaneArray &); + + void set_fcst_ut(const unixtime); + void set_beg_ut(const unixtime); + void set_end_ut(const unixtime); + + void set_sid_inc_filt(const StringArray &); + void set_sid_exc_filt(const StringArray &); + void set_obs_qty_inc_filt(const StringArray &); + void set_obs_qty_exc_filt(const StringArray &); + + // Call set_size before set_msg_typ, set_mask_area, and set_interp + void set_size(int, int, int); + + void set_msg_typ(int, const char *); + void set_msg_typ_vals(int, const StringArray &); + void set_mask_area(int, const char *, MaskPlane *); + void set_mask_sid(int, const char *, StringArray *); + void set_mask_llpnt(int, const char *, MaskLatLon *); + + void set_interp(int i_interp, const char *interp_mthd_str, int width, + GridTemplateFactory::GridTemplates shape); + void set_interp(int i_interp, InterpMthd mthd, + int width, GridTemplateFactory::GridTemplates shape); + + void set_mpr_thresh(const StringArray &, const ThreshArray &); + + void set_climo_cdf_info_ptr(const ClimoCDFInfo *); + + void set_msg_typ_sfc(const StringArray &); + void set_msg_typ_lnd(const StringArray &); + void set_msg_typ_wtr(const StringArray &); + + void set_sfc_info(const SurfaceInfo &); + + int get_n_pair() const; + + void set_duplicate_flag(DuplicateType duplicate_flag); + void set_obs_summary(ObsSummary obs_summary); + void set_obs_perc_value(int percentile); + + void print_obs_summary() const; + void calc_obs_summary(); + + bool is_keeper_sid(const char *, const char *); + bool is_keeper_var(const char *, const char *, int); + bool is_keeper_qty(const char *, const char *); + bool is_keeper_vld(const char *, unixtime); + bool is_keeper_obs(const char *, double &); + bool is_keeper_grd(const char *, const Grid &, + double, double, + double &, double &); + bool is_keeper_topo(const char *, const Grid &, + double, double, + const char *, double); + bool is_keeper_lvl(const char *, const char *, double, double); + bool is_keeper_typ(const char *, int, const char *); + bool is_keeper_mask(const char *, int, int, int, int, + const char *, double, double); + bool is_keeper_climo(const char *, int, int, int, + const Grid &gr, double, double, + double, double, double, + ClimoPntInfo &); + bool is_keeper_fcst(const char *, int, int, int, + const char *, const Grid &gr, + double, double, double, + double, double, double, + const ClimoPntInfo &, double &); + + // Member functions for incrementing the counts + void inc_count(std::vector &, int); + void inc_count(std::vector &, int, int); + void inc_count(std::vector &, int, int, int); +}; + //////////////////////////////////////////////////////////////////////// // // Miscellanous utility functions @@ -194,7 +418,7 @@ extern void find_vert_lvl(const DataPlaneArray &, const double, extern double compute_interp(const DataPlaneArray &dpa, const double obs_x, const double obs_y, - const double obs_v, const double cmn, const double csd, + const double obs_v, const ClimoPntInfo *cpi, const InterpMthd method, const int width, const GridTemplateFactory::GridTemplates shape, const bool wrap_lon, @@ -224,6 +448,13 @@ extern NumArray derive_climo_prob(const ClimoCDFInfo *, extern double derive_prob(const NumArray &, const SingleThresh &); +// Write the point observation in the MET point format for logging +extern ConcatString point_obs_to_string( + const float *hdr_arr, const char *hdr_typ_str, + const char *hdr_sid_str, unixtime hdr_ut, + const char 
*obs_qty, const float *obs_arr, + const char *var_name); + //////////////////////////////////////////////////////////////////////// #endif // __PAIR_BASE_H__ diff --git a/src/libcode/vx_statistics/pair_data_ensemble.cc b/src/libcode/vx_statistics/pair_data_ensemble.cc index 2cf41de138..af9f80de15 100644 --- a/src/libcode/vx_statistics/pair_data_ensemble.cc +++ b/src/libcode/vx_statistics/pair_data_ensemble.cc @@ -8,7 +8,6 @@ //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -32,7 +31,6 @@ using namespace std; - //////////////////////////////////////////////////////////////////////// // // Code for class PairDataEnsemble @@ -233,9 +231,11 @@ void PairDataEnsemble::assign(const PairDataEnsemble &pd) { cdf_info_ptr = pd.cdf_info_ptr; - cmn_na = pd.cmn_na; - csd_na = pd.csd_na; - cdf_na = pd.cdf_na; + fcmn_na = pd.fcmn_na; + fcsd_na = pd.fcsd_na; + ocmn_na = pd.ocmn_na; + ocsd_na = pd.ocsd_na; + ocdf_na = pd.ocdf_na; // PairDataEnsemble v_na = pd.v_na; @@ -380,24 +380,27 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { // Check if the ranks have already been computed if(r_na.n() == o_na.n()) return; - // Print the climo data being used - bool cmn_flag = set_climo_flag(o_na, cmn_na); - bool csd_flag = set_climo_flag(o_na, csd_na); + // Print the observation climo data being used + bool ocmn_flag = set_climo_flag(o_na, ocmn_na); + bool ocsd_flag = set_climo_flag(o_na, ocsd_na); - if(cmn_flag && cdf_info_ptr && cdf_info_ptr->cdf_ta.n() == 2) { + if(ocmn_flag && cdf_info_ptr && cdf_info_ptr->cdf_ta.n() == 2) { mlog << Debug(3) << "Computing ensemble statistics relative to the " - << "climatological mean.\n"; + << "observation climatological mean.\n"; } - else if(cmn_flag && csd_flag && cdf_info_ptr && cdf_info_ptr->cdf_ta.n() > 2) { + else if(ocmn_flag && + ocsd_flag && + cdf_info_ptr && + cdf_info_ptr->cdf_ta.n() > 2) { mlog << Debug(3) << "Computing ensemble statistics relative to a " << cdf_info_ptr->cdf_ta.n() - 2 - << "-member climatological ensemble.\n"; + << "-member observation climatological ensemble.\n"; } else { mlog << Debug(3) - << "No reference climatology data provided.\n"; + << "No reference observation climatology data provided.\n"; } // Compute the rank for each observation @@ -533,8 +536,8 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { r_na.add(nint(dest_na[0])); } - // Derive ensemble from climo mean and standard deviation - derive_climo_vals(cdf_info_ptr, cmn_na[i], csd_na[i], cur_clm); + // Derive ensemble from observation climo mean and standard deviation + derive_climo_vals(cdf_info_ptr, ocmn_na[i], ocsd_na[i], cur_clm); // Store empirical CRPS stats // For crps_emp use temporary, local variable so we can use it @@ -552,7 +555,7 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { // Store Gaussian CRPS stats crps_gaus_na.add(compute_crps_gaus(o_na[i], mean, stdev)); - crpscl_gaus_na.add(compute_crps_gaus(o_na[i], cmn_na[i], csd_na[i])); + crpscl_gaus_na.add(compute_crps_gaus(o_na[i], ocmn_na[i], ocsd_na[i])); ign_na.add(compute_ens_ign(o_na[i], mean, stdev)); pit_na.add(compute_ens_pit(o_na[i], mean, stdev)); @@ -783,10 +786,7 @@ void PairDataEnsemble::compute_ssvar() { // Sort the bins set sorted_bins; - for( ssvar_bin_map::iterator map_it = bins.begin(); - map_it != bins.end(); map_it++ ){ - sorted_bins.insert( (*map_it).first ); - } + for(auto &x : bins) sorted_bins.insert(x.first); // Report the number of bins built int n_bin = sorted_bins.size(); @@ 
-876,26 +876,33 @@ PairDataEnsemble PairDataEnsemble::subset_pairs_obs_thresh(const SingleThresh &o pd.obs_error_flag = obs_error_flag; pd.cdf_info_ptr = cdf_info_ptr; - bool cmn_flag = set_climo_flag(o_na, cmn_na); - bool csd_flag = set_climo_flag(o_na, csd_na); - bool wgt_flag = set_climo_flag(o_na, wgt_na); + bool fcmn_flag = set_climo_flag(o_na, fcmn_na); + bool fcsd_flag = set_climo_flag(o_na, fcsd_na); + bool ocmn_flag = set_climo_flag(o_na, ocmn_na); + bool ocsd_flag = set_climo_flag(o_na, ocsd_na); + bool wgt_flag = set_climo_flag(o_na, wgt_na); // Loop over the pairs for(i=0; ifile_type()); - *climo_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_info(VarInfo *info) { - VarInfoFactory f; - - // Deallocate, if necessary - if(obs_info) { delete obs_info; obs_info = (VarInfo *) nullptr; } - - // Perform a deep copy - obs_info = f.new_var_info(info->file_type()); - *obs_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_desc(const char *s) { - - desc = s; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_interp_thresh(double t) { - - interp_thresh = t; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_msg_typ_sfc(const StringArray &sa) { - - msg_typ_sfc = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_fcst_dpa(const DataPlaneArray &dpa) { - - fcst_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_climo_mn_dpa(const DataPlaneArray &dpa) { - - climo_mn_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_climo_sd_dpa(const DataPlaneArray &dpa) { - - climo_sd_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_fcst_ut(const unixtime ut) { - - fcst_ut = ut; - - // set the fcst_ut for all PairBase instances, used for duplicate logic - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_fcst_ut(ut); - } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_beg_ut(const unixtime ut) { - - beg_ut = ut; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_end_ut(const unixtime ut) { + ens_info = new EnsVarInfo(*info); - end_ut = ut; + // Set the base pointer + if(!fcst_info) set_fcst_info(ens_info->get_var_info()); return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataEnsemble::set_sid_inc_filt(const StringArray sa) { - - sid_inc_filt = sa; - - return; -} +void VxPairDataEnsemble::set_size(int types, int masks, int interps) { -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_sid_exc_filt(const StringArray sa) { - - sid_exc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_qty_inc_filt(const StringArray q) { - - obs_qty_inc_filt = q; - - return; -} - 
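The set_size() logic in this hunk replaces the many per-[n_msg_typ][n_mask][n_interp] setters above with a single flat std::vector plus non-owning base-class pointers, so shared settings need only one loop. A minimal standalone sketch of that pattern, using hypothetical type names rather than the MET classes:

#include <vector>

// Hypothetical stand-ins for PairBase / PairDataEnsemble, for illustration only
struct PairBaseSketch {
   long long fcst_ut = 0;
   void set_fcst_ut(long long ut) { fcst_ut = ut; }
};

struct PairDataSketch : public PairBaseSketch {
   // derived, ensemble-specific members would live here
};

struct VxPairSketch {
   std::vector<PairDataSketch>   pd;     // owned objects, one per verification task
   std::vector<PairBaseSketch *> pb_ptr; // non-owning views into pd

   void set_size(int n_vx) {
      pd.resize(n_vx);
      pb_ptr.resize(n_vx);
      // Take the pointers only after the final resize so they remain valid
      for(int i = 0; i < n_vx; i++) pb_ptr[i] = &pd[i];
   }

   void set_fcst_ut(long long ut) {
      // One loop replaces the old triple [n_msg_typ][n_mask][n_interp] nesting
      for(auto *p : pb_ptr) p->set_fcst_ut(ut);
   }
};

The usual caveat applies: the stored pointers are only valid while pd is not resized again, which is consistent with the header comment that set_size() must be called before the per-index setters.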
-//////////////////////////////////////////////////////////////////////// + VxPairBase::set_size(types, masks, interps); -void VxPairDataEnsemble::set_obs_qty_exc_filt(const StringArray q) { + // Resize the PairDataPoint vector + pd.resize(n_vx); - obs_qty_exc_filt = q; + // Set PairBase pointers to the PairDataEnsemble objects + for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// + for(auto it = pd.begin(); it != pd.end(); it++) { -void VxPairDataEnsemble::set_msg_typ(int i_msg_typ, const char *name) { - - for(int i=0; iinterp_mthd == InterpMthd::HiRA) { + GridTemplateFactory gtf; + GridTemplate* gt = gtf.buildGT(it->interp_shape, + it->interp_wdth, + false); + it->set_ens_size(n*gt->size()); } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_msg_typ_vals(int i_msg_typ, const StringArray &sa) { - - for(int i=0; iset_ens_size(n); } } @@ -1313,150 +1108,57 @@ void VxPairDataEnsemble::set_msg_typ_vals(int i_msg_typ, const StringArray &sa) //////////////////////////////////////////////////////////////////////// -void VxPairDataEnsemble::set_mask_area(int i_mask, const char *name, - MaskPlane *mp_ptr) { +void VxPairDataEnsemble::set_ssvar_bin_size(double ssvar_bin_size) { - for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_mask_sid(int i_mask, const char *name, - StringArray *sid_ptr) { - - for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_interp(int i_interp, - const char *interp_mthd_str, - int width, GridTemplateFactory::GridTemplates shape) { - - for(int i=0; isize()); - } - else { - pd[i][j][k].set_ens_size(n); - } - } - } + if(n_vx == 0) { + mlog << Warning << "\nVxPairDataEnsemble::set_ctrl_index() -> " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_climo_cdf_info_ptr(const ClimoCDFInfo *info) { - - for(int i=0; i " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_phist_bin_size(double phist_bin_size) { - - for(int i=0; ifile_type() != FileType_Gb1) { @@ -1489,104 +1184,49 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, // Create VarInfoGrib pointer VarInfoGrib *obs_info_grib = (VarInfoGrib *) obs_info; - // Check the station ID inclusion and exclusion lists - if((sid_inc_filt.n() && !sid_inc_filt.has(hdr_sid_str)) || - (sid_exc_filt.n() && sid_exc_filt.has(hdr_sid_str))) return; + // Increment the number of tries count + n_try++; - // Check whether the observation variable name matches (rej_var) - if ((var_name != 0) && (0 < m_strlen(var_name))) { - if ( var_name != obs_info->name() ) { - return; - } - } - else if(obs_info_grib->code() != nint(obs_arr[1])) { - return; + // Point observation summary string for rejection log messages + ConcatString pnt_obs_str; + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + pnt_obs_str = point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, + hdr_ut, obs_qty, obs_arr, var_name); } - - // Check the observation quality include 
and exclude options - if((obs_qty_inc_filt.n() > 0 && !obs_qty_inc_filt.has(obs_qty)) || - (obs_qty_exc_filt.n() > 0 && obs_qty_exc_filt.has(obs_qty))) { - return; - } - - // Check whether the observation time falls within the valid time - // window - if(hdr_ut < beg_ut || hdr_ut > end_ut) return; - hdr_lat = hdr_arr[0]; - hdr_lon = hdr_arr[1]; + // Check the station ID + if(!is_keeper_sid(pnt_obs_str.c_str(), hdr_sid_str)) return; - obs_lvl = obs_arr[2]; - obs_hgt = obs_arr[3]; + // Check observation variable + if(!is_keeper_var(pnt_obs_str.c_str(), var_name, nint(obs_arr[1]))) return; - // Apply observation processing logic - obs_v = pd[0][0][0].process_obs(obs_info, obs_arr[4]); + // Check observation quality + if(!is_keeper_qty(pnt_obs_str.c_str(), obs_qty)) return; - // Check whether the observation value contains valid data - if(is_bad_data(obs_v)) return; + // Check valid time + if(!is_keeper_vld(pnt_obs_str.c_str(), hdr_ut)) return; - // Convert the lat/lon value to x/y - gr.latlon_to_xy(hdr_lat, -1.0*hdr_lon, obs_x, obs_y); - x = nint(obs_x); - y = nint(obs_y); + // Check observation value + double obs_v = obs_arr[4]; + if(!is_keeper_obs(pnt_obs_str.c_str(), obs_v)) return; - // Check if the observation's lat/lon is on the grid - if(((x < 0 || x >= gr.nx()) && !gr.wrap_lon()) || - y < 0 || y >= gr.ny()) return; + // Check location + double hdr_lat = hdr_arr[0]; + double hdr_lon = hdr_arr[1]; + double obs_x, obs_y; + if(!is_keeper_grd(pnt_obs_str.c_str(), gr, hdr_lat, hdr_lon, obs_x, obs_y)) return; - // For pressure levels, check if the observation pressure level - // falls in the requested range. - if(obs_info_grib->level().type() == LevelType_Pres) { + // TODO: Add topography filtering to Ensemble-Stat - if(obs_lvl < obs_info_grib->level().lower() || - obs_lvl > obs_info_grib->level().upper()) return; - } - // For accumulations, check if the observation accumulation interval - // matches the requested interval. - else if(obs_info_grib->level().type() == LevelType_Accum) { - - if(obs_lvl < obs_info_grib->level().lower() || - obs_lvl > obs_info_grib->level().upper()) return; - } - // For all other level types (VertLevel, RecNumber, NoLevel), - // check for a surface message type or if the observation height - // falls within the requested range. - else { + // Check topo + double hdr_elv = hdr_arr[2]; + if(!is_keeper_topo(pnt_obs_str.c_str(), gr, obs_x, obs_y, + hdr_typ_str, hdr_elv)) return; - if(!msg_typ_sfc.reg_exp_match(hdr_typ_str) && - (obs_hgt < obs_info_grib->level().lower() || - obs_hgt > obs_info_grib->level().upper())) { - return; - } - } - - // For a single climatology mean field - if(climo_mn_dpa.n_planes() == 1) { - cmn_lvl_blw = 0; - cmn_lvl_abv = 0; - } - // For multiple climatology mean fields, find the levels above and - // below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->get_var_info()->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(climo_mn_dpa, to_lvl, cmn_lvl_blw, cmn_lvl_abv); - } - - // For a single climatology standard deviation field - if(climo_sd_dpa.n_planes() == 1) { - csd_lvl_blw = 0; - csd_lvl_abv = 0; - } - // For multiple climatology standard deviation fields, find the - // levels above and below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->get_var_info()->level().type() == LevelType_Pres ? 
- obs_lvl : obs_hgt); - find_vert_lvl(climo_sd_dpa, to_lvl, csd_lvl_blw, csd_lvl_abv); - } + // Check level + double obs_lvl = obs_arr[2]; + double obs_hgt = obs_arr[3]; + if(!is_keeper_lvl(pnt_obs_str.c_str(), hdr_typ_str, obs_lvl, obs_hgt)) return; // When verifying a vertical level forecast against a surface message type, // set the observation level value to bad data so that it's not used in the @@ -1596,11 +1236,12 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, obs_lvl = bad_data_double; } - // Set flag for specific humidity - bool spfh_flag = fcst_info->get_var_info()->is_specific_humidity() && - obs_info->is_specific_humidity(); + // Set flags + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); // Store pointer to ObsErrorEntry + ObsErrorEntry *oerr_ptr = (ObsErrorEntry *) nullptr; if(obs_error_info->flag) { // Use config file setting, if specified @@ -1637,96 +1278,58 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, FieldType::Obs, oerr_ptr, obs_v); } - // Look through all of the PairData objects to see if the observation - // should be added. + // Loop through the message types + for(int i_msg_typ=0; i_msg_typs_is_on(x, y)) continue; - } - // Otherwise, check for the obs Station ID's presence in the - // masking SID list - else if(pd[i][j][0].mask_sid_ptr != (StringArray *) 0) { - if(!pd[i][j][0].mask_sid_ptr->has(hdr_sid_str)) continue; - } - // Otherwise, check observation Lat/Lon thresholds - else if(pd[i][j][0].mask_llpnt_ptr != (MaskLatLon *) 0) { - if(!pd[i][j][0].mask_llpnt_ptr->lat_thresh.check(hdr_lat) || - !pd[i][j][0].mask_llpnt_ptr->lon_thresh.check(hdr_lon)) { - continue; - } - } + // Loop through the masking regions + for(int i_mask=0; i_maskget_var_info()->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - - // Compute the interpolated climatology mean - cmn_v = compute_interp(climo_mn_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, cmn_lvl_blw, cmn_lvl_abv); - - // Check for bad data - if(climo_mn_dpa.n_planes() > 0 && is_bad_data(cmn_v)) { - continue; - } + // Check masking region + if(!is_keeper_mask(pnt_obs_str.c_str(), i_msg_typ, i_mask, x, y, + hdr_sid_str, hdr_lat, hdr_lon)) continue; - // Check for valid interpolation options - if(climo_sd_dpa.n_planes() > 0 && - (pd[0][0][k].interp_mthd == InterpMthd::Min || - pd[0][0][k].interp_mthd == InterpMthd::Max || - pd[0][0][k].interp_mthd == InterpMthd::Median || - pd[0][0][k].interp_mthd == InterpMthd::Best)) { - mlog << Warning << "\nVxPairDataEnsemble::add_point_obs() -> " - << "applying the " - << interpmthd_to_string(pd[0][0][k].interp_mthd) - << " interpolation method to climatological spread " - << "may cause unexpected results.\n\n"; - } + // Loop through the interpolation methods + for(int i_interp=0; i_interpget_var_info()->level().type(), - to_lvl, csd_lvl_blw, csd_lvl_abv); - - // Check for bad data - if(climo_sd_dpa.n_planes() > 0 && is_bad_data(csd_v)) { - continue; - } + // Check climatology values + ClimoPntInfo cpi; + if(!is_keeper_climo(pnt_obs_str.c_str(), i_msg_typ, i_mask, i_interp, + gr, obs_x, obs_y, obs_v, obs_lvl, obs_hgt, + cpi)) continue; // Compute weight for current point - wgt_v = (wgt_dp == (DataPlane *) 0 ? - default_grid_weight : wgt_dp->get(x, y)); + double wgt_v = (wgt_dp == nullptr ? 
+ default_grid_weight : + wgt_dp->get(x, y)); // Add the observation value // Weight is from the nearest grid point - pd[i][j][k].add_point_obs(hdr_sid_str, hdr_lat, hdr_lon, - obs_x, obs_y, hdr_ut, obs_lvl, obs_hgt, - obs_v, obs_qty, cmn_v, csd_v, wgt_v); - pd[i][j][k].add_obs_error_entry(oerr_ptr); + int n = three_to_one(i_msg_typ, i_mask, i_interp); + if(!pd[n].add_point_obs(hdr_sid_str, hdr_lat, hdr_lon, + obs_x, obs_y, hdr_ut, obs_lvl, obs_hgt, + obs_v, obs_qty, cpi, wgt_v)) { + + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + mlog << Debug(REJECT_DEBUG_LEVEL) + << "For " << fcst_info->magic_str() + << " versus " << obs_info->magic_str() + << ", skipping observation since it is a duplicate:\n" + << pnt_obs_str << "\n"; + } + + inc_count(rej_dup, i_msg_typ, i_mask, i_interp); + continue; + } + + // Store the observation error pointer + pd[n].add_obs_error_entry(oerr_ptr); + } // end for k } // end for j } // end for i @@ -1737,254 +1340,120 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, //////////////////////////////////////////////////////////////////////// void VxPairDataEnsemble::add_ens(int member, bool mn, Grid &gr) { - int i, j, k, l, m; - int f_lvl_blw, f_lvl_abv, i_mem; - double to_lvl, fcst_v; - NumArray fcst_na; // Set flag for specific humidity - bool spfh_flag = fcst_info->get_var_info()->is_specific_humidity() && - obs_info->is_specific_humidity(); + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); // Loop through all the PairDataEnsemble objects and interpolate - for(i=0; i " - << "the \"" << interpmthd_to_string(pd[0][0][k].interp_mthd) - << "\" interpolation method only applies when verifying a " - << "single level, not " << fcst_dpa.n_planes() - << " levels.\n\n"; - continue; - } - - // Process each of the observations - for(l=0; lget_var_info()->level().type() == LevelType_Pres ? - pd[i][j][k].lvl_na[l] : pd[i][j][k].elv_na[l]); - - // For a single forecast field - if(fcst_dpa.n_planes() == 1) { - f_lvl_blw = 0; - f_lvl_abv = 0; - } - // For multiple forecast fields, find the levels above - // and below the observation point. 
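The nested i_msg_typ / i_mask / i_interp loops above index one flat vector of pairs and counters through three_to_one(). The patch does not show that function's body, but a row-major mapping consistent with the n_vx = n_msg_typ * n_mask * n_interp comment would look like this illustrative sketch:

#include <cassert>
#include <vector>

struct FlatVxIndex {
   int n_msg_typ;
   int n_mask;
   int n_interp;

   // Total number of verification tasks
   int n_vx() const { return n_msg_typ * n_mask * n_interp; }

   // Row-major mapping from (message type, mask, interpolation) to one index
   int three_to_one(int i_msg_typ, int i_mask, int i_interp) const {
      assert(i_msg_typ >= 0 && i_msg_typ < n_msg_typ);
      assert(i_mask    >= 0 && i_mask    < n_mask);
      assert(i_interp  >= 0 && i_interp  < n_interp);
      return (i_msg_typ * n_mask + i_mask) * n_interp + i_interp;
   }
};

// Example: increment one rejection counter stored in a flat vector
// FlatVxIndex idx{3, 2, 4};
// std::vector<int> rej_dup(idx.n_vx(), 0);
// rej_dup[idx.three_to_one(1, 0, 2)]++;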
- else { - find_vert_lvl(fcst_dpa, to_lvl, f_lvl_blw, f_lvl_abv); - } - - // Extract the HiRA neighborhood of values - if(pd[0][0][k].interp_mthd == InterpMthd::HiRA) { - - // For HiRA, set the ensemble mean to bad data - if(mn) { - fcst_na.erase(); - fcst_na.add(bad_data_double); - } - // Otherwise, retrieve all the neighborhood values - // using a valid threshold of 0 - else { - get_interp_points(fcst_dpa, - pd[i][j][k].x_na[l], - pd[i][j][k].y_na[l], - pd[0][0][k].interp_mthd, - pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, - gr.wrap_lon(), - 0, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, f_lvl_blw, f_lvl_abv, - fcst_na); - } - } - // Otherwise, get a single interpolated ensemble value - else { - fcst_na.add(compute_interp(fcst_dpa, - pd[i][j][k].x_na[l], - pd[i][j][k].y_na[l], - pd[i][j][k].o_na[l], - pd[i][j][k].cmn_na[l], - pd[i][j][k].csd_na[l], - pd[0][0][k].interp_mthd, - pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, - gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->get_var_info()->level().type(), - to_lvl, f_lvl_blw, f_lvl_abv)); - } - - // Store the single ensemble value or HiRA neighborhood - for(m=0; mflag) { - fcst_v = add_obs_error_inc( - obs_error_info->rng_ptr, FieldType::Fcst, - pd[i][j][k].obs_error_entry[l], - pd[i][j][k].o_na[l], fcst_na[m]); - } - else { - fcst_v = fcst_na[m]; - } - - // Determine index of ensemble member - i_mem = member * fcst_na.n() + m; - - // Store perturbed ensemble member value - pd[i][j][k].add_ens(i_mem, fcst_v); - } - - } // end for m - fcst_na - } // end for l - n_obs - } // end for k - n_interp - } // end for j - n_mask - } // end for i - n_msg_typ - - return; -} - -//////////////////////////////////////////////////////////////////////// - -int VxPairDataEnsemble::get_n_pair() const { - int n, i, j, k; - - for(i=0, n=0; iinterp_mthd == InterpMthd::HiRA && + fcst_dpa.n_planes() != 1 ) { + + mlog << Warning << "\nVxPairDataEnsemble::add_ens() -> " + << "the \"" << interpmthd_to_string(it->interp_mthd) + << "\" interpolation method only applies when verifying a " + << "single level, not " << fcst_dpa.n_planes() + << " levels.\n\n"; + continue; } - } - return; -} + // Process each of the observations + NumArray fcst_na; + for(int i_obs=0; i_obsn_obs; i_obs++) { -//////////////////////////////////////////////////////////////////////// + // Initialize + fcst_na.erase(); -void VxPairDataEnsemble::set_obs_summary(ObsSummary s) { + // Interpolate using the observation pressure level or height + double to_lvl = (fcst_info->level().type() == LevelType_Pres ? + it->lvl_na[i_obs] : it->elv_na[i_obs]); + int lvl_blw, lvl_abv; - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_summary(s); + // For a single forecast field + if(fcst_dpa.n_planes() == 1) { + lvl_blw = 0; + lvl_abv = 0; } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::set_obs_perc_value(int percentile) { - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_perc_value(percentile); + // For multiple forecast fields, find the levels above + // and below the observation point. 
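In the add_ens() logic of this hunk, each HiRA neighborhood point is treated as an extra ensemble member: set_ens_size() is called with n * gt->size(), and each value lands at index member * fcst_na.n() + i_fcst. A small standalone sketch of that bookkeeping, with illustrative names rather than the MET API:

#include <vector>

struct HiraEnsSketch {
   int n_members = 0;
   int n_pts     = 0;            // neighborhood size (GridTemplate::size() in the real code)
   std::vector<double> ens;      // flattened as [member][neighborhood point]

   void set_size(int members, int pts) {
      n_members = members;
      n_pts     = pts;
      ens.assign(static_cast<size_t>(members) * pts, 0.0);
   }

   // Each neighborhood value of each member gets a unique slot
   void add_value(int member, int j, double v) {
      ens[static_cast<size_t>(member) * n_pts + j] = v;
   }
};

// Usage: 10 members with a 3x3 HiRA neighborhood behave like 90 members
// HiraEnsSketch e;
// e.set_size(10, 9);
// e.add_value(4, 7, 287.5);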
+ else { + find_vert_lvl(fcst_dpa, to_lvl, lvl_blw, lvl_abv); } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// -void VxPairDataEnsemble::print_obs_summary() { + // Extract the HiRA neighborhood of values + if(it->interp_mthd == InterpMthd::HiRA) { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].print_obs_summary(); + // For HiRA, set the ensemble mean to bad data + if(mn) { + fcst_na.erase(); + fcst_na.add(bad_data_double); + } + // Otherwise, retrieve all the neighborhood values + // using a valid threshold of 0 + else { + get_interp_points(fcst_dpa, + it->x_na[i_obs], it->y_na[i_obs], + it->interp_mthd, it->interp_wdth, it->interp_shape, + gr.wrap_lon(), 0, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv, + fcst_na); + } } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataEnsemble::calc_obs_summary() { - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].calc_obs_summary(); + // Otherwise, get a single interpolated ensemble value + else { + ClimoPntInfo cpi(it->fcmn_na[i_obs], it->fcsd_na[i_obs], + it->ocmn_na[i_obs], it->ocsd_na[i_obs]); + + fcst_na.add(compute_interp(fcst_dpa, + it->x_na[i_obs], it->y_na[i_obs], it->o_na[i_obs], &cpi, + it->interp_mthd, it->interp_wdth, it->interp_shape, + gr.wrap_lon(), interp_thresh, spfh_flag, + fcst_info->level().type(), + to_lvl, lvl_blw, lvl_abv)); } - } - } - - return; -} -//////////////////////////////////////////////////////////////////////// + // Store the single ensemble value or HiRA neighborhood + for(int i_fcst=0; i_fcstmn_na.add(fcst_na[i_fcst]); + } + // Store the ensemble member values + else { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].ctrl_index = index; - } - } - } + // Track unperturbed ensemble variance sums + // Exclude the control member from the variance + if(member != it->ctrl_index) { + it->add_ens_var_sums(i_obs, fcst_na[i_fcst]); + } - return; -} + // Apply observation error perturbation, if requested + double fcst_v; + if(obs_error_info->flag) { + fcst_v = add_obs_error_inc( + obs_error_info->rng_ptr, FieldType::Fcst, + it->obs_error_entry[i_obs], + it->o_na[i_obs], fcst_na[i_fcst]); + } + else { + fcst_v = fcst_na[i_fcst]; + } -//////////////////////////////////////////////////////////////////////// + // Determine index of ensemble member + int i_mem = member * fcst_na.n() + i_fcst; -void VxPairDataEnsemble::set_skip_const(bool tf) { + // Store perturbed ensemble member value + it->add_ens(i_mem, fcst_v); + } - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].skip_const = tf; - } - } - } + } // end for i_fcst + } // end for i_obs + } // end for PairDataEnsemble iterator return; } diff --git a/src/libcode/vx_statistics/pair_data_ensemble.h b/src/libcode/vx_statistics/pair_data_ensemble.h index 9be2eec3c8..f621c23abf 100644 --- a/src/libcode/vx_statistics/pair_data_ensemble.h +++ b/src/libcode/vx_statistics/pair_data_ensemble.h @@ -164,12 +164,11 @@ class PairDataEnsemble : public PairBase { //////////////////////////////////////////////////////////////////////// // -// Class to store a variety of PairDataEnsemble objects for each -// verification task +// Class to store PairDataEnsemble objects for ensemble 
verification // //////////////////////////////////////////////////////////////////////// -class VxPairDataEnsemble { +class VxPairDataEnsemble : public VxPairBase { private: @@ -189,130 +188,37 @@ class VxPairDataEnsemble { // ////////////////////////////////////////////////////////////////// - EnsVarInfo *fcst_info; // Forecast field, allocated by EnsVarInfo - VarInfo *climo_info; // Climatology field, allocated by VarInfoFactory - VarInfo *obs_info; // Observation field, allocated by VarInfoFactory - - ConcatString desc; // User description from config file - - double interp_thresh; // Threshold between 0 and 1 used when - // interpolating the forecasts to the - // observation location. - - StringArray msg_typ_sfc; // List of surface message types - - ////////////////////////////////////////////////////////////////// - // - // Forecast and climotology fields falling between the requested - // levels. Store the fields in a data plane array. - // - ////////////////////////////////////////////////////////////////// - - DataPlaneArray fcst_dpa; // Forecast data plane array - DataPlaneArray climo_mn_dpa; // Climatology mean data plane array - DataPlaneArray climo_sd_dpa; // Climatology standard deviation data plane array - - ////////////////////////////////////////////////////////////////// - - unixtime fcst_ut; // Ensemble valid time - unixtime beg_ut; // Beginning of valid time window - unixtime end_ut; // End of valid time window + EnsVarInfo *ens_info; // Ensemble data, allocated by EnsVarInfo ////////////////////////////////////////////////////////////////// - StringArray sid_inc_filt; // Station ID inclusion list - StringArray sid_exc_filt; // Station ID exclusion list - StringArray obs_qty_inc_filt; // Observation quality include markers - StringArray obs_qty_exc_filt; // Observation quality exclude markers - - ////////////////////////////////////////////////////////////////// - ObsErrorInfo *obs_error_info; // Pointer for observation error // Not allocated ////////////////////////////////////////////////////////////////// - int n_msg_typ; // Number of verifying message types - - int n_mask; // Total number of masking regions - // of masking DataPlane fields or SIDs - - int n_interp; // Number of interpolation techniques - - ////////////////////////////////////////////////////////////////// - - PairDataEnsemble ***pd; // 3-Dim Array of PairDataEnsemble objects - // as [n_msg_typ][n_mask][n_interp] + // 3-Dim vector of PairDataEnsemble objects [n_msg_typ][n_mask][n_interp] + std::vector pd; ////////////////////////////////////////////////////////////////// void clear(); - void set_fcst_info(EnsVarInfo *); - void set_climo_info(VarInfo *); - void set_obs_info(VarInfo *); - - void set_desc(const char *); - - void set_interp_thresh(double); - void set_msg_typ_sfc(const StringArray &); - - void set_fcst_dpa(const DataPlaneArray &); - void set_climo_mn_dpa(const DataPlaneArray &); - void set_climo_sd_dpa(const DataPlaneArray &); - - void set_fcst_ut(const unixtime); - void set_beg_ut(const unixtime); - void set_end_ut(const unixtime); - - void set_sid_inc_filt(const StringArray); - void set_sid_exc_filt(const StringArray); - void set_obs_qty_inc_filt(const StringArray); - void set_obs_qty_exc_filt(const StringArray); - - // Call set_pd_size before set_msg_typ, set_mask_area, and set_interp - void set_pd_size(int, int, int); - - void set_msg_typ(int, const char *); - void set_msg_typ_vals(int, const StringArray &); - void set_mask_area(int, const char *, MaskPlane *); - void set_mask_sid(int, 
const char *, StringArray *); - void set_mask_llpnt(int, const char *, MaskLatLon *); - - void set_interp(int i_interp, const char *interp_mthd_str, int width, - GridTemplateFactory::GridTemplates shape); - void set_interp(int i_interp, InterpMthd mthd, int width, - GridTemplateFactory::GridTemplates shape); + void set_ens_info(const EnsVarInfo *); + void set_size(int, int, int); // Call set_ens_size before add_ens void set_ens_size(int n); - void set_climo_cdf_info_ptr(const ClimoCDFInfo *); - void set_ssvar_bin_size(double); void set_phist_bin_size(double); + void set_ctrl_index(int); + void set_skip_const(bool); void add_point_obs(float *, int *, const char *, const char *, unixtime, const char *, float *, Grid &, const char * = 0, const DataPlane * = 0); - void add_ens(int, bool mn, Grid &); - - int get_n_pair() const; - - void set_duplicate_flag(DuplicateType duplicate_flag); - - void set_obs_summary(ObsSummary obs_summary); - - void set_obs_perc_value(int percentile); - - void print_obs_summary(); - - void calc_obs_summary(); - - void set_ctrl_index(int); - - void set_skip_const(bool); }; //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_data_point.cc b/src/libcode/vx_statistics/pair_data_point.cc index 5088c22564..0d29dda9ed 100644 --- a/src/libcode/vx_statistics/pair_data_point.cc +++ b/src/libcode/vx_statistics/pair_data_point.cc @@ -29,8 +29,6 @@ using namespace std; -static const int REJECT_DEBUG_LEVEL = 9; - //////////////////////////////////////////////////////////////////////// // // Code for class PairDataPoint @@ -71,9 +69,8 @@ PairDataPoint & PairDataPoint::operator=(const PairDataPoint &pd) { void PairDataPoint::init_from_scratch() { - seeps_mpr.clear(); - seeps.clear(); seeps_climo = nullptr; + clear(); return; @@ -87,10 +84,13 @@ void PairDataPoint::clear() { f_na.clear(); for (int idx=0; idxset_p1_thresh(p1_thresh); - else mlog << Warning << "\nPairDataPoint::set_seeps_thresh() ignored t1_threshold." - << " Load SEEPS climo first\n\n"; + else mlog << Warning << "\nPairDataPoint::set_seeps_thresh() -> " + << "ignored t1_threshold. 
Load SEEPS climo first\n\n"; } //////////////////////////////////////////////////////////////////////// @@ -226,8 +236,8 @@ void PairDataPoint::set_point_pair(int i_obs, const char *sid, double x, double y, unixtime ut, double lvl, double elv, double f, double o, const char *qc, - double cmn, double csd, double wgt, - SeepsScore *seeps) { + const ClimoPntInfo &cpi, + double wgt, const SeepsScore *seeps) { if(i_obs < 0 || i_obs >= n_obs) { mlog << Error << "\nPairDataPoint::set_point_pair() -> " @@ -237,7 +247,7 @@ void PairDataPoint::set_point_pair(int i_obs, const char *sid, } set_point_obs(i_obs, sid, lat, lon, x, y, ut, lvl, elv, - o, qc, cmn, csd, wgt); + o, qc, cpi, wgt); f_na.set(i_obs, f); *seeps_mpr[i_obs] = *seeps; @@ -248,9 +258,10 @@ void PairDataPoint::set_point_pair(int i_obs, const char *sid, //////////////////////////////////////////////////////////////////////// bool PairDataPoint::add_grid_pair(double f, double o, - double cmn, double csd, double wgt) { + const ClimoPntInfo &cpi, + double wgt) { - add_grid_obs(o, cmn, csd, wgt); + add_grid_obs(o, cpi, wgt); f_na.add(f); seeps_mpr.push_back(nullptr); @@ -261,13 +272,16 @@ bool PairDataPoint::add_grid_pair(double f, double o, //////////////////////////////////////////////////////////////////////// bool PairDataPoint::add_grid_pair(const NumArray &f_in, const NumArray &o_in, - const NumArray &cmn_in, const NumArray &csd_in, + const NumArray &fcmn_in, const NumArray &fcsd_in, + const NumArray &ocmn_in, const NumArray &ocsd_in, const NumArray &wgt_in) { // Check for constant length - if(o_in.n() != f_in.n() || - o_in.n() != cmn_in.n() || - o_in.n() != csd_in.n() || + if(o_in.n() != f_in.n() || + o_in.n() != fcmn_in.n() || + o_in.n() != fcsd_in.n() || + o_in.n() != ocmn_in.n() || + o_in.n() != ocsd_in.n() || o_in.n() != wgt_in.n()) { mlog << Error << "\nPairDataPoint::add_grid_pair() -> " << "arrays must all have the same length!\n\n"; @@ -282,7 +296,8 @@ bool PairDataPoint::add_grid_pair(const NumArray &f_in, const NumArray &o_in, wgt_na.add(wgt_in); for(int i=0; ifile_type()); - *fcst_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_info(VarInfo *info) { - VarInfoFactory f; - - // Deallocate, if necessary - if(climo_info) { delete climo_info; climo_info = (VarInfo *) nullptr; } - - // Perform a deep copy - climo_info = f.new_var_info(info->file_type()); - *climo_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_info(VarInfoGrib *info) { - - // Deallocate, if necessary - if(obs_info) { delete obs_info; obs_info = (VarInfoGrib *) nullptr; } - - // Perform a deep copy - obs_info = new VarInfoGrib; - *obs_info = *info; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_desc(const char *s) { - - desc = s; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_interp_thresh(double t) { - - interp_thresh = t; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_fcst_dpa(const DataPlaneArray &dpa) { - - fcst_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_mn_dpa(const DataPlaneArray &dpa) { - - climo_mn_dpa = dpa; - - return; -} - 
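Several signatures in this file now take a single ClimoPntInfo argument where four separate climatology values (forecast and observation mean and standard deviation) used to be passed as doubles. A minimal sketch of that kind of bundle and how a threshold-style check might consume it; this is not the actual class definition, only an illustration using the fcmn/fcsd/ocmn/ocsd naming seen in this patch:

// Sketch of a climatology bundle in the spirit of ClimoPntInfo
struct ClimoPntSketch {
   double fcmn;   // forecast climatology mean
   double fcsd;   // forecast climatology standard deviation
   double ocmn;   // observation climatology mean
   double ocsd;   // observation climatology standard deviation
};

// A check that previously took (f, o, cmn, csd) can take the bundle instead,
// comparing each value against its own climatology:
inline bool both_anomalies_exceed(double f, double o,
                                  const ClimoPntSketch &cpi,
                                  double thresh) {
   return (f - cpi.fcmn) > thresh &&
          (o - cpi.ocmn) > thresh;
}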
-//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_sd_dpa(const DataPlaneArray &dpa) { - - climo_sd_dpa = dpa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_fcst_ut(const unixtime ut) { - - fcst_ut = ut; - - // set the fcst_ut for all PairBase instances, used for duplicate logic - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_fcst_ut(ut); - } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_beg_ut(const unixtime ut) { - - beg_ut = ut; - - return; -} - -//////////////////////////////////////////////////////////////////////// + VxPairBase::assign(vx_pd); -void VxPairDataPoint::set_end_ut(const unixtime ut) { + set_size(vx_pd.n_msg_typ, vx_pd.n_mask, vx_pd.n_interp); - end_ut = ut; + pd = vx_pd.pd; return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataPoint::set_sid_inc_filt(const StringArray &sa) { - - sid_inc_filt = sa; +void VxPairDataPoint::set_size(int types, int masks, int interps) { - return; -} - -//////////////////////////////////////////////////////////////////////// + VxPairBase::set_size(types, masks, interps); -void VxPairDataPoint::set_sid_exc_filt(const StringArray &sa) { + // Resize the PairDataPoint vector + pd.resize(n_vx); - sid_exc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_qty_inc_filt(const StringArray &sa) { - - obs_qty_inc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_qty_exc_filt(const StringArray &sa) { - - obs_qty_exc_filt = sa; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_pd_size(int types, int masks, int interps) { - int i, j, k; - - // Store the dimensions for the PairDataPoint array - n_msg_typ = types; - n_mask = masks; - n_interp = interps; - - // Allocate space for the PairDataPoint array - pd = new PairDataPoint ** [n_msg_typ]; - rej_typ = new int ** [n_msg_typ]; - rej_mask = new int ** [n_msg_typ]; - rej_fcst = new int ** [n_msg_typ]; - rej_cmn = new int ** [n_msg_typ]; - rej_csd = new int ** [n_msg_typ]; - rej_mpr = new int ** [n_msg_typ]; - rej_dup = new int ** [n_msg_typ]; - - for(i=0; i " - << "the \"" << conf_key_mpr_column << "\" (" - << write_css(sa) << ") and \"" << conf_key_mpr_thresh - << "\" (" << write_css(ta) - << ") config file entries must have the same length!\n\n"; - exit(1); - } - - mpr_column = sa; - mpr_thresh = ta; - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_climo_cdf_info_ptr(const ClimoCDFInfo *info) { - - for(int i=0; i= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation station id:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_sid++; - return; + // Point observation summary string for rejection log messages + ConcatString pnt_obs_str; + if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { + pnt_obs_str = point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, + hdr_ut, obs_qty, obs_arr, var_name); 
} - // Check whether the GRIB code for the observation matches - // the specified code - if((var_name != 0) && (0 < strlen(var_name))) { - if(var_name != obs_info->name()) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation variable name:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } + // Check the station ID + if(!is_keeper_sid(pnt_obs_str.c_str(), hdr_sid_str)) return; - rej_var++; - return; - } - } - else if(obs_info->code() != nint(obs_arr[1])) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation variable GRIB code:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } + // Check observation variable + if(!is_keeper_var(pnt_obs_str.c_str(), var_name, nint(obs_arr[1]))) return; - rej_var++; - return; - } + // Check observation quality + if(!is_keeper_qty(pnt_obs_str.c_str(), obs_qty)) return; - // Check the observation quality include and exclude options - if((obs_qty_inc_filt.n() > 0 && !obs_qty_inc_filt.has(obs_qty)) || - (obs_qty_exc_filt.n() > 0 && obs_qty_exc_filt.has(obs_qty))) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation quality control string:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } + // Check valid time + if(!is_keeper_vld(pnt_obs_str.c_str(), hdr_ut)) return; - rej_qty++; - return; - } + // Check observation value + double obs_v = obs_arr[4]; + if(!is_keeper_obs(pnt_obs_str.c_str(), obs_v)) return; - // Check whether the observation time falls within the valid time - // window - if(hdr_ut < beg_ut || hdr_ut > end_ut) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation valid time:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } + // Check location + double hdr_lat = hdr_arr[0]; + double hdr_lon = hdr_arr[1]; + double obs_x, obs_y; + if(!is_keeper_grd(pnt_obs_str.c_str(), gr, hdr_lat, hdr_lon, obs_x, obs_y)) return; - rej_vld++; - return; - } + // Check topo + double hdr_elv = hdr_arr[2]; + if(!is_keeper_topo(pnt_obs_str.c_str(), gr, obs_x, obs_y, + hdr_typ_str, hdr_elv)) return; + // Check level + double obs_lvl = obs_arr[2]; + double obs_hgt = obs_arr[3]; + if(!is_keeper_lvl(pnt_obs_str.c_str(), hdr_typ_str, obs_lvl, obs_hgt)) return; + + // Set flags + bool spfh_flag = fcst_info->is_specific_humidity() && + obs_info->is_specific_humidity(); bool precip_flag = fcst_info->is_precipitation() && obs_info->is_precipitation(); int precip_interval = bad_data_int; - if (precip_flag) { - if (wgt_dp) precip_interval = wgt_dp->accum(); + if(precip_flag) { + if(wgt_dp) precip_interval = wgt_dp->accum(); else precip_interval = fcst_dpa[0].accum(); } - hdr_lat = hdr_arr[0]; - hdr_lon = hdr_arr[1]; - hdr_elv = hdr_arr[2]; - - obs_lvl = obs_arr[2]; - obs_hgt = obs_arr[3]; - - // Apply observation processing logic - obs_v = 
pd[0][0][0].process_obs(obs_info, obs_arr[4]); - - // Check whether the observation value contains valid data - if(is_bad_data(obs_v)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation with bad data value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_obs++; - return; - } - - // Convert the lat/lon value to x/y - gr.latlon_to_xy(hdr_lat, -1.0*hdr_lon, obs_x, obs_y); - x = nint(obs_x); - y = nint(obs_y); - - // Check if the observation's lat/lon is on the grid - if(((x < 0 || x >= gr.nx()) && !gr.wrap_lon()) || - y < 0 || y >= gr.ny()) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation off the grid where (x, y) = (" - << x << ", " << y << ") and grid (nx, ny) = (" << gr.nx() - << ", " << gr.ny() << "):\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_grd++; - return; - } - - // Check for a large topography difference - if(sfc_info.topo_ptr && msg_typ_sfc.reg_exp_match(hdr_typ_str)) { - - // Interpolate model topography to observation location - double topo = compute_horz_interp( - *sfc_info.topo_ptr, obs_x, obs_y, hdr_elv, - InterpMthd::Bilin, 2, - GridTemplateFactory::GridTemplates::Square, - gr.wrap_lon(), 1.0); - - // Skip bad topography values - if(is_bad_data(hdr_elv) || is_bad_data(topo)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to bad topography values " - << "where observation elevation = " << hdr_elv - << " and model topography = " << topo << ":\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_topo++; - return; - } - - // Check the topography difference threshold - if(!sfc_info.topo_use_obs_thresh.check(topo - hdr_elv)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to topography difference " - << "where observation elevation (" << hdr_elv - << ") minus model topography (" << topo << ") = " - << topo - hdr_elv << " is not " - << sfc_info.topo_use_obs_thresh.get_str() << ":\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_topo++; - return; - } - } - - // For pressure levels, check if the observation pressure level - // falls in the requested range. - if(obs_info->level().type() == LevelType_Pres) { - - if(obs_lvl < obs_info->level().lower() || - obs_lvl > obs_info->level().upper()) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation pressure level value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_lvl++; - return; - } - } - // For accumulations, check if the observation accumulation interval - // matches the requested interval. 
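The long inline rejection blocks being removed here are consolidated into is_keeper_*() helpers that take a pre-built observation summary string; the string is only formatted when the verbosity is at least REJECT_DEBUG_LEVEL, so the common accept path does no formatting work. A compact sketch of that pattern with hypothetical helpers, not the MET functions:

#include <cstdio>
#include <string>

static const int REJECT_DEBUG_LEVEL = 9;   // same value as the constant that appears earlier in this patch

struct RejectCounts { int rej_obs = 0; };

// Reject observations with a bad-data value, logging only when verbose enough
inline bool is_keeper_obs_sketch(const std::string &obs_str, double obs_v,
                                 int verbosity, RejectCounts &counts) {
   const double bad_data = -9999.0;        // stand-in for the bad-data sentinel
   if(obs_v == bad_data) {
      if(verbosity >= REJECT_DEBUG_LEVEL) {
         std::fprintf(stderr, "skipping observation with bad data value:\n%s\n",
                      obs_str.c_str());
      }
      counts.rej_obs++;
      return false;
   }
   return true;
}

// Caller: build the summary string once, and only if it could be printed
// std::string obs_str;
// if(verbosity >= REJECT_DEBUG_LEVEL) obs_str = describe_obs(...);  // hypothetical formatter
// if(!is_keeper_obs_sketch(obs_str, obs_v, verbosity, counts)) return;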
- else if(obs_info->level().type() == LevelType_Accum) { - - if(obs_lvl < obs_info->level().lower() || - obs_lvl > obs_info->level().upper()) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation accumulation interval:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_lvl++; - return; - } - } - // For all other level types (VertLevel, RecNumber, NoLevel), - // check for a surface message type or if the observation height - // falls within the requested range. - else { - - if(!msg_typ_sfc.reg_exp_match(hdr_typ_str) && - (obs_hgt < obs_info->level().lower() || - obs_hgt > obs_info->level().upper())) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation level value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - rej_lvl++; - return; - } - } - - // For a single forecast field - if(fcst_dpa.n_planes() == 1) { - f_lvl_blw = 0; - f_lvl_abv = 0; - } - // For multiple forecast fields, find the levels above and below - // the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(fcst_dpa, to_lvl, f_lvl_blw, f_lvl_abv); - } - - // For a single climatology mean field - if(climo_mn_dpa.n_planes() == 1) { - cmn_lvl_blw = 0; - cmn_lvl_abv = 0; - } - // For multiple climatology mean fields, find the levels above and - // below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(climo_mn_dpa, to_lvl, cmn_lvl_blw, cmn_lvl_abv); - } - - // For a single climatology standard deviation field - if(climo_sd_dpa.n_planes() == 1) { - csd_lvl_blw = 0; - csd_lvl_abv = 0; - } - // For multiple climatology standard deviation fields, find the - // levels above and below the observation point. - else { - // Interpolate using the observation pressure level or height - to_lvl = (fcst_info->level().type() == LevelType_Pres ? - obs_lvl : obs_hgt); - find_vert_lvl(climo_sd_dpa, to_lvl, csd_lvl_blw, csd_lvl_abv); - } + bool has_seeps = false; + SeepsScore *seeps = nullptr; // When verifying a vertical level forecast against a surface message // type, set the observation level value to bad data so that it's not @@ -1307,367 +565,108 @@ void VxPairDataPoint::add_point_obs(float *hdr_arr, const char *hdr_typ_str, obs_lvl = bad_data_double; } - // Set flag for specific humidity - bool spfh_flag = fcst_info->is_specific_humidity() && - obs_info->is_specific_humidity(); - - // Look through all of the PairDataPoint objects to see if the - // observation should be added. 
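The find_vert_lvl() calls above bracket the observation's pressure level or height with the data planes immediately below and above it, so the later compute_interp() call can blend the two; with a single plane both indices collapse to zero. A simplified sketch of that bracketing, assuming the plane levels are sorted in increasing order (the real function also handles pressure versus height ordering):

#include <vector>

// Find the indices of the levels just below and just above to_lvl.
// Assumes lvls is non-empty and sorted in increasing order.
inline void bracket_level(const std::vector<double> &lvls, double to_lvl,
                          int &i_blw, int &i_abv) {

   // A single field is used for both bounds
   if(lvls.size() == 1) { i_blw = 0; i_abv = 0; return; }

   // Clamp targets outside the available range
   if(to_lvl <= lvls.front()) { i_blw = 0; i_abv = 0; return; }
   if(to_lvl >= lvls.back()) {
      i_blw = static_cast<int>(lvls.size()) - 1;
      i_abv = i_blw;
      return;
   }

   // Otherwise find the enclosing pair of levels
   for(size_t i = 0; i + 1 < lvls.size(); i++) {
      if(lvls[i] <= to_lvl && to_lvl <= lvls[i + 1]) {
         i_blw = static_cast<int>(i);
         i_abv = static_cast<int>(i) + 1;
         return;
      }
   }
}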
- - bool has_seeps = false; - SeepsScore *seeps = 0; - - // Check the message types - for(i=0; i= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation message type:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - inc_count(rej_typ, i); - continue; - } - - // Check the masking areas and points - for(j=0; js_is_on(x, y)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation based on spatial masking region:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - inc_count(rej_mask, i, j); - continue; - } - } - // Otherwise, check for the obs Station ID's presence in the - // masking SID list - else if(pd[i][j][0].mask_sid_ptr != (StringArray *) 0) { - if(!pd[i][j][0].mask_sid_ptr->has(hdr_sid_str)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation based on masking station id list:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - inc_count(rej_mask, i, j); - continue; - } - } - // Otherwise, check observation lat/lon thresholds - else if(pd[i][j][0].mask_llpnt_ptr != (MaskLatLon *) 0) { - if(!pd[i][j][0].mask_llpnt_ptr->lat_thresh.check(hdr_lat) || - !pd[i][j][0].mask_llpnt_ptr->lon_thresh.check(hdr_lon)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation based on latitude/longitude thesholds:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - inc_count(rej_mask, i, j); - continue; - } - } - - // Compute the interpolated values - for(k=0; klevel().type() == LevelType_Pres ? 
- obs_lvl : obs_hgt); + // Check message type + if(!is_keeper_typ(pnt_obs_str.c_str(), i_msg_typ, hdr_typ_str)) continue; - // Compute the interpolated climatology mean - cmn_v = compute_interp(climo_mn_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->level().type(), - to_lvl, cmn_lvl_blw, cmn_lvl_abv); + int x = nint(obs_x); + int y = nint(obs_y); - // Check for bad data - if(climo_mn_dpa.n_planes() > 0 && is_bad_data(cmn_v)) { + // Loop through the masking regions + for(int i_mask=0; i_mask= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation based on bad climatological mean value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } + // Check masking region + if(!is_keeper_mask(pnt_obs_str.c_str(), i_msg_typ, i_mask, x, y, + hdr_sid_str, hdr_lat, hdr_lon)) continue; - inc_count(rej_cmn, i, j, k); - continue; - } - - // Check for valid interpolation options - if(climo_sd_dpa.n_planes() > 0 && - (pd[0][0][k].interp_mthd == InterpMthd::Min || - pd[0][0][k].interp_mthd == InterpMthd::Max || - pd[0][0][k].interp_mthd == InterpMthd::Median || - pd[0][0][k].interp_mthd == InterpMthd::Best)) { - mlog << Warning << "\nVxPairDataPoint::add_point_obs() -> " - << "applying the " - << interpmthd_to_string(pd[0][0][k].interp_mthd) - << " interpolation method to climatological spread " - << "may cause unexpected results.\n\n"; - } - - // Compute the interpolated climatology standard deviation - csd_v = compute_interp(climo_sd_dpa, obs_x, obs_y, obs_v, - bad_data_double, bad_data_double, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->level().type(), - to_lvl, csd_lvl_blw, csd_lvl_abv); - - // Check for bad data - if(climo_sd_dpa.n_planes() > 0 && is_bad_data(csd_v)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation based on bad climatological standard deviation value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - inc_count(rej_csd, i, j, k); - continue; - } + // Loop through the interpolation methods + for(int i_interp=0; i_interp " - << "unexpected number of forecast levels (" - << fcst_dpa.n_planes() - << ") for surface verification! 
Set \"land_mask.flag\" and " - << "\"topo_mask.flag\" to false to disable this check.\n\n"; - exit(1); - } - - fcst_v = compute_sfc_interp(fcst_dpa[0], obs_x, obs_y, hdr_elv, obs_v, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, sfc_info, is_land); - } - // Otherwise, compute interpolated value - else { - fcst_v = compute_interp(fcst_dpa, obs_x, obs_y, obs_v, cmn_v, csd_v, - pd[0][0][k].interp_mthd, pd[0][0][k].interp_wdth, - pd[0][0][k].interp_shape, gr.wrap_lon(), - interp_thresh, spfh_flag, - fcst_info->level().type(), - to_lvl, f_lvl_blw, f_lvl_abv); - } - - if(is_bad_data(fcst_v)) { - - if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { - mlog << Debug(REJECT_DEBUG_LEVEL) - << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation based due to bad data in the " - << interpmthd_to_string(pd[0][0][k].interp_mthd) << "(" - << pd[0][0][k].interp_wdth * pd[0][0][k].interp_wdth - << ") interpolated forecast value:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; - } - - inc_count(rej_fcst, i, j, k); - continue; - } + // Check forecast values + double fcst_v; + if(!is_keeper_fcst(pnt_obs_str.c_str(), + i_msg_typ, i_mask, i_interp, + hdr_typ_str, gr, + obs_x, obs_y, hdr_elv, + obs_v, obs_lvl, obs_hgt, + cpi, fcst_v)) continue; // Check matched pair filtering options - if(!check_mpr_thresh(fcst_v, obs_v, cmn_v, csd_v, + ConcatString reason_cs; + if(!check_mpr_thresh(fcst_v, obs_v, cpi, mpr_column, mpr_thresh, &reason_cs)) { if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { mlog << Debug(REJECT_DEBUG_LEVEL) << "For " << fcst_info->magic_str() << " versus " - << obs_info->magic_str() - << ", skipping observation due to matched pair filter since " + << obs_info->magic_str() << ", skipping observation" + << "due to matched pair filter since " << reason_cs << ":\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; + << pnt_obs_str << "\n"; } - inc_count(rej_mpr, i, j, k); + inc_count(rej_mpr, i_msg_typ, i_mask, i_interp); continue; } // Compute weight for current point - wgt_v = (wgt_dp == (DataPlane *) 0 ? - default_grid_weight : wgt_dp->get(x, y)); + double wgt_v = (wgt_dp == nullptr ? 
+ default_grid_weight : + wgt_dp->get(x, y)); // Add the forecast, climatological, and observation data // Weight is from the nearest grid point - if(!pd[i][j][k].add_point_pair(hdr_sid_str, - hdr_lat, hdr_lon, obs_x, obs_y, hdr_ut, obs_lvl, - obs_hgt, fcst_v, obs_v, obs_qty, cmn_v, csd_v, - wgt_v)) { + int n = three_to_one(i_msg_typ, i_mask, i_interp); + if(!pd[n].add_point_pair(hdr_sid_str, + hdr_lat, hdr_lon, obs_x, obs_y, hdr_ut, obs_lvl, + obs_hgt, fcst_v, obs_v, obs_qty, cpi, wgt_v)) { if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { mlog << Debug(REJECT_DEBUG_LEVEL) << "For " << fcst_info->magic_str() << " versus " << obs_info->magic_str() << ", skipping observation since it is a duplicate:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; + << pnt_obs_str << "\n"; } - inc_count(rej_dup, i, j, k); + inc_count(rej_dup, i_msg_typ, i_mask, i_interp); + continue; } - seeps = 0; + + // Compute seeps if (precip_flag && precip_interval == 24*60*60) { // 24 hour precip only - seeps = pd[i][j][k].compute_seeps(hdr_sid_str, fcst_v, obs_v, hdr_ut); + seeps = pd[n].compute_seeps(hdr_sid_str, fcst_v, obs_v, hdr_ut); + } + else { + seeps = nullptr; } - pd[i][j][k].set_seeps_score(seeps); - if (seeps) delete seeps; + pd[n].set_seeps_score(seeps); + if (seeps) { delete seeps; seeps = nullptr; } if(mlog.verbosity_level() >= REJECT_DEBUG_LEVEL) { mlog << Debug(REJECT_DEBUG_LEVEL) << "For " << fcst_info->magic_str() << " versus " << obs_info->magic_str() << ", for observation type " - << pd[i][0][0].msg_typ << ", over region " - << pd[0][j][0].mask_name << ", for interpolation method " - << interpmthd_to_string(pd[0][0][k].interp_mthd) << "(" - << pd[0][0][k].interp_wdth * pd[0][0][k].interp_wdth + << pd[n].msg_typ << ", over region " + << pd[n].mask_name << ", for interpolation method " + << interpmthd_to_string(pd[n].interp_mthd) << "(" + << pd[n].interp_wdth * pd[n].interp_wdth << "), using observation:\n" - << point_obs_to_string(hdr_arr, hdr_typ_str, hdr_sid_str, - hdr_ut, obs_qty, obs_arr, var_name) - << "\n"; + << pnt_obs_str << "\n"; } - } // end for k - } // end for j - } // end for i - - return; -} - -//////////////////////////////////////////////////////////////////////// - -int VxPairDataPoint::get_n_pair() const { - int n, i, j, k; - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::get_n_pair() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(i=0, n=0; i " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_check_unique(duplicate_flag == DuplicateType::Unique); - } - } - } - -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_summary(ObsSummary s) { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::set_obs_summary() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_summary(s); - } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::set_obs_perc_value(int percentile) { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::set_obs_perc_value() -> " - << "set_pd_size() has not been 
called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_obs_perc_value(percentile); - } - } - } + } // end for i_interp + } // end for i_mask + } // end for i_msg_typ return; } @@ -1675,98 +674,27 @@ void VxPairDataPoint::set_obs_perc_value(int percentile) { //////////////////////////////////////////////////////////////////////// void VxPairDataPoint::load_seeps_climo(const ConcatString &seeps_climo_name) { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].load_seeps_climo(seeps_climo_name); - } - } - } -} - -//////////////////////////////////////////////////////////////////////// -void VxPairDataPoint::set_seeps_thresh(const SingleThresh &p1_thresh) { - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].set_seeps_thresh(p1_thresh); - } - } + if(n_vx == 0) { + mlog << Warning << "\nVxPairDataPoint::load_seeps_climo() -> " + << "set_size() has not been called yet!\n\n"; } -} -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::print_obs_summary() { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::print_obs_summary() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].print_obs_summary(); - } - } - } + for(auto &x : pd) x.load_seeps_climo(seeps_climo_name); return; } //////////////////////////////////////////////////////////////////////// -void VxPairDataPoint::calc_obs_summary() { - - if(n_msg_typ == 0 || n_mask == 0 || n_interp == 0) { - mlog << Warning << "\nVxPairDataPoint::calc_obs_summary() -> " - << "set_pd_size() has not been called yet!\n\n"; - } - - for(int i=0; i < n_msg_typ; i++){ - for(int j=0; j < n_mask; j++){ - for(int k=0; k < n_interp; k++){ - pd[i][j][k].calc_obs_summary(); - } - } - } - - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::inc_count(int ***&rej, int i) { - int j, k; - - for(j=0; j " + << "set_size() has not been called yet!\n\n"; } - return; -} - -//////////////////////////////////////////////////////////////////////// - -void VxPairDataPoint::inc_count(int ***&rej, int i, int j, int k) { - - rej[i][j][k]++; + for(auto &x : pd) x.set_seeps_thresh(p1_thresh); return; } @@ -1777,12 +705,12 @@ void VxPairDataPoint::inc_count(int ***&rej, int i, int j, int k) { // //////////////////////////////////////////////////////////////////////// -bool check_fo_thresh(double f, double o, double cmn, double csd, +bool check_fo_thresh(double f, double o, const ClimoPntInfo &cpi, const SingleThresh &ft, const SingleThresh &ot, const SetLogic type) { bool status = true; - bool fcheck = ft.check(f, cmn, csd); - bool ocheck = ot.check(o, cmn, csd); + bool fcheck = ft.check(f, &cpi); + bool ocheck = ot.check(o, &cpi); SetLogic t = type; // If either of the thresholds is NA, reset the logic to intersection @@ -1815,7 +743,7 @@ bool check_fo_thresh(double f, double o, double cmn, double csd, //////////////////////////////////////////////////////////////////////// -bool check_mpr_thresh(double f, double o, double cmn, double csd, +bool check_mpr_thresh(double f, double o, const ClimoPntInfo &cpi, const StringArray &col_sa, const ThreshArray &col_ta, ConcatString *reason_ptr) { // 
Initialize @@ -1829,10 +757,9 @@ bool check_mpr_thresh(double f, double o, double cmn, double csd, StringArray sa; ConcatString cs; double v, v_cur; - int i, j; // Loop over all the column filter names - for(i=0; i 1) { // Loop through the columns - for(j=1; j " << "unsupported matched pair column name requested in \"" @@ -1914,7 +854,8 @@ double get_mpr_column_value(double f, double o, double cmn, double csd, //////////////////////////////////////////////////////////////////////// void apply_mpr_thresh_mask(DataPlane &fcst_dp, DataPlane &obs_dp, - DataPlane &cmn_dp, DataPlane &csd_dp, + DataPlane &fcmn_dp, DataPlane &fcsd_dp, + DataPlane &ocmn_dp, DataPlane &ocsd_dp, const StringArray &col_sa, const ThreshArray &col_ta) { // Check for no work to be done @@ -1932,33 +873,43 @@ void apply_mpr_thresh_mask(DataPlane &fcst_dp, DataPlane &obs_dp, int nxy = fcst_dp.nx() * fcst_dp.ny(); int n_skip = 0; - bool cmn_flag = !(cmn_dp.is_empty()); - bool csd_flag = !(csd_dp.is_empty()); + bool fcmn_flag = !(fcmn_dp.is_empty()); + bool fcsd_flag = !(fcsd_dp.is_empty()); + bool ocmn_flag = !(ocmn_dp.is_empty()); + bool ocsd_flag = !(ocsd_dp.is_empty()); // Loop over the pairs for(int i=0; i seeps_mpr; - SeepsAggScore seeps; + SeepsAggScore seeps_agg; ////////////////////////////////////////////////////////////////// @@ -58,19 +58,21 @@ class PairDataPoint : public PairBase { bool add_point_pair(const char *, double, double, double, double, unixtime, double, double, double, double, - const char *, double, double, double); + const char *, const ClimoPntInfo &, double); void load_seeps_climo(const ConcatString &seeps_climo_name); void set_seeps_thresh(const SingleThresh &p1_thresh); void set_seeps_score(SeepsScore *, int index=-1); void set_point_pair(int, const char *, double, double, double, double, unixtime, double, double, double, double, - const char *, double, double, double, SeepsScore *); + const char *, const ClimoPntInfo &, + double, const SeepsScore *); - bool add_grid_pair(double, double, double, double, double); + bool add_grid_pair(double, double, const ClimoPntInfo &, double); bool add_grid_pair(const NumArray &f_in, const NumArray &o_in, - const NumArray &cmn_in, const NumArray &csd_in, + const NumArray &fcmn_in, const NumArray &fcsd_in, + const NumArray &ocmn_in, const NumArray &ocsd_in, const NumArray &w_in); PairDataPoint subset_pairs_cnt_thresh(const SingleThresh &ft, @@ -82,12 +84,11 @@ class PairDataPoint : public PairBase { //////////////////////////////////////////////////////////////////////// // -// Class to store a variety of PairDataPoint objects for each -// verification task +// Class to store PairDataPoint objects for point verification // //////////////////////////////////////////////////////////////////////// -class VxPairDataPoint { +class VxPairDataPoint : public VxPairBase { private: @@ -107,159 +108,21 @@ class VxPairDataPoint { // ////////////////////////////////////////////////////////////////// - VarInfo *fcst_info; // Forecast field, allocated by VarInfoFactory - VarInfo *climo_info; // Climatology field, allocated by VarInfoFactory - VarInfoGrib *obs_info; // Observation field, allocated by VarInfoFactory - - ConcatString desc; // User description from config file - - double interp_thresh; // Threshold between 0 and 1 used when - // interpolating the forecasts to the - // observation location. - - ////////////////////////////////////////////////////////////////// - // - // Forecast and climatology fields falling between the requested - // levels. 
Store the fields in a data plane array. - // - ////////////////////////////////////////////////////////////////// - - DataPlaneArray fcst_dpa; // Forecast data plane array - DataPlaneArray climo_mn_dpa; // Climatology mean data plane array - DataPlaneArray climo_sd_dpa; // Climatology standard deviation data plane array - - ////////////////////////////////////////////////////////////////// - - unixtime fcst_ut; // Forecast valid time - unixtime beg_ut; // Beginning of valid time window - unixtime end_ut; // End of valid time window - - ////////////////////////////////////////////////////////////////// - - StringArray sid_inc_filt; // Station ID inclusion list - StringArray sid_exc_filt; // Station ID exclusion list - StringArray obs_qty_inc_filt; // Observation quality include markers - StringArray obs_qty_exc_filt; // Observation quality exclude markers - - ////////////////////////////////////////////////////////////////// - - StringArray mpr_column; // Names of MPR columns or diffs of columns - ThreshArray mpr_thresh; // Filtering thresholds for the MPR columns - - ////////////////////////////////////////////////////////////////// - - StringArray msg_typ_sfc; // List of surface message types - StringArray msg_typ_lnd; // List of surface land message types - StringArray msg_typ_wtr; // List of surface water message types - - SurfaceInfo sfc_info; // Land/sea mask and topography info - - ////////////////////////////////////////////////////////////////// - - int n_msg_typ; // Number of verifying message types - - int n_mask; // Total number of masking regions - // of masking DataPlane fields or SIDs - - int n_interp; // Number of interpolation techniques - - ////////////////////////////////////////////////////////////////// - - PairDataPoint ***pd; // 3-Dim Array of PairDataPoint objects - // as [n_msg_typ][n_mask][n_interp] - - // Counts for observation rejection reason codes - int n_try; // Number of observations processed - int rej_sid; // Reject based on SID inclusion and exclusion lists - int rej_var; // Reject based on observation variable name - int rej_vld; // Reject based on valid time - int rej_obs; // Reject observation bad data - int rej_grd; // Reject based on location - int rej_topo; // Reject based on topography - int rej_lvl; // Reject based on vertical level - int rej_qty; // Reject based on obs quality - - // 3-Dim Arrays for observation rejection reason codes - int ***rej_typ; // Reject based on message type - int ***rej_mask; // Reject based on masking region - int ***rej_fcst; // Reject forecast bad data - int ***rej_cmn; // Reject climo mean bad data - int ***rej_csd; // Reject climo stdev bad data - int ***rej_mpr; // Reject based on MPR filtering logic - int ***rej_dup; // Reject based on duplicates logic + // 3-Dim vector of PairDataPoint objects [n_msg_typ][n_mask][n_interp] + std::vector pd; ////////////////////////////////////////////////////////////////// void clear(); - void set_fcst_info(VarInfo *); - void set_climo_info(VarInfo *); - void set_obs_info(VarInfoGrib *); - - void set_desc(const char *); - - void set_interp_thresh(double); - - void set_fcst_dpa(const DataPlaneArray &); - void set_climo_mn_dpa(const DataPlaneArray &); - void set_climo_sd_dpa(const DataPlaneArray &); - - void set_fcst_ut(const unixtime); - void set_beg_ut(const unixtime); - void set_end_ut(const unixtime); - - void set_sid_inc_filt(const StringArray &); - void set_sid_exc_filt(const StringArray &); - void set_obs_qty_inc_filt(const StringArray &); - void set_obs_qty_exc_filt(const 
StringArray &); - - // Call set_pd_size before set_msg_typ, set_mask_area, and set_interp - void set_pd_size(int, int, int); - - void set_msg_typ(int, const char *); - void set_msg_typ_vals(int, const StringArray &); - void set_mask_area(int, const char *, MaskPlane *); - void set_mask_sid(int, const char *, StringArray *); - void set_mask_llpnt(int, const char *, MaskLatLon *); - - void set_interp(int i_interp, const char *interp_mthd_str, int width, - GridTemplateFactory::GridTemplates shape); - void set_interp(int i_interp, InterpMthd mthd, - int width, GridTemplateFactory::GridTemplates shape); - - void set_mpr_thresh(const StringArray &, const ThreshArray &); + void set_size(int, int, int); void load_seeps_climo(const ConcatString &seeps_climo_name); void set_seeps_thresh(const SingleThresh &p1_thresh); - void set_climo_cdf_info_ptr(const ClimoCDFInfo *); - - void set_msg_typ_sfc(const StringArray &); - void set_msg_typ_lnd(const StringArray &); - void set_msg_typ_wtr(const StringArray &); - - void set_sfc_info(const SurfaceInfo &); - void add_point_obs(float *, const char *, const char *, unixtime, const char *, float *, Grid &, const char * = 0, const DataPlane * = 0); - - int get_n_pair() const; - - void set_duplicate_flag(DuplicateType duplicate_flag); - - void set_obs_summary(ObsSummary obs_summary); - - void set_obs_perc_value(int percentile); - - void print_obs_summary(); - - void calc_obs_summary(); - - // Member functions for incrementing the counts - void inc_count(int ***&, int); - void inc_count(int ***&, int, int); - void inc_count(int ***&, int, int, int); }; @@ -269,18 +132,19 @@ class VxPairDataPoint { // //////////////////////////////////////////////////////////////////////// -extern bool check_fo_thresh(double, double, double, double, +extern bool check_fo_thresh(double, double, const ClimoPntInfo &, const SingleThresh &, const SingleThresh &, const SetLogic); -extern bool check_mpr_thresh(double, double, double, double, +extern bool check_mpr_thresh(double, double, const ClimoPntInfo &, const StringArray &, const ThreshArray &, ConcatString * = 0); -extern double get_mpr_column_value(double, double, double, double, +extern double get_mpr_column_value(double, double, const ClimoPntInfo &, const char *); extern void apply_mpr_thresh_mask(DataPlane &, DataPlane &, + DataPlane &, DataPlane &, DataPlane &, DataPlane &, const StringArray &, const ThreshArray &); @@ -304,13 +168,6 @@ extern void subset_wind_pairs(const PairDataPoint &, extern PairDataPoint subset_climo_cdf_bin(const PairDataPoint &, const ThreshArray &, int i_bin); -// Write the point observation in the MET point format for logging -extern ConcatString point_obs_to_string( - float *hdr_arr, const char *hdr_typ_str, - const char *hdr_sid_str, unixtime hdr_ut, - const char *obs_qty, float *obs_arr, - const char *var_name); - //////////////////////////////////////////////////////////////////////// #endif // __PAIR_DATA_POINT_H__ diff --git a/src/libcode/vx_summary/summary_calc_percentile.cc b/src/libcode/vx_summary/summary_calc_percentile.cc index d55ace753d..5ac813944f 100644 --- a/src/libcode/vx_summary/summary_calc_percentile.cc +++ b/src/libcode/vx_summary/summary_calc_percentile.cc @@ -40,15 +40,15 @@ SummaryCalcPercentile::SummaryCalcPercentile(const string &type_string) : !isdigit(type_string[2])) { mlog << Error << "\nSummaryCalcPercentile::SummaryCalcPercentile() -> " - << "invalid percentile type \"" << type_string - << "\" specified in configuration file.\n\n"; + << "invalid percentile type \"" << 
type_string + << "\" specified in configuration file.\n\n"; exit(1); } - + // Pull the desired percentile from the string _percentile = atof(type_string.substr(1,2).c_str()) / 100.0; - + // Construct the type string ConcatString type_buffer; diff --git a/src/libcode/vx_tc_util/atcf_prob_line.cc b/src/libcode/vx_tc_util/atcf_prob_line.cc index 8b320eb912..8a140997ae 100644 --- a/src/libcode/vx_tc_util/atcf_prob_line.cc +++ b/src/libcode/vx_tc_util/atcf_prob_line.cc @@ -148,7 +148,7 @@ int ATCFProbLine::read_line(LineDataFile * ldf) { status = 0; continue; } - } + } return 1; } diff --git a/src/tools/core/ensemble_stat/ensemble_stat.cc b/src/tools/core/ensemble_stat/ensemble_stat.cc index cab5405f19..3dedd76ef5 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat.cc +++ b/src/tools/core/ensemble_stat/ensemble_stat.cc @@ -75,6 +75,7 @@ // 042 04/29/24 Halley Gotway MET #2870 Ignore MISSING keyword. // 043 04/29/24 Halley Gotway MET #2795 Move level mismatch warning. // 044 06/17/24 Halley Gotway MET #2856 Reinitialize climo_cdf pointer +// 045 07/05/24 Halley Gotway MET #2924 Support forecast climatology. // //////////////////////////////////////////////////////////////////////// @@ -130,6 +131,7 @@ static void process_grid_scores (int, const DataPlane *, const DataPlane *, const DataPlane &, const DataPlane &, const DataPlane &, const DataPlane &, + const DataPlane &, const DataPlane &, const DataPlane &, const MaskPlane &, ObsErrorEntry *, PairDataEnsemble &); @@ -486,7 +488,7 @@ void process_grid(const Grid &fcst_grid) { // Parse regridding logic RegridInfo ri; - ri = conf_info.vx_opt[0].vx_pd.fcst_info->get_var_info()->regrid(); + ri = conf_info.vx_opt[0].vx_pd.ens_info->get_var_info()->regrid(); // Read gridded observation data, if necessary if(ri.field == FieldType::Obs) { @@ -552,15 +554,15 @@ void process_n_vld() { // Loop through the verification fields to be processed for(i_var=0; i_varinputs_n(); + n_ens_inputs = conf_info.vx_opt[i_var].vx_pd.ens_info->inputs_n(); // Loop through the forecast inputs for(i_ens=n_vld=0; i_ensget_file(i_ens); - var_info = conf_info.vx_opt[i_var].vx_pd.fcst_info->get_var_info(i_ens); - j = conf_info.vx_opt[i_var].vx_pd.fcst_info->get_file_index(i_ens); + fcst_file = conf_info.vx_opt[i_var].vx_pd.ens_info->get_file(i_ens); + var_info = conf_info.vx_opt[i_var].vx_pd.ens_info->get_var_info(i_ens); + j = conf_info.vx_opt[i_var].vx_pd.ens_info->get_file_index(i_ens); // Check for valid file if(!ens_file_vld[j]) continue; @@ -592,7 +594,7 @@ void process_n_vld() { << n_vld << " of " << n_ens_inputs << " (" << (double) n_vld/n_ens_inputs << ")" << " forecast fields found for \"" - << conf_info.vx_opt[i_var].vx_pd.fcst_info->get_var_info()->magic_str() + << conf_info.vx_opt[i_var].vx_pd.fcst_info->magic_str() << "\" does not meet the threshold specified by \"" << conf_key_fcst_ens_thresh << "\" (" << conf_info.vld_ens_thresh << ") in the configuration file.\n\n"; @@ -755,7 +757,8 @@ void process_point_vx() { int i, j, i_file, n_miss; unixtime beg_ut, end_ut; DataPlaneArray fcst_dpa, emn_dpa; - DataPlaneArray cmn_dpa, csd_dpa; + DataPlaneArray fcmn_dpa, fcsd_dpa; + DataPlaneArray ocmn_dpa, ocsd_dpa; // Loop through each of the fields to be verified for(i=0; iget_var_info()->magic_str() << ".\n"; + << "For " << conf_info.vx_opt[i].vx_pd.fcst_info->magic_str() << ", found " + << fcmn_dpa.n_planes() << " forecast climatology mean and " + << fcsd_dpa.n_planes() << " standard deviation level(s), and " + << ocmn_dpa.n_planes() << " observation climatology mean and 
" + << ocsd_dpa.n_planes() << " standard deviation level(s).\n"; // Store climatology information - conf_info.vx_opt[i].vx_pd.set_climo_mn_dpa(cmn_dpa); - conf_info.vx_opt[i].vx_pd.set_climo_sd_dpa(csd_dpa); + conf_info.vx_opt[i].vx_pd.set_fcst_climo_mn_dpa(fcmn_dpa); + conf_info.vx_opt[i].vx_pd.set_fcst_climo_sd_dpa(fcsd_dpa); + conf_info.vx_opt[i].vx_pd.set_obs_climo_mn_dpa(ocmn_dpa); + conf_info.vx_opt[i].vx_pd.set_obs_climo_sd_dpa(ocsd_dpa); } // Process each point observation NetCDF file @@ -809,17 +823,18 @@ void process_point_vx() { // Loop through each of the fields to be verified for(i=0; iget_var_info(); - VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; + EnsVarInfo *ens_info = conf_info.vx_opt[i].vx_pd.ens_info; + VarInfo *fcst_info = ens_info->get_var_info(); + VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; bool print_level_mismatch_warning = true; // Initialize emn_dpa.clear(); // Loop through the ensemble inputs - for(j=0, n_miss=0; jinputs_n(); j++) { + for(j=0, n_miss=0; jinputs_n(); j++) { - i_file = conf_info.vx_opt[i].vx_pd.fcst_info->get_file_index(j); + i_file = ens_info->get_file_index(j); // If the current forecast file is valid, process it if(!ens_file_vld[i_file]) { @@ -886,7 +901,7 @@ void process_point_vx() { mlog << Debug(2) << "Computing the ensemble mean from the members.\n"; - int n = conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n() - n_miss; + int n = ens_info->inputs_n() - n_miss; if(n <= 0) { mlog << Error << "\nprocess_point_vx() -> " @@ -1099,13 +1114,13 @@ bool process_point_ens(int i_vx, int i_ens, DataPlaneArray &fcst_dpa) { fcst_dpa.clear(); // Get file based on current vx and ensemble index - ConcatString ens_file = conf_info.vx_opt[i_vx].vx_pd.fcst_info->get_file(i_ens); + ConcatString ens_file = conf_info.vx_opt[i_vx].vx_pd.ens_info->get_file(i_ens); mlog << Debug(2) << "\n" << sep_str << "\n\n" << "Processing ensemble member file: " << ens_file << (i_ens == ctrl_file_index ? " (control)\n" : "\n"); - VarInfo *info = conf_info.vx_opt[i_vx].vx_pd.fcst_info->get_var_info(i_ens); + VarInfo *info = conf_info.vx_opt[i_vx].vx_pd.ens_info->get_var_info(i_ens); // Read the gridded data from the input forecast file bool status = get_data_plane_array(ens_file.c_str(), info->file_type(), info, @@ -1143,28 +1158,31 @@ void process_point_scores() { // requested, and write the output. 
for(i=0; iget_var_info(); + VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; + // Set the description shc.set_desc(conf_info.vx_opt[i].vx_pd.desc.c_str()); // Store the forecast variable name - shc.set_fcst_var(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->name_attr()); + shc.set_fcst_var(fcst_info->name_attr()); // Store the forecast variable units - shc.set_fcst_units(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->units_attr()); + shc.set_fcst_units(fcst_info->units_attr()); // Set the forecast level name - shc.set_fcst_lev(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->level_attr().c_str()); + shc.set_fcst_lev(fcst_info->level_attr().c_str()); // Store the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(obs_info->name_attr()); // Store the observation variable units - cs = conf_info.vx_opt[i].vx_pd.obs_info->units_attr(); + cs = obs_info->units_attr(); if(cs.empty()) cs = na_string; shc.set_obs_units(cs); // Set the observation level name - shc.set_obs_lev(conf_info.vx_opt[i].vx_pd.obs_info->level_attr().c_str()); + shc.set_obs_lev(obs_info->level_attr().c_str()); // Set the observation lead time shc.set_obs_lead_sec(0); @@ -1195,13 +1213,13 @@ void process_point_scores() { shc.set_interp_wdth(conf_info.vx_opt[i].interp_info.width[l]); } - pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[j][k][l]; + int n = conf_info.vx_opt[i].vx_pd.three_to_one(j, k, l); + + pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[n]; mlog << Debug(2) << "Processing point verification " - << conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->magic_str() - << " versus " - << conf_info.vx_opt[i].vx_pd.obs_info->magic_str() + << fcst_info->magic_str() << " versus " << obs_info->magic_str() << ", for observation type " << pd_ptr->msg_typ << ", over region " << pd_ptr->mask_name << ", for interpolation method " @@ -1238,7 +1256,7 @@ void process_grid_vx() { DataPlane *fcst_dp = (DataPlane *) nullptr; DataPlane *fraw_dp = (DataPlane *) nullptr; DataPlane obs_dp, oraw_dp; - DataPlane emn_dp, cmn_dp, csd_dp; + DataPlane emn_dp, fcmn_dp, fcsd_dp, ocmn_dp, ocsd_dp; PairDataEnsemble pd_all, pd; ObsErrorEntry *oerr_ptr = (ObsErrorEntry *) nullptr; VarInfo * var_info; @@ -1251,13 +1269,16 @@ void process_grid_vx() { shc.set_obtype(conf_info.obtype.c_str()); // Allocate space to store the forecast fields - int num_dp = conf_info.vx_opt[0].vx_pd.fcst_info->inputs_n(); + int num_dp = conf_info.vx_opt[0].vx_pd.ens_info->inputs_n(); fcst_dp = new DataPlane [num_dp]; fraw_dp = new DataPlane [num_dp]; // Loop through each of the fields to be verified for(i=0; iget_var_info(); + VarInfo *obs_info = conf_info.vx_opt[i].vx_pd.obs_info; + // Initialize emn_dp.clear(); @@ -1272,13 +1293,13 @@ void process_grid_vx() { shc.set_desc(conf_info.vx_opt[i].vx_pd.desc.c_str()); // Set the forecast variable name - shc.set_fcst_var(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->name_attr()); + shc.set_fcst_var(fcst_info->name_attr()); // Store the forecast variable units - shc.set_fcst_units(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->units_attr()); + shc.set_fcst_units(fcst_info->units_attr()); // Set the forecast level name - shc.set_fcst_lev(conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->level_attr().c_str()); + shc.set_fcst_lev(fcst_info->level_attr().c_str()); // Set the ObsErrorEntry pointer if(conf_info.vx_opt[i].obs_error.flag) { @@ -1295,12 +1316,12 @@ void process_grid_vx() { // Check for table entries for this variable and message type 
if(!obs_error_table.has( - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str())) { mlog << Warning << "\nprocess_grid_vx() -> " << "Disabling observation error logic since the " << "obs error table contains no entry for OBS_VAR(" - << conf_info.vx_opt[i].vx_pd.obs_info->name() + << obs_info->name() << ") and MESSAGE_TYPE(" << conf_info.obtype << ").\nSpecify a custom obs error table using the " << "MET_OBS_ERROR_TABLE environment variable.\n\n"; @@ -1310,7 +1331,7 @@ void process_grid_vx() { // Do a lookup for this variable and message type oerr_ptr = obs_error_table.lookup( - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str()); // If match was found and includes a value range setting, @@ -1333,14 +1354,14 @@ void process_grid_vx() { } // Loop through each of the input ensemble files/variables - for(j=0, n_miss=0; j < conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n(); j++) { + for(j=0, n_miss=0; j < conf_info.vx_opt[i].vx_pd.ens_info->inputs_n(); j++) { // Initialize fcst_dp[j].clear(); - i_file = conf_info.vx_opt[i].vx_pd.fcst_info->get_file_index(j); - var_info = conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info(j); - fcst_file = conf_info.vx_opt[i].vx_pd.fcst_info->get_file(j); + i_file = conf_info.vx_opt[i].vx_pd.ens_info->get_file_index(j); + var_info = conf_info.vx_opt[i].vx_pd.ens_info->get_var_info(j); + fcst_file = conf_info.vx_opt[i].vx_pd.ens_info->get_file(j); // If the current ensemble file is valid, read the field if(ens_file_vld[i_file]) { @@ -1369,7 +1390,7 @@ void process_grid_vx() { mlog << Debug(2) << "Processing ensemble mean file: " << ens_mean_file << "\n"; - VarInfo *info = conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info(); + VarInfo *info = conf_info.vx_opt[i].vx_pd.ens_info->get_var_info(); // Read the gridded data from the mean file found = get_data_plane(ens_mean_file.c_str(), FileType_None, @@ -1388,7 +1409,7 @@ void process_grid_vx() { mlog << Debug(2) << "Computing the ensemble mean from the members.\n"; - int n = conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n() - n_miss; + int n = conf_info.vx_opt[i].vx_pd.ens_info->inputs_n() - n_miss; if(n <= 0) { mlog << Error << "\nprocess_grid_vx() -> " @@ -1400,19 +1421,29 @@ void process_grid_vx() { emn_dp /= (double) n; } - // Read climatology data - cmn_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i, ens_valid_ut, grid); - csd_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i, ens_valid_ut, grid); + // Read forecast climatology data + fcmn_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_mean_field, false), + i, ens_valid_ut, grid); + fcsd_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_stdev_field, false), + i, ens_valid_ut, grid); + + // Read observation climatology data + ocmn_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_mean_field, false), + i, ens_valid_ut, grid); + ocsd_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_stdev_field, false), + i, ens_valid_ut, grid); mlog << Debug(3) - << "Found " << (cmn_dp.nx() == 0 ? 0 : 1) - << " climatology mean field(s) and " << (csd_dp.nx() == 0 ? 
0 : 1) - << " climatology standard deviation field(s) for forecast " - << conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->magic_str() << ".\n"; + << "For " << conf_info.vx_opt[i].vx_pd.fcst_info->magic_str() << ", found " + << (fcmn_dp.nx() == 0 ? 0 : 1) << " forecast climatology mean and " + << (fcsd_dp.nx() == 0 ? 0 : 1) << " standard deviation field(s), and " + << (ocmn_dp.nx() == 0 ? 0 : 1) << " observation climatology mean and " + << (ocsd_dp.nx() == 0 ? 0 : 1) << " standard deviation field(s).\n"; + // If requested in the config file, create a NetCDF file to store // the verification matched pairs @@ -1447,13 +1478,13 @@ void process_grid_vx() { } // Set the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(obs_info->name_attr()); // Store the observation variable units - shc.set_obs_units(conf_info.vx_opt[i].vx_pd.obs_info->units_attr()); + shc.set_obs_units(obs_info->units_attr()); // Set the observation level name - shc.set_obs_lev(conf_info.vx_opt[i].vx_pd.obs_info->level_attr().c_str()); + shc.set_obs_lev(obs_info->level_attr().c_str()); // Set the observation lead time shc.set_obs_lead_sec(obs_dp.lead()); @@ -1513,12 +1544,12 @@ void process_grid_vx() { << "gridded observation data.\n"; obs_dp = add_obs_error_bc(conf_info.rng_ptr, FieldType::Obs, oerr_ptr, oraw_dp, oraw_dp, - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str()); } // Loop through the ensemble members - for(k=0; k < conf_info.vx_opt[i].vx_pd.fcst_info->inputs_n(); k++) { + for(k=0; k < conf_info.vx_opt[i].vx_pd.ens_info->inputs_n(); k++) { // Smooth the forecast field, if requested if(field == FieldType::Fcst || field == FieldType::Both) { @@ -1538,7 +1569,7 @@ void process_grid_vx() { << "ensemble member " << k+1 << ".\n"; fcst_dp[k] = add_obs_error_inc(conf_info.rng_ptr, FieldType::Fcst, oerr_ptr, fraw_dp[k], oraw_dp, - conf_info.vx_opt[i].vx_pd.obs_info->name().c_str(), + obs_info->name().c_str(), conf_info.obtype.c_str()); } } // end for k @@ -1556,22 +1587,22 @@ void process_grid_vx() { pd_all.clear(); pd_all.set_ens_size(n_vx_vld[i]); pd_all.set_climo_cdf_info_ptr(&conf_info.vx_opt[i].cdf_info); - pd_all.ctrl_index = conf_info.vx_opt[i].vx_pd.pd[0][0][0].ctrl_index; - pd_all.skip_const = conf_info.vx_opt[i].vx_pd.pd[0][0][0].skip_const; + pd_all.ctrl_index = conf_info.vx_opt[i].vx_pd.pd[0].ctrl_index; + pd_all.skip_const = conf_info.vx_opt[i].vx_pd.pd[0].skip_const; // Apply the current mask to the fields and compute the pairs process_grid_scores(i, fcst_dp, fraw_dp, obs_dp, oraw_dp, - emn_dp, cmn_dp, csd_dp, + emn_dp, + fcmn_dp, fcsd_dp, + ocmn_dp, ocsd_dp, mask_mp, oerr_ptr, pd_all); mlog << Debug(2) << "Processing gridded verification " - << conf_info.vx_opt[i].vx_pd.fcst_info->get_var_info()->magic_str() - << " versus " - << conf_info.vx_opt[i].vx_pd.obs_info->magic_str() + << fcst_info->magic_str() << " versus " << obs_info->magic_str() << ", for observation type " << shc.get_obtype() << ", over region " << shc.get_mask() << ", for interpolation method " @@ -1615,23 +1646,28 @@ void process_grid_vx() { void process_grid_scores(int i_vx, const DataPlane *fcst_dp, const DataPlane *fraw_dp, const DataPlane &obs_dp, const DataPlane &oraw_dp, - const DataPlane &emn_dp, const DataPlane &cmn_dp, - const DataPlane &csd_dp, const MaskPlane &mask_mp, + const DataPlane &emn_dp, + const DataPlane &fcmn_dp, const DataPlane &fcsd_dp, + const DataPlane &ocmn_dp, const DataPlane &ocsd_dp, + const 
MaskPlane &mask_mp, ObsErrorEntry *oerr_ptr, PairDataEnsemble &pd) { int i, j, x, y, n_miss; - double cmn, csd; ObsErrorEntry *e = (ObsErrorEntry *) nullptr; // Allocate memory in one big chunk based on grid size pd.extend(nxy); // Climatology flags - bool emn_flag = (emn_dp.nx() == obs_dp.nx() && - emn_dp.ny() == obs_dp.ny()); - bool cmn_flag = (cmn_dp.nx() == obs_dp.nx() && - cmn_dp.ny() == obs_dp.ny()); - bool csd_flag = (csd_dp.nx() == obs_dp.nx() && - csd_dp.ny() == obs_dp.ny()); + bool emn_flag = (emn_dp.nx() == obs_dp.nx() && + emn_dp.ny() == obs_dp.ny()); + bool fcmn_flag = (fcmn_dp.nx() == obs_dp.nx() && + fcmn_dp.ny() == obs_dp.ny()); + bool fcsd_flag = (fcsd_dp.nx() == obs_dp.nx() && + fcsd_dp.ny() == obs_dp.ny()); + bool ocmn_flag = (ocmn_dp.nx() == obs_dp.nx() && + ocmn_dp.ny() == obs_dp.ny()); + bool ocsd_flag = (ocsd_dp.nx() == obs_dp.nx() && + ocsd_dp.ny() == obs_dp.ny()); // Loop through the observation field for(x=0; xinputs_n(); j++) { + for(j=0,n_miss=0; j < conf_info.vx_opt[i_vx].vx_pd.ens_info->inputs_n(); j++) { // Skip missing data if(fcst_dp[j].nx() == 0 || fcst_dp[j].ny() == 0) { @@ -1742,17 +1781,17 @@ void do_rps(const EnsembleStatVxOpt &vx_opt, rps_info.othresh = othresh; rps_info.set_prob_cat_thresh(vx_opt.fcat_ta); - // If prob_cat_thresh is empty and climo data is available, - // use climo_cdf thresholds instead + // If prob_cat_thresh is empty and observation climo + // data is available, use climo_cdf thresholds instead if(rps_info.fthresh.n() == 0 && - pd_ptr->cmn_na.n_valid() > 0 && - pd_ptr->csd_na.n_valid() > 0 && + pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 && vx_opt.cdf_info.cdf_ta.n() > 0) { rps_info.set_cdp_thresh(vx_opt.cdf_info.cdf_ta); } // Compute ensemble RPS statistics from pre-computed binned probabilities - if(vx_opt.vx_pd.fcst_info->get_var_info()->is_prob()) { + if(vx_opt.vx_pd.ens_info->get_var_info()->is_prob()) { rps_info.set_climo_bin_prob(*pd_ptr, vx_opt.ocat_ta); } // Compute ensemble RPS statistics from ensemble member values @@ -1833,7 +1872,7 @@ void setup_txt_files() { // Compute the number of PHIST bins for(i=n_phist_bin=0; i n_phist_bin ? n : n_phist_bin); } @@ -2059,7 +2098,7 @@ void write_txt_files(const EnsembleStatVxOpt &vx_opt, PairDataEnsemble pd; // Check for probabilistic input - bool is_prob = vx_opt.vx_pd.fcst_info->get_var_info()->is_prob(); + bool is_prob = vx_opt.vx_pd.ens_info->get_var_info()->is_prob(); // Process each observation filtering threshold for(i=0; i 0) { @@ -2134,7 +2173,7 @@ void write_txt_files(const EnsembleStatVxOpt &vx_opt, if(!is_prob && vx_opt.output_flag[i_ssvar] != STATOutputType::None) { - pd.ssvar_bin_size = vx_opt.vx_pd.pd[0][0][0].ssvar_bin_size; + pd.ssvar_bin_size = vx_opt.vx_pd.pd[0].ssvar_bin_size; pd.compute_ssvar(); // Make sure there are bins to process @@ -2196,17 +2235,15 @@ void write_txt_files(const EnsembleStatVxOpt &vx_opt, void do_pct(const EnsembleStatVxOpt &vx_opt, const PairDataEnsemble &pd_ens) { - // Flag to indicate the presence of valid climo data - bool have_climo = (pd_ens.cmn_na.n_valid() > 0 && - pd_ens.csd_na.n_valid() > 0); - // If forecast probability thresholds were specified, use them. if(vx_opt.fcat_ta.n() > 0) { do_pct_cat_thresh(vx_opt, pd_ens); } // Otherwise, if climo data is available and bins were requested, // use climo_cdf thresholds instead. 
- else if(have_climo && vx_opt.cdf_info.cdf_ta.n() > 0) { + else if(pd_ens.ocmn_na.n_valid() > 0 && + pd_ens.ocsd_na.n_valid() > 0 && + vx_opt.cdf_info.cdf_ta.n() > 0) { do_pct_cdp_thresh(vx_opt, pd_ens); } @@ -2232,7 +2269,8 @@ void do_pct_cat_thresh(const EnsembleStatVxOpt &vx_opt, pd_pnt.extend(pd_ens.n_obs); // Determine the number of climo CDF bins - n_bin = (pd_ens.cmn_na.n_valid() > 0 && pd_ens.csd_na.n_valid() > 0 ? + n_bin = (pd_ens.ocmn_na.n_valid() > 0 && + pd_ens.ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -2266,21 +2304,22 @@ void do_pct_cat_thresh(const EnsembleStatVxOpt &vx_opt, // Initialize counts n_vld = n_evt = 0; + // Store climo data + ClimoPntInfo cpi(pd_ens.fcmn_na[i_obs], pd_ens.fcsd_na[i_obs], + pd_ens.ocmn_na[i_obs], pd_ens.ocsd_na[i_obs]); + // Derive the ensemble probability for(i_ens=0; i_ens 0 || (double) (n_vld/pd_ens.n_ens) >= conf_info.vld_data_thresh) { pd_pnt.add_grid_pair((double) n_evt/n_vld, pd_ens.o_na[i_obs], - pd_ens.cmn_na[i_obs], pd_ens.csd_na[i_obs], - pd_ens.wgt_na[i_obs]); + cpi, pd_ens.wgt_na[i_obs]); } } // end for i_obs @@ -2332,18 +2371,19 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, int n_vld, n_evt, n_bin; PCTInfo *pct_info = (PCTInfo *) nullptr; PairDataPoint pd_pnt, pd; - ThreshArray cdp_thresh; + ThreshArray ocdp_thresh; // Derive a PairDataPoint object from the PairDataEnsemble input pd_pnt.extend(pd_ens.n_obs); // Derive the climo distribution percentile thresholds - cdp_thresh = derive_cdp_thresh(vx_opt.cdf_info.cdf_ta); - n_bin = cdp_thresh.n(); + ocdp_thresh = derive_ocdp_thresh(vx_opt.cdf_info.cdf_ta); + n_bin = ocdp_thresh.n(); mlog << Debug(2) - << "Computing Probabilistic Statistics for " << cdp_thresh.n() - << " climatological distribution percentile thresholds.\n"; + << "Computing Probabilistic Statistics for " + << ocdp_thresh.n() << " observation climatological " + << "distribution percentile thresholds.\n"; // Allocate memory pct_info = new PCTInfo [n_bin]; @@ -2353,7 +2393,7 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, // Set the header columns shc.set_fcst_thresh(vx_opt.fpct_ta); - shc.set_obs_thresh(cdp_thresh[i_bin]); + shc.set_obs_thresh(ocdp_thresh[i_bin]); // Re-initialize pd_pnt.erase(); @@ -2365,21 +2405,22 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, // Initialize counts n_vld = n_evt = 0; + // Store climo data + ClimoPntInfo cpi(pd_ens.fcmn_na[i_obs], pd_ens.fcsd_na[i_obs], + pd_ens.ocmn_na[i_obs], pd_ens.ocsd_na[i_obs]); + // Derive the ensemble probability for(i_ens=0; i_ens 0 || (double) (n_vld/pd_ens.n_ens) >= conf_info.vld_data_thresh) { pd_pnt.add_grid_pair((double) n_evt/n_vld, pd_ens.o_na[i_obs], - pd_ens.cmn_na[i_obs], pd_ens.csd_na[i_obs], - pd_ens.wgt_na[i_obs]); + cpi, pd_ens.wgt_na[i_obs]); } } // end for i_obs @@ -2389,7 +2430,7 @@ void do_pct_cdp_thresh(const EnsembleStatVxOpt &vx_opt, // Store thresholds pct_info[i_bin].fthresh = vx_opt.fpct_ta; - pct_info[i_bin].othresh = cdp_thresh[i_bin]; + pct_info[i_bin].othresh = ocdp_thresh[i_bin]; pct_info[i_bin].allocate_n_alpha(vx_opt.get_n_ci_alpha()); for(i=0; iget_var_info(), &nc_var, false, dp, + add_var_att_local(conf_info.vx_opt[i_vx].vx_pd.ens_info->get_var_info(), + &nc_var, false, dp, name_str.c_str(), long_name_str); // Write the data @@ -2696,7 +2738,8 @@ void write_orank_var_int(int i_vx, int i_interp, int i_mask, nc_var = add_var(nc_out, (string)var_name, ncInt, lat_dim, lon_dim); // Add the variable attributes - 
add_var_att_local(conf_info.vx_opt[i_vx].vx_pd.fcst_info->get_var_info(), &nc_var, true, dp, + add_var_att_local(conf_info.vx_opt[i_vx].vx_pd.ens_info->get_var_info(), + &nc_var, true, dp, name_str.c_str(), long_name_str); // Write the data diff --git a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc index 6f31073d20..d248124cbd 100644 --- a/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc +++ b/src/tools/core/ensemble_stat/ensemble_stat_conf_info.cc @@ -646,7 +646,7 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, clear(); // Allocate new EnsVarInfo object for fcst - vx_pd.fcst_info = new EnsVarInfo(); + vx_pd.ens_info = new EnsVarInfo(); // Loop over ensemble member IDs to substitute for(i=0; iadd_input(input_info); + vx_pd.ens_info->add_input(input_info); + + // Set the fcst_info, if needed + if(!vx_pd.fcst_info) vx_pd.set_fcst_info(next_var); // Add InputInfo to fcst info list for each ensemble file provided // set var_info to nullptr to note first VarInfo should be used @@ -672,7 +675,7 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, input_info.var_info = nullptr; input_info.file_index = j; input_info.file_list = ens_files; - vx_pd.fcst_info->add_input(input_info); + vx_pd.ens_info->add_input(input_info); } // end for j } // end for i @@ -691,11 +694,11 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, input_info.var_info = next_var; input_info.file_index = ens_files->n() - 1; input_info.file_list = ens_files; - vx_pd.fcst_info->add_input(input_info); + vx_pd.ens_info->add_input(input_info); } // Allocate new VarInfo object for obs - vx_pd.obs_info = info_factory.new_var_info(otype); + vx_pd.obs_info = info_factory.new_var_info(otype); // Set the VarInfo objects vx_pd.obs_info->set_dict(odict); @@ -704,14 +707,14 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, if(mlog.verbosity_level() >= 5) { mlog << Debug(5) << "Parsed forecast field:\n"; - vx_pd.fcst_info->get_var_info()->dump(cout); + vx_pd.ens_info->get_var_info()->dump(cout); mlog << Debug(5) << "Parsed observation field:\n"; vx_pd.obs_info->dump(cout); } // No support for wind direction - if(vx_pd.fcst_info->get_var_info()->is_wind_direction() || + if(vx_pd.ens_info->get_var_info()->is_wind_direction() || vx_pd.obs_info->is_wind_direction()) { mlog << Error << "\nEnsembleStatVxOpt::process_config() -> " << "wind direction may not be verified using grid_stat.\n\n"; @@ -783,7 +786,7 @@ void EnsembleStatVxOpt::process_config(GrdFileType ftype, Dictionary &fdict, ocat_ta = odict.lookup_thresh_array(conf_key_prob_cat_thresh); // The number of thresholds must match for non-probability forecasts - if(!vx_pd.fcst_info->get_var_info()->is_prob() && + if(!vx_pd.ens_info->get_var_info()->is_prob() && fcat_ta.n() != ocat_ta.n()) { mlog << Error << "\nEnsembleStatVxOpt::process_config() -> " << "The number of forecast (" << write_css(fcat_ta) @@ -936,7 +939,7 @@ void EnsembleStatVxOpt::set_vx_pd(EnsembleStatConfInfo *conf_info, int ctrl_inde } // Define the dimensions - vx_pd.set_pd_size(n_msg_typ, n_mask, n_interp); + vx_pd.set_size(n_msg_typ, n_mask, n_interp); // Store the climo CDF info vx_pd.set_climo_cdf_info_ptr(&cdf_info); @@ -1014,21 +1017,23 @@ void EnsembleStatVxOpt::set_perc_thresh(const PairDataEnsemble *pd_ptr) { // // Sort the input arrays // - NumArray fsort; - for(int i=0; in_ens; i++) fsort.add(pd_ptr->e_na[i]); - NumArray osort 
= pd_ptr->o_na; - NumArray csort = pd_ptr->cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort; + for(int i=0; in_ens; i++) f_sort.add(pd_ptr->e_na[i]); + NumArray o_sort = pd_ptr->o_na; + NumArray fcmn_sort = pd_ptr->fcmn_na; + NumArray ocmn_sort = pd_ptr->ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles, passing the observation filtering // thresholds in for the fcst and obs slots. // - othr_ta.set_perc(&fsort, &osort, &csort, &othr_ta, &othr_ta); - fcat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - ocat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); + othr_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &othr_ta, &othr_ta); + fcat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + ocat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); return; } diff --git a/src/tools/core/grid_stat/grid_stat.cc b/src/tools/core/grid_stat/grid_stat.cc index 56be2071cf..fcd0bc33a9 100644 --- a/src/tools/core/grid_stat/grid_stat.cc +++ b/src/tools/core/grid_stat/grid_stat.cc @@ -109,9 +109,10 @@ // filtering options. // 052 05/28/21 Halley Gotway Add MCTS HSS_EC output. // 053 12/11/21 Halley Gotway MET #1991 Fix VCNT output. -// 054 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 055 10/03/22 Prestopnik MET #2227 Remove using namespace netCDF from header files -// 056 01/29/24 Halley Gotway MET #2801 Configure time difference warnings +// 054 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 055 10/03/22 Prestopnik MET #2227 Remove using namespace netCDF from header files. +// 056 01/29/24 Halley Gotway MET #2801 Configure time difference warnings. +// 057 07/05/24 Halley Gotway MET #2924 Support forecast climatology. 
// //////////////////////////////////////////////////////////////////////// @@ -168,6 +169,7 @@ static void get_mask_points(const GridStatVxOpt &, const MaskPlane &, const DataPlane *, const DataPlane *, const DataPlane *, const DataPlane *, const DataPlane *, + const DataPlane *, const DataPlane *, PairDataPoint &); static void do_cts (CTSInfo *&, int, const PairDataPoint *); @@ -672,8 +674,8 @@ void process_scores() { DataPlane fcst_dp_thresh, obs_dp_thresh; // Climatology mean and standard deviation - DataPlane cmn_dp, csd_dp; - DataPlane cmn_dp_smooth; + DataPlane fcmn_dp, fcsd_dp, ocmn_dp, ocsd_dp; + DataPlane fcmn_dp_smooth, ocmn_dp_smooth; // Paired forecast, observation, climatology, and weight values PairDataPoint pd; @@ -682,19 +684,20 @@ void process_scores() { PairDataPoint pd_thr; // Allocate memory in one big chunk based on grid size - pd.extend(grid.nx()*grid.ny()); + pd.extend(grid.nxy()); if(conf_info.output_flag[i_nbrctc] != STATOutputType::None || conf_info.output_flag[i_nbrcts] != STATOutputType::None || conf_info.output_flag[i_nbrcnt] != STATOutputType::None || conf_info.output_flag[i_dmap] != STATOutputType::None) { - pd_thr.extend(grid.nx()*grid.ny()); + pd_thr.extend(grid.nxy()); } // Objects to handle vector winds DataPlane fu_dp, ou_dp; DataPlane fu_dp_smooth, ou_dp_smooth; - DataPlane cmnu_dp, csdu_dp, cmnu_dp_smooth; + DataPlane fcmnu_dp, fcsdu_dp, fcmnu_dp_smooth; + DataPlane ocmnu_dp, ocsdu_dp, ocmnu_dp_smooth; PairDataPoint pd_u; DataPlane seeps_dp, seeps_dp_fcat, seeps_dp_ocat; @@ -784,23 +787,34 @@ void process_scores() { << ".\n\n"; } - // Read climatology data - cmn_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i, fcst_dp.valid(), grid); - csd_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i, fcst_dp.valid(), grid); + // Read forecast climatology data + fcmn_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_mean_field, false), + i, fcst_dp.valid(), grid); + fcsd_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_stdev_field, false), + i, fcst_dp.valid(), grid); + + // Read observation climatology data + ocmn_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_mean_field, false), + i, fcst_dp.valid(), grid); + ocsd_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_stdev_field, false), + i, fcst_dp.valid(), grid); mlog << Debug(3) - << "Found " << (cmn_dp.nx() == 0 ? 0 : 1) - << " climatology mean and " << (csd_dp.nx() == 0 ? 0 : 1) - << " climatology standard deviation field(s) for forecast " - << conf_info.vx_opt[i].fcst_info->magic_str() << ".\n"; + << "For " << conf_info.vx_opt[i].fcst_info->magic_str() << ", found " + << (fcmn_dp.is_empty() ? 0 : 1) << " forecast climatology mean and " + << (fcsd_dp.is_empty() ? 0 : 1) << " standard deviation field(s), and " + << (ocmn_dp.is_empty() ? 0 : 1) << " observation climatology mean and " + << (ocsd_dp.is_empty() ? 
0 : 1) << " standard deviation field(s).\n"; // Apply MPR threshold filters if(conf_info.vx_opt[i].mpr_sa.n() > 0) { - apply_mpr_thresh_mask(fcst_dp, obs_dp, cmn_dp, csd_dp, + apply_mpr_thresh_mask(fcst_dp, obs_dp, + fcmn_dp, fcsd_dp, + ocmn_dp, ocsd_dp, conf_info.vx_opt[i].mpr_sa, conf_info.vx_opt[i].mpr_ta); } @@ -879,22 +893,20 @@ void process_scores() { // Store the current mask mask_mp = conf_info.mask_map[conf_info.vx_opt[i].mask_name[k]]; - // Turn off the mask for missing data values + // Turn off the mask for any grid points containing bad data mask_bad_data(mask_mp, fcst_dp_smooth); mask_bad_data(mask_mp, obs_dp_smooth); - if(cmn_dp.nx() == fcst_dp_smooth.nx() && - cmn_dp.ny() == fcst_dp_smooth.ny()) { - mask_bad_data(mask_mp, cmn_dp); - } - if(csd_dp.nx() == fcst_dp_smooth.nx() && - csd_dp.ny() == fcst_dp_smooth.ny()) { - mask_bad_data(mask_mp, csd_dp); - } + if(!fcmn_dp.is_empty()) mask_bad_data(mask_mp, fcmn_dp); + if(!fcsd_dp.is_empty()) mask_bad_data(mask_mp, fcsd_dp); + if(!ocmn_dp.is_empty()) mask_bad_data(mask_mp, ocmn_dp); + if(!ocsd_dp.is_empty()) mask_bad_data(mask_mp, ocsd_dp); // Apply the current mask to the current fields get_mask_points(conf_info.vx_opt[i], mask_mp, &fcst_dp_smooth, &obs_dp_smooth, - &cmn_dp, &csd_dp, &wgt_dp, pd); + &fcmn_dp, &fcsd_dp, + &ocmn_dp, &ocsd_dp, + &wgt_dp, pd); // Set the mask name shc.set_mask(conf_info.vx_opt[i].mask_name[k].c_str()); @@ -1042,13 +1054,21 @@ void process_scores() { if(!read_data_plane(conf_info.vx_opt[ui].obs_info, ou_dp, obs_mtddf, obs_file)) continue; - // Read climatology data for UGRD - cmnu_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - ui, fcst_dp.valid(), grid); - csdu_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - ui, fcst_dp.valid(), grid); + // Read the forecast climatology data for UGRD + fcmnu_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_mean_field, false), + ui, fcst_dp.valid(), grid); + fcsdu_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_stdev_field, false), + ui, fcst_dp.valid(), grid); + + // Read the observation climatology data for UGRD + ocmnu_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_mean_field, false), + ui, fcst_dp.valid(), grid); + ocsdu_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_stdev_field, false), + ui, fcst_dp.valid(), grid); // If requested in the config file, smooth the forecast // and climatology U-wind fields @@ -1080,7 +1100,9 @@ void process_scores() { // Apply the current mask to the U-wind fields get_mask_points(conf_info.vx_opt[i], mask_mp, &fu_dp_smooth, &ou_dp_smooth, - &cmnu_dp, &csdu_dp, &wgt_dp, pd_u); + &fcmnu_dp, &fcsdu_dp, + &ocmnu_dp, &ocsdu_dp, + &wgt_dp, pd_u); // Compute VL1L2 do_vl1l2(vl1l2_info, i, &pd_u, &pd); @@ -1153,23 +1175,65 @@ void process_scores() { } if(conf_info.vx_opt[i].nc_info.do_diff) { write_nc((string)"DIFF", subtract(fcst_dp_smooth, obs_dp_smooth), - i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + /* MET #2924 Replace this section + if(conf_info.vx_opt[i].nc_info.do_climo && + !fcmn_dp.is_empty()) { + write_nc((string)"FCST_CLIMO_MEAN", fcmn_dp, + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + if(conf_info.vx_opt[i].nc_info.do_climo && + !fcsd_dp.is_empty()) { + write_nc((string)"FCST_CLIMO_STDEV", fcsd_dp, + i, mthd, pnts, + 
conf_info.vx_opt[i].interp_info.field); + } + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocmn_dp.is_empty()) { + write_nc((string)"OBS_CLIMO_MEAN", ocmn_dp, + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocsd_dp.is_empty()) { + write_nc((string)"OBS_CLIMO_STDEV", ocsd_dp, + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocmn_dp.is_empty() && !ocsd_dp.is_empty()) { + write_nc((string)"OBS_CLIMO_CDF", normal_cdf(obs_dp, ocmn_dp, ocsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); } - if(conf_info.vx_opt[i].nc_info.do_climo && !cmn_dp.is_empty()) { - write_nc((string)"CLIMO_MEAN", cmn_dp, i, mthd, pnts, + */ + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocmn_dp.is_empty()) { + write_nc((string)"CLIMO_MEAN", ocmn_dp, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); } - if(conf_info.vx_opt[i].nc_info.do_climo && !csd_dp.is_empty()) { - write_nc((string)"CLIMO_STDEV", csd_dp, i, mthd, pnts, + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocsd_dp.is_empty()) { + write_nc((string)"CLIMO_STDEV", fcsd_dp, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); } - if(conf_info.vx_opt[i].nc_info.do_climo && !cmn_dp.is_empty() && !csd_dp.is_empty()) { - write_nc((string)"CLIMO_CDF", normal_cdf(obs_dp, cmn_dp, csd_dp), - i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); + if(conf_info.vx_opt[i].nc_info.do_climo && + !ocmn_dp.is_empty() && !ocsd_dp.is_empty()) { + write_nc((string)"CLIMO_CDF", normal_cdf(obs_dp, ocmn_dp, ocsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); } + // MET #2924 End replace // Write out the fields of requested climo distribution percentile threshold values - if(conf_info.vx_opt[i].nc_info.do_climo_cdp && !cmn_dp.is_empty() && !csd_dp.is_empty()) { + if(conf_info.vx_opt[i].nc_info.do_climo_cdp && + ((!fcmn_dp.is_empty() && !fcsd_dp.is_empty()) || + (!ocmn_dp.is_empty() && !ocsd_dp.is_empty()))) { // Construct one list of all thresholds ThreshArray ta; @@ -1185,36 +1249,59 @@ void process_scores() { // Process all CDP thresholds except 0 and 100 for(vector::iterator it = simp.begin(); it != simp.end(); it++) { - if(it->ptype() == perc_thresh_climo_dist && + /* MET #2924 Replace this section + if(it->ptype() == perc_thresh_fcst_climo_dist && !is_eq(it->pvalue(), 0.0) && !is_eq(it->pvalue(), 100.0)) { + cs << cs_erase << "FCST_CLIMO_CDP" << nint(it->pvalue()); + write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, fcmn_dp, fcsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + else if(it->ptype() == perc_thresh_obs_climo_dist && + !is_eq(it->pvalue(), 0.0) && + !is_eq(it->pvalue(), 100.0)) { + cs << cs_erase << "OBS_CLIMO_CDP" << nint(it->pvalue()); + write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, ocmn_dp, ocsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); + } + */ + if(it->ptype() == perc_thresh_obs_climo_dist && + !is_eq(it->pvalue(), 0.0) && + !is_eq(it->pvalue(), 100.0)) { cs << cs_erase << "CLIMO_CDP" << nint(it->pvalue()); - write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, cmn_dp, csd_dp), - i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); + write_nc(cs, normal_cdf_inv(it->pvalue()/100.0, ocmn_dp, ocsd_dp), + i, mthd, pnts, + conf_info.vx_opt[i].interp_info.field); } + // MET #2924 End replace } // end for it } // Write out the fields of requested SEEPS - if(conf_info.vx_opt[i].output_flag[i_seeps] != STATOutputType::None - && 
conf_info.vx_opt[i].fcst_info->is_precipitation() - && conf_info.vx_opt[i].obs_info->is_precipitation()) { - SeepsAggScore seeps; + if(conf_info.vx_opt[i].output_flag[i_seeps] != STATOutputType::None && + conf_info.vx_opt[i].fcst_info->is_precipitation() && + conf_info.vx_opt[i].obs_info->is_precipitation()) { + SeepsAggScore seeps_agg; int month, day, year, hour, minute, second; unix_to_mdyhms(fcst_dp.valid(), month, day, year, hour, minute, second); compute_aggregated_seeps_grid(fcst_dp_smooth, obs_dp_smooth, seeps_dp, seeps_dp_fcat, seeps_dp_ocat, - &seeps, month, hour, + &seeps_agg, month, hour, conf_info.seeps_p1_thresh, conf_info.seeps_climo_name); - write_nc("SEEPS_MPR_SCORE", seeps_dp, i, mthd, pnts, + write_nc("SEEPS_MPR_SCORE", seeps_dp, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); - write_nc("SEEPS_MPR_FCAT", seeps_dp_fcat, i, mthd, pnts, + write_nc("SEEPS_MPR_FCAT", seeps_dp_fcat, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); - write_nc("SEEPS_MPR_OCAT", seeps_dp_ocat, i, mthd, pnts, + write_nc("SEEPS_MPR_OCAT", seeps_dp_ocat, + i, mthd, pnts, conf_info.vx_opt[i].interp_info.field); - write_seeps_row(shc, &seeps, conf_info.output_flag[i_seeps], + write_seeps_row(shc, &seeps_agg, conf_info.output_flag[i_seeps], stat_at, i_stat_row, txt_at[i_seeps], i_txt_row[i_seeps]); } @@ -1225,8 +1312,8 @@ void process_scores() { // Allocate memory in one big chunk based on grid size DataPlane fgx_dp, fgy_dp, ogx_dp, ogy_dp; PairDataPoint pd_gx, pd_gy; - pd_gx.extend(grid.nx()*grid.ny()); - pd_gy.extend(grid.nx()*grid.ny()); + pd_gx.extend(grid.nxy()); + pd_gy.extend(grid.nxy()); // Loop over gradient Dx/Dy for(k=0; kwidth[j], nbrhd->shape, grid.wrap_lon(), conf_info.vx_opt[i].fcat_ta[k], - &cmn_dp, &csd_dp, + &fcmn_dp, &fcsd_dp, + &ocmn_dp, &ocsd_dp, nbrhd->vld_thresh); // Compute the binary threshold field @@ -1538,7 +1633,8 @@ void process_scores() { nbrhd->width[j], nbrhd->shape, grid.wrap_lon(), conf_info.vx_opt[i].ocat_ta[k], - &cmn_dp, &csd_dp, + &fcmn_dp, &fcsd_dp, + &ocmn_dp, &ocsd_dp, nbrhd->vld_thresh); // Compute the binary threshold field @@ -1565,7 +1661,7 @@ void process_scores() { } } - // Turn off the mask for bad forecast or observation values + // Turn off the mask for any grid points containing bad data mask_bad_data(mask_mp, fcst_dp_smooth); mask_bad_data(mask_mp, obs_dp_smooth); @@ -1583,14 +1679,18 @@ void process_scores() { // and thresholded fields get_mask_points(conf_info.vx_opt[i], mask_mp, &fcst_dp_smooth, &obs_dp_smooth, - 0, 0, &wgt_dp, pd); + nullptr, nullptr, nullptr, nullptr, + &wgt_dp, pd); get_mask_points(conf_info.vx_opt[i], mask_mp, &fcst_dp_thresh, &obs_dp_thresh, - 0, 0, 0, pd_thr); + nullptr, nullptr, nullptr, nullptr, + nullptr, pd_thr); // Store climatology values as bad data - pd.cmn_na.add_const(bad_data_double, pd.f_na.n()); - pd.csd_na.add_const(bad_data_double, pd.f_na.n()); + pd.fcmn_na.add_const(bad_data_double, pd.f_na.n()); + pd.fcsd_na.add_const(bad_data_double, pd.f_na.n()); + pd.ocmn_na.add_const(bad_data_double, pd.f_na.n()); + pd.ocsd_na.add_const(bad_data_double, pd.f_na.n()); mlog << Debug(2) << "Processing " << conf_info.vx_opt[i].fcst_info->magic_str() @@ -1691,15 +1791,16 @@ void process_scores() { for(j=0; jcmn_na.n_valid() > 0 && - pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? 
vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -2310,8 +2459,9 @@ void do_pct(const GridStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { mlog << Debug(2) << "Computing Probabilistic Statistics.\n"; - // Determine the number of climo CDF bins - n_bin = (pd_ptr->cmn_na.n_valid() > 0 && pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -2616,7 +2766,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, // Allocate memory float *data = (float *) nullptr; - data = new float [grid.nx()*grid.ny()]; + data = new float [grid.nxy()]; // Set the NetCDF compression level int deflate_level = compress_level; @@ -2665,12 +2815,94 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, << conf_info.vx_opt[i_vx].fcst_info->units_attr() << " and " << conf_info.vx_opt[i_vx].obs_info->units_attr(); } + else if(field_name == "FCST_CLIMO_MEAN") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + + // Append interpolation string for Fourier decomposition + if(interp_str.nonempty()) { + if(interp_str.startswith("_WV")) var_name << interp_str; + } + long_att << cs_erase + << "Forecast climatology mean for " + << fcst_long_name; + level_att = shc.get_fcst_lev(); + units_att = conf_info.vx_opt[i_vx].fcst_info->units_attr(); + } + else if(field_name == "FCST_CLIMO_STDEV") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Forecast climatology standard deviation for " + << fcst_long_name; + level_att = shc.get_fcst_lev(); + units_att = conf_info.vx_opt[i_vx].fcst_info->units_attr(); + } + /* MET #2924 Replace this section + else if(field_name == "OBS_CLIMO_MEAN") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + + // Append interpolation string for Fourier decomposition + if(interp_str.nonempty()) { + if(interp_str.startswith("_WV")) var_name << interp_str; + } + long_att << cs_erase + << "Observation climatology mean for " + << obs_long_name; + level_att = shc.get_obs_lev(); + units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); + } + else if(field_name == "OBS_CLIMO_STDEV") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Observation climatology standard deviation for " + << obs_long_name; + level_att = shc.get_obs_lev(); + units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); + } + else if(field_name == "OBS_CLIMO_CDF") { + var_name << cs_erase << field_name << "_" + << obs_name << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Observation climatology cumulative distribution function for " + << obs_long_name; + level_att = shc.get_obs_lev(); + units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); + } + else if(field_name.startswith("FCST_CLIMO_CDP")) { + var_name << cs_erase + << field_name << "_" + << conf_info.vx_opt[i_vx].fcst_info->name_attr() << "_" + << conf_info.vx_opt[i_vx].fcst_info->level_attr() + << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Forecast climatology distribution percentile thresholds for " + << fcst_long_name; + level_att = shc.get_fcst_lev(); + units_att = conf_info.vx_opt[i_vx].fcst_info->units_attr(); + } + else if(field_name.startswith("OBS_CLIMO_CDP")) { + var_name << cs_erase + << field_name << "_" + << 
conf_info.vx_opt[i_vx].obs_info->name_attr() << "_" + << conf_info.vx_opt[i_vx].obs_info->level_attr() + << var_suffix << "_" << mask_str; + long_att << cs_erase + << "Observation climatology distribution percentile thresholds for " + << obs_long_name; + level_att = shc.get_obs_lev(); + units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); + } + */ else if(field_name == "CLIMO_MEAN") { var_name << cs_erase << field_name << "_" << obs_name << var_suffix << "_" << mask_str; + // Append interpolation string for Fourier decomposition if(interp_str.nonempty()) { - if(strncmp(interp_str.c_str(), "_WV", 3) == 0) var_name << interp_str; + if(interp_str.startswith("_WV")) var_name << interp_str; } long_att << cs_erase << "Climatology mean for " @@ -2696,7 +2928,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(strncmp(field_name.c_str(), "CLIMO_CDP", 9) == 0) { + else if(field_name.startswith("CLIMO_CDP")) { var_name << cs_erase << field_name << "_" << conf_info.vx_opt[i_vx].obs_info->name_attr() << "_" @@ -2708,6 +2940,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } + // MET #2924 end replace else if(check_reg_exp("FCST_XGRAD_", field_name.c_str()) || check_reg_exp("FCST_YGRAD_", field_name.c_str())) { var_name << cs_erase << field_name << "_" @@ -2762,7 +2995,7 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, level_att = shc.get_obs_lev(); units_att = conf_info.vx_opt[i_vx].obs_info->units_attr(); } - else if(strncmp(field_name.c_str(), "SEEPS_MPR", 9) == 0) { + else if(field_name.startswith("SEEPS_MPR")) { ConcatString seeps_desc; var_name << cs_erase << field_name << "_" << obs_name << var_suffix << "_" << mask_str; @@ -2770,11 +3003,11 @@ void write_nc(const ConcatString &field_name, const DataPlane &dp, field_type == FieldType::Both) { var_name << interp_str; } - if(strncmp(field_name.c_str(), "SEEPS_MPR_SCORE", 15) == 0) + if(field_name.startswith("SEEPS_MPR_SCORE")) seeps_desc = "score"; - else if(strncmp(field_name.c_str(), "SEEPS_MPR_FCAT", 14) == 0) + else if(field_name.startswith("SEEPS_MPR_FCAT")) seeps_desc = "forecast category"; - else if(strncmp(field_name.c_str(), "SEEPS_MPR_OCAT", 14) == 0) + else if(field_name.startswith("SEEPS_MPR_OCAT")) seeps_desc = "observation category"; long_att << cs_erase << "SEEPS MPR " << seeps_desc << " for " @@ -2917,8 +3150,8 @@ void write_nbrhd_nc(const DataPlane &fcst_dp, const DataPlane &obs_dp, if(!fcst_flag && !obs_flag) return; // Allocate memory for the forecast and observation fields - fcst_data = new float [grid.nx()*grid.ny()]; - obs_data = new float [grid.nx()*grid.ny()]; + fcst_data = new float [grid.nxy()]; + obs_data = new float [grid.nxy()]; // Add the forecast variable if(fcst_flag) { diff --git a/src/tools/core/grid_stat/grid_stat_conf_info.cc b/src/tools/core/grid_stat/grid_stat_conf_info.cc index a2d64873b3..6ec2dd8f98 100644 --- a/src/tools/core/grid_stat/grid_stat_conf_info.cc +++ b/src/tools/core/grid_stat/grid_stat_conf_info.cc @@ -1031,20 +1031,22 @@ void GridStatVxOpt::set_perc_thresh(const PairDataPoint &pd) { // // Sort the input arrays // - NumArray fsort = pd.f_na; - NumArray osort = pd.o_na; - NumArray csort = pd.cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort = pd.f_na; + NumArray o_sort = pd.o_na; + NumArray 
fcmn_sort = pd.fcmn_na; + NumArray ocmn_sort = pd.ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles // - fcat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - ocat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - fcnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); - ocnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); + fcat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + ocat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + fcnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); + ocnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); return; } diff --git a/src/tools/core/point_stat/point_stat.cc b/src/tools/core/point_stat/point_stat.cc index edb59d8814..5532ea7e1d 100644 --- a/src/tools/core/point_stat/point_stat.cc +++ b/src/tools/core/point_stat/point_stat.cc @@ -100,10 +100,11 @@ // Added code for obs_qty_exc. // 049 12/11/21 Halley Gotway MET #1991 Fix VCNT output. // 050 02/11/22 Halley Gotway MET #2045 Fix HiRA output. -// 051 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main +// 051 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. // 052 09/29/22 Halley Gotway MET #2286 Refine GRIB1 table lookup logic. // 053 10/03/22 Prestopnik MET #2227 Remove using namespace netCDF from header files. // 054 04/29/24 Halley Gotway MET #2795 Move level mismatch warning. +// 055 07/05/24 Halley Gotway MET #2924 Support forecast climatology. // //////////////////////////////////////////////////////////////////////// @@ -600,7 +601,9 @@ void build_outfile_name(unixtime valid_ut, int lead_sec, void process_fcst_climo_files() { int j; int n_fcst; - DataPlaneArray fcst_dpa, cmn_dpa, csd_dpa; + DataPlaneArray fcst_dpa; + DataPlaneArray fcmn_dpa, fcsd_dpa; + DataPlaneArray ocmn_dpa, ocsd_dpa; unixtime file_ut, beg_ut, end_ut; // Loop through each of the fields to be verified and extract @@ -662,18 +665,28 @@ void process_fcst_climo_files() { } } // end for j - // Read climatology data - cmn_dpa = read_climo_data_plane_array( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i, fcst_dpa[0].valid(), grid); - csd_dpa = read_climo_data_plane_array( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i, fcst_dpa[0].valid(), grid); + // Read forecast climatology data + fcmn_dpa = read_climo_data_plane_array( + conf_info.conf.lookup_array(conf_key_fcst_climo_mean_field, false), + i, fcst_dpa[0].valid(), grid); + fcsd_dpa = read_climo_data_plane_array( + conf_info.conf.lookup_array(conf_key_fcst_climo_stdev_field, false), + i, fcst_dpa[0].valid(), grid); + + // Read observation climatology data + ocmn_dpa = read_climo_data_plane_array( + conf_info.conf.lookup_array(conf_key_obs_climo_mean_field, false), + i, fcst_dpa[0].valid(), grid); + ocsd_dpa = read_climo_data_plane_array( + conf_info.conf.lookup_array(conf_key_obs_climo_stdev_field, false), + i, fcst_dpa[0].valid(), grid); // Store data for the current verification task conf_info.vx_opt[i].vx_pd.set_fcst_dpa(fcst_dpa); - conf_info.vx_opt[i].vx_pd.set_climo_mn_dpa(cmn_dpa); - conf_info.vx_opt[i].vx_pd.set_climo_sd_dpa(csd_dpa); + conf_info.vx_opt[i].vx_pd.set_fcst_climo_mn_dpa(fcmn_dpa); + conf_info.vx_opt[i].vx_pd.set_fcst_climo_sd_dpa(fcsd_dpa); + conf_info.vx_opt[i].vx_pd.set_obs_climo_mn_dpa(ocmn_dpa); + conf_info.vx_opt[i].vx_pd.set_obs_climo_sd_dpa(ocsd_dpa); // 
Get the valid time for the first field file_ut = fcst_dpa[0].valid(); @@ -697,10 +710,12 @@ void process_fcst_climo_files() { // Dump out the number of levels found mlog << Debug(2) - << "For " << fcst_info->magic_str() << " found " + << "For " << fcst_info->magic_str() << ", found " << n_fcst << " forecast levels, " - << cmn_dpa.n_planes() << " climatology mean levels, and " - << csd_dpa.n_planes() << " climatology standard deviation levels.\n"; + << fcmn_dpa.n_planes() << " forecast climatology mean and " + << fcsd_dpa.n_planes() << " standard deviation level(s), and " + << ocmn_dpa.n_planes() << " observation climatology mean and " + << ocsd_dpa.n_planes() << " standard deviation level(s).\n"; } // end for i @@ -944,7 +959,6 @@ void process_obs_file(int i_nc) { //////////////////////////////////////////////////////////////////////// void process_scores() { - int i, j, k, l, m; int n_cat, n_wind; ConcatString cs; @@ -969,75 +983,77 @@ void process_scores() { vl1l2_info = new VL1L2Info [n_wind]; // Compute scores for each PairData object and write output - for(i=0; iname_attr()); + shc.set_fcst_var(conf_info.vx_opt[i_vx].vx_pd.fcst_info->name_attr()); // Store the forecast variable units - shc.set_fcst_units(conf_info.vx_opt[i].vx_pd.fcst_info->units_attr()); + shc.set_fcst_units(conf_info.vx_opt[i_vx].vx_pd.fcst_info->units_attr()); // Set the forecast level name - shc.set_fcst_lev(conf_info.vx_opt[i].vx_pd.fcst_info->level_attr().c_str()); + shc.set_fcst_lev(conf_info.vx_opt[i_vx].vx_pd.fcst_info->level_attr().c_str()); // Store the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(conf_info.vx_opt[i_vx].vx_pd.obs_info->name_attr()); // Store the observation variable units - cs = conf_info.vx_opt[i].vx_pd.obs_info->units_attr(); + cs = conf_info.vx_opt[i_vx].vx_pd.obs_info->units_attr(); if(cs.empty()) cs = na_string; shc.set_obs_units(cs); // Set the observation level name - shc.set_obs_lev(conf_info.vx_opt[i].vx_pd.obs_info->level_attr().c_str()); + shc.set_obs_lev(conf_info.vx_opt[i_vx].vx_pd.obs_info->level_attr().c_str()); // Set the forecast lead time - shc.set_fcst_lead_sec(conf_info.vx_opt[i].vx_pd.fcst_dpa[0].lead()); + shc.set_fcst_lead_sec(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa[0].lead()); // Set the forecast valid time - shc.set_fcst_valid_beg(conf_info.vx_opt[i].vx_pd.fcst_dpa[0].valid()); - shc.set_fcst_valid_end(conf_info.vx_opt[i].vx_pd.fcst_dpa[0].valid()); + shc.set_fcst_valid_beg(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa[0].valid()); + shc.set_fcst_valid_end(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa[0].valid()); // Set the observation lead time shc.set_obs_lead_sec(0); // Set the observation valid time - shc.set_obs_valid_beg(conf_info.vx_opt[i].vx_pd.beg_ut); - shc.set_obs_valid_end(conf_info.vx_opt[i].vx_pd.end_ut); + shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); + shc.set_obs_valid_end(conf_info.vx_opt[i_vx].vx_pd.end_ut); // Loop through the message types - for(j=0; jmagic_str() + << conf_info.vx_opt[i_vx].vx_pd.fcst_info->magic_str() << " versus " - << conf_info.vx_opt[i].vx_pd.obs_info->magic_str() + << conf_info.vx_opt[i_vx].vx_pd.obs_info->magic_str() << ", for observation type " << pd_ptr->msg_typ << ", over region " << pd_ptr->mask_name << ", for interpolation method " @@ -1048,22 +1064,22 @@ void process_scores() { // List counts for reasons why observations were rejected cs << cs_erase << "Number of matched pairs = " << pd_ptr->n_obs << "\n" - << "Observations processed = " << 
conf_info.vx_opt[i].vx_pd.n_try << "\n" - << "Rejected: station id = " << conf_info.vx_opt[i].vx_pd.rej_sid << "\n" - << "Rejected: obs var name = " << conf_info.vx_opt[i].vx_pd.rej_var << "\n" - << "Rejected: valid time = " << conf_info.vx_opt[i].vx_pd.rej_vld << "\n" - << "Rejected: bad obs value = " << conf_info.vx_opt[i].vx_pd.rej_obs << "\n" - << "Rejected: off the grid = " << conf_info.vx_opt[i].vx_pd.rej_grd << "\n" - << "Rejected: topography = " << conf_info.vx_opt[i].vx_pd.rej_topo << "\n" - << "Rejected: level mismatch = " << conf_info.vx_opt[i].vx_pd.rej_lvl << "\n" - << "Rejected: quality marker = " << conf_info.vx_opt[i].vx_pd.rej_qty << "\n" - << "Rejected: message type = " << conf_info.vx_opt[i].vx_pd.rej_typ[j][k][l] << "\n" - << "Rejected: masking region = " << conf_info.vx_opt[i].vx_pd.rej_mask[j][k][l] << "\n" - << "Rejected: bad fcst value = " << conf_info.vx_opt[i].vx_pd.rej_fcst[j][k][l] << "\n" - << "Rejected: bad climo mean = " << conf_info.vx_opt[i].vx_pd.rej_cmn[j][k][l] << "\n" - << "Rejected: bad climo stdev = " << conf_info.vx_opt[i].vx_pd.rej_csd[j][k][l] << "\n" - << "Rejected: mpr filter = " << conf_info.vx_opt[i].vx_pd.rej_mpr[j][k][l] << "\n" - << "Rejected: duplicates = " << conf_info.vx_opt[i].vx_pd.rej_dup[j][k][l] << "\n"; + << "Observations processed = " << conf_info.vx_opt[i_vx].vx_pd.n_try << "\n" + << "Rejected: station id = " << conf_info.vx_opt[i_vx].vx_pd.rej_sid << "\n" + << "Rejected: obs var name = " << conf_info.vx_opt[i_vx].vx_pd.rej_var << "\n" + << "Rejected: valid time = " << conf_info.vx_opt[i_vx].vx_pd.rej_vld << "\n" + << "Rejected: bad obs value = " << conf_info.vx_opt[i_vx].vx_pd.rej_obs << "\n" + << "Rejected: off the grid = " << conf_info.vx_opt[i_vx].vx_pd.rej_grd << "\n" + << "Rejected: topography = " << conf_info.vx_opt[i_vx].vx_pd.rej_topo << "\n" + << "Rejected: level mismatch = " << conf_info.vx_opt[i_vx].vx_pd.rej_lvl << "\n" + << "Rejected: quality marker = " << conf_info.vx_opt[i_vx].vx_pd.rej_qty << "\n" + << "Rejected: message type = " << conf_info.vx_opt[i_vx].vx_pd.rej_typ[n] << "\n" + << "Rejected: masking region = " << conf_info.vx_opt[i_vx].vx_pd.rej_mask[n] << "\n" + << "Rejected: bad fcst value = " << conf_info.vx_opt[i_vx].vx_pd.rej_fcst[n] << "\n" + << "Rejected: bad climo mean = " << conf_info.vx_opt[i_vx].vx_pd.rej_cmn[n] << "\n" + << "Rejected: bad climo stdev = " << conf_info.vx_opt[i_vx].vx_pd.rej_csd[n] << "\n" + << "Rejected: mpr filter = " << conf_info.vx_opt[i_vx].vx_pd.rej_mpr[n] << "\n" + << "Rejected: duplicates = " << conf_info.vx_opt[i_vx].vx_pd.rej_dup[n] << "\n"; // Print report based on the number of matched pairs if(pd_ptr->n_obs > 0) { @@ -1076,88 +1092,88 @@ void process_scores() { } // Process percentile thresholds - conf_info.vx_opt[i].set_perc_thresh(pd_ptr); + conf_info.vx_opt[i_vx].set_perc_thresh(pd_ptr); // Write out the MPR lines - if(conf_info.vx_opt[i].output_flag[i_mpr] != STATOutputType::None) { + if(conf_info.vx_opt[i_vx].output_flag[i_mpr] != STATOutputType::None) { write_mpr_row(shc, pd_ptr, - conf_info.vx_opt[i].output_flag[i_mpr], + conf_info.vx_opt[i_vx].output_flag[i_mpr], stat_at, i_stat_row, txt_at[i_mpr], i_txt_row[i_mpr]); // Reset the observation valid time - shc.set_obs_valid_beg(conf_info.vx_opt[i].vx_pd.beg_ut); - shc.set_obs_valid_end(conf_info.vx_opt[i].vx_pd.end_ut); + shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); + shc.set_obs_valid_end(conf_info.vx_opt[i_vx].vx_pd.end_ut); } // Write out the SEEPS MPR lines - 
if(conf_info.vx_opt[i].output_flag[i_seeps_mpr] != STATOutputType::None) { + if(conf_info.vx_opt[i_vx].output_flag[i_seeps_mpr] != STATOutputType::None) { write_seeps_mpr_row(shc, pd_ptr, - conf_info.vx_opt[i].output_flag[i_seeps_mpr], + conf_info.vx_opt[i_vx].output_flag[i_seeps_mpr], stat_at, i_stat_row, txt_at[i_seeps_mpr], i_txt_row[i_seeps_mpr]); // Reset the observation valid time - shc.set_obs_valid_beg(conf_info.vx_opt[i].vx_pd.beg_ut); - shc.set_obs_valid_end(conf_info.vx_opt[i].vx_pd.end_ut); + shc.set_obs_valid_beg(conf_info.vx_opt[i_vx].vx_pd.beg_ut); + shc.set_obs_valid_end(conf_info.vx_opt[i_vx].vx_pd.end_ut); } // Write out the SEEPS lines - if(conf_info.vx_opt[i].output_flag[i_seeps] != STATOutputType::None) { - compute_aggregated_seeps(pd_ptr, &pd_ptr->seeps); - write_seeps_row(shc, &pd_ptr->seeps, - conf_info.vx_opt[i].output_flag[i_seeps], + if(conf_info.vx_opt[i_vx].output_flag[i_seeps] != STATOutputType::None) { + compute_aggregated_seeps(pd_ptr, &pd_ptr->seeps_agg); + write_seeps_row(shc, &pd_ptr->seeps_agg, + conf_info.vx_opt[i_vx].output_flag[i_seeps], stat_at, i_stat_row, txt_at[i_seeps], i_txt_row[i_seeps]); } // Compute CTS scores - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].fcat_ta.n() > 0 && - (conf_info.vx_opt[i].output_flag[i_fho] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_ctc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_cts] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_eclv] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].fcat_ta.n() > 0 && + (conf_info.vx_opt[i_vx].output_flag[i_fho] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_ctc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_cts] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_eclv] != STATOutputType::None)) { // Initialize - for(m=0; mis_prob() && - conf_info.vx_opt[i].fcat_ta.n() > 1 && - (conf_info.vx_opt[i].output_flag[i_mctc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_mcts] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].fcat_ta.n() > 1 && + (conf_info.vx_opt[i_vx].output_flag[i_mctc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_mcts] != STATOutputType::None)) { // Initialize mcts_info.clear(); // Compute MCTS Info - do_mcts(mcts_info, i, pd_ptr); + do_mcts(mcts_info, i_vx, pd_ptr); // Write out MCTC - if(conf_info.vx_opt[i].output_flag[i_mctc] != STATOutputType::None && + if(conf_info.vx_opt[i_vx].output_flag[i_mctc] != STATOutputType::None && mcts_info.cts.total() > 0) { write_mctc_row(shc, mcts_info, - conf_info.vx_opt[i].output_flag[i_mctc], + conf_info.vx_opt[i_vx].output_flag[i_mctc], stat_at, i_stat_row, txt_at[i_mctc], i_txt_row[i_mctc]); } // Write out MCTS - if(conf_info.vx_opt[i].output_flag[i_mcts] != STATOutputType::None && + if(conf_info.vx_opt[i_vx].output_flag[i_mcts] != STATOutputType::None && mcts_info.cts.total() > 0) { write_mcts_row(shc, mcts_info, - conf_info.vx_opt[i].output_flag[i_mcts], + conf_info.vx_opt[i_vx].output_flag[i_mcts], stat_at, i_stat_row, txt_at[i_mcts], i_txt_row[i_mcts]); } } // end Compute MCTS scores // Compute CNT, SL1L2, and SAL1L2 scores - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - (conf_info.vx_opt[i].output_flag[i_cnt] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_sl1l2] != STATOutputType::None || - 
conf_info.vx_opt[i].output_flag[i_sal1l2] != STATOutputType::None)) { - do_cnt_sl1l2(conf_info.vx_opt[i], pd_ptr); + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + (conf_info.vx_opt[i_vx].output_flag[i_cnt] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_sl1l2] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_sal1l2] != STATOutputType::None)) { + do_cnt_sl1l2(conf_info.vx_opt[i_vx], pd_ptr); } // Compute VL1L2 and VAL1L2 partial sums for UGRD and VGRD - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].vx_pd.fcst_info->is_v_wind() && - conf_info.vx_opt[i].vx_pd.fcst_info->uv_index() >= 0 && - (conf_info.vx_opt[i].output_flag[i_vl1l2] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_val1l2] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_vcnt] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_v_wind() && + conf_info.vx_opt[i_vx].vx_pd.fcst_info->uv_index() >= 0 && + (conf_info.vx_opt[i_vx].output_flag[i_vl1l2] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_val1l2] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_vcnt] != STATOutputType::None)) { // Store the forecast variable name shc.set_fcst_var(ugrd_vgrd_abbr_str); @@ -1220,19 +1236,19 @@ void process_scores() { shc.set_obs_var(ugrd_vgrd_abbr_str); // Initialize - for(m=0; muv_index(); + int u_vx = conf_info.vx_opt[i_vx].vx_pd.fcst_info->uv_index(); // Check to make sure message types, masking regions, // and interpolation methods match - if(conf_info.vx_opt[i].get_n_msg_typ() != - conf_info.vx_opt[ui].get_n_msg_typ() || - conf_info.vx_opt[i].get_n_mask() != - conf_info.vx_opt[ui].get_n_mask() || - conf_info.vx_opt[i].get_n_interp() != - conf_info.vx_opt[ui].get_n_interp()) { + if(conf_info.vx_opt[i_vx].get_n_msg_typ() != + conf_info.vx_opt[u_vx].get_n_msg_typ() || + conf_info.vx_opt[i_vx].get_n_mask() != + conf_info.vx_opt[u_vx].get_n_mask() || + conf_info.vx_opt[i_vx].get_n_interp() != + conf_info.vx_opt[u_vx].get_n_interp()) { mlog << Warning << "\nprocess_scores() -> " << "when computing VL1L2 and/or VAL1L2 vector " << "partial sums, the U and V components must " @@ -1243,106 +1259,110 @@ void process_scores() { } // Compute VL1L2 and VAL1L2 - do_vl1l2(vl1l2_info, i, - &conf_info.vx_opt[ui].vx_pd.pd[j][k][l], - &conf_info.vx_opt[i].vx_pd.pd[j][k][l]); + do_vl1l2(vl1l2_info, i_vx, + &conf_info.vx_opt[u_vx].vx_pd.pd[n], + &conf_info.vx_opt[i_vx].vx_pd.pd[n]); // Loop through all of the wind speed thresholds - for(m=0; m 0) { - write_vl1l2_row(shc, vl1l2_info[m], - conf_info.vx_opt[i].output_flag[i_vl1l2], + if(conf_info.vx_opt[i_vx].output_flag[i_vl1l2] != STATOutputType::None && + vl1l2_info[i_wind].vcount > 0) { + write_vl1l2_row(shc, vl1l2_info[i_wind], + conf_info.vx_opt[i_vx].output_flag[i_vl1l2], stat_at, i_stat_row, txt_at[i_vl1l2], i_txt_row[i_vl1l2]); } // Write out VAL1L2 - if(conf_info.vx_opt[i].output_flag[i_val1l2] != STATOutputType::None && - vl1l2_info[m].vacount > 0) { - write_val1l2_row(shc, vl1l2_info[m], - conf_info.vx_opt[i].output_flag[i_val1l2], + if(conf_info.vx_opt[i_vx].output_flag[i_val1l2] != STATOutputType::None && + vl1l2_info[i_wind].vacount > 0) { + write_val1l2_row(shc, vl1l2_info[i_wind], + conf_info.vx_opt[i_vx].output_flag[i_val1l2], stat_at, i_stat_row, txt_at[i_val1l2], i_txt_row[i_val1l2]); } // Write out VCNT - if(conf_info.vx_opt[i].output_flag[i_vcnt] != STATOutputType::None && - 
vl1l2_info[m].vcount > 0) { - write_vcnt_row(shc, vl1l2_info[m], - conf_info.vx_opt[i].output_flag[i_vcnt], + if(conf_info.vx_opt[i_vx].output_flag[i_vcnt] != STATOutputType::None && + vl1l2_info[i_wind].vcount > 0) { + write_vcnt_row(shc, vl1l2_info[i_wind], + conf_info.vx_opt[i_vx].output_flag[i_vcnt], stat_at, i_stat_row, txt_at[i_vcnt], i_txt_row[i_vcnt]); } - } // end for m + } // end for i // Reset the forecast variable name - shc.set_fcst_var(conf_info.vx_opt[i].vx_pd.fcst_info->name_attr()); + shc.set_fcst_var(conf_info.vx_opt[i_vx].vx_pd.fcst_info->name_attr()); // Reset the observation variable name - shc.set_obs_var(conf_info.vx_opt[i].vx_pd.obs_info->name_attr()); + shc.set_obs_var(conf_info.vx_opt[i_vx].vx_pd.obs_info->name_attr()); } // end Compute VL1L2 and VAL1L2 // Compute PCT counts and scores - if(conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - (conf_info.vx_opt[i].output_flag[i_pct] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pstd] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pjc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_prc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_eclv] != STATOutputType::None)) { - do_pct(conf_info.vx_opt[i], pd_ptr); + if(conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + (conf_info.vx_opt[i_vx].output_flag[i_pct] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pstd] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pjc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_prc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_eclv] != STATOutputType::None)) { + do_pct(conf_info.vx_opt[i_vx], pd_ptr); } // Reset the verification masking region - shc.set_mask(conf_info.vx_opt[i].mask_name[k].c_str()); + shc.set_mask(conf_info.vx_opt[i_vx].mask_name[i_mask].c_str()); - } // end for l + } // end for i_interp // Apply HiRA ensemble verification logic - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].hira_info.flag && - (conf_info.vx_opt[i].output_flag[i_ecnt] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_rps] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].hira_info.flag && + (conf_info.vx_opt[i_vx].output_flag[i_ecnt] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_rps] != STATOutputType::None)) { + + int n = conf_info.vx_opt[i_vx].vx_pd.three_to_one(i_msg_typ, i_mask, 0); - pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[j][k][0]; + pd_ptr = &conf_info.vx_opt[i_vx].vx_pd.pd[n]; // Process percentile thresholds - conf_info.vx_opt[i].set_perc_thresh(pd_ptr); + conf_info.vx_opt[i_vx].set_perc_thresh(pd_ptr); // Appy HiRA verification and write ensemble output - do_hira_ens(i, pd_ptr); + do_hira_ens(i_vx, pd_ptr); } // end HiRA for ensembles // Apply HiRA probabilistic verification logic - if(!conf_info.vx_opt[i].vx_pd.fcst_info->is_prob() && - conf_info.vx_opt[i].hira_info.flag && - (conf_info.vx_opt[i].output_flag[i_mpr] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pct] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pstd] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_pjc] != STATOutputType::None || - conf_info.vx_opt[i].output_flag[i_prc] != STATOutputType::None)) { + if(!conf_info.vx_opt[i_vx].vx_pd.fcst_info->is_prob() && + conf_info.vx_opt[i_vx].hira_info.flag && + (conf_info.vx_opt[i_vx].output_flag[i_mpr] != STATOutputType::None || + 
conf_info.vx_opt[i_vx].output_flag[i_pct] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pstd] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_pjc] != STATOutputType::None || + conf_info.vx_opt[i_vx].output_flag[i_prc] != STATOutputType::None)) { - pd_ptr = &conf_info.vx_opt[i].vx_pd.pd[j][k][0]; + int n = conf_info.vx_opt[i_vx].vx_pd.three_to_one(i_msg_typ, i_mask, 0); + + pd_ptr = &conf_info.vx_opt[i_vx].vx_pd.pd[n]; // Process percentile thresholds - conf_info.vx_opt[i].set_perc_thresh(pd_ptr); + conf_info.vx_opt[i_vx].set_perc_thresh(pd_ptr); // Apply HiRA verification and write probabilistic output - do_hira_prob(i, pd_ptr); + do_hira_prob(i_vx, pd_ptr); } // end HiRA for probabilities - } // end for k - } // end for j + } // end for i_mask + } // end for i_msg_typ - mlog << Debug(2) - << "\n" << sep_str << "\n\n"; - } // end for i + mlog << Debug(2) << "\n" << sep_str << "\n\n"; + + } // end for i_vx // Deallocate memory if(cts_info) { delete [] cts_info; cts_info = (CTSInfo *) nullptr; } @@ -1466,8 +1486,9 @@ void do_cnt_sl1l2(const PointStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { mlog << Debug(2) << "Computing Scalar Partial Sums and Continuous Statistics.\n"; - // Determine the number of climo CDF bins - n_bin = (pd_ptr->cmn_na.n_valid() > 0 && pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -1702,8 +1723,9 @@ void do_pct(const PointStatVxOpt &vx_opt, const PairDataPoint *pd_ptr) { mlog << Debug(2) << "Computing Probabilistic Statistics.\n"; - // Determine the number of climo CDF bins - n_bin = (pd_ptr->cmn_na.n_valid() > 0 && pd_ptr->csd_na.n_valid() > 0 ? + // Determine the number of observation climo CDF bins + n_bin = (pd_ptr->ocmn_na.n_valid() > 0 && + pd_ptr->ocsd_na.n_valid() > 0 ? 
vx_opt.get_n_cdf_bin() : 1); if(n_bin > 1) { @@ -1868,9 +1890,17 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { // Check for values if(f_ens.n() == 0) continue; + // TODO: Add has_climo member function instead + // Skip points where climatology has been specified but is bad data - if(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa.n_planes() > 0 && - is_bad_data(pd_ptr->cmn_na[j])) continue; + if((conf_info.vx_opt[i_vx].vx_pd.fcmn_dpa.n_planes() > 0 && + is_bad_data(pd_ptr->fcmn_na[j])) || + (conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa.n_planes() > 0 && + is_bad_data(pd_ptr->ocmn_na[j]))) continue; + + // Store climo data + ClimoPntInfo cpi(pd_ptr->fcmn_na[j], pd_ptr->fcsd_na[j], + pd_ptr->ocmn_na[j], pd_ptr->ocsd_na[j]); // Store the observation value hira_pd.add_point_obs(pd_ptr->sid_sa[j].c_str(), @@ -1878,8 +1908,7 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { pd_ptr->x_na[j], pd_ptr->y_na[j], pd_ptr->vld_ta[j], pd_ptr->lvl_na[j], pd_ptr->elv_na[j], pd_ptr->o_na[j], pd_ptr->o_qc_sa[j].c_str(), - pd_ptr->cmn_na[j], pd_ptr->csd_na[j], - pd_ptr->wgt_na[j]); + cpi, pd_ptr->wgt_na[j]); // Store the ensemble mean and member values hira_pd.mn_na.add(f_ens.mean()); @@ -1946,9 +1975,9 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { // If prob_cat_thresh is empty, try to select other thresholds if(rps_info.fthresh.n() == 0) { - // Use climo data, if avaiable - if(hira_pd.cmn_na.n_valid() > 0 && - hira_pd.csd_na.n_valid() > 0 && + // Use observation climo data, if available + if(hira_pd.ocmn_na.n_valid() > 0 && + hira_pd.ocsd_na.n_valid() > 0 && conf_info.vx_opt[i_vx].cdf_info.cdf_ta.n() > 0) { mlog << Debug(3) << "Resetting the empty HiRA \"" << conf_key_prob_cat_thresh << "\" thresholds to " @@ -1995,8 +2024,8 @@ void do_hira_ens(int i_vx, const PairDataPoint *pd_ptr) { void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { PairDataPoint hira_pd; int i, j, k, lvl_blw, lvl_abv; - double f_cov, cmn_cov; - NumArray cmn_cov_na; + double f_cov, ocmn_cov; + NumArray ocmn_cov_na; SingleThresh cat_thresh; PCTInfo pct_info; @@ -2023,20 +2052,23 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { // Initialize hira_pd.clear(); pct_info.clear(); - cmn_cov_na.erase(); + ocmn_cov_na.erase(); // Loop through matched pairs and replace the forecast value // with the HiRA fractional coverage. 
for(k=0; kn_obs; k++) { + // Store climo data + ClimoPntInfo cpi(pd_ptr->fcmn_na[k], pd_ptr->fcsd_na[k], + pd_ptr->ocmn_na[k], pd_ptr->ocsd_na[k]); + // Compute the fractional coverage forecast value using the // observation level value find_vert_lvl(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa, pd_ptr->lvl_na[k], lvl_blw, lvl_abv); f_cov = compute_interp(conf_info.vx_opt[i_vx].vx_pd.fcst_dpa, - pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], - pd_ptr->cmn_na[k], pd_ptr->csd_na[k], + pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], &cpi, InterpMthd::Nbrhd, conf_info.vx_opt[i_vx].hira_info.width[j], conf_info.vx_opt[i_vx].hira_info.shape, grid.wrap_lon(), conf_info.vx_opt[i_vx].hira_info.vld_thresh, spfh_flag, @@ -2046,25 +2078,25 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { // Check for bad data if(is_bad_data(f_cov)) continue; - // Compute the fractional coverage for the climatological mean - if(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa.n_planes() > 0) { + // Compute the climatological event probability as the fractional + // coverage of the observation climatology mean field + if(conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa.n_planes() > 0) { // Interpolate to the observation level - find_vert_lvl(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa, + find_vert_lvl(conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa, pd_ptr->lvl_na[k], lvl_blw, lvl_abv); - cmn_cov = compute_interp(conf_info.vx_opt[i_vx].vx_pd.climo_mn_dpa, - pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], - pd_ptr->cmn_na[k], pd_ptr->csd_na[k], - InterpMthd::Nbrhd, conf_info.vx_opt[i_vx].hira_info.width[j], - conf_info.vx_opt[i_vx].hira_info.shape, grid.wrap_lon(), - conf_info.vx_opt[i_vx].hira_info.vld_thresh, spfh_flag, - conf_info.vx_opt[i_vx].vx_pd.fcst_info->level().type(), - pd_ptr->lvl_na[k], lvl_blw, lvl_abv, &cat_thresh); + ocmn_cov = compute_interp(conf_info.vx_opt[i_vx].vx_pd.ocmn_dpa, + pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->o_na[k], &cpi, + InterpMthd::Nbrhd, conf_info.vx_opt[i_vx].hira_info.width[j], + conf_info.vx_opt[i_vx].hira_info.shape, grid.wrap_lon(), + conf_info.vx_opt[i_vx].hira_info.vld_thresh, spfh_flag, + conf_info.vx_opt[i_vx].vx_pd.fcst_info->level().type(), + pd_ptr->lvl_na[k], lvl_blw, lvl_abv, &cat_thresh); // Check for bad data - if(is_bad_data(cmn_cov)) continue; - else cmn_cov_na.add(cmn_cov); + if(is_bad_data(ocmn_cov)) continue; + else ocmn_cov_na.add(ocmn_cov); } // Store the fractional coverage pair @@ -2073,8 +2105,7 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { pd_ptr->x_na[k], pd_ptr->y_na[k], pd_ptr->vld_ta[k], pd_ptr->lvl_na[k], pd_ptr->elv_na[k], f_cov, pd_ptr->o_na[k], pd_ptr->o_qc_sa[k].c_str(), - pd_ptr->cmn_na[k], pd_ptr->csd_na[k], pd_ptr->wgt_na[k]); - + cpi, pd_ptr->wgt_na[k]); } // end for k mlog << Debug(2) @@ -2103,8 +2134,8 @@ void do_hira_prob(int i_vx, const PairDataPoint *pd_ptr) { } // Compute the probabilistic counts and statistics - compute_pctinfo(hira_pd, (STATOutputType::None!=conf_info.vx_opt[i_vx].output_flag[i_pstd]), - pct_info, &cmn_cov_na); + bool pstd_flag = conf_info.vx_opt[i_vx].output_flag[i_pstd] != STATOutputType::None; + compute_pctinfo(hira_pd, pstd_flag, pct_info, &ocmn_cov_na); // Set the contents of the output threshold columns shc.set_fcst_thresh (conf_info.vx_opt[i_vx].fcat_ta[i]); diff --git a/src/tools/core/point_stat/point_stat.h b/src/tools/core/point_stat/point_stat.h index abe34d25ac..894a79559b 100644 --- a/src/tools/core/point_stat/point_stat.h +++ b/src/tools/core/point_stat/point_stat.h @@ -15,11 +15,8 @@ // Mod# Date Name 
Description // ---- ---- ---- ----------- // 000 11/11/08 Halley Gotway New -// 001 09/28/22 Prestopnik MET #2227 Remove namespace std and netCDF from header files +// 001 09/28/22 Prestopnik MET #2227 Remove namespace std and netCDF from header files. // -// -//////////////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////////////// #ifndef __POINT_STAT_H__ diff --git a/src/tools/core/point_stat/point_stat_conf_info.cc b/src/tools/core/point_stat/point_stat_conf_info.cc index 1f28857cdc..0f03bcb632 100644 --- a/src/tools/core/point_stat/point_stat_conf_info.cc +++ b/src/tools/core/point_stat/point_stat_conf_info.cc @@ -841,8 +841,8 @@ void PointStatVxOpt::process_config(GrdFileType ftype, clear(); // Allocate new VarInfo objects - vx_pd.fcst_info = info_factory.new_var_info(ftype); - vx_pd.obs_info = new VarInfoGrib; + vx_pd.set_fcst_info(info_factory.new_var_info(ftype)); + vx_pd.set_obs_info(new VarInfoGrib); // Set the VarInfo objects vx_pd.fcst_info->set_dict(fdict); @@ -1077,7 +1077,7 @@ void PointStatVxOpt::set_vx_pd(PointStatConfInfo *conf_info) { } // Define the dimensions - vx_pd.set_pd_size(n_msg_typ, n_mask, n_interp); + vx_pd.set_size(n_msg_typ, n_mask, n_interp); // Store the MPR filter threshold vx_pd.set_mpr_thresh(mpr_sa, mpr_ta); @@ -1185,20 +1185,22 @@ void PointStatVxOpt::set_perc_thresh(const PairDataPoint *pd_ptr) { // // Sort the input arrays // - NumArray fsort = pd_ptr->f_na; - NumArray osort = pd_ptr->o_na; - NumArray csort = pd_ptr->cmn_na; - fsort.sort_array(); - osort.sort_array(); - csort.sort_array(); + NumArray f_sort = pd_ptr->f_na; + NumArray o_sort = pd_ptr->o_na; + NumArray fcmn_sort = pd_ptr->fcmn_na; + NumArray ocmn_sort = pd_ptr->ocmn_na; + f_sort.sort_array(); + o_sort.sort_array(); + fcmn_sort.sort_array(); + ocmn_sort.sort_array(); // // Compute percentiles // - fcat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - ocat_ta.set_perc(&fsort, &osort, &csort, &fcat_ta, &ocat_ta); - fcnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); - ocnt_ta.set_perc(&fsort, &osort, &csort, &fcnt_ta, &ocnt_ta); + fcat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + ocat_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcat_ta, &ocat_ta); + fcnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); + ocnt_ta.set_perc(&f_sort, &o_sort, &fcmn_sort, &ocmn_sort, &fcnt_ta, &ocnt_ta); return; } diff --git a/src/tools/core/series_analysis/series_analysis.cc b/src/tools/core/series_analysis/series_analysis.cc index ebbb43e27a..9373c0e040 100644 --- a/src/tools/core/series_analysis/series_analysis.cc +++ b/src/tools/core/series_analysis/series_analysis.cc @@ -32,9 +32,10 @@ // 011 05/28/21 Halley Gotway Add MCTS HSS_EC output. // 012 01/20/22 Halley Gotway MET #2003 Add PSTD BRIERCL output. // 013 05/25/22 Halley Gotway MET #2147 Add CTS HSS_EC output. -// 014 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 015 10/03/22 Presotpnik MET #2227 Remove namespace netCDF from header files -// 016 01/29/24 Halley Gotway MET #2801 Configure time difference warnings +// 014 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 015 10/03/22 Presotpnik MET #2227 Remove namespace netCDF from header files. +// 016 01/29/24 Halley Gotway MET #2801 Configure time difference warnings. +// 017 07/05/24 Halley Gotway MET #2924 Support forecast climatology. 
// //////////////////////////////////////////////////////////////////////// @@ -687,8 +688,8 @@ void process_scores() { const char *method_name = "process_scores() "; // Climatology mean and standard deviation - DataPlane cmn_dp, csd_dp; - bool cmn_flag, csd_flag; + DataPlane fcmn_dp, fcsd_dp; + DataPlane ocmn_dp, ocsd_dp; // Number of points skipped due to valid data threshold int n_skip_zero = 0; @@ -737,21 +738,34 @@ void process_scores() { } // Read climatology data for the current series entry - cmn_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_mean_field, false), - i_fcst, fcst_dp.valid(), grid); - csd_dp = read_climo_data_plane( - conf_info.conf.lookup_array(conf_key_climo_stdev_field, false), - i_fcst, fcst_dp.valid(), grid); - - cmn_flag = (cmn_dp.nx() == fcst_dp.nx() && cmn_dp.ny() == fcst_dp.ny()); - csd_flag = (csd_dp.nx() == fcst_dp.nx() && csd_dp.ny() == fcst_dp.ny()); + fcmn_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_mean_field, false), + i_fcst, fcst_dp.valid(), grid); + fcsd_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_fcst_climo_stdev_field, false), + i_fcst, fcst_dp.valid(), grid); + ocmn_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_mean_field, false), + i_fcst, fcst_dp.valid(), grid); + ocsd_dp = read_climo_data_plane( + conf_info.conf.lookup_array(conf_key_obs_climo_stdev_field, false), + i_fcst, fcst_dp.valid(), grid); + + bool fcmn_flag = (fcmn_dp.nx() == fcst_dp.nx() && + fcmn_dp.ny() == fcst_dp.ny()); + bool fcsd_flag = (fcsd_dp.nx() == fcst_dp.nx() && + fcsd_dp.ny() == fcst_dp.ny()); + bool ocmn_flag = (ocmn_dp.nx() == fcst_dp.nx() && + ocmn_dp.ny() == fcst_dp.ny()); + bool ocsd_flag = (ocsd_dp.nx() == fcst_dp.nx() && + ocsd_dp.ny() == fcst_dp.ny()); mlog << Debug(3) - << "Found " << (cmn_flag ? 1 : 0) - << " climatology mean and " << (csd_flag ? 1 : 0) - << " climatology standard deviation field(s) for forecast " - << fcst_info->magic_str() << ".\n"; + << "For " << fcst_info->magic_str() << ", found " + << (fcmn_flag ? 1 : 0) << " forecast climatology mean and " + << (fcsd_flag ? 1 : 0) << " standard deviation field(s), and " + << (ocmn_flag ? 1 : 0) << " observation climatology mean and " + << (ocsd_flag ? 1 : 0) << " standard deviation field(s).\n"; // Setup the output NetCDF file on the first pass if(nc_out == (NcFile *) 0) setup_nc_file(fcst_info, obs_info); @@ -771,16 +785,21 @@ void process_scores() { DefaultTO.one_to_two(grid.nx(), grid.ny(), i_point+i, x, y); // Skip points outside the mask and bad data - if(!conf_info.mask_area(x, y) || - is_bad_data(fcst_dp(x, y)) || - is_bad_data(obs_dp(x,y)) || - (cmn_flag && is_bad_data(cmn_dp(x, y))) || - (csd_flag && is_bad_data(csd_dp(x, y)))) continue; - - pd_ptr[i].add_grid_pair(fcst_dp(x, y), obs_dp(x, y), - (cmn_flag ? cmn_dp(x, y) : bad_data_double), - (csd_flag ? csd_dp(x, y) : bad_data_double), - default_grid_weight); + if(!conf_info.mask_area(x, y) || + is_bad_data(fcst_dp(x, y)) || + is_bad_data(obs_dp(x,y)) || + (fcmn_flag && is_bad_data(fcmn_dp(x, y))) || + (fcsd_flag && is_bad_data(fcsd_dp(x, y))) || + (ocmn_flag && is_bad_data(ocmn_dp(x, y))) || + (ocsd_flag && is_bad_data(ocsd_dp(x, y)))) continue; + + // Store climo data + ClimoPntInfo cpi((fcmn_flag ? fcmn_dp(x, y) : bad_data_double), + (fcsd_flag ? fcsd_dp(x, y) : bad_data_double), + (ocmn_flag ? ocmn_dp(x, y) : bad_data_double), + (ocsd_flag ? 
ocsd_dp(x, y) : bad_data_double)); + + pd_ptr[i].add_grid_pair(fcst_dp(x, y), obs_dp(x, y), cpi, default_grid_weight); } // end for i @@ -860,8 +879,10 @@ void process_scores() { for(i=0; i #include #include @@ -1770,10 +1770,9 @@ void aggr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Append the unit vectors with no climatological values // - m[key].pd_u.add_grid_pair(uf, uo, bad_data_double, - bad_data_double, default_grid_weight); - m[key].pd_v.add_grid_pair(vf, vo, bad_data_double, - bad_data_double, default_grid_weight); + ClimoPntInfo cpi; + m[key].pd_u.add_grid_pair(uf, uo, cpi, default_grid_weight); + m[key].pd_v.add_grid_pair(vf, vo, cpi, default_grid_weight); // // Keep track of the unique header column entries @@ -1797,9 +1796,11 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, VL1L2Info v_info; MPRData cur; ConcatString hdr, key; - double uf, uo, ucmn, ucsd; - double vf, vo, vcmn, vcsd; - double fcst_wind, obs_wind, cmn_wind, csd_wind; + double uf, uo, ufcmn, ufcsd, uocmn, uocsd; + double vf, vo, vfcmn, vfcsd, vocmn, vocsd; + double fcst_wind, obs_wind; + double fcmn_wind, fcsd_wind; + double ocmn_wind, ocsd_wind; bool is_ugrd; int i; map::iterator it; @@ -1819,14 +1820,18 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, parse_mpr_line(line, cur); is_ugrd = (cur.fcst_var == ugrd_abbr_str); - uf = (is_ugrd ? cur.fcst : bad_data_double); - uo = (is_ugrd ? cur.obs : bad_data_double); - ucmn = (is_ugrd ? cur.climo_mean : bad_data_double); - ucsd = (is_ugrd ? cur.climo_stdev : bad_data_double); - vf = (is_ugrd ? bad_data_double : cur.fcst); - vo = (is_ugrd ? bad_data_double : cur.obs); - vcmn = (is_ugrd ? bad_data_double : cur.climo_mean); - vcsd = (is_ugrd ? bad_data_double : cur.climo_stdev); + uf = (is_ugrd ? cur.fcst : bad_data_double); + uo = (is_ugrd ? cur.obs : bad_data_double); + ufcmn = (is_ugrd ? cur.fcst_climo_mean : bad_data_double); + ufcsd = (is_ugrd ? cur.fcst_climo_stdev : bad_data_double); + uocmn = (is_ugrd ? cur.obs_climo_mean : bad_data_double); + uocsd = (is_ugrd ? cur.obs_climo_stdev : bad_data_double); + vf = (is_ugrd ? bad_data_double : cur.fcst); + vo = (is_ugrd ? bad_data_double : cur.obs); + vfcmn = (is_ugrd ? bad_data_double : cur.fcst_climo_mean); + vfcsd = (is_ugrd ? bad_data_double : cur.fcst_climo_stdev); + vocmn = (is_ugrd ? bad_data_double : cur.obs_climo_mean); + vocsd = (is_ugrd ? 
bad_data_double : cur.obs_climo_stdev); // // Build header string for matching UGRD and VGRD lines @@ -1880,10 +1885,10 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // Initialize values // aggr.hdr_sa.add(hdr); - aggr.pd_u.add_grid_pair(uf, uo, ucmn, ucsd, - default_grid_weight); - aggr.pd_v.add_grid_pair(vf, vo, vcmn, vcsd, - default_grid_weight); + ClimoPntInfo u_cpi(ufcmn, ufcsd, uocmn, uocsd); + ClimoPntInfo v_cpi(vfcmn, vfcsd, vocmn, vocsd); + aggr.pd_u.add_grid_pair(uf, uo, u_cpi, default_grid_weight); + aggr.pd_v.add_grid_pair(vf, vo, v_cpi, default_grid_weight); // // Add the new map entry @@ -1927,24 +1932,28 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Update the existing values // - if(!is_bad_data(uf)) m[key].pd_u.f_na.set(i, uf); - if(!is_bad_data(uo)) m[key].pd_u.o_na.set(i, uo); - if(!is_bad_data(ucmn)) m[key].pd_u.cmn_na.set(i, ucmn); - if(!is_bad_data(ucsd)) m[key].pd_u.csd_na.set(i, ucsd); - if(!is_bad_data(vf)) m[key].pd_v.f_na.set(i, vf); - if(!is_bad_data(vo)) m[key].pd_v.o_na.set(i, vo); - if(!is_bad_data(vcmn)) m[key].pd_v.cmn_na.set(i, vcmn); - if(!is_bad_data(vcsd)) m[key].pd_v.csd_na.set(i, vcsd); + if(!is_bad_data(uf)) m[key].pd_u.f_na.set(i, uf); + if(!is_bad_data(uo)) m[key].pd_u.o_na.set(i, uo); + if(!is_bad_data(ufcmn)) m[key].pd_u.fcmn_na.set(i, ufcmn); + if(!is_bad_data(ufcsd)) m[key].pd_u.fcsd_na.set(i, ufcsd); + if(!is_bad_data(uocmn)) m[key].pd_u.ocmn_na.set(i, uocmn); + if(!is_bad_data(uocsd)) m[key].pd_u.ocsd_na.set(i, uocsd); + if(!is_bad_data(vf)) m[key].pd_v.f_na.set(i, vf); + if(!is_bad_data(vo)) m[key].pd_v.o_na.set(i, vo); + if(!is_bad_data(vfcmn)) m[key].pd_v.fcmn_na.set(i, vfcmn); + if(!is_bad_data(vfcsd)) m[key].pd_v.fcsd_na.set(i, vfcsd); + if(!is_bad_data(vocmn)) m[key].pd_v.ocmn_na.set(i, vocmn); + if(!is_bad_data(vocsd)) m[key].pd_v.ocsd_na.set(i, vocsd); } // // Add data for a new header entry // else { m[key].hdr_sa.add(hdr); - m[key].pd_u.add_grid_pair(uf, uo, ucmn, ucsd, - default_grid_weight); - m[key].pd_v.add_grid_pair(vf, vo, vcmn, vcsd, - default_grid_weight); + ClimoPntInfo u_cpi(ufcmn, ufcsd, uocmn, uocsd); + ClimoPntInfo v_cpi(vfcmn, vfcsd, vocmn, vocsd); + m[key].pd_u.add_grid_pair(uf, uo, u_cpi, default_grid_weight); + m[key].pd_v.add_grid_pair(vf, vo, v_cpi, default_grid_weight); } } @@ -2011,18 +2020,25 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, job.out_obs_wind_thresh.get_type() != thresh_na) { // Compute wind speeds - fcst_wind = convert_u_v_to_wind(it->second.pd_u.f_na[i], - it->second.pd_v.f_na[i]); - obs_wind = convert_u_v_to_wind(it->second.pd_u.o_na[i], - it->second.pd_v.o_na[i]); - cmn_wind = convert_u_v_to_wind(it->second.pd_u.cmn_na[i], - it->second.pd_v.cmn_na[i]); - csd_wind = convert_u_v_to_wind(it->second.pd_u.csd_na[i], - it->second.pd_v.csd_na[i]); + fcst_wind = convert_u_v_to_wind(it->second.pd_u.f_na[i], + it->second.pd_v.f_na[i]); + obs_wind = convert_u_v_to_wind(it->second.pd_u.o_na[i], + it->second.pd_v.o_na[i]); + fcmn_wind = convert_u_v_to_wind(it->second.pd_u.fcmn_na[i], + it->second.pd_v.fcmn_na[i]); + fcsd_wind = convert_u_v_to_wind(it->second.pd_u.fcsd_na[i], + it->second.pd_v.fcsd_na[i]); + ocmn_wind = convert_u_v_to_wind(it->second.pd_u.ocmn_na[i], + it->second.pd_v.ocmn_na[i]); + ocsd_wind = convert_u_v_to_wind(it->second.pd_u.ocsd_na[i], + it->second.pd_v.ocsd_na[i]); + + // Store climo data + ClimoPntInfo cpi(fcmn_wind, fcsd_wind, ocmn_wind, ocsd_wind); // No climo mean and standard deviation in the input VL1L2 lines, 
// so just fill with bad data. - if(!check_fo_thresh(fcst_wind, obs_wind, cmn_wind, csd_wind, + if(!check_fo_thresh(fcst_wind, obs_wind, cpi, job.out_fcst_wind_thresh, job.out_obs_wind_thresh, job.out_wind_logic)) { mlog << Debug(4) << "aggr_mpr_wind_lines() -> " @@ -2076,15 +2092,14 @@ void aggr_mpr_wind_lines(LineDataFile &f, STATAnalysisJob &job, // // Convert to and append unit vectors // + ClimoPntInfo cpi; aggr.hdr_sa.add(it->second.hdr_sa[i]); convert_u_v_to_unit(it->second.pd_u.f_na[i], it->second.pd_v.f_na[i], uf, vf); convert_u_v_to_unit(it->second.pd_u.o_na[i], it->second.pd_v.o_na[i], uo, vo); - aggr.pd_u.add_grid_pair(uf, uo, bad_data_double, - bad_data_double, default_grid_weight); - aggr.pd_v.add_grid_pair(vf, vo, bad_data_double, - bad_data_double, default_grid_weight); + aggr.pd_u.add_grid_pair(uf, uo, cpi, default_grid_weight); + aggr.pd_v.add_grid_pair(vf, vo, cpi, default_grid_weight); } // @@ -2151,17 +2166,21 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, aggr.pd.f_na.clear(); aggr.pd.o_na.clear(); - aggr.pd.cmn_na.clear(); - aggr.pd.csd_na.clear(); - aggr.pd.cdf_na.clear(); + aggr.pd.fcmn_na.clear(); + aggr.pd.fcsd_na.clear(); + aggr.pd.ocmn_na.clear(); + aggr.pd.ocsd_na.clear(); + aggr.pd.ocdf_na.clear(); aggr.pd.wgt_na.clear(); aggr.pd.n_obs = 1; aggr.pd.f_na.add(cur.fcst); aggr.pd.o_na.add(cur.obs); - aggr.pd.cmn_na.add(cur.climo_mean); - aggr.pd.csd_na.add(cur.climo_stdev); - aggr.pd.cdf_na.add(cur.climo_cdf); + aggr.pd.fcmn_na.add(cur.fcst_climo_mean); + aggr.pd.fcsd_na.add(cur.fcst_climo_stdev); + aggr.pd.ocmn_na.add(cur.obs_climo_mean); + aggr.pd.ocsd_na.add(cur.obs_climo_stdev); + aggr.pd.ocdf_na.add(cur.obs_climo_cdf); aggr.pd.wgt_na.add(default_grid_weight); aggr.fcst_var = cur.fcst_var; @@ -2188,9 +2207,11 @@ void aggr_mpr_lines(LineDataFile &f, STATAnalysisJob &job, m[key].pd.n_obs++; m[key].pd.f_na.add(cur.fcst); m[key].pd.o_na.add(cur.obs); - m[key].pd.cmn_na.add(cur.climo_mean); - m[key].pd.csd_na.add(cur.climo_stdev); - m[key].pd.cdf_na.add(cur.climo_cdf); + m[key].pd.fcmn_na.add(cur.fcst_climo_mean); + m[key].pd.fcsd_na.add(cur.fcst_climo_stdev); + m[key].pd.ocmn_na.add(cur.obs_climo_mean); + m[key].pd.ocsd_na.add(cur.obs_climo_stdev); + m[key].pd.ocdf_na.add(cur.obs_climo_cdf); m[key].pd.wgt_na.add(default_grid_weight); // @@ -3086,8 +3107,9 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, // ensemble spread, ensemble member values, and // valid ensemble count // - m[key].ens_pd.add_grid_obs(cur.obs, cur.climo_mean, - cur.climo_stdev, default_grid_weight); + ClimoPntInfo cpi(cur.fcst_climo_mean, cur.fcst_climo_stdev, + cur.obs_climo_mean, cur.obs_climo_stdev); + m[key].ens_pd.add_grid_obs(cur.obs, cpi, default_grid_weight); m[key].ens_pd.skip_ba.add(false); m[key].ens_pd.n_pair++; m[key].ens_pd.r_na.add(cur.rank); @@ -3112,7 +3134,7 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, // Derive ensemble from climo mean and standard deviation derive_climo_vals(&m[key].cdf_info, - cur.climo_mean, cur.climo_stdev, climo_vals); + cur.obs_climo_mean, cur.obs_climo_stdev, climo_vals); // Store empirical CRPS stats and CRPS-Fair double crps_emp = compute_crps_emp(cur.obs, cur.ens_na); @@ -3123,7 +3145,7 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job, // Store Gaussian CRPS stats m[key].ens_pd.crps_gaus_na.add(compute_crps_gaus(cur.obs, cur.ens_mean, cur.spread)); - m[key].ens_pd.crpscl_gaus_na.add(compute_crps_gaus(cur.obs, cur.climo_mean, cur.climo_stdev)); + 
m[key].ens_pd.crpscl_gaus_na.add(compute_crps_gaus(cur.obs, cur.obs_climo_mean, cur.obs_climo_stdev)); m[key].ens_pd.ign_na.add(compute_ens_ign(cur.obs, cur.ens_mean, cur.spread)); m[key].ens_pd.pit_na.add(compute_ens_pit(cur.obs, cur.ens_mean, cur.spread)); @@ -3783,8 +3805,9 @@ void mpr_to_ctc(STATAnalysisJob &job, const AggrMPRInfo &info, // Populate the contingency table // for(i=0; isecond.pd.f_na, it->second.pd.o_na, it->second.pd.cmn_na); + job.set_perc_thresh(it->second.pd.f_na, it->second.pd.o_na, + it->second.pd.fcmn_na, it->second.pd.ocmn_na); // // Prepare the output STAT header columns diff --git a/src/tools/core/wavelet_stat/wavelet_stat_conf_info.cc b/src/tools/core/wavelet_stat/wavelet_stat_conf_info.cc index e2dc49fb9f..2c51c68857 100644 --- a/src/tools/core/wavelet_stat/wavelet_stat_conf_info.cc +++ b/src/tools/core/wavelet_stat/wavelet_stat_conf_info.cc @@ -449,8 +449,8 @@ void WaveletStatConfInfo::set_perc_thresh(const DataPlane &f_dp, // // Compute percentiles // - fcat_ta->set_perc(&fsort, &osort, (NumArray *) 0, fcat_ta, ocat_ta); - ocat_ta->set_perc(&fsort, &osort, (NumArray *) 0, fcat_ta, ocat_ta); + fcat_ta->set_perc(&fsort, &osort, nullptr, nullptr, fcat_ta, ocat_ta); + ocat_ta->set_perc(&fsort, &osort, nullptr, nullptr, fcat_ta, ocat_ta); return; } diff --git a/src/tools/dev_utils/met_nc_file.cc b/src/tools/dev_utils/met_nc_file.cc index 3579c52f8e..5b293ec211 100644 --- a/src/tools/dev_utils/met_nc_file.cc +++ b/src/tools/dev_utils/met_nc_file.cc @@ -127,7 +127,7 @@ bool MetNcFile::readFile(const int desired_grib_code, IS_INVALID_NC(obsArrVar)) { mlog << Error << "\nmain() -> " - << "trouble reading netCDF file " << _filePath << "\n\n"; + << "trouble reading netCDF file " << _filePath << "\n\n"; return false; } diff --git a/src/tools/other/ascii2nc/airnow_locations.cc b/src/tools/other/ascii2nc/airnow_locations.cc index a7908c7a7b..e8d09647b8 100644 --- a/src/tools/other/ascii2nc/airnow_locations.cc +++ b/src/tools/other/ascii2nc/airnow_locations.cc @@ -55,8 +55,8 @@ bool AirnowLocations::initialize(const string &fileName) LineDataFile locFile; if (!locFile.open(monitoringSiteFileName.c_str())) { mlog << Warning << "\n" << method_name << " -> " - << "can't open input ASCII file \"" << monitoringSiteFileName - << "\" for reading\n\n"; + << "can't open input ASCII file \"" << monitoringSiteFileName + << "\" for reading\n\n"; return false; } DataLine data_line; @@ -82,7 +82,7 @@ bool AirnowLocations::initialize(const string &fileName) if (!status) { return false; } - + // this is inefficient, but will work int bad_line_count = 0; while (locFile >> data_line) { @@ -126,12 +126,12 @@ bool AirnowLocations::initialize(const string &fileName) #ifdef DEBUGGING for (size_t i=0; i" - << "can't open input ASCII file \"" << fileName - << "\" for reading\n\n"; + << "can't open input ASCII file \"" << fileName + << "\" for reading\n\n"; return false; } DataLine data_line; @@ -77,7 +77,7 @@ bool NdbcLocations::initialize(const string &fName) string latKey = "lat="; string lonKey = "lon="; string elevKey = "elev="; - + while (locFile >> data_line) { string sline = data_line.get_line(); string stationId; @@ -88,14 +88,14 @@ bool NdbcLocations::initialize(const string &fName) } if (!_parseLineForDouble(sline, latKey, lat)) { mlog << Warning << method_name << "-> " - << "parsing out lat from line '" << sline << "'\n" - << "in file \"" << fileName << "\n\n"; + << "parsing out lat from line '" << sline << "'\n" + << "in file \"" << fileName << "\n\n"; continue; } if 
(!_parseLineForDouble(sline, lonKey, lon)) { mlog << Warning << method_name << "-> " - << "parsing out lon from line '" << sline << "'\n" - << "in file \"" << fileName << "\n\n"; + << "parsing out lon from line '" << sline << "'\n" + << "in file \"" << fileName << "\n\n"; continue; } if (!_parseLineForDouble(sline, elevKey, elev)) { diff --git a/src/tools/other/gen_ens_prod/gen_ens_prod.cc b/src/tools/other/gen_ens_prod/gen_ens_prod.cc index 46ad8b66df..368fceab6f 100644 --- a/src/tools/other/gen_ens_prod/gen_ens_prod.cc +++ b/src/tools/other/gen_ens_prod/gen_ens_prod.cc @@ -713,6 +713,9 @@ void track_counts(GenEnsProdVarInfo *ens_info, const DataPlane &ens_dp, bool is_ cmn = (cmn_dp.is_empty() ? bad_data_double : cmn_dp.data()[i]); csd = (csd_dp.is_empty() ? bad_data_double : csd_dp.data()[i]); + // MET #2924 Use the same data for the forecast and observation climatologies + ClimoPntInfo cpi(cmn, csd, cmn, csd); + // Skip bad data values if(is_bad_data(ens)) continue; @@ -738,7 +741,7 @@ void track_counts(GenEnsProdVarInfo *ens_info, const DataPlane &ens_dp, bool is_ // Event frequency for(j=0; j::iterator it = simp.begin(); it != simp.end(); it++) { - if(it->ptype() == perc_thresh_climo_dist && + if(it->ptype() == perc_thresh_fcst_climo_dist && !is_eq(it->pvalue(), 0.0) && !is_eq(it->pvalue(), 100.0)) { - snprintf(type_str, sizeof(type_str), "CLIMO_CDP%i", + snprintf(type_str, sizeof(type_str), "CLIMO_FCDP%i", + nint(it->pvalue())); + cdp_dp = normal_cdf_inv(it->pvalue()/100.0, cmn_dp, csd_dp); + write_ens_data_plane(ens_info, cdp_dp, ens_dp, type_str, + "Forecast climatology distribution percentile"); + } + else if(it->ptype() == perc_thresh_obs_climo_dist && + !is_eq(it->pvalue(), 0.0) && + !is_eq(it->pvalue(), 100.0)) { + snprintf(type_str, sizeof(type_str), "CLIMO_OCDP%i", nint(it->pvalue())); cdp_dp = normal_cdf_inv(it->pvalue()/100.0, cmn_dp, csd_dp); - write_ens_data_plane(ens_info, cdp_dp, ens_dp, - type_str, - "Climatology distribution percentile"); + write_ens_data_plane(ens_info, cdp_dp, ens_dp, type_str, + "Observation climatology distribution percentile"); } } // end for it } diff --git a/src/tools/other/gen_vx_mask/gen_vx_mask.cc b/src/tools/other/gen_vx_mask/gen_vx_mask.cc index e4c14b557b..7612f0d7d2 100644 --- a/src/tools/other/gen_vx_mask/gen_vx_mask.cc +++ b/src/tools/other/gen_vx_mask/gen_vx_mask.cc @@ -1081,7 +1081,8 @@ void apply_data_mask(DataPlane &dp) { for(int i=0; i #include #include @@ -51,7 +50,6 @@ using namespace std; using namespace netCDF; - //////////////////////////////////////////////////////////////////////// static void process_command_line(int, char **); @@ -431,9 +429,9 @@ void setup_histograms(void) { bin_max.clear(); bin_mid.clear(); for(int k=0; kmagic_str_attr() - << " histogram with " << n_bins << " bins from " - << min << " to " << max << ".\n"; - + << "Initializing " << data_info->magic_str_attr() + << " histogram with " << n_bins << " bins from " + << min << " to " << max << ".\n"; + histograms[i_var_str] = vector(); init_pdf(n_bins, histograms[i_var_str]); // Keep track of unique output variable names if(nc_var_sa.has( data_info->magic_str_attr() )) unique_variable_names = false; nc_var_sa.add(data_info->magic_str_attr()); - + } // for i_var } @@ -462,7 +460,7 @@ void setup_histograms(void) { void setup_joint_histograms(void) { ConcatString i_var_str, j_var_str, ij_var_str; - for(int i_var=0; i_varn_bins(); @@ -487,7 +485,7 @@ void setup_joint_histograms(void) { init_joint_pdf(n_bins, n_joint_bins, joint_histograms[ij_var_str]); } // end 
for j_var - } // end for i_var + } // end for i_var } //////////////////////////////////////////////////////////////////////// diff --git a/src/tools/other/gsi_tools/gsid2mpr.cc b/src/tools/other/gsi_tools/gsid2mpr.cc index 579d41bd19..e5a770f8e3 100644 --- a/src/tools/other/gsi_tools/gsid2mpr.cc +++ b/src/tools/other/gsi_tools/gsid2mpr.cc @@ -16,12 +16,12 @@ // ---- ---- ---- ----------- // 000 06/09/15 Bullock New // 001 01/26/16 Halley Gotway Add -no_check_dup option. -// 002 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 003 10/03/22 Prestopnik MET #2227 Remove using namespace std from header files +// 002 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 003 10/03/22 Prestopnik MET #2227 Remove using namespace std from header files. +// 004 07/17/24 Halley Gotway MET #2924 Support forecast climatology. // //////////////////////////////////////////////////////////////////////// - #include #include #include @@ -389,9 +389,13 @@ void write_mpr_row_conv(AsciiTable &at, int row, const ConvData &d) { at.set_entry(row, col++, d.guess); // FCST at.set_entry(row, col++, d.obs); // OBS at.set_entry(row, col++, d.obs_qc[0]); // OBS_QC - at.set_entry(row, col++, na_str); // CLIMO_MEAN - at.set_entry(row, col++, na_str); // CLIMO_STDEV - at.set_entry(row, col++, na_str); // CLIMO_CDF + at.set_entry(row, col++, na_str); // OBS_CLIMO_MEAN + at.set_entry(row, col++, na_str); // OBS_CLIMO_STDEV + at.set_entry(row, col++, na_str); // OBS_CLIMO_CDF + /* MET #2924 Uncomment this section + at.set_entry(row, col++, na_str); // FCST_CLIMO_MEAN + at.set_entry(row, col++, na_str); // FCST_CLIMO_STDEV + */ // Write extra columns at.set_entry(row, col++, d.prs); // OBS_PRS @@ -443,9 +447,13 @@ void write_mpr_row_rad(AsciiTable &at, int row, const RadData & d) { at.set_entry(row, col++, d.guess); // FCST at.set_entry(row, col++, d.obs); // OBS at.set_entry(row, col++, d.obs_qc[0]); // OBS_QC - at.set_entry(row, col++, na_str); // CLIMO_MEAN - at.set_entry(row, col++, na_str); // CLIMO_STDEV - at.set_entry(row, col++, na_str); // CLIMO_CDF + at.set_entry(row, col++, na_str); // OBS_CLIMO_MEAN + at.set_entry(row, col++, na_str); // OBS_CLIMO_STDEV + at.set_entry(row, col++, na_str); // OBS_CLIMO_CDF + /* MET #2924 Uncomment this section + at.set_entry(row, col++, na_str); // FCST_CLIMO_MEAN + at.set_entry(row, col++, na_str); // FCST_CLIMO_STDEV + */ // Write extra columns at.set_entry(row, col++, d.use); // CHAN_USE diff --git a/src/tools/other/gsi_tools/gsidens2orank.cc b/src/tools/other/gsi_tools/gsidens2orank.cc index 0c75717fa8..fed035bd69 100644 --- a/src/tools/other/gsi_tools/gsidens2orank.cc +++ b/src/tools/other/gsi_tools/gsidens2orank.cc @@ -15,8 +15,9 @@ // Mod# Date Name Description // ---- ---- ---- ----------- // 000 07/09/15 Halley Gotway New -// 001 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main -// 002 10/03/22 Prestopnik MET #2227 Remove namespace std from header files +// 001 07/06/22 Howard Soh METplus-Internal #19 Rename main to met_main. +// 002 10/03/22 Prestopnik MET #2227 Remove namespace std from header files. +// 003 07/17/24 Halley Gotway MET #2924 Support forecast climatology. 
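The GSI diagnostics carry no climatology, so the renamed MPR climatology columns above are filled with the NA string, and the forecast climatology columns remain commented out until MET #2924 activates them. A compact standalone sketch of that column layout is shown below; the full MPR header is defined elsewhere in MET and is not reproduced here.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
   const std::string na_str = "NA";
   std::vector<std::pair<std::string, std::string>> cols = {
      {"OBS_CLIMO_MEAN",  na_str},
      {"OBS_CLIMO_STDEV", na_str},
      {"OBS_CLIMO_CDF",   na_str}
      // FCST_CLIMO_MEAN and FCST_CLIMO_STDEV stay commented out in the
      // patch until the forecast climatology columns are written.
   };
   for(const auto &c : cols) std::cout << c.first << " = " << c.second << "\n";
   return 0;
}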
// //////////////////////////////////////////////////////////////////////// @@ -266,10 +267,11 @@ void process_conv_data(ConvData &d, int i_mem) { conv_data.push_back(d); // Store the current observation info + ClimoPntInfo cpi(bad_data_double, bad_data_double, + bad_data_double, bad_data_double); ens_pd.add_point_obs(d.sid.c_str(), d.lat, d.lon, bad_data_double, bad_data_double, d.obs_ut, d.prs, - d.elv, d.obs, na_str, bad_data_double, bad_data_double, - default_grid_weight); + d.elv, d.obs, na_str, cpi, default_grid_weight); // Initialize ensemble members and mean to bad data for(i=0; in_use); // N_USE @@ -696,12 +704,17 @@ void write_orank_row_rad(AsciiTable &at, int row, int i_obs) { } at.set_entry(row, col++, cs); // OBS_QC at.set_entry(row, col++, ens_pd.mn_na[i_obs]); // ENS_MEAN - at.set_entry(row, col++, bad_data_double); // CLIMO - at.set_entry(row, col++, square_root(ens_pd.var_na[i_obs])); // ENS_SPREAD + at.set_entry(row, col++, bad_data_double); // OBS_CLIMO_MEAN + at.set_entry(row, col++, square_root(ens_pd.var_na[i_obs])); // SPREAD at.set_entry(row, col++, bad_data_double); // ENS_MEAN_OERR at.set_entry(row, col++, bad_data_double); // SPREAD_OERR at.set_entry(row, col++, bad_data_double); // SPREAD_PLUS_OERR + at.set_entry(row, col++, bad_data_double); // OBS_CLIMO_STDEV + /* MET #2924 Uncomment this section + at.set_entry(row, col++, bad_data_double); // FCST_CLIMO_MEAN + at.set_entry(row, col++, bad_data_double); // FCST_CLIMO_STDEV + */ // Write extra columns at.set_entry(row, col++, d->n_use); // N_USE diff --git a/src/tools/other/madis2nc/madis2nc_conf_info.cc b/src/tools/other/madis2nc/madis2nc_conf_info.cc index c4b864ab13..953aa296f1 100644 --- a/src/tools/other/madis2nc/madis2nc_conf_info.cc +++ b/src/tools/other/madis2nc/madis2nc_conf_info.cc @@ -59,7 +59,7 @@ void Madis2NcConfInfo::clear() //////////////////////////////////////////////////////////////////////// void Madis2NcConfInfo::read_config(const string &default_filename, - const string &user_filename) + const string &user_filename) { // Read the config file constants diff --git a/src/tools/other/modis_regrid/data_plane_to_netcdf.cc b/src/tools/other/modis_regrid/data_plane_to_netcdf.cc index e13719a321..819a67885b 100644 --- a/src/tools/other/modis_regrid/data_plane_to_netcdf.cc +++ b/src/tools/other/modis_regrid/data_plane_to_netcdf.cc @@ -79,57 +79,57 @@ void write_grid_to_netcdf(const DataPlane & plane, const Grid & grid, const char NcDim lat_dim ; NcDim lon_dim ; NcVar f_var ; - - + + // Create a new NetCDF file and open it f_out = open_ncfile(out_filename, true); - + if(IS_INVALID_NC_P(f_out)) { mlog << Error << "\nwrite_netcdf() -> " - << "trouble opening output file " << out_filename - << "\n\n"; + << "trouble opening output file " << out_filename + << "\n\n"; delete f_out; f_out = (NcFile *) nullptr; exit(1); } - + // Add global attributes const char * program_name = "data_plane_to_netcdf"; write_netcdf_global(f_out, out_filename, program_name); - + // Add the projection information write_netcdf_proj(f_out, grid, lat_dim, lon_dim); // Add the lat/lon variables write_netcdf_latlon(f_out, &lat_dim, &lon_dim, grid); - + int deflate_level = get_compress(); //if (deflate_level < 0) deflate_level = 0; - + // Define variable f_var = add_var(f_out, (string)var_info.name(), ncFloat, lat_dim, lon_dim, deflate_level); - + // Add variable attributes add_att(&f_var, "name", (string)var_info.name()); add_att(&f_var, "units", (string)var_info.units()); add_att(&f_var, "long_name", (string)var_info.long_name()); 
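The add_point_obs() call above now takes a single climatology object in place of separate mean/stdev arguments, filled entirely with bad data for GSI ensemble input. A rough standalone sketch of that bundling follows, assuming a bad-data sentinel of -9999 and hypothetical member names; the real ClimoPntInfo layout lives in MET's vx_statistics library, not in this patch.

#include <iostream>

static const double bad_data_double = -9999.0;

struct ClimoPntInfoSketch {
   double fcmn, fcsd;   // forecast climatology mean / standard deviation
   double ocmn, ocsd;   // observation climatology mean / standard deviation
   ClimoPntInfoSketch(double a, double b, double c, double d)
      : fcmn(a), fcsd(b), ocmn(c), ocsd(d) {}
   bool has_climo() const { return fcmn != bad_data_double; }
};

int main() {
   // GSI ensemble input carries no climatology, so every field is bad data.
   ClimoPntInfoSketch cpi(bad_data_double, bad_data_double,
                          bad_data_double, bad_data_double);
   std::cout << (cpi.has_climo() ? "climo present" : "no climo") << "\n";
   return 0;
}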
add_att(&f_var, "_FillValue", bad_data_float); - + // Write out the times write_netcdf_var_times(&f_var, plane); - + // Write the data if (!put_nc_data_with_dims(&f_var, plane.data(), plane.ny(), plane.nx())) { mlog << Error << "\nwrite_netcdf() -> " - << "error with f_var->put()\n\n"; + << "error with f_var->put()\n\n"; exit(1); } - + // Close and clean up delete f_out; f_out = (NcFile *) nullptr; - + return; } diff --git a/src/tools/tc_utils/tc_diag/python_tc_diag.cc b/src/tools/tc_utils/tc_diag/python_tc_diag.cc index 991295370f..f7791d8bcf 100644 --- a/src/tools/tc_utils/tc_diag/python_tc_diag.cc +++ b/src/tools/tc_utils/tc_diag/python_tc_diag.cc @@ -306,7 +306,7 @@ bool parse_python_diag_data(PyObject *module_obj, // Get the diag_data item PyObject *data_obj = PyDict_GetItem(module_dict_obj, - PyUnicode_FromString(diag_data_dict_name)); + PyUnicode_FromString(diag_data_dict_name)); if(!data_obj || !PyDict_Check(data_obj)) { mlog << Warning << "\n" << method_name @@ -384,7 +384,7 @@ bool parse_python_string_value_map(PyObject *dict, long pos; PyObject *data_obj = PyDict_GetItem(dict, - PyUnicode_FromString(name)); + PyUnicode_FromString(name)); if(!data_obj || !PyDict_Check(data_obj)) { mlog << Warning << "\n" << method_name
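In the PyDict_GetItem() calls above, the key built by PyUnicode_FromString() is a new reference that is never released, since PyDict_GetItem() only borrows its key argument. A small sketch of a leak-free lookup follows; get_diag_dict_item() is a hypothetical helper, not a MET or CPython function.

#include <Python.h>

static PyObject *get_diag_dict_item(PyObject *dict, const char *name) {
   // PyDict_GetItemString() builds the key internally and returns a
   // borrowed reference (or nullptr when the key is absent).
   return PyDict_GetItemString(dict, name);

   // Equivalent with an explicit key object:
   //    PyObject *key  = PyUnicode_FromString(name);
   //    PyObject *item = PyDict_GetItem(dict, key);
   //    Py_XDECREF(key);
   //    return item;
}

int main() {
   Py_Initialize();
   PyObject *d = PyDict_New();
   PyObject *v = PyLong_FromLong(42);
   PyDict_SetItemString(d, "diag_data", v);
   Py_DECREF(v);
   PyObject *item = get_diag_dict_item(d, "diag_data");
   if(item) { PyObject_Print(item, stdout, 0); fputc('\n', stdout); }
   Py_DECREF(d);
   Py_FinalizeEx();
   return 0;
}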