diff --git a/data/table_files/met_header_columns_V12.0.txt b/data/table_files/met_header_columns_V12.0.txt index 7ed1e5a187..299e6cb4d6 100644 --- a/data/table_files/met_header_columns_V12.0.txt +++ b/data/table_files/met_header_columns_V12.0.txt @@ -19,7 +19,7 @@ V12.0 : STAT : PJC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID V12.0 : STAT : PRC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL (N_THRESH) THRESH_[0-9]* PODY_[0-9]* POFD_[0-9]* V12.0 : STAT : PSTD : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL (N_THRESH) BASER BASER_NCL BASER_NCU RELIABILITY RESOLUTION UNCERTAINTY ROC_AUC BRIER BRIER_NCL BRIER_NCU BRIERCL BRIERCL_NCL BRIERCL_NCU BSS BSS_SMPL THRESH_[0-9]* V12.0 : STAT : ECLV : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL BASER VALUE_BASER (N_PTS) CL_[0-9]* VALUE_[0-9]* -V12.0 : STAT : ECNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_ENS CRPS CRPSS IGN ME RMSE SPREAD ME_OERR RMSE_OERR SPREAD_OERR SPREAD_PLUS_OERR CRPSCL CRPS_EMP CRPSCL_EMP CRPSS_EMP CRPS_EMP_FAIR SPREAD_MD MAE MAE_OERR BIAS_RATIO N_GE_OBS ME_GE_OBS N_LT_OBS ME_LT_OBS +V12.0 : STAT : ECNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_ENS CRPS CRPSS IGN ME RMSE SPREAD ME_OERR RMSE_OERR SPREAD_OERR SPREAD_PLUS_OERR CRPSCL CRPS_EMP CRPSCL_EMP CRPSS_EMP CRPS_EMP_FAIR SPREAD_MD MAE MAE_OERR BIAS_RATIO N_GE_OBS ME_GE_OBS N_LT_OBS ME_LT_OBS IGN_CONV_OERR IGN_CORR_OERR V12.0 : STAT : RPS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_PROB RPS_REL RPS_RES RPS_UNC RPS RPSS RPSS_SMPL RPS_COMP V12.0 : STAT : RHIST : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL (N_RANK) RANK_[0-9]* V12.0 : STAT : PHIST : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL BIN_SIZE (N_BIN) BIN_[0-9]* diff --git a/docs/Users_Guide/appendixC.rst b/docs/Users_Guide/appendixC.rst index 7c5c618a43..037bd93455 100644 --- a/docs/Users_Guide/appendixC.rst +++ b/docs/Users_Guide/appendixC.rst @@ -401,7 +401,7 @@ SEEPS scores are expected to lie between 0 and 1, 
with a perfect forecast having MET Verification Measures for Continuous Variables ================================================== -For continuous variables, many verification measures are based on the forecast error (i.e., **f - o**). However, it also is of interest to investigate characteristics of the forecasts, and the observations, as well as their relationship. These concepts are consistent with the general framework for verification outlined by :ref:`Murphy and Winkler, (1987) `. The statistics produced by MET for continuous forecasts represent this philosophy of verification, which focuses on a variety of aspects of performance rather than a single measure. +For continuous variables, many verification measures are based on the forecast error (i.e., **f - o**). However, it also is of interest to investigate characteristics of the forecasts, and the observations, as well as their relationship. These concepts are consistent with the general framework for verification outlined by :ref:`Murphy and Winkler, 1987 `. The statistics produced by MET for continuous forecasts represent this philosophy of verification, which focuses on a variety of aspects of performance rather than a single measure. The verification measures currently evaluated by the Point-Stat tool are defined and described in the subsections below. In these definitions, **f** represents the forecasts, **o** represents the observation, and **n** is the number of forecast-observation pairs. @@ -894,7 +894,7 @@ Calibration Called "CALIBRATION" in PJC output :numref:`table_PS_format_info_PJC` -Calibration is the conditional probability of an event given each probability forecast category (i.e. each row in the **nx2** contingency table). This set of measures is paired with refinement in the calibration-refinement factorization discussed in :ref:`Wilks, (2011) `. A well-calibrated forecast will have calibration values that are near the forecast probability. For example, a 50% probability of precipitation should ideally have a calibration value of 0.5. If the calibration value is higher, then the probability has been underestimated, and vice versa. +Calibration is the conditional probability of an event given each probability forecast category (i.e. each row in the **nx2** contingency table). This set of measures is paired with refinement in the calibration-refinement factorization discussed in :ref:`Wilks, 2011 `. A well-calibrated forecast will have calibration values that are near the forecast probability. For example, a 50% probability of precipitation should ideally have a calibration value of 0.5. If the calibration value is higher, then the probability has been underestimated, and vice versa. .. math:: \text{Calibration}(i) = \frac{n_{i1}}{n_{1.}} = \text{probability}(o_1|p_i) @@ -903,7 +903,7 @@ Refinement Called "REFINEMENT" in PJC output :numref:`table_PS_format_info_PJC` -The relative frequency associated with each forecast probability, sometimes called the marginal distribution or row probability. This measure ignores the event outcome, and simply provides information about the frequency of forecasts for each probability category. This set of measures is paired with the calibration measures in the calibration-refinement factorization discussed by :ref:`Wilks (2011) `. +The relative frequency associated with each forecast probability, sometimes called the marginal distribution or row probability. 
This measure ignores the event outcome, and simply provides information about the frequency of forecasts for each probability category. This set of measures is paired with the calibration measures in the calibration-refinement factorization discussed by :ref:`Wilks, 2011 `. .. math:: \text{Refinement}(i) = \frac{n_{i.}}{T} = \text{probability}(p_i) @@ -912,7 +912,7 @@ Likelihood Called "LIKELIHOOD" in PJC output :numref:`table_PS_format_info_PJC` -Likelihood is the conditional probability for each forecast category (row) given an event and a component of the likelihood-base rate factorization; see :ref:`Wilks (2011) ` for details. This set of measures considers the distribution of forecasts for only the cases when events occur. Thus, as the forecast probability increases, so should the likelihood. For example, 10% probability of precipitation forecasts should have a much smaller likelihood value than 90% probability of precipitation forecasts. +Likelihood is the conditional probability for each forecast category (row) given an event and a component of the likelihood-base rate factorization; see :ref:`Wilks, 2011 ` for details. This set of measures considers the distribution of forecasts for only the cases when events occur. Thus, as the forecast probability increases, so should the likelihood. For example, 10% probability of precipitation forecasts should have a much smaller likelihood value than 90% probability of precipitation forecasts. .. math:: \text{Likelihood}(i) = \frac{n_{i1}}{n_{.1}} = \text{probability}(p_i|o_1) @@ -923,7 +923,7 @@ Base Rate Called "BASER" in PJC output :numref:`table_PS_format_info_PJC` -This is the probability of an event for each forecast category :math:`p_i` (row), i.e. the conditional base rate. This set of measures is paired with likelihood in the likelihood-base rate factorization, see :ref:`Wilks (2011) ` for further information. This measure is calculated for each row of the contingency table. Ideally, the event should become more frequent as the probability forecast increases. +This is the probability of an event for each forecast category :math:`p_i` (row), i.e. the conditional base rate. This set of measures is paired with likelihood in the likelihood-base rate factorization; see :ref:`Wilks, 2011 ` for further information. This measure is calculated for each row of the contingency table. Ideally, the event should become more frequent as the probability forecast increases. .. math:: \text{Base Rate}(i) = \frac{n_{i1}}{n_{i.}} = \text{probability}(o_{i1}) @@ -976,18 +976,18 @@ RPS Called "RPS" in RPS output :numref:`table_ES_header_info_es_out_ECNT` -While the above probabilistic verification measures utilize dichotomous observations, the Ranked Probability Score (RPS, :ref:`Epstein, 1969 `, :ref:`Murphy, 1969 `) is the only probabilistic verification measure for discrete multiple-category events available in MET. It is assumed that the categories are ordinal as nominal categorical variables can be collapsed into sequences of binary predictands, which can in turn be evaluated with the above measures for dichotomous variables (:ref:`Wilks, 2011 `). The RPS is the multi-category extension of the Brier score (:ref:`Tödter and Ahrens, 2012`), and is a proper score (:ref:`Mason, 2008`). +While the above probabilistic verification measures utilize dichotomous observations, the Ranked Probability Score (RPS, :ref:`Epstein, 1969 `, :ref:`Murphy, 1969 `) is the only probabilistic verification measure for discrete multiple-category events available in MET.
It is assumed that the categories are ordinal, as nominal categorical variables can be collapsed into sequences of binary predictands, which can in turn be evaluated with the above measures for dichotomous variables (:ref:`Wilks, 2011 `). The RPS is the multi-category extension of the Brier score (:ref:`Tödter and Ahrens, 2012 `), and is a proper score (:ref:`Mason, 2008 `). Let :math:`\text{J}` be the number of categories, then both the forecast, :math:`\text{f} = (f_1,…,f_J)`, and observation, :math:`\text{o} = (o_1,…,o_J)`, are length-:math:`\text{J}` vectors, where the components of :math:`\text{f}` include the probabilities forecast for each category :math:`\text{1,…,J}` and :math:`\text{o}` contains 1 in the category that is realized and zero everywhere else. The cumulative forecasts, :math:`F_m`, and observations, :math:`O_m`, are defined to be: :math:`F_m = \sum_{j=1}^m (f_j)` and :math:`O_m = \sum_{j=1}^m (o_j), m = 1,…,J`. -To clarify, :math:`F_1 = f_1` is the first component of :math:`F_m`, :math:`F_2 = f_1+f_2`, etc., and :math:`F_J = 1`. Similarly, if :math:`o_j = 1` and :math:`i < j`, then :math:`O_i = 0` and when :math:`i >= j`, :math:`O_i = 1`, and of course, :math:`O_J = 1`. Finally, the RPS is defined to be: +To clarify, :math:`F_1 = f_1` is the first component of :math:`F_m`, :math:`F_2 = f_1+f_2`, etc., and :math:`F_J = 1`. Similarly, if :math:`o_j = 1` and :math:`i < j`, then :math:`O_i = 0` and when :math:`i >= j`, :math:`O_i = 1`, and of course, :math:`O_J = 1`. Finally, the RPS is defined to be: .. math:: \text{RPS} = \sum_{m=1}^J (F_m - O_m)^2 = \sum_{m=1}^J BS_m, -where :math:`BS_m` is the Brier score for the m-th category (:ref:`Tödter and Ahrens, 2012`). Subsequently, the RPS lends itself to a decomposition into reliability, resolution and uncertainty components, noting that each component is aggregated over the different categories; these are written to the columns named "RPS_REL", "RPS_RES" and "RPS_UNC" in RPS output :numref:`table_ES_header_info_es_out_ECNT`. +where :math:`BS_m` is the Brier score for the m-th category (:ref:`Tödter and Ahrens, 2012 `). For example, with :math:`\text{J} = 3`, a forecast :math:`\text{f} = (0.2, 0.5, 0.3)`, and the event realized in category 2, then :math:`F = (0.2, 0.7, 1)`, :math:`O = (0, 1, 1)`, and :math:`\text{RPS} = 0.2^2 + (-0.3)^2 + 0^2 = 0.13`. Subsequently, the RPS lends itself to a decomposition into reliability, resolution and uncertainty components, noting that each component is aggregated over the different categories; these are written to the columns named "RPS_REL", "RPS_RES" and "RPS_UNC" in RPS output :numref:`table_ES_header_info_es_out_ECNT`. CRPS ---- @@ -1066,7 +1066,7 @@ The continuous ranked probability skill score (CRPSS) is similar to the MSESS an .. math:: \text{CRPSS} = 1 - \frac{\text{CRPS}_{fcst}}{ \text{CRPS}_{ref}} -For the normal distribution fit (CRPSS), the reference CRPS is computed using the climatological mean and standard deviation. For the empirical distribution (CRPSS_EMP), the reference CRPS is computed by sampling from the assumed normal climatological distribution defined by the mean and standard deviation. +For the normal distribution fit (CRPSS), the reference CRPS is computed using the climatological mean and standard deviation. For the empirical distribution (CRPSS_EMP), the reference CRPS is computed by sampling from the assumed normal climatological distribution defined by the mean and standard deviation. Bias Ratio ---------- @@ -1105,6 +1105,37 @@ Called "PIT" in ORANK output :numref:`table_ES_header_info_es_out_ORANK` The probability integral transform (PIT) is the analog of the rank histogram for a probability distribution forecast (:ref:`Dawid, 1984 `).
Its interpretation is the same as that of the verification rank histogram: Calibrated probabilistic forecasts yield PIT histograms that are flat, or uniform. Under-dispersed (not enough spread in the ensemble) forecasts have U-shaped PIT histograms while over-dispersed forecasts have bell-shaped histograms. In MET, the PIT calculation uses a normal distribution fit to the ensemble forecasts. In many cases, use of other distributions would be better. +Observation Error Logarithmic Scoring Rules +------------------------------------------- + +Called "IGN_CONV_OERR" and "IGN_CORR_OERR" in ECNT output :numref:`table_ES_header_info_es_out_ECNT` + +One approach to taking observation error into account in a summary measure is to add error to the forecast by a convolution with the observation model (e.g., :ref:`Anderson, 1996 `; :ref:`Hamill, 2001 `; :ref:`Saetra et al., 2004 `; :ref:`Bröcker and Smith, 2007 `; :ref:`Candille et al., 2007 `; :ref:`Candille and Talagrand, 2008 `; :ref:`Röpnack et al., 2013 `). Specifically, suppose :math:`y=x+w`, where :math:`y` is the observed value, :math:`x` is the true value, and :math:`w` is the error. Then, if :math:`f` is the density forecast for :math:`x` and :math:`\nu` is the observation model, the implied density forecast for :math:`y` is given by the convolution: + +.. math:: (f*\nu)(y) = \int\nu(y|x)f(x)dx + +For Gaussian forecasts and normal errors, this convolution is itself Gaussian: if :math:`f` is :math:`N(\mu,\sigma^2)` and :math:`w\sim N(0,c^2)`, then :math:`f*\nu` is :math:`N(\mu,\sigma^2+c^2)`, which leads directly to the closed form below. :ref:`Ferro, 2017 ` gives the error-convolved version of the ignorance scoring rule (referred to therein as the error-convolved logarithmic scoring rule), which is proper under the model where :math:`w\sim N(0,c^2)` when the forecast for :math:`x` is :math:`N(\mu,\sigma^2)` with density function :math:`f`, as + +.. only:: latex + + .. math:: \text{IGN\_CONV\_OERR} = s(f,y) = \frac{1}{2}\log(2 \pi (\sigma^2 + c^2)) + \frac{(y - \mu)^2}{2 (\sigma^2 + c^2)} + +.. only:: html + + .. math:: \text{IGN_CONV_OERR} = s(f,y) = \frac{1}{2}\log(2 \pi (\sigma^2 + c^2)) + \frac{(y - \mu)^2}{2 (\sigma^2 + c^2)} + +Another approach to incorporating observation uncertainty into a measure is the error-correction approach, which ensures that the scoring rule, :math:`s`, is unbiased for a scoring rule :math:`s_0`, meaning that the two have the same expected value. :ref:`Ferro, 2017 ` gives the error-corrected ignorance scoring rule (which is also proper when :math:`w\sim N(0,c^2)`) as + +.. only:: latex + + .. math:: \text{IGN\_CORR\_OERR} = s(f,y) = \log\sigma + \frac{(y - \mu)^2 - c^2}{2\sigma^2} + +.. only:: html + + .. math:: \text{IGN_CORR_OERR} = s(f,y) = \log\sigma + \frac{(y - \mu)^2 - c^2}{2\sigma^2} + +The expected score for the error-convolved ignorance scoring rule typically differs from the expected score that would be achieved if there were no observation error. The error-corrected score, on the other hand, has the same expectation. + RANK ---- @@ -1160,7 +1191,7 @@ The traditional contingency table statistics computed by the Grid-Stat neighborh All of these measures are defined in :numref:`categorical variables`. -In addition to these standard statistics, the neighborhood analysis provides additional continuous measures, the Fractions Brier Score and the Fractions Skill Score. For reference, the Asymptotic Fractions Skill Score and Uniform Fractions Skill Score are also calculated. These measures are defined here, but are explained in much greater detail in :ref:`Ebert (2008) ` and :ref:`Roberts and Lean (2008) `. :ref:`Roberts and Lean (2008) ` also present an application of the methodology.
+In addition to these standard statistics, the neighborhood analysis provides additional continuous measures, the Fractions Brier Score and the Fractions Skill Score. For reference, the Asymptotic Fractions Skill Score and Uniform Fractions Skill Score are also calculated. These measures are defined here, but are explained in much greater detail in :ref:`Ebert, 2008 ` and :ref:`Roberts and Lean, 2008 `. :ref:`Roberts and Lean, 2008 ` also present an application of the methodology. Fractions Brier Score --------------------- @@ -1225,7 +1256,7 @@ A mathematical metric, :math:`m(A,B)\geq 0`, must have the following three prope The first establishes that a perfect score is zero and that the only way to obtain a perfect score is if the two sets are identical according to the metric. The second requirement ensures that the order by which the two sets are evaluated will not change the result. The third property ensures that if *C* is closer to *A* than *B* is to *A*, then :math:`m(A,C) < m(A,B)`. -It has been argued in :ref:`Gilleland (2017) ` that the second property of symmetry is not necessarily an important quality to have for a summary measure for verification purposes because lack of symmetry allows for information about false alarms and misses. +It has been argued in :ref:`Gilleland, 2017 ` that the second property of symmetry is not necessarily an important quality to have for a summary measure for verification purposes because lack of symmetry allows for information about false alarms and misses. The results of the distance map verification approaches that are included in the Grid-Stat tool are summarized using a variety of measures. These measures include Baddeley's :math:`\Delta` Metric, the Hausdorff Distance, the Mean-error Distance, Pratt's Figure of Merit, and Zhu's Measure. Their equations are listed below. diff --git a/docs/Users_Guide/ensemble-stat.rst b/docs/Users_Guide/ensemble-stat.rst index 42443aa315..7eba9f1aa9 100644 --- a/docs/Users_Guide/ensemble-stat.rst +++ b/docs/Users_Guide/ensemble-stat.rst @@ -66,7 +66,9 @@ The climatological distribution is also used for the RPSS. The forecast RPS stat Ensemble Observation Error -------------------------- -In an attempt to ameliorate the effect of observation errors on the verification of forecasts, a random perturbation approach has been implemented. A great deal of user flexibility has been built in, but the methods detailed in :ref:`Candille and Talagrand (2008) `. can be replicated using the appropriate options. The user selects a distribution for the observation error, along with parameters for that distribution. Rescaling and bias correction can also be specified prior to the perturbation. Random draws from the distribution can then be added to either, or both, of the forecast and observed fields, including ensemble members. Details about the effects of the choices on verification statistics should be considered, with many details provided in the literature (*e.g.* :ref:`Candille and Talagrand, 2008 `; :ref:`Saetra et al., 2004 `; :ref:`Santos and Ghelli, 2012 `). Generally, perturbation makes verification statistics better when applied to ensemble members, and worse when applied to the observations themselves. +In an attempt to ameliorate the effect of observation errors on the verification of forecasts, a random perturbation approach has been implemented. A great deal of user flexibility has been built in, but the methods detailed in :ref:`Candille and Talagrand (2008) ` can be replicated using the appropriate options. Additional probabilistic measures recommended by :ref:`Ferro, 2017 `, which incorporate observational uncertainty, are also provided. + +Observation error information can be defined directly in the Ensemble-Stat configuration file or through a more flexible observation error table lookup. The user selects a distribution for the observation error, along with parameters for that distribution. Rescaling and bias correction can also be specified prior to the perturbation. Random draws from the distribution can then be added to either, or both, of the forecast and observed fields, including ensemble members. Details about the effects of the choices on verification statistics should be considered, with many details provided in the literature (*e.g.* :ref:`Candille and Talagrand, 2008 `; :ref:`Saetra et al., 2004 `; :ref:`Santos and Ghelli, 2012 `). Generally, perturbation makes verification statistics better when applied to ensemble members, and worse when applied to the observations themselves.
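[Editor's note: for illustration only, and not part of this patch, a minimal obs_error configuration for a normal error distribution with a standard deviation of 0.5 might look like the sketch below; the option names should be confirmed against the default Ensemble-Stat configuration file and the observation error table documentation::

   obs_error = {
      flag             = TRUE;      // enable observation error handling
      dist_type        = NORMAL;    // error distribution type
      dist_parm        = [ 0.5 ];   // distribution parameter(s), here sigma
      inst_bias_scale  = 1.0;       // multiplicative bias correction
      inst_bias_offset = 0.0;       // additive bias correction
      min              = NA;        // optional lower bound for perturbed values
      max              = NA;        // optional upper bound for perturbed values
   }

With this setup, the observation error variance used by the new logarithmic scores would be :math:`c^2 = 0.5^2 = 0.25`.]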
Normal and uniform are common choices for the observation error distribution. The uniform distribution provides the benefit of being bounded on both sides, thus preventing the perturbation from taking on extreme values. Normal is the most common choice for observation error. However, the user should realize that with the very large samples typical in NWP, some large outliers will almost certainly be introduced with the perturbation. For variables that are bounded below by 0, and that may have inconsistent observation errors (e.g. larger errors with larger measurements), a lognormal distribution may be selected. Wind speeds and precipitation measurements are the most common of this type of NWP variable. The lognormal error perturbation prevents measurements of 0 from being perturbed, and applies larger perturbations when measurements are larger. This is often the desired behavior in these cases, but this distribution can also lead to some outliers being introduced in the perturbation step. @@ -647,6 +649,12 @@ The format of the STAT and ASCII output of the Ensemble-Stat tool are described * - 49 - ME_LT_OBS - The Mean Error of the ensemble values less than or equal to their observations + * - 50 + - IGN_CONV_OERR + - Error-convolved logarithmic scoring rule (i.e. ignorance score) from Equation 5 of :ref:`Ferro, 2017 ` + * - 51 + - IGN_CORR_OERR + - Error-corrected logarithmic scoring rule (i.e. ignorance score) from Equation 7 of :ref:`Ferro, 2017 ` .. _table_ES_header_info_es_out_RPS: diff --git a/docs/Users_Guide/refs.rst b/docs/Users_Guide/refs.rst index b4045f90b6..1c328014cb 100644 --- a/docs/Users_Guide/refs.rst +++ b/docs/Users_Guide/refs.rst @@ -14,11 +14,18 @@ References | Ahijevych, D., E. Gilleland, B.G. Brown, and E.E. Ebert, 2009: Application of | spatial verification methods to idealized and NWP-gridded precipitation forecasts. -| *Weather and Forecasting*, 24 (6), 1485 - 1497, doi: 10.1175/2009WAF2222298.1. +| *Weather and Forecasting*, 24 (6), 1485 - 1497. +| doi: https://doi.org/10.1175/2009WAF2222298.1 | -.. _Barker-1991: +.. _Anderson-1996: + +| Anderson, J.L., 1996: A method for producing and evaluating probabilistic forecasts +| from ensemble model integrations. *Journal of Climate*, 9, 1518-1530. +| doi: https://doi.org/10.1175/1520-0442(1996)009<1518:AMFPAE>2.0.CO;2 +| +.. _Barker-1991: | Barker, T. W., 1991: The relationship between spread and forecast error in | extended-range forecasts. *Journal of Climate*, 4, 733-742. @@ -29,14 +36,14 @@ References | Bradley, A.A., S.S.
Schwartz, and T. Hashino, 2008: Sampling Uncertainty | and Confidence Intervals for the Brier Score and Brier Skill Score. | *Weather and Forecasting*, 23, 992-1006. -| +| .. _Brill-2009: | Brill, K. F., and F. Mesinger, 2009: Applying a general analytic method | for assessing bias sensitivity to bias-adjusted threat and equitable | threat scores. *Weather and Forecasting*, 24, 1748-1754. -| +| .. _Brown-2007: @@ -49,32 +56,47 @@ References | http://ams.confex.com/ams/pdfpapers/124856.pdf. | +.. _Bröcker-2007: + +| Bröcker, J., and L.A. Smith, 2007: Scoring probabilistic forecasts: The importance +| of being proper. *Weather and Forecasting*, 22, 382-388. +| doi: https://doi.org/10.1175/WAF966.1 +| + .. _Buizza-1997: | Buizza, R., 1997: Potential forecast skill of ensemble prediction and spread | and skill distributions of the ECMWF ensemble prediction system. *Monthly* -| *Weather Review*,125, 99-119. -| +| *Weather Review*, 125, 99-119. +| .. _Bullock-2016: | Bullock, R., T. Fowler, and B. Brown, 2016: Method for Object-Based | Diagnostic Evaluation. *NCAR Technical Note* NCAR/TN-532+STR, 66 pp. -| +| + +.. _Candille-2007: + +| Candille, G., C. Côté, P.L. Houtekamer, and G. Pellerin, 2007: Verification of an +| ensemble prediction system against observations. *Monthly Weather Review*, +| 135, 2688-2699. +| doi: https://doi.org/10.1175/MWR3414.1 +| .. _Candille-2008: | Candille, G., and O. Talagrand, 2008: Impact of observational error on the | validation of ensemble prediction systems. *Quarterly Journal of the Royal* | *Meteorological Society* 134: 959-971. -| +| .. _Casati-2004: | Casati, B., G. Ross, and D. Stephenson, 2004: A new intensity-scale approach | for the verification of spatial precipitation forecasts. *Meteorological* | *Applications* 11, 141-154. -| +| .. _Davis-2006: @@ -86,37 +108,45 @@ References | Davis, C.A., B.G. Brown, and R.G. Bullock, 2006b: Object-based verification | of precipitation forecasts, Part II: Application to convective rain systems. | *Monthly Weather Review*, 134, 1785-1795. -| +| .. _Dawid-1984: | Dawid, A.P., 1984: Statistical theory: The prequential approach. *Journal of* | *the Royal Statistical Society* A147, 278-292. -| +| .. _Ebert-2008: | Ebert, E.E., 2008: Fuzzy verification of high-resolution gridded forecasts: -| a review and proposed framework. *Meteorological Applications,* 15, 51-64. -| +| a review and proposed framework. *Meteorological Applications*, 15, 51-64. +| .. _Eckel-2012: | Eckel, F. A., M.S. Allen, M. C. Sittel, 2012: Estimation of Ambiguity in -| Ensemble Forecasts. *Weather Forecasting,* 27, 50-69. -| doi: http://dx.doi.org/10.1175/WAF-D-11-00015.1 +| Ensemble Forecasts. *Weather Forecasting*, 27, 50-69. +| doi: https://doi.org/10.1175/WAF-D-11-00015.1 | .. _Efron-2007: | Efron, B. 2007: Correlation and large-scale significance testing. *Journal* -| of the American Statistical Association,* 102(477), 93-103. +| *of the American Statistical Association*, 102(477), 93-103. | .. _Epstein-1969: | Epstein, E. S., 1969: A scoring system for probability forecasts of ranked categories. -| *J. Appl. Meteor.*, 8, 985-987, 10.1175/1520-0450(1969)008<0985:ASSFPF>2.0.CO;2. +| *J. Appl. Meteor.*, 8, 985-987. +| doi: https://doi.org/10.1175/1520-0450(1969)008<0985:ASSFPF>2.0.CO;2 +| + +.. _Ferro-2017: + +| Ferro, C.A.T., 2017: Measuring forecast performance in the presence of observation error. +| *Quarterly Journal of the Royal Meteorological Society*, 143 (708), 2665-2676. +| doi: https://doi.org/10.1002/qj.3115 | ..
_Gilleland-2010: @@ -129,29 +159,32 @@ References | Gilleland, E., 2017: A new characterization in the spatial verification | framework for false alarms, misses, and overall patterns. -| *Weather and Forecasting*, 32 (1), 187 - 198, doi: 10.1175/WAF-D-16-0134.1. +| *Weather and Forecasting*, 32 (1), 187 - 198. +| doi: https://doi.org/10.1175/WAF-D-16-0134.1 | .. _Gilleland_PartI-2020: | Gilleland, E., 2020: Bootstrap methods for statistical inference. | Part I: Comparative forecast verification for continuous variables. -| *Journal of Atmospheric and Oceanic Technology*, 37 (11), 2117 - 2134, -| doi: 10.1175/JTECH-D-20-0069.1. +| *Journal of Atmospheric and Oceanic Technology*, 37 (11), 2117 - 2134. +| doi: https://doi.org/10.1175/JTECH-D-20-0069.1 | .. _Gilleland_PartII-2020: | Gilleland, E., 2020: Bootstrap methods for statistical inference. | Part II: Extreme-value analysis. *Journal of Atmospheric and Oceanic* -| *Technology*, 37 (11), 2135 - 2144, doi: 10.1175/JTECH-D-20-0070.1. +| *Technology*, 37 (11), 2135 - 2144. +| doi: https://doi.org/10.1175/JTECH-D-20-0070.1 | .. _Gilleland-2021: | Gilleland, E., 2021: Novel measures for summarizing high-resolution forecast | performance. *Advances in Statistical Climatology, Meteorology and Oceanography*, -| 7 (1), 13 - 34, doi: 10.5194/ascmo-7-13-2021. +| 7 (1), 13 - 34. +| doi: https://doi.org/10.5194/ascmo-7-13-2021 | .. _Gneiting-2004: @@ -161,7 +194,7 @@ References | *Minimum CRPS Estimation*. Technical Report no. 449, Department of | Statistics, University of Washington. Available at | http://www.stat.washington.edu/www/research/reports/ -| +| .. _Haiden-2012: @@ -175,41 +208,41 @@ References | Hamill, T. M., 2001: Interpretation of rank histograms for verifying ensemble | forecasts. *Monthly Weather Review*, 129, 550-560. -| +| .. _Hersbach-2000: | Hersbach, H., 2000: Decomposition of the Continuous Ranked Probability Score | for Ensemble Prediction Systems. *Weather and Forecasting*, 15, 559-570. -| +| .. _Jolliffe-2012: | Jolliffe, I.T., and D.B. Stephenson, 2012: *Forecast verification. A* | *practitioner's guide in atmospheric science.* Wiley and Sons Ltd, 240 pp. -| +| .. _Knaff-2003: | Knaff, J.A., M. DeMaria, C.R. Sampson, and J.M. Gross, 2003: Statistical, | Five-Day Tropical Cyclone Intensity Forecasts Derived from Climatology -| and Persistence. *Weather and Forecasting,* Vol. 18 Issue 2, p. 80-92. -| +| and Persistence. *Weather and Forecasting*, Vol. 18 Issue 2, p. 80-92. +| .. _Mason-2004: | Mason, S. J., 2004: On Using "Climatology" as a Reference Strategy | in the Brier and Ranked Probability Skill Scores. *Monthly Weather Review*, | 132, 1891-1895. -| +| .. _Mason-2008: | Mason, S. J., 2008: Understanding forecast verification statistics. -| *Meteor. Appl.*, 15, 31-40, doi: 10.1002/met.51. +| *Meteor. Appl.*, 15, 31-40. +| doi: https://doi.org/10.1002/met.51 | - .. _Mittermaier-2014: | Mittermaier, M., 2014: A strategy for verifying near-convection-resolving @@ -220,21 +253,20 @@ References | Mood, A. M., F. A. Graybill and D. C. Boes, 1974: *Introduction to the* | *Theory of Statistics*, McGraw-Hill, 299-338. -| +| .. _Murphy-1969: | Murphy, A.H., 1969: On the ranked probability score. *Journal of Applied* | *Meteorology and Climatology*, 8 (6), 988 - 989, -| doi: 10.1175/1520-0450(1969)008<0988:OTPS>2.0.CO;2. +| doi: https://doi.org/10.1175/1520-0450(1969)008<0988:OTPS>2.0.CO;2 | .. _Murphy-1987: | Murphy, A.H., and R.L. Winkler, 1987: A general framework for forecast | verification.
*Monthly Weather Review*, 115, 1330-1338. -| - +| .. _North-2022: @@ -256,7 +288,7 @@ References | Roberts, N.M., and H.W. Lean, 2008: Scale-selective verification of rainfall | accumulations from high-resolution forecasts of convective events. | *Monthly Weather Review*, 136, 78-97. -| +| .. _Rodwell-2010: @@ -273,11 +305,18 @@ References | https://www.ecmwf.int/node/14595 | +.. _Röpnack-2013: + +| Röpnack, A., A. Hense, C. Gebhardt, and D. Majewski, 2013: Bayesian model verification +| of NWP ensemble forecasts. *Monthly Weather Review*, 141, 375-387. +| doi: https://doi.org/10.1175/MWR-D-11-00350.1 +| + .. _Saetra-2004: -| Saetra O., H. Hersbach, J-R Bidlot, D. Richardson, 2004: Effects of +| Saetra, Ø., H. Hersbach, J.-R. Bidlot, and D. Richardson, 2004: Effects of | observation errors on the statistics for ensemble spread and -| reliability. *Monthly Weather Review* 132: 1487-1501. +| reliability. *Monthly Weather Review*, 132, 1487-1501. | .. _Santos-2012: @@ -285,7 +324,7 @@ References | Santos C. and A. Ghelli, 2012: Observational probability method to assess | ensemble precipitation forecasts. *Quarterly Journal of the Royal* | *Meteorological Society* 138: 209-221. -| +| .. _Schwartz-2017: @@ -298,20 +337,21 @@ References | Stephenson, D.B., 2000: Use of the "Odds Ratio" for diagnosing | forecast skill. *Weather and Forecasting*, 15, 221-232. -| +| .. _Stephenson-2008: | Stephenson, D.B., B. Casati, C.A.T. Ferro, and C.A. Wilson, 2008: The extreme | dependency score: A non-vanishing measure for forecasts of rare events. | *Meteorological Applications* 15, 41-50. -| +| -.. _Todter-2012: +.. _Tödter-2012: | Tödter, J. and B. Ahrens, 2012: Generalization of the Ignorance Score: | Continuous ranked version and its decomposition. *Monthly Weather Review*, -| 140 (6), 2005 - 2017, doi: 10.1175/MWR-D-11-00266.1. +| 140 (6), 2005 - 2017. +| doi: https://doi.org/10.1175/MWR-D-11-00266.1 | .. _Weniger-2016: | Weniger, M., F. Kapp, and P. Friederichs, 2016: Spatial Verification Using | Wavelet Transforms: A Review. *Quarterly Journal of the Royal* | *Meteorological Society*, 143, 120-136. -| +| .. _Wilks-2010: | Wilks, D.S. 2010: Sampling distributions of the Brier score and Brier skill | score under serial dependence. *Quarterly Journal of the Royal* -| *Meteorological Society*, 136, 2109-2118. doi:10.1002/qj.709 -| +| *Meteorological Society*, 136, 2109-2118. +| doi: https://doi.org/10.1002/qj.709 +| .. _Wilks-2011: | Wilks, D., 2011: *Statistical methods in the atmospheric sciences.* | Elsevier, San Diego.
-| +| diff --git a/internal/test_unit/hdr/met_12_0.hdr b/internal/test_unit/hdr/met_12_0.hdr index d6bf9fb0b9..a1113d5102 100644 --- a/internal/test_unit/hdr/met_12_0.hdr +++ b/internal/test_unit/hdr/met_12_0.hdr @@ -19,7 +19,7 @@ PJC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_L PRC : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_THRESH _VAR_ PSTD : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_THRESH BASER BASER_NCL BASER_NCU RELIABILITY RESOLUTION UNCERTAINTY ROC_AUC BRIER BRIER_NCL BRIER_NCU BRIERCL BRIERCL_NCL BRIERCL_NCU BSS BSS_SMPL _VAR_ ECLV : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL BASE N_PTS _VAR_ -ECNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_ENS CRPS CRPSS IGN ME RMSE SPREAD ME_OERR RMSE_OERR SPREAD_OERR SPREAD_PLUS_OERR CRPSCL CRPS_EMP CRPSCL_EMP CRPSS_EMP CRPS_EMP_FAIR SPREAD_MD MAE MAE_OERR BIAS_RATIO N_GE_OBS ME_GE_OBS N_LT_OBS ME_LT_OBS +ECNT : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_ENS CRPS CRPSS IGN ME RMSE SPREAD ME_OERR RMSE_OERR SPREAD_OERR SPREAD_PLUS_OERR CRPSCL CRPS_EMP CRPSCL_EMP CRPSS_EMP CRPS_EMP_FAIR SPREAD_MD MAE MAE_OERR BIAS_RATIO N_GE_OBS ME_GE_OBS N_LT_OBS ME_LT_OBS IGN_CONV_OERR IGN_CORR_OERR RPS : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL N_PROB RPS_REL RPS_RES RPS_UNC RPS RPSS RPSS_SMPL RPS_COMP RHIST : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL CRPS IGN N_RANK CRPSS SPREAD _VAR_ PHIST : VERSION MODEL DESC FCST_LEAD FCST_VALID_BEG FCST_VALID_END OBS_LEAD OBS_VALID_BEG OBS_VALID_END FCST_VAR FCST_UNITS FCST_LEV OBS_VAR OBS_UNITS OBS_LEV OBTYPE VX_MASK INTERP_MTHD INTERP_PNTS FCST_THRESH OBS_THRESH COV_THRESH ALPHA LINE_TYPE TOTAL BIN_SIZE N_BIN _VAR_ diff --git a/src/basic/vx_util/num_array.cc b/src/basic/vx_util/num_array.cc index 20632a8978..ddcd9ba45b 100644 --- a/src/basic/vx_util/num_array.cc +++ b/src/basic/vx_util/num_array.cc @@ -1077,7 +1077,9 @@ NumArray NumArray::subset(const NumArray &keep) const // Check bounds if ( keep.n_elements() != n_elements() ) { mlog << Error << "\nNumArray::subset(const NumArray &) -> " - << "the number of elements do not match\n\n"; + << "the number of elements do not match (" + << 
keep.n_elements() << " keep flags != " + << n_elements() << " array elements)\n\n"; exit ( 1 ); } @@ -1158,7 +1160,9 @@ double NumArray::wmean(const NumArray &wgt) const if ( wgt.n_elements() != n_elements() ) { mlog << Error << "\nNumArray::wmean(const NumArray &) -> " - << "the number of elements do not match\n\n"; + << "the number of elements do not match (" + << wgt.n_elements() << " weights != " + << n_elements() << " array elements)\n\n"; exit ( 1 ); } diff --git a/src/basic/vx_util/stat_column_defs.h b/src/basic/vx_util/stat_column_defs.h index 909441b5ba..826d5101c9 100644 --- a/src/basic/vx_util/stat_column_defs.h +++ b/src/basic/vx_util/stat_column_defs.h @@ -276,7 +276,7 @@ static const char * ecnt_columns [] = { "CRPSS_EMP", "CRPS_EMP_FAIR", "SPREAD_MD", "MAE", "MAE_OERR", "BIAS_RATIO", "N_GE_OBS", "ME_GE_OBS", "N_LT_OBS", - "ME_LT_OBS" + "ME_LT_OBS", "IGN_CONV_OERR", "IGN_CORR_OERR" }; static const char * rps_columns [] = { diff --git a/src/libcode/vx_gsl_prob/gsl_randist.h b/src/libcode/vx_gsl_prob/gsl_randist.h index d562bfe733..e66c312230 100644 --- a/src/libcode/vx_gsl_prob/gsl_randist.h +++ b/src/libcode/vx_gsl_prob/gsl_randist.h @@ -58,9 +58,9 @@ extern void ran_sample(const gsl_rng *r, double *, int, double *, int); extern void ran_sample(const gsl_rng *r, NumArray &, NumArray &, int); extern double ran_draw(const gsl_rng *r, DistType, - double p1, double p2 = bad_data_int); + double p1, double p2 = bad_data_double); extern double dist_var(DistType, - double p1, double p2 = bad_data_int); + double p1, double p2 = bad_data_double); //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_stat_out/stat_columns.cc b/src/libcode/vx_stat_out/stat_columns.cc index 034c3c1961..3e89a1ea46 100644 --- a/src/libcode/vx_stat_out/stat_columns.cc +++ b/src/libcode/vx_stat_out/stat_columns.cc @@ -4275,7 +4275,7 @@ void write_ecnt_cols(const ECNTInfo &ecnt_info, // CRPSS_EMP CRPS_EMP_FAIR, SPREAD_MD, // MAE, MAE_OERR, BIAS_RATIO, // N_GE_OBS, ME_GE_OBS, N_LT_OBS, - // ME_LT_OBS + // ME_LT_OBS, IGN_CONV_OERR, IGN_CORR_OERR // at.set_entry(r, c+0, // Total Number of Pairs ecnt_info.n_pair); @@ -4352,6 +4352,12 @@ void write_ecnt_cols(const ECNTInfo &ecnt_info, at.set_entry(r, c+24, // ME of ensemble values < observations ecnt_info.me_lt_obs); + at.set_entry(r, c+25, // Ignorance Score, observation error convolved + ecnt_info.ign_conv_oerr); + + at.set_entry(r, c+26, // Ignorance Score, observation error corrected + ecnt_info.ign_corr_oerr); + return; } diff --git a/src/libcode/vx_statistics/ens_stats.cc b/src/libcode/vx_statistics/ens_stats.cc index 4aae9e7471..29b3a6db54 100644 --- a/src/libcode/vx_statistics/ens_stats.cc +++ b/src/libcode/vx_statistics/ens_stats.cc @@ -181,13 +181,15 @@ void ECNTInfo::clear() { crps_emp_fair = spread_md = bad_data_double; crps_gaus = crpscl_gaus = crpss_gaus = bad_data_double; ign = bad_data_double; - me = mae = rmse = spread = bad_data_double; - me_oerr = mae_oerr = rmse_oerr = spread_oerr = bad_data_double; + me = mae = rmse = spread = bad_data_double; + me_oerr = mae_oerr = rmse_oerr = spread_oerr = bad_data_double; spread_plus_oerr = bad_data_double; - n_ge_obs = n_lt_obs = 0; - me_ge_obs = me_lt_obs = bias_ratio = bad_data_double; - + ign_conv_oerr = ign_corr_oerr = bad_data_double; + + n_ge_obs = n_lt_obs = 0; + me_ge_obs = me_lt_obs = bias_ratio = bad_data_double; + return; } @@ -221,6 +223,8 @@ void ECNTInfo::assign(const ECNTInfo &c) { rmse_oerr = c.rmse_oerr; spread_oerr = c.spread_oerr; 
spread_plus_oerr = c.spread_plus_oerr; + ign_conv_oerr = c.ign_conv_oerr; + ign_corr_oerr = c.ign_corr_oerr; n_ge_obs = c.n_ge_obs; n_lt_obs = c.n_lt_obs; @@ -361,6 +365,10 @@ void ECNTInfo::set(const PairDataEnsemble &pd) { // Compute the square root of the average variance plus oerr spread_plus_oerr = square_root(pd.var_plus_oerr_na.wmean(pd.wgt_na)); + // Compute log scores with observational uncertainty + ign_conv_oerr = pd.ign_conv_oerr_na.wmean(pd.wgt_na); + ign_corr_oerr = pd.ign_corr_oerr_na.wmean(pd.wgt_na); + // Compute bias ratio terms n_ge_obs = nint(pd.n_ge_obs_na.sum()); me_ge_obs = pd.me_ge_obs_na.wmean(pd.n_ge_obs_na); diff --git a/src/libcode/vx_statistics/ens_stats.h b/src/libcode/vx_statistics/ens_stats.h index 17e864fb5e..69eca9f4e1 100644 --- a/src/libcode/vx_statistics/ens_stats.h +++ b/src/libcode/vx_statistics/ens_stats.h @@ -82,6 +82,10 @@ class ECNTInfo { double me_oerr, mae_oerr, rmse_oerr, spread_oerr; double spread_plus_oerr; + // Log scores that incorporate observational uncertainty, + // as advised in Ferro (2017) + double ign_conv_oerr, ign_corr_oerr; + // Bias ratio information int n_ge_obs, n_lt_obs; double me_ge_obs, me_lt_obs; diff --git a/src/libcode/vx_statistics/met_stats.cc b/src/libcode/vx_statistics/met_stats.cc index a33f1adc73..67901bab1d 100644 --- a/src/libcode/vx_statistics/met_stats.cc +++ b/src/libcode/vx_statistics/met_stats.cc @@ -3456,6 +3456,25 @@ int min_int(const int *v_int, int n) { return(v_min); } +//////////////////////////////////////////////////////////////////////// + +// +// Compute mean from a sum +// +//////////////////////////////////////////////////////////////////////// + +double compute_mean(double sum, int n) { double v; + if(is_bad_data(sum) || is_bad_data(n) || n == 0) { v = bad_data_double; } else { v = sum / n; } + return(v); } //////////////////////////////////////////////////////////////////////// // // Compute variance from sums of squares diff --git a/src/libcode/vx_statistics/met_stats.h b/src/libcode/vx_statistics/met_stats.h index f0715d45b3..1d5def71a7 100644 --- a/src/libcode/vx_statistics/met_stats.h +++ b/src/libcode/vx_statistics/met_stats.h @@ -726,6 +726,8 @@ extern int min_int(const int *, int); // //////////////////////////////////////////////////////////////////////// +extern double compute_mean(double, int); + extern double compute_variance(double, double, int); extern double compute_stdev(double, double, int); diff --git a/src/libcode/vx_statistics/obs_error.cc b/src/libcode/vx_statistics/obs_error.cc index 6ccb4a30f5..751cf58581 100644 --- a/src/libcode/vx_statistics/obs_error.cc +++ b/src/libcode/vx_statistics/obs_error.cc @@ -5,6 +5,7 @@ // ** Research Applications Lab (RAL) // ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + //////////////////////////////////////////////////////////////////////// using namespace std; @@ -184,6 +185,12 @@ void ObsErrorEntry::dump(ostream & out, int depth) const { //////////////////////////////////////////////////////////////////////// +double ObsErrorEntry::variance() const { + return dist_var(dist_type, dist_parm[0], dist_parm[1]); +} + +//////////////////////////////////////////////////////////////////////// + bool ObsErrorEntry::parse_line(const DataLine &dl) { // Initialize
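[Editor's note: the new ObsErrorEntry::variance() helper above is the bridge between the configured error distribution and the c^2 term in the Ferro (2017) scoring rules. As a self-contained sketch (not part of this patch), these are the standard variance formulas such a helper must reproduce; the mapping of dist_parm entries to these parameters is an assumption to verify against dist_var() in vx_gsl_prob:

   #include <cmath>

   // NORMAL(sigma): variance = sigma^2
   double normal_var(double sigma) { return sigma * sigma; }

   // UNIFORM(a, b): variance = (b - a)^2 / 12
   double uniform_var(double a, double b) { return (b - a) * (b - a) / 12.0; }

   // LOGNORMAL(mu, sigma): variance = (exp(sigma^2) - 1) * exp(2*mu + sigma^2)
   double lognormal_var(double mu, double sigma) {
      return (std::exp(sigma * sigma) - 1.0) * std::exp(2.0 * mu + sigma * sigma);
   }

For example, a NORMAL entry with dist_parm[0] = 0.5 yields a variance of 0.25, which is the value added to the unperturbed ensemble variance below.]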
diff --git a/src/libcode/vx_statistics/obs_error.h b/src/libcode/vx_statistics/obs_error.h index 732c4c3ced..11cce141dc 100644 --- a/src/libcode/vx_statistics/obs_error.h +++ b/src/libcode/vx_statistics/obs_error.h @@ -5,6 +5,7 @@ // ** Research Applications Lab (RAL) // ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* + //////////////////////////////////////////////////////////////////////// #ifndef __OBS_ERROR_H__ @@ -68,6 +69,8 @@ class ObsErrorEntry { // get stuff // + double variance() const; + // // do stuff // diff --git a/src/libcode/vx_statistics/pair_data_ensemble.cc b/src/libcode/vx_statistics/pair_data_ensemble.cc index 41f527351b..1d577ef65c 100644 --- a/src/libcode/vx_statistics/pair_data_ensemble.cc +++ b/src/libcode/vx_statistics/pair_data_ensemble.cc @@ -108,6 +108,9 @@ void PairDataEnsemble::clear() { ign_na.clear(); pit_na.clear(); + ign_conv_oerr_na.clear(); + ign_corr_oerr_na.clear(); + n_ge_obs_na.clear(); me_ge_obs_na.clear(); n_lt_obs_na.clear(); @@ -179,6 +182,8 @@ void PairDataEnsemble::extend(int n) { crpscl_gaus_na.extend (n); ign_na.extend (n); pit_na.extend (n); + ign_conv_oerr_na.extend (n); + ign_corr_oerr_na.extend (n); n_ge_obs_na.extend (n); me_ge_obs_na.extend (n); n_lt_obs_na.extend (n); @@ -235,15 +240,20 @@ void PairDataEnsemble::assign(const PairDataEnsemble &pd) { // PairDataEnsemble v_na = pd.v_na; r_na = pd.r_na; + crps_emp_na = pd.crps_emp_na; crps_emp_fair_na = pd.crps_emp_fair_na; spread_md_na = pd.spread_md_na; crpscl_emp_na = pd.crpscl_emp_na; crps_gaus_na = pd.crps_gaus_na; crpscl_gaus_na = pd.crpscl_gaus_na; + ign_na = pd.ign_na; pit_na = pd.pit_na; + ign_conv_oerr_na = pd.ign_conv_oerr_na; + ign_corr_oerr_na = pd.ign_corr_oerr_na; + n_ge_obs_na = pd.n_ge_obs_na; me_ge_obs_na = pd.me_ge_obs_na; n_lt_obs_na = pd.n_lt_obs_na; @@ -449,6 +459,8 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { crpscl_gaus_na.add(bad_data_double); ign_na.add(bad_data_double); pit_na.add(bad_data_double); + ign_conv_oerr_na.add(bad_data_double); + ign_corr_oerr_na.add(bad_data_double); n_ge_obs_na.add(bad_data_double); me_ge_obs_na.add(bad_data_double); n_lt_obs_na.add(bad_data_double); @@ -461,22 +473,41 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { var_unperturbed = compute_variance(esum_na[i], esumsq_na[i], esumn_na[i]); var_na.add(var_unperturbed); - // Process the observation error information. + // Process the observation error information ObsErrorEntry * e = (has_obs_error() ? obs_error_entry[i] : 0); if(e) { + // Get observation error variance + double oerr_var = e->variance(); + + // Compute the observation error log scores + double v_conv, v_corr; + compute_obs_error_log_scores( + compute_mean(esum_na[i], esumn_na[i]), + compute_stdev(esum_na[i], esumsq_na[i], esumn_na[i]), + o_na[i], oerr_var, + v_conv, v_corr); + ign_conv_oerr_na.add(v_conv); + ign_corr_oerr_na.add(v_corr); + // Compute perturbed ensemble mean and variance // Exclude the control member from the variance mn_oerr_na.add(cur_ens.mean()); var_oerr_na.add(cur_ens.variance(ctrl_index)); - // Compute the variance plus observation error variance. - var_plus_oerr_na.add(var_unperturbed + - dist_var(e->dist_type, - e->dist_parm[0], e->dist_parm[1])); + // Compute the variance plus observation error variance + if(is_bad_data(var_unperturbed) || + is_bad_data(oerr_var)) { + var_plus_oerr_na.add(bad_data_double); + } + else { + var_plus_oerr_na.add(var_unperturbed + oerr_var); + } } - // If no observation error specified, store bad data values.
+ // If no observation error specified, store bad data values else { + ign_conv_oerr_na.add(bad_data_double); + ign_corr_oerr_na.add(bad_data_double); mn_oerr_na.add(bad_data_double); var_oerr_na.add(bad_data_double); var_plus_oerr_na.add(bad_data_double); @@ -506,8 +537,8 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { derive_climo_vals(cdf_info_ptr, cmn_na[i], csd_na[i], cur_clm); // Store empirical CRPS stats - // - // For crps_emp use temporary, local variable so we can use it for the crps_emp_fair calculation + // For crps_emp use temporary, local variable so we can use it + // for the crps_emp_fair calculation double crps_emp = compute_crps_emp(o_na[i], cur_ens); crps_emp_na.add(crps_emp); crps_emp_fair_na.add(crps_emp - cur_ens.wmean_abs_diff()); @@ -528,9 +559,10 @@ void PairDataEnsemble::compute_pair_vals(const gsl_rng *rng_ptr) { // Compute the Bias Ratio terms int n_ge_obs, n_lt_obs; double me_ge_obs, me_lt_obs; - compute_bias_ratio_terms(o_na[i], cur_ens, - n_ge_obs, me_ge_obs, - n_lt_obs, me_lt_obs); + compute_bias_ratio_terms( + o_na[i], cur_ens, + n_ge_obs, me_ge_obs, + n_lt_obs, me_lt_obs); // Store the Bias Ratio terms n_ge_obs_na.add(n_ge_obs); @@ -864,8 +896,11 @@ PairDataEnsemble PairDataEnsemble::subset_pairs_obs_thresh(const SingleThresh &o // // Include in subset: // wgt_na, o_na, cmn_na, csd_na, v_na, r_na, - // crps_emp_na, crps_emp_fair_na, spread_md_na, crpscl_emp_na, crps_gaus_na, crpscl_gaus_na, - // ign_na, pit_na, n_gt_obs_na, me_gt_obs_na, n_lt_obs_na, me_lt_obs_na, + // crps_emp_na, crps_emp_fair_na, spread_md_na, + // crpscl_emp_na, crps_gaus_na, crpscl_gaus_na, + // ign_na, pit_na, + // ign_conv_oerr_na, ign_corr_oerr_na, + // n_ge_obs_na, me_ge_obs_na, n_lt_obs_na, me_lt_obs_na, // var_na, var_oerr_na, var_plus_oerr_na, // mn_na, mn_oerr_na, e_na // @@ -888,6 +923,8 @@ PairDataEnsemble PairDataEnsemble::subset_pairs_obs_thresh(const SingleThresh &o pd.crpscl_gaus_na.add(crpscl_gaus_na[i]); pd.ign_na.add(ign_na[i]); pd.pit_na.add(pit_na[i]); + pd.ign_conv_oerr_na.add(ign_conv_oerr_na[i]); + pd.ign_corr_oerr_na.add(ign_corr_oerr_na[i]); pd.n_ge_obs_na.add(n_ge_obs_na[i]); pd.me_ge_obs_na.add(me_ge_obs_na[i]); pd.n_lt_obs_na.add(n_lt_obs_na[i]); @@ -1498,7 +1535,7 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, y < 0 || y >= gr.ny()) return; // For pressure levels, check if the observation pressure level - // falls in the requsted range.
if(obs_info_grib->level().type() == LevelType_Pres) { if(obs_lvl < obs_info_grib->level().lower() || @@ -1593,7 +1630,8 @@ void VxPairDataEnsemble::add_point_obs(float *hdr_arr, int *hdr_typ_arr, } } - // Apply observation error logic bias correction, if requested + // Apply observation error additive and multiplicative + // bias correction, if requested if(obs_error_info->flag) { obs_v = add_obs_error_bc(obs_error_info->rng_ptr, FieldType_Obs, oerr_ptr, obs_v); @@ -2111,5 +2149,52 @@ double compute_bias_ratio(double me_ge_obs, double me_lt_obs) { return(v); } - + +//////////////////////////////////////////////////////////////////////// + +void compute_obs_error_log_scores(double emn, double esd, + double obs, double oerr_var, + double &v_conv, double &v_corr) { + + const char *method_name = "compute_obs_error_log_scores() -> "; + + // Check for bad input data + if(is_bad_data(emn) || + is_bad_data(esd) || + is_bad_data(obs) || + is_bad_data(oerr_var)) { + v_conv = v_corr = bad_data_double; + } + else { + double sigma2 = esd * esd; + + // Note: oerr_var is already the observation error + // variance (c^2 in Ferro 2017), so it is added + // without squaring + + // Error-convolved logarithmic scoring rule in + // Ferro (2017, Eq 5) doi:10.1002/qj.3115 + // Includes the 0.5 * log(2.0 * pi) normalization term + // for consistency with the ignorance score + v_conv = 0.5 * log(2.0 * pi * (sigma2 + oerr_var)) + + (obs - emn) * (obs - emn) / + (2.0 * (sigma2 + oerr_var)); + + // Error-corrected logarithmic scoring rule in + // Ferro (2017, Eq 7) doi:10.1002/qj.3115 + // Note: Eq 7 omits the constant 0.5 * log(2.0 * pi) + // term relative to the ignorance score + v_corr = log(esd) + + ((obs - emn) * (obs - emn) - oerr_var) / + (2.0 * sigma2); + } + + if(mlog.verbosity_level() >= 10) { mlog << Debug(10) << method_name << "inputs (emn = " << emn << ", esd = " << esd << ", obs = " << obs << ", oerr_var = " << oerr_var << ") and outputs (ign_oerr_conv = " << v_conv << ", ign_oerr_corr = " << v_corr << ")\n"; } + + return; } //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_data_ensemble.h b/src/libcode/vx_statistics/pair_data_ensemble.h index e6014b59fb..9be2eec3c8 100644 --- a/src/libcode/vx_statistics/pair_data_ensemble.h +++ b/src/libcode/vx_statistics/pair_data_ensemble.h @@ -6,6 +6,8 @@ // ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA // *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* +//////////////////////////////////////////////////////////////////////// + #ifndef __PAIR_DATA_ENSEMBLE_H__ #define __PAIR_DATA_ENSEMBLE_H__ @@ -74,64 +76,67 @@ class PairDataEnsemble : public PairBase { bool obs_error_flag; // Ensemble, valid count, and rank values - NumArray *e_na; // Ensemble values [n_ens][n_obs] - NumArray v_na; // Number of valid ensemble values [n_obs] - NumArray r_na; // Observation ranks [n_obs] + NumArray *e_na; // Ensemble values [n_ens][n_obs] + NumArray v_na; // Number of valid ensemble values [n_obs] + NumArray r_na; // Observation ranks [n_obs] + + NumArray crps_emp_na; // Empirical Continuous Ranked Probability Score [n_obs] + NumArray crps_emp_fair_na; // Fair Empirical Continuous Ranked Probability Score [n_obs] + NumArray spread_md_na; // Mean absolute difference of ensemble members [n_obs] + NumArray crpscl_emp_na; // Empirical climatological CRPS [n_obs] - NumArray crps_emp_na; // Empirical Continuous Ranked Probability Score [n_obs] - NumArray crps_emp_fair_na; // Fair Empirical Continuous Ranked Probability Score [n_obs] - NumArray spread_md_na; // Mean absolute difference of ensemble members [n_obs] - NumArray crpscl_emp_na; // Empirical climatological CRPS
[n_obs] + NumArray crps_gaus_na; // Gaussian CRPS [n_obs] + NumArray crpscl_gaus_na; // Gaussian climatological CRPS [n_obs] - NumArray crps_gaus_na; // Gaussian CRPS [n_obs] - NumArray crpscl_gaus_na; // Gaussian climatological CRPS [n_obs] + NumArray ign_na; // Ignorance Score [n_obs] + NumArray pit_na; // Probability Integral Transform [n_obs] - NumArray ign_na; // Ignorance Score [n_obs] - NumArray pit_na; // Probability Integral Transform [n_obs] + NumArray ign_conv_oerr_na; // Error convolved log score [n_obs] + NumArray ign_corr_oerr_na; // Error corrected log score [n_obs] - NumArray n_ge_obs_na; // Number of ensemble memebers >= obs [n_obs] - NumArray me_ge_obs_na; // Mean error of ensemble members >= obs [n_obs] - NumArray n_lt_obs_na; // Number of ensemble members < obs [n_obs] - NumArray me_lt_obs_na; // Mean error of ensemble members < obs [n_obs] + NumArray n_ge_obs_na; // Number of ensemble members >= obs [n_obs] + NumArray me_ge_obs_na; // Mean error of ensemble members >= obs [n_obs] + NumArray n_lt_obs_na; // Number of ensemble members < obs [n_obs] + NumArray me_lt_obs_na; // Mean error of ensemble members < obs [n_obs] - int n_ens; // Number of ensemble members - int n_pair; // Number of valid pairs, n_obs - sum(skip_ba) - int ctrl_index; // Index of the control member - bool skip_const; // Skip cases where the observation and - // all ensemble members are constant - BoolArray skip_ba; // Flag for each observation [n_obs] + int n_ens; // Number of ensemble members + int n_pair; // Number of valid pairs, n_obs - sum(skip_ba) + int ctrl_index; // Index of the control member + bool skip_const; // Skip cases where the observation and + // all ensemble members are constant + BoolArray skip_ba; // Flag for each observation [n_obs] - NumArray rhist_na; // Ranked Histogram [n_ens+1] - NumArray relp_na; // Relative Position Histogram [n_ens] + NumArray rhist_na; // Ranked Histogram [n_ens+1] + NumArray relp_na; // Relative Position Histogram [n_ens] - double phist_bin_size; // Ensemble PIT histogram bin width - NumArray phist_na; // PIT Histogram [n_phist_bin] + double phist_bin_size; // Ensemble PIT histogram bin width + NumArray phist_na; // PIT Histogram [n_phist_bin] NumArray var_na; // Variance of unperturbed members [n_obs] NumArray var_oerr_na; // Variance of perturbed members [n_obs] NumArray var_plus_oerr_na; // Unperturbed variance plus observation error variance [n_obs] - NumArray esum_na; // Sum of unperturbed ensemble values [n_obs] - NumArray esumsq_na; // Sum of unperturbed ensemble squared values [n_obs] - NumArray esumn_na; // Count of ensemble values [n_obs] + NumArray esum_na; // Sum of unperturbed ensemble values [n_obs] + NumArray esumsq_na; // Sum of unperturbed ensemble squared values [n_obs] + NumArray esumn_na; // Count of ensemble values [n_obs] - NumArray mn_na; // Ensemble mean value [n_obs] - NumArray mn_oerr_na; // Mean of perturbed members [n_obs] + NumArray mn_na; // Ensemble mean value [n_obs] + NumArray mn_oerr_na; // Mean of perturbed members [n_obs] - double ssvar_bin_size; // Variance bin size for spread/skill - SSVARInfo *ssvar_bins; // Ensemble spread/skill bin information [n_ssvar_bin] + double ssvar_bin_size; // Variance bin size for spread/skill + SSVARInfo *ssvar_bins; // Ensemble spread/skill bin information [n_ssvar_bin] - double crpss_emp; // Empirical CRPS skill score - double crpss_gaus; // Guassian CRPS skill score + double crpss_emp; // Empirical CRPS skill score + double crpss_gaus; // Gaussian CRPS skill score - double me; //
ME for ensemble mean - double mae; // MAE for ensemble mean - double rmse; // RMSE for ensemble mean - double me_oerr; // ME for mean of perturbed members - double mae_oerr; // MAE for mean of perturbed members - double rmse_oerr; // RMSE for mean of perturbed members + double me; // ME for ensemble mean + double mae; // MAE for ensemble mean + double rmse; // RMSE for ensemble mean + double me_oerr; // ME for mean of perturbed members + double mae_oerr; // MAE for mean of perturbed members + double rmse_oerr; // RMSE for mean of perturbed members - double bias_ratio; // Bias ratio + double bias_ratio; // Bias ratio ////////////////////////////////////////////////////////////////// @@ -323,6 +328,9 @@ extern double compute_ens_pit(double, double, double); extern void compute_bias_ratio_terms(double, const NumArray &, int &, double &, int &, double &); extern double compute_bias_ratio(double, double); +extern void compute_obs_error_log_scores( + double, double, double, double, + double &, double &); //////////////////////////////////////////////////////////////////////// diff --git a/src/libcode/vx_statistics/pair_data_point.cc b/src/libcode/vx_statistics/pair_data_point.cc index 15aa9133f0..86a6ec71b8 100644 --- a/src/libcode/vx_statistics/pair_data_point.cc +++ b/src/libcode/vx_statistics/pair_data_point.cc @@ -1119,7 +1119,7 @@ void VxPairDataPoint::add_point_obs(float *hdr_arr, const char *hdr_typ_str, } // For pressure levels, check if the observation pressure level - // falls in the requsted range. + // falls in the requested range. if(obs_info->level().type() == LevelType_Pres) { if(obs_lvl < obs_info->level().lower() || diff --git a/src/tools/core/stat_analysis/aggr_stat_line.cc b/src/tools/core/stat_analysis/aggr_stat_line.cc index 99b5906429..8c191e16f6 100644 --- a/src/tools/core/stat_analysis/aggr_stat_line.cc +++ b/src/tools/core/stat_analysis/aggr_stat_line.cc @@ -39,6 +39,8 @@ // line types. // 018 02/13/24 Halley Gotway MET #2395 Add wind direction stats // to VL1L2, VAL1L2, and VCNT. +// 019 02/21/24 Halley Gotway MET #2583 Add observation error +// ECNT statistics. 
 //
 ////////////////////////////////////////////////////////////////////////
 
@@ -2654,6 +2656,8 @@ void aggr_ecnt_lines(LineDataFile &f, STATAnalysisJob &job,
       m[key].ens_pd.crps_gaus_na.add(cur.crps_gaus);
       m[key].ens_pd.crpscl_gaus_na.add(cur.crpscl_gaus);
       m[key].ens_pd.ign_na.add(cur.ign);
+      m[key].ens_pd.ign_conv_oerr_na.add(cur.ign_conv_oerr);
+      m[key].ens_pd.ign_corr_oerr_na.add(cur.ign_corr_oerr);
       m[key].ens_pd.n_ge_obs_na.add(cur.n_ge_obs);
       m[key].ens_pd.me_ge_obs_na.add(cur.me_ge_obs);
       m[key].ens_pd.n_lt_obs_na.add(cur.n_lt_obs);
@@ -3227,17 +3231,34 @@ void aggr_orank_lines(LineDataFile &f, STATAnalysisJob &job,
       m[key].ens_pd.ign_na.add(compute_ens_ign(cur.obs, cur.ens_mean, cur.spread));
       m[key].ens_pd.pit_na.add(compute_ens_pit(cur.obs, cur.ens_mean, cur.spread));
 
+      // Back out the observation error variance
+      double oerr_var = bad_data_double;
+      if(!is_bad_data(cur.spread_plus_oerr) &&
+         !is_bad_data(cur.spread)) {
+         oerr_var = square(cur.spread_plus_oerr) -
+                    square(cur.spread);
+      }
+
       // Store BIAS_RATIO terms
       int n_ge_obs, n_lt_obs;
       double me_ge_obs, me_lt_obs;
-      compute_bias_ratio_terms(cur.obs, cur.ens_na,
-                               n_ge_obs, me_ge_obs,
-                               n_lt_obs, me_lt_obs);
+      compute_bias_ratio_terms(
+         cur.obs, cur.ens_na,
+         n_ge_obs, me_ge_obs,
+         n_lt_obs, me_lt_obs);
       m[key].ens_pd.n_ge_obs_na.add(n_ge_obs);
       m[key].ens_pd.me_ge_obs_na.add(me_ge_obs);
       m[key].ens_pd.n_lt_obs_na.add(n_lt_obs);
       m[key].ens_pd.me_lt_obs_na.add(me_lt_obs);
 
+      // Compute observation error log scores
+      double v_conv, v_corr;
+      compute_obs_error_log_scores(
+         cur.ens_mean, cur.spread, cur.obs, oerr_var,
+         v_conv, v_corr);
+      m[key].ens_pd.ign_conv_oerr_na.add(v_conv);
+      m[key].ens_pd.ign_corr_oerr_na.add(v_corr);
+
       //
       // Increment the RHIST counts
       //
diff --git a/src/tools/core/stat_analysis/parse_stat_line.cc b/src/tools/core/stat_analysis/parse_stat_line.cc
index 57bc92eb46..a0c3265161 100644
--- a/src/tools/core/stat_analysis/parse_stat_line.cc
+++ b/src/tools/core/stat_analysis/parse_stat_line.cc
@@ -33,6 +33,8 @@
 //                                  line types.
 //   013    02/13/24  Halley Gotway  MET #2395 Add wind direction stats
 //                                  to VL1L2, VAL1L2, and VCNT.
+//   014    02/21/24  Halley Gotway  MET #2583 Add observation error
+//                                  ECNT statistics.
 //
 ////////////////////////////////////////////////////////////////////////
 
@@ -397,6 +399,9 @@ void parse_ecnt_line(STATLine &l, ECNTData &e_data) {
    e_data.n_lt_obs  = atoi(l.get_item("N_LT_OBS"));
    e_data.me_lt_obs = atof(l.get_item("ME_LT_OBS"));
 
+   e_data.ign_conv_oerr = atof(l.get_item("IGN_CONV_OERR"));
+   e_data.ign_corr_oerr = atof(l.get_item("IGN_CORR_OERR"));
+
    return;
 }
diff --git a/src/tools/core/stat_analysis/parse_stat_line.h b/src/tools/core/stat_analysis/parse_stat_line.h
index 529fdb2df9..c890b45ff7 100644
--- a/src/tools/core/stat_analysis/parse_stat_line.h
+++ b/src/tools/core/stat_analysis/parse_stat_line.h
@@ -28,6 +28,8 @@
 //   011    09/28/22  Prestopnik     MET #2227 Remove namespace std
 //   012    11/10/22  Halley Gotway  MET #2339 Add SEEPS and SEEPS_MPR
 //                                  line types.
+//   013    02/21/24  Halley Gotway  MET #2583 Add observation error
+//                                  ECNT statistics.
 //
 ////////////////////////////////////////////////////////////////////////
 
@@ -72,6 +74,7 @@ struct ECNTData {
    double bias_ratio;
    int n_ge_obs, n_lt_obs;
    double me_ge_obs, me_lt_obs;
+   double ign_conv_oerr, ign_corr_oerr;
 };
 
 // Ranked Histogram (RHIST) data structure
diff --git a/src/tools/core/wavelet_stat/wavelet_stat.cc b/src/tools/core/wavelet_stat/wavelet_stat.cc
index b182a4538d..f87e4b7f29 100644
--- a/src/tools/core/wavelet_stat/wavelet_stat.cc
+++ b/src/tools/core/wavelet_stat/wavelet_stat.cc
@@ -39,7 +39,7 @@
 //   014    07/09/21  Linden         MET #1746 Skip thresholding.
 //   015    07/06/22  Howard Soh     METplus-Internal #19 Rename main to met_main
 //   016    10/03/22  Prestopnik     MET #2227 Remove using namespace netCDF from header files
-//   017    01/29/24  Halley Gotway  MET #2801 Configure time difference warnings 
+//   017    01/29/24  Halley Gotway  MET #2801 Configure time difference warnings
 //
 ////////////////////////////////////////////////////////////////////////
 
@@ -423,7 +423,7 @@ void process_scores() {
    mlog << Debug(2) << "Observation field: ";
    fill_bad_data(obs_dp_fill, obs_fill);
 
-   // Pad the fields out to the nearest power of two if requsted
+   // Pad the fields out to the nearest power of two if requested
    if(conf_info.grid_decomp_flag == GridDecompType_Pad) {
       mlog << Debug(2) << "Padding the fields out to the nearest integer "
           << "power of two.\n";
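
For reference, the two new ECNT columns wired in above have a simple closed form when the predictive distribution is Gaussian: IGN_CONV_OERR scores the observation against the forecast density convolved with the observation error density, i.e. N(mean, spread^2 + oerr_var), while IGN_CORR_OERR removes the expected inflation that observation error adds to the standard ignorance score. The sketch below is illustrative only, not the MET source: it assumes the Gaussian forms from Ferro (2017), mirrors the argument order at the aggr_orank_lines() call site (ensemble mean, spread, observation, observation error variance), and omits the bad-data guards the real helper would need.

#include <cmath>

// Hypothetical sketch of the error-convolved (v_conv) and error-corrected
// (v_corr) ignorance scores for a Gaussian predictive distribution
// N(m, s^2) with observation error variance oerr_var, after Ferro (2017).
void compute_obs_error_log_scores_sketch(double m, double s,
                                         double obs, double oerr_var,
                                         double &v_conv, double &v_corr) {
   const double two_pi = 2.0 * M_PI;
   double s2   = s * s;                 // forecast variance (sigma^2)
   double err2 = (obs - m) * (obs - m); // squared error

   // Convolved score: ignorance of the observation under the forecast
   // density convolved with the error density, N(m, s2 + oerr_var).
   v_conv = 0.5 * std::log(two_pi * (s2 + oerr_var))
          + err2 / (2.0 * (s2 + oerr_var));

   // Corrected score: standard Gaussian ignorance with the expected
   // observation error contribution, oerr_var / (2 * s2), removed.
   v_corr = 0.5 * std::log(two_pi * s2)
          + (err2 - oerr_var) / (2.0 * s2);
}

The oerr_var argument is recovered from the published spread columns: since the ECNT SPREAD_PLUS_OERR statistic combines the unperturbed spread and the observation error standard deviation in quadrature, oerr_var = SPREAD_PLUS_OERR^2 - SPREAD^2, which is exactly the square(cur.spread_plus_oerr) - square(cur.spread) expression in the aggr_orank_lines() hunk above.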