import argparse
from functools import reduce
import logging
import os
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import xarray as xr
import yaml
from evaluation.forecast_reformat_catalog import reformat_forecast
from evaluation.obs_reformat_catalog import get_interp_station_list, obs_to_verification, reformat_and_filter_obs
from evaluation.metric_catalog import get_metric_func
from evaluation.utils import configure_logging, get_metric_multiple_stations, generate_forecast_cache_path, \
cache_reformat_forecast, load_reformat_forecast, ForecastData, ForecastInfo, MetricData, get_ideal_xticks
warnings.filterwarnings("ignore")
logger = logging.getLogger(__name__)
def intersect_all_forecast(forecast_list: List[ForecastData]) -> List[ForecastData]:
"""
Generates new versions of the forecast data objects where the dataframes have been aligned between all
forecasts in the sequence. If only one forecast is provided, the data is returned as is.
"""
raise NotImplementedError("Forecast alignment is not yet implemented.")
def get_forecast_data(forecast_info: ForecastInfo, cache_forecast: bool) -> ForecastData:
"""
    Open the forecast described by forecast_info and reformat it.
Parameters
----------
forecast_info: ForecastInfo
The forecast information.
cache_forecast: bool
        If True, cache the reformatted forecast data; subsequent runs will load from the cache.
Returns
-------
forecast: ForecastData
Forecast data and the forecast information.
"""
info = forecast_info
cache_path = generate_forecast_cache_path(info)
if not os.path.exists(cache_path) or not cache_forecast:
logger.info(f"open forecast file: {info.path}")
if info.file_type is None:
if Path(info.path).is_dir():
forecast = xr.open_zarr(info.path)
else:
forecast = xr.open_dataset(info.path, chunks={})
elif info.file_type == 'zarr':
forecast = xr.open_zarr(info.path)
else:
forecast = xr.open_dataset(info.path, chunks={})
forecast_ds = reformat_forecast(forecast, info)
if cache_forecast:
cache_reformat_forecast(forecast_ds, cache_path)
logger.info(f"save forecast file to cache: {cache_path}")
else:
forecast_ds = load_reformat_forecast(cache_path)
logger.info(f"load forecast: {info.forecast_name}, from cache: {cache_path}")
logger.debug(f"opened forecast dataset: {forecast_ds}")
return ForecastData(info=info, forecast=forecast_ds)
def get_observation_data(obs_base_path: str, obs_var_name: str, station_metadata_path: str,
obs_file_type: str, obs_start_month: str, obs_end_month: str,
precip_threshold: Optional[float] = None) -> xr.Dataset:
"""
Open the observation file and reformat it. Required fields: station, valid_time, obs_var_name.
Parameters
----------
obs_base_path: str
Path to the observation file.
obs_var_name: str
Name of the observation variable.
station_metadata_path: str
Path to the station metadata file.
obs_file_type: str
Type of the observation file.
    obs_start_month: str
        Start month (format 'YYYY-MM') for multi-file netCDF data.
    obs_end_month: str
        End month (format 'YYYY-MM') for multi-file netCDF data.
precip_threshold: float, optional
Threshold for converting precipitation amount to binary. Default is no conversion.
Returns
-------
obs: xr.Dataset
Observation data with required fields.
"""
    if obs_start_month is not None or obs_end_month is not None:
        if obs_start_month is None or obs_end_month is None:
            raise ValueError("Both obs_start_month and obs_end_month must be provided.")
month_list = pd.date_range(obs_start_month, obs_end_month, freq='MS')
suffix = obs_file_type or 'nc'
obs_path = [os.path.join(obs_base_path, month.strftime(f'%Y%m.{suffix}')) for month in month_list]
obs_path_filter = []
for path in obs_path:
if not os.path.exists(path):
logger.warning(f"expected observation path does not exist: {path}")
else:
obs_path_filter.append(path)
obs = xr.open_mfdataset(obs_path_filter, chunks={})
else:
if obs_file_type is None:
if Path(obs_base_path).is_dir():
obs = xr.open_zarr(obs_base_path)
else:
obs = xr.open_dataset(obs_base_path, chunks={})
elif obs_file_type == 'zarr':
obs = xr.open_zarr(obs_base_path)
else:
obs = xr.open_dataset(obs_base_path, chunks={})
obs = reformat_and_filter_obs(obs, obs_var_name, station_metadata_path, precip_threshold)
logger.debug(f"opened observation dataset: {obs}")
return obs
def merge_forecast_obs(forecast: ForecastData, obs: xr.Dataset) -> ForecastData:
"""
Merge the forecast and observation data.
"""
new_obs = obs_to_verification(
obs,
steps=forecast.forecast.lead_time.values,
max_lead=forecast.forecast.lead_time.values.max(),
issue_times=forecast.forecast.issue_time.values
)
merge_data = xr.merge([forecast.forecast, new_obs], compat='override')
merge_data['delta'] = merge_data['fc'] - merge_data['obs']
result = ForecastData(info=forecast.info, forecast=forecast.forecast, merge_data=merge_data)
logger.debug(f"after merge forecast and obs: {result.merge_data}")
return result
def filter_by_region(forecast: ForecastData, region_name: str, station_list: List[str]) \
-> ForecastData:
"""
Apply a selection on forecast based on the region_name and station_list.
"""
if region_name == 'all':
return forecast
merge_data = forecast.merge_data
filtered_merge_data = ForecastData(
        merge_data=merge_data.sel(station=merge_data.station.isin(station_list)),
info=forecast.info
)
return filtered_merge_data
def calculate_all_metrics(forecast_data: ForecastData, group_dim: str, metrics_params: Dict[str, Any]) \
-> MetricData:
"""
Calculate all the metrics together for dask graph efficiency.
Parameters
----------
forecast_data: ForecastData
The forecast data.
group_dim: str
The dimension to group the metric calculation.
metrics_params: dict
Dictionary containing the metrics configs.
Returns
-------
metric_data: MetricData
Metric data and the forecast information.
"""
metrics = MetricData(info=forecast_data.info, metric_data=xr.Dataset())
for metric_name in metrics_params.keys():
metric_func = get_metric_func(metrics_params[metric_name])
metrics.metric_data[metric_name] = metric_func(forecast_data.merge_data, group_dim)
metrics.metric_data = metrics.metric_data.compute()
return metrics
def get_plot_detail(forecast_data: ForecastData, group_dim: str):
"""
Get some added data to show on plots
"""
merge_data = forecast_data.merge_data
counts = {key: coord.size for key, coord in merge_data.coords.items() if key not in ['lat', 'lon']}
counts.update({'fc': merge_data.fc.size})
all_dims = ['valid_time', 'issue_time', 'lead_time']
all_dims.remove(group_dim)
dim_info = []
for dim in all_dims:
if dim == 'lead_time':
vmax, vmin = merge_data[dim].max(), merge_data[dim].min()
else:
vmax, vmin = pd.Timestamp(merge_data[dim].values.max()).strftime("%Y-%m-%d %H:%M:%S"), \
pd.Timestamp(merge_data[dim].values.min()).strftime("%Y-%m-%d %H:%M:%S")
dim_info.append(f'{dim} min: {vmin}, max: {vmax}')
data_distribution = f"dim count: {str(counts)}\n{dim_info[0]}\n{dim_info[1]}"
return data_distribution
def plot_metric(
example_data: ForecastData,
metric_data_list: List[MetricData],
group_dim: str,
metric_name: str,
base_plot_setting: Dict[str, Any],
metrics_params: Dict[str, Any],
output_dir: str,
region_name: str,
plot_save_format: Optional[str] = 'png'
) -> plt.Figure:
"""
A generic, basic plot for a single metric.
Parameters
----------
example_data: ForecastData
Example forecast data to get some extra information for the plot.
metric_data_list: list of MetricData
List of MetricData objects containing the metric data and the forecast information.
group_dim: str
The dimension to group the metric calculation.
metric_name: str
The name of the metric.
base_plot_setting: dict
Dictionary containing the base plot settings.
metrics_params: dict
Dictionary containing the metric method and other kwargs.
output_dir: str
The output directory for the plots.
region_name: str
The name of the region.
plot_save_format: str, optional
The format to save the plot in. Default is 'png'.
Returns
-------
fig: plt.Figure
The plot figure.
"""
data_distribution = get_plot_detail(example_data, group_dim)
fig = plt.figure(figsize=(5.5, 6.5))
font = {'weight': 'medium', 'fontsize': 11}
title = base_plot_setting['title']
xlabel = base_plot_setting['xlabel']
if 'plot_setting' in metrics_params:
plot_setting = metrics_params['plot_setting']
title = plot_setting.get('title', title)
xlabel = plot_setting.get('xlabel', xlabel)
plt.title(title)
plt.suptitle(data_distribution, fontsize=7)
plt.gca().set_xlabel(xlabel[group_dim], fontdict=font)
plt.gca().set_ylabel(metric_name, fontdict=font)
for metrics in metric_data_list:
metric_data = metrics.metric_data
forecast_name = metrics.info.forecast_name
plt.plot(metric_data[group_dim], metric_data[metric_name], label=forecast_name, linewidth=1.5)
if group_dim == 'lead_time':
plt.gca().set_xticks(get_ideal_xticks(metric_data[group_dim].min(), metric_data[group_dim].max(), 8))
plt.grid(linestyle=':')
plt.legend(loc='upper center', bbox_to_anchor=(0.45, -0.14), frameon=False, ncol=3, fontsize=10)
plt.tight_layout()
plt.subplots_adjust(top=0.85)
plot_path = os.path.join(output_dir, region_name)
os.makedirs(plot_path, exist_ok=True)
plt.savefig(os.path.join(plot_path, f"{metric_name}.{plot_save_format}"))
return fig
def metrics_to_csv(
metric_data_list: List[MetricData],
group_dim: str,
output_dir: str,
region_name: str
):
"""
A generic, basic function to save the metric data to a CSV file.
Parameters
----------
metric_data_list: list of MetricData
List of MetricData objects containing the metric data and the forecast information.
group_dim: str
The dimension to group the metric calculation.
output_dir: str
The output directory for the CSV files.
region_name: str
The name of the region.
Returns
-------
merged_df: pd.DataFrame
The merged DataFrame with metric data.
"""
df_list = []
for metrics in metric_data_list:
df_list.append(metrics.metric_data.rename(
{metric: f"{metrics.info.forecast_name}_{metric}" for metric in metrics.metric_data.data_vars.keys()}
).to_dataframe())
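    # After the rename above, each metric column carries its forecast name as a
    # prefix (e.g., a hypothetical 'rmse' metric from forecast 'model_a' becomes
    # 'model_a_rmse'); merging on the group dimension yields one wide table.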
merged_df = reduce(lambda left, right: pd.merge(left, right, on=group_dim, how='inner'), df_list)
output_path = os.path.join(output_dir, region_name)
os.makedirs(output_path, exist_ok=True)
merged_df.to_csv(os.path.join(output_path, "metrics.csv"))
return merged_df
def parse_args(args: argparse.Namespace) -> Tuple[List[ForecastInfo], Any, Any, Union[
Dict[str, List[Any]], Any], Any, Any, Any, Any, Any, Any, Any, str, bool, bool, Optional[float]]:
forecast_info_list = []
forecast_name_list = args.forecast_names
forecast_var_name_list = args.forecast_var_names
forecast_reformat_func_list = args.forecast_reformat_funcs
station_metadata_path = args.station_metadata_path
for index, forecast_path in enumerate(args.forecast_paths):
forecast_info = ForecastInfo(
path=forecast_path,
forecast_name=forecast_name_list[index] if index < len(forecast_name_list) else f"forecast_{index}",
fc_var_name=forecast_var_name_list[index] if index < len(forecast_var_name_list) else
forecast_var_name_list[0],
reformat_func=forecast_reformat_func_list[index] if index < len(forecast_reformat_func_list) else
forecast_reformat_func_list[0],
file_type=args.forecast_file_types[index] if index < len(args.forecast_file_types) else None,
station_metadata_path=station_metadata_path,
interp_station_path=station_metadata_path,
output_directory=args.output_directory,
start_date=args.start_date,
end_date=args.end_date,
issue_time_freq=args.issue_time_freq,
start_lead=args.start_lead,
end_lead=args.end_lead,
convert_temperature=args.convert_fcst_temperature_k_to_c,
convert_pressure=args.convert_fcst_pressure_pa_to_hpa,
convert_cloud=args.convert_fcst_cloud_to_okta,
precip_proba_threshold=args.precip_proba_threshold_conversion,
)
forecast_info_list.append(forecast_info)
metrics_settings_path = args.config_path if args.config_path is not None else os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'metric_config.yml')
with open(metrics_settings_path, 'r') as fs: # pylint: disable=unspecified-encoding
metrics_settings = yaml.safe_load(fs)
try:
metrics_settings = metrics_settings[args.variable_type]
except KeyError as exc:
raise ValueError(f"Unknown variable type: {args.variable_type}. Check config file {metrics_settings_path}") \
from exc
metrics_dict = metrics_settings['metrics']
base_plot_setting = metrics_settings['base_plot_setting']
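    # Based on the keys read here and in plot_metric, the config file is assumed
    # to look roughly like this (all values illustrative):
    #   temperature:
    #     base_plot_setting:
    #       title: "2m temperature"
    #       xlabel: {lead_time: "Lead time (h)", issue_time: "Issue time", valid_time: "Valid time"}
    #     metrics:
    #       rmse:
    #         method: rmse           # passed to get_metric_func
    #         plot_setting: {title: "RMSE"}   # optional per-metric overrides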
if args.eval_region_files is not None:
try:
region_dict = get_metric_multiple_stations(','.join(args.eval_region_files))
except Exception as e: # pylint: disable=broad-exception-caught
logger.info(f"get_metric_multiple_stations failed, use default region: all {e}")
region_dict = {}
else:
region_dict = {}
region_dict['all'] = []
return (forecast_info_list, metrics_dict, base_plot_setting,
region_dict, args.group_dim, args.obs_var_name, args.obs_path, args.obs_file_type,
args.obs_start_month, args.obs_end_month, args.output_directory, station_metadata_path,
bool(args.cache_forecast), bool(args.align_forecasts),
args.precip_proba_threshold_conversion)
def main(args):
logger.info("===================== parse args =====================")
(forecast_info_list, metrics_dict, base_plot_setting, region_dict, group_dim, obs_var_name,
obs_base_path, obs_file_type, obs_start_month, obs_end_month, output_dir, station_metadata_path,
cache_forecast, align_forecasts, precip_threshold) = parse_args(args)
logger.info("===================== start get_observation_data =====================")
obs_ds = get_observation_data(obs_base_path, obs_var_name, station_metadata_path, obs_file_type, obs_start_month,
obs_end_month, precip_threshold=precip_threshold)
# Get metadata and set it on all the forecast info objects
if station_metadata_path is not None:
metadata = get_interp_station_list(station_metadata_path)
else:
try:
metadata = pd.DataFrame({'lat': obs_ds.lat.values, 'lon': obs_ds.lon.values,
'station': obs_ds.station.values})
except KeyError as exc:
raise ValueError("--station-metadata-path is required if lat/lon/station keys are not in the observation "
"file") from exc
for forecast_info in forecast_info_list:
forecast_info.metadata = metadata
if align_forecasts:
# First load all forecasts, then compute and return metrics.
logger.info("===================== start get_forecast_data =====================")
forecast_list = [get_forecast_data(fi, cache_forecast) for fi in forecast_info_list]
logger.info("===================== start intersect_all_forecast =====================")
forecast_list = intersect_all_forecast(forecast_list)
else:
forecast_list = [None] * len(forecast_info_list)
# For each forecast, compute all its metrics in every region.
metric_data = {r: [] for r in region_dict}
for forecast, forecast_info in zip(forecast_list, forecast_info_list):
try:
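            # Release the previous iteration's merged data so its memory can be reclaimed.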
del merged_forecast # noqa: F821
except NameError:
pass
logger.info(f"===================== compute metrics for forecast {forecast_info.forecast_name} "
f"=====================")
if forecast is None:
logger.info("===================== get_forecast_data =====================")
forecast = get_forecast_data(forecast_info, cache_forecast)
logger.info("===================== start merge_forecast_obs =====================")
merged_forecast = merge_forecast_obs(forecast, obs_ds)
for region_name, region_data in region_dict.items():
logger.info(f"===================== filter_by_region: {region_name} =====================")
filtered_forecast = filter_by_region(merged_forecast, region_name, region_data)
if region_name != 'all':
logger.info(f"after filter_by_region: {region_name}; "
f"stations: {filtered_forecast.merge_data.station.size}")
logger.info(f"start calculate_metrics, region: {region_name}")
metric_data[region_name].append(
calculate_all_metrics(filtered_forecast, group_dim, metrics_dict)
)
forecast = None
# Plot all metrics and save data
for region_name in region_dict:
for metric_name in metrics_dict:
logger.info(f"===================== plot_metric: {metric_name}, region: {region_name} "
f"=====================")
plot_metric(merged_forecast, metric_data[region_name], group_dim, metric_name,
base_plot_setting, metrics_dict[metric_name], output_dir, region_name)
metrics_to_csv(metric_data[region_name], group_dim, output_dir, region_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Forecast evaluation script. Given a set of forecasts and a file of reference observations, "
"computes requested metrics as specified in the `metric_catalog.yml` file. Includes the ability "
"to interpret either grid-based or point-based forecasts. Grid-based forecasts are interpolated "
"to observation locations. Point-based forecasts are directly compared to nearest observations."
)
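    # Illustrative invocation (all paths, names, and variable values below are hypothetical):
    #   python evaluate.py \
    #       --forecast-paths /data/model_a.zarr /data/model_b.nc \
    #       --forecast-names model_a model_b \
    #       --forecast-var-names t2m \
    #       --forecast-reformat-funcs grid_standard \
    #       --obs-path /data/obs.nc --obs-var-name temperature \
    #       --variable-type temperature --output-directory ./eval_out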
parser.add_argument(
"--forecast-paths",
nargs='+',
type=str,
required=True,
help="List of paths containing forecasts. If a directory is provided, assumes forecast is a zarr store, "
"and calls xarray's `open_zarr` method. "
"Required dimensions: lead_time (or step), issue_time (or time), lat (or latitude), lon (or longitude)."
)
parser.add_argument(
'--forecast-names',
type=str,
nargs='+',
default=[],
help="List of names to assign to the forecasts. If there are more forecast paths than names, fills in the "
"remaining names with 'forecast_{index}'"
)
parser.add_argument(
'--forecast-var-names',
type=str,
nargs='+',
required=True,
help="List of names (one per forecast path) of the forecast variable of interest in each file. If only one "
"value is provided, assumes all forecast files have the same variable name. Raises an error if the "
"number of listed values is less than the number of forecast paths."
)
parser.add_argument(
'--forecast-reformat-funcs',
type=str,
nargs='+',
required=True,
help="For each forecast path, provide the name of the reformat function to apply. This function is based on "
"the schema of the forecast file. Can be only a single value to apply to all forecasts. Options: "
"\n - 'grid_standard': input is a grid forecast with dimensions lead_time, issue_time, lat, lon."
"\n - 'point_standard': input is a point forecast with dimensions lead_time, issue_time, station."
"\n - 'grid_v1': custom reformat function for grid forecasts with dims time, step, latitude, longitude."
)
parser.add_argument(
'--forecast-file-types',
type=str,
nargs='+',
default=[],
help="List of file types for each forecast path. Options: 'nc', 'zarr'. If not provided, or not enough "
"entries, will assume zarr store if forecast is a directory, and otherwise will use xarray's "
"`open_dataset` method."
)
parser.add_argument(
"--obs-path",
type=str,
required=True,
help="Path to the verification folder or file"
)
parser.add_argument(
"--obs-file-type",
type=str,
default=None,
help="Type of the observation file. Options: 'nc', 'zarr'. If not provided, will assume zarr store if this is "
"a directory, and otherwise will use xarray's `open_dataset` method."
)
parser.add_argument(
"--obs-start-month",
type=str,
default=None,
help="Option to read multiple netCDF files as a single dataset. These files are named 'YYYYMM.nc'. Provide "
"the start month in the format 'YYYY-MM'. Not needed if obs-path is a single nc/zarr store."
)
parser.add_argument(
"--obs-end-month",
type=str,
default=None,
help="Option to read multiple netCDF files as a single dataset. These files are named 'YYYYMM.nc'. Provide "
"the end month in the format 'YYYY-MM'. Not needed if obs-path is a single nc/zarr store."
)
parser.add_argument(
"--obs-var-name",
type=str,
help="Name of the variable of interest in the observation data.",
required=True
)
parser.add_argument(
"--station-metadata-path",
type=str,
help="Path to the station list containing metadata. Must include columns 'station', 'lat', 'lon'. "
"If not provided, assumes the station lat/lon are coordinates in the observation file.",
required=False
)
parser.add_argument(
"--config-path",
type=str,
help="Path to custom config yml file containing metric settings. Defaults to `metric_config.yml` in this "
"script directory.",
default=None
)
parser.add_argument(
"--variable-type",
type=str,
help="The type of the variable, as used in `--config-path` to select the appropriate metric settings. For "
"example, 'temperature' or 'wind'.",
required=True
)
parser.add_argument(
"--output-directory",
type=str,
help="Output directory for all evaluation artifacts",
required=True
)
parser.add_argument(
"--eval-region-files",
type=str,
default=None,
nargs='+',
help="A list of files containing station lists for evaluation in certain regions"
)
parser.add_argument(
"--start-date",
type=pd.Timestamp,
default=None,
help="First forecast issue time (as Timestamp) to include in evaluation"
)
parser.add_argument(
"--end-date",
type=pd.Timestamp,
default=None,
help="Last forecast issue time (as Timestamp) to include in evaluation"
)
parser.add_argument(
"--issue-time-freq",
type=str,
default=None,
help="Frequency of issue times (e.g., '1D') to include in evaluation. Default is None (all issue times)"
)
parser.add_argument(
"--start-lead",
type=int,
default=None,
help="First lead time (in hours) to include in evaluation"
)
parser.add_argument(
"--end-lead",
type=int,
default=None,
help="Last lead time (in hours) to include in evaluation"
)
parser.add_argument(
"--group-dim",
type=str,
default="lead_time",
help="Group dimension for metric computation, options: lead_time, issue_time, valid_time"
)
parser.add_argument(
"--precip-proba-threshold-conversion",
type=float,
default=None,
help="Convert observation and forecast fields from precipitation rate to probability of precipitation. Provide"
" a threshold in mm/hr to use as positive precipitation class. Use only for evaluating precipitation!"
)
parser.add_argument(
"--convert-fcst-temperature-k-to-c",
action='store_true',
help="Convert forecast field from Kelvin to Celsius. Use only for evaluating temperature!"
)
parser.add_argument(
"--convert-fcst-pressure-pa-to-hpa",
action='store_true',
help="Convert forecast field from Pa to hPa. Use only for evaluating pressure!"
)
parser.add_argument(
"--convert-fcst-cloud-to-okta",
action='store_true',
help="Convert forecast field from cloud fraction to okta. Use only for evaluating cloud!"
)
parser.add_argument(
"--cache-forecast",
action='store_true',
help="If true, cache the intermediate interpolated forecast data in the output directory."
)
parser.add_argument(
"--align-forecasts",
action='store_true',
help="If set, load all forecasts first and then align them based on the intersection of issue/lead times. "
"Note this uses substantially more memory to store all data at once."
)
parser.add_argument(
'--verbose',
type=int,
default=1,
help="Verbosity level for logging. Options are 0 (WARNING), 1 (INFO), 2 (DEBUG), 3 (NOTSET). Default is 1."
)
run_args = parser.parse_args()
configure_logging(run_args.verbose)
main(run_args)