Tests for the diagnostic system, run with:

    $ py.test

Or if you don't have py.test installed:

    $ python tests/runtests.py

Also use --help, paying particular attention to the 'custom options' section.

See CommerceGov/NOAA-GFDL-MOM6/mom-ocean#147.
Showing 8 changed files with 4,024 additions and 0 deletions.
@@ -0,0 +1,18 @@

# How to run Python tests

This directory contains model tests written in Python. You can run these tests with:
```
$ py.test
```
Or a subset of the tests with:
```
$ py.test <test_file.py>
```
Also see:
```
$ py.test --help
```
Pay particular attention to the 'custom options' section.

If you don't have py.test installed on your machine, then you can do all of the above by replacing `py.test` with `python runtest.py`.
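The 'custom options' referred to above are defined by the conftest.py introduced in this commit (shown below). As a usage sketch only, reusing the example experiment paths from that file's help text, a restricted run and a full run would look like:
```
$ py.test --exps=ice_ocean_SIS2/Baltic/,ocean_only/benchmark
$ py.test --full
```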
@@ -0,0 +1,79 @@

import os

import pytest
from dump_all_diagnostics import dump_diags
from experiment import experiment_dict, exp_id_from_path

def pytest_addoption(parser):
    parser.addoption('--exps', default=None,
                     help="""comma-separated list (no spaces) of experiments to
                     pass to the test functions. You must use the '=' sign,
                     otherwise py.test gets confused, e.g.:
                     $ py.test --exps=ice_ocean_SIS2/Baltic/,ocean_only/benchmark""")
    parser.addoption('--full', action='store_true', default=False,
                     help="""Run on all experiments/test cases. By default
                     tests are run on a 'fast' subset of experiments.
                     Note that this overrides the --exps option.""")

def pytest_generate_tests(metafunc):
    """
    Parameterize tests. Presently handles those that have 'exp' as an argument.
    """
    if 'exp' in metafunc.fixturenames:
        if metafunc.config.option.full:
            # Run tests on all experiments.
            exps = experiment_dict.values()
        elif metafunc.config.option.exps is not None:
            # Only run on the given experiments.
            exps = []
            for p in metafunc.config.option.exps.split(','):
                assert(os.path.exists(p))
                id = exp_id_from_path(os.path.abspath(p))
                exps.append(experiment_dict[id])
        else:
            # Default is to run on a fast subset of the experiments.
            exps = [experiment_dict['ice_ocean_SIS2/Baltic']]

        metafunc.parametrize('exp', exps, indirect=True)

@pytest.fixture
def exp(request):
    """
    Called before each test; runs the experiment and dumps all of its
    diagnostic data.
    """
    exp = request.param

    # Run the experiment to pick up the latest code changes. This does nothing
    # if the experiment has already been run.
    exp.run()
    # Dump all available diagnostics, if they haven't been already.
    if not exp.has_dumped_diags:
        # Before dumping, delete old output files if they exist.
        diags = exp.get_available_diags()
        for d in diags:
            try:
                os.remove(d.output)
            except OSError:
                pass

        dump_diags(exp, diags)
        exp.has_dumped_diags = True
    return exp

def restore_after_test():
    """
    Restore experiment state after running a test.
    - The diag_table file needs to be switched back (?)
    """
    pass

@pytest.fixture(scope='module')
def prepare_to_test():
    """
    Called once for a test module.
    - Make a backup of the diag_table, to be restored later. (?)
    """
    pass
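To illustrate how the exp fixture above is consumed, here is a minimal hypothetical test module; the file name and the property it checks are illustrative only and not part of this commit. pytest_generate_tests parameterizes any test function that takes an exp argument:
```
# test_diag_output.py -- hypothetical sketch, not part of this commit.
import os

def test_diag_files_written(exp):
    """After the exp fixture runs the model and dumps diagnostics, every
    available diagnostic should have produced its output file."""
    for diag in exp.get_available_diags():
        assert os.path.exists(diag.output)
```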
@@ -0,0 +1,54 @@

#!/usr/bin/env python

from __future__ import print_function

import sys, os
import argparse
from experiment import Experiment, exp_id_from_path

"""
This script is used to run an experiment/test case and dump all available
diagnostics. This can be useful for testing or to get a feel for the range of
model outputs.
"""

def dump_diags(exp, diags):
    """
    Run the model, dumping the given diagnostics into individual files.
    """

    # Create a new diag_table that puts diagnostics into individual files. This
    # is a trick to get the highest-frequency output for each diagnostic.
    #
    # By default, if only a single file is used with '0' given as the
    # frequency, the diag manager will choose the minimum frequency and dump
    # all diagnostics with that. This would leave the slower diagnostics
    # filled with missing values, which is not desirable.

    with open(os.path.join(exp.path, 'diag_table'), 'w') as f:
        print('All {} diags'.format(exp.name), file=f)
        print('1 1 1 0 0 0', file=f)
        for d in diags:
            print('"{}_{}", 0, "seconds", 1, "seconds",' \
                  '"time"'.format(d.model, d.name), file=f)
        for d in diags:
            m = d.model
            n = d.name
            print('"{}", "{}", "{}", "{}_{}", "all",' \
                  '.false., "none", 2'.format(m, n, n, m, n), file=f)
    return exp.force_run()

def main():

    description = "Run an experiment and dump all of its available diagnostics."
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('experiment_path',
                        help='path to the experiment to run.')

    args = parser.parse_args()
    # Convert the given path into the experiment id expected by Experiment.
    exp = Experiment(exp_id_from_path(os.path.abspath(args.experiment_path)))
    diags = exp.get_available_diags()
    return dump_diags(exp, diags)

if __name__ == '__main__':
    sys.exit(main())
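To make the one-file-per-diagnostic trick concrete, this is the diag_table the loops above would emit for a single diagnostic, taking a hypothetical ocean_model diagnostic named 'u' in an experiment named 'benchmark' (the names are illustrative only):
```
All benchmark diags
1 1 1 0 0 0
"ocean_model_u", 0, "seconds", 1, "seconds","time"
"ocean_model", "u", "u", "ocean_model_u", "all",.false., "none", 2
```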
@@ -0,0 +1,191 @@

from __future__ import print_function

import sys
import os
import re
import subprocess as sp

_file_dir = os.path.dirname(os.path.abspath(__file__))
_mom_examples_path = os.path.normpath(os.path.join(_file_dir, '../../'))

class Diagnostic:

    def __init__(self, model, name, path):
        self.model = model
        self.name = name
        self.full_name = '{}_{}'.format(model, name)
        self.output = os.path.join(path, '00010101.{}.nc'.format(self.full_name))

    def __eq__(self, other):
        return ((self.model, self.name, self.output) ==
                (other.model, other.name, other.output))

    def __hash__(self):
        return hash(self.model + self.name + self.output)


# Unfinished diagnostics are those which have been registered but not yet
# implemented, so post_data is never called for them. This list should be
# updated as the diags are completed.
_unfinished_diags = [('ocean_model', 'uml_restrat'),
                     ('ocean_model', 'vml_restrat'),
                     ('ocean_model', 'created_H'),
                     ('ocean_model', 'seaice_melt'),
                     ('ocean_model', 'fsitherm'),
                     ('ocean_model', 'total_seaice_melt'),
                     ('ocean_model', 'heat_restore'),
                     ('ocean_model', 'total_heat_restore'),
                     ('ice_model', 'Cor_ui'),
                     ('ice_model', 'Cor_vi'),
                     ('ice_model', 'OBI'),
                     ('ice_model', 'RDG_OPEN'),
                     ('ice_model', 'RDG_RATE'),
                     ('ice_model', 'RDG_VOSH'),
                     ('ice_model', 'STRAIN_ANGLE'),
                     ('ice_model', 'SW_DIF'),
                     ('ice_model', 'SW_DIR'),
                     ('ice_model', 'TA')]

def exp_id_from_path(path):
    """
    Return an experiment id string of the form <model>/<exp>/<variation> from a
    full path.
    """
    path = os.path.normpath(path)
    return path.replace(_mom_examples_path, '')[1:]


class Experiment:

    def __init__(self, id, platform='gnu'):
        """
        Python representation of an experiment/test case.
        The id is a string of the form <model>/<exp>/<variation>.
        """

        self.platform = platform
        id = id.split('/')
        self.model = id[0]
        self.name = id[1]
        if len(id) == 3:
            self.variation = id[2]
        else:
            self.variation = None

        self.path = os.path.join(_mom_examples_path, self.model, self.name)
        if self.variation is not None:
            self.path = os.path.join(self.path, self.variation)

        # Path to the executable; it may not exist yet.
        self.exec_path = os.path.join(_mom_examples_path,
            'build/{}/{}/repro/MOM6'.format(self.platform, self.model))
        # Lists of available and unfinished diagnostics.
        self.available_diags = self._parse_available_diags()
        self.unfinished_diags = [Diagnostic(m, d, self.path) \
                                 for m, d in _unfinished_diags]
        # The available diags are not quite what they seem: the unfinished
        # diags still need to be removed.
        self.available_diags = list(set(self.available_diags) - \
                                    set(self.unfinished_diags))

        # Whether this experiment has been run/built. We want to avoid
        # repeating this if possible.
        self.has_run = False
        # Another thing to avoid repeating.
        self.has_dumped_diags = False

    def _parse_available_diags(self):
        """
        Create a list of available diags for the experiment by parsing
        available_diags.000000 and SIS.available_diags.
        """
        mom_av_file = os.path.join(self.path, 'available_diags.000000')
        sis_av_file = os.path.join(self.path, 'SIS.available_diags')

        diags = []
        for fname in [mom_av_file, sis_av_file]:
            # If the available diags file doesn't exist then just skip it for now.
            if not os.path.exists(fname):
                continue
            with open(fname) as f:
                # Search for strings like: "ocean_model", "N2_u" [Unused].
                # Pull out the model name and variable name.
                matches = re.findall(r'^\"(\w+)\", \"(\w+)\".*$',
                                     f.read(), re.MULTILINE)
                diags.extend([Diagnostic(m, d, self.path) for m, d in matches])
        return diags

    def force_build(self):
        """
        Do a clean build of the configuration.
        """
        raise NotImplementedError

    def build(self):
        """
        Build the configuration for this experiment.
        """
        raise NotImplementedError

    def run(self):
        """
        Run the experiment if it hasn't already been run.
        """

        if not self.has_run:
            self.force_run()

    def force_run(self):
        """
        Run the experiment.
        """

        print('Experiment: running {}'.format(self.exec_path))
        assert(os.path.exists(self.exec_path))

        ret = 0
        saved_path = os.getcwd()

        os.chdir(self.path)
        try:
            output = sp.check_output([self.exec_path], stderr=sp.STDOUT)
            self.has_run = True
        except sp.CalledProcessError as e:
            ret = e.returncode
            print(e.output, file=sys.stderr)
        finally:
            os.chdir(saved_path)

        return ret

    def get_available_diags(self):
        """
        Return a list of the available diagnostics for this experiment.
        """
        return self.available_diags

    def get_unfinished_diags(self):
        """
        Return a list of the unfinished diagnostics for this experiment.
        """
        return self.unfinished_diags


def discover_experiments():
    """
    Return a dictionary of Experiment objects representing all the test cases.
    """

    # Walk the top-level MOM-examples tree looking for experiment directories,
    # i.e. those containing an input.nml.
    exps = {}
    for path, _, filenames in os.walk(_mom_examples_path):
        for fname in filenames:
            if fname == 'input.nml':
                id = exp_id_from_path(path)
                exps[id] = Experiment(id)
    return exps

# A dictionary of available experiments.
experiment_dict = discover_experiments()
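As a quick orientation to this module's API, the following hypothetical snippet (not part of the commit) shows how the pieces are meant to be used together; the experiment id is the 'fast' default referenced in conftest.py above:
```
# Hypothetical usage sketch of experiment.py -- not part of this commit.
from experiment import experiment_dict

# Importing experiment.py walks the examples tree once and builds experiment_dict.
exp = experiment_dict['ice_ocean_SIS2/Baltic']

# Runs the prebuilt MOM6 executable in exp.path (a no-op if already run).
exp.run()

# Each Diagnostic records the NetCDF file the diag manager writes it to.
for diag in exp.get_available_diags():
    print('{} -> {}'.format(diag.full_name, diag.output))
```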