Merge pull request #197 from sot/shiny-tests
Shiny tests
taldcroft authored Jun 24, 2020
2 parents 8fcecdf + e23db29 commit 58c6b19
Showing 4 changed files with 44 additions and 31 deletions.
15 changes: 9 additions & 6 deletions Ska/engarchive/tests/test_comps.py
@@ -121,8 +121,11 @@ def test_mups_valve():
colnames = ['vals', 'times', 'bads', 'vals_raw',
'vals_nan', 'vals_corr', 'vals_model', 'source']

-    # Use the 3.30 release always for testing.
-    dat = fetch_eng.MSID('PM2THV1T_clean_3.30', '2020:001', '2020:010')
+    # Use the chandra_models e1a900cc commit for testing. This is a commit of
+    # chandra_models that has the epoch dates changed to fully-qualified values
+    # like 2017:123:12:00:00 (instead of 2017:123). This allows these regression
+    # tests to pass with Chandra.Time 3.x or 4.0+.
+    dat = fetch_eng.MSID('PM2THV1T_clean_e1a900cc', '2020:001:12:00:00', '2020:010:12:00:00')
assert dat.unit == 'DEGF'
assert len(dat.vals) == 36661
ok = dat.source != 0
@@ -133,7 +136,7 @@ def test_mups_valve():
for attr in colnames:
assert len(dat.vals) == len(getattr(dat, attr))

-    dat = fetch_sci.Msid('PM2THV1T_clean_3.30', '2020:001', '2020:010')
+    dat = fetch_sci.Msid('PM2THV1T_clean_e1a900cc', '2020:001:12:00:00', '2020:010:12:00:00')
assert dat.unit == 'DEGC'
ok = dat.source != 0
# Temps are reasonable for degC
@@ -144,7 +147,7 @@ def test_mups_valve():
if attr != 'bads':
assert len(dat.vals) == len(getattr(dat, attr))

-    dat = fetch_cxc.MSID('PM1THV2T_clean_3.30', '2020:001', '2020:010')
+    dat = fetch_cxc.MSID('PM1THV2T_clean_e1a900cc', '2020:001:12:00:00', '2020:010:12:00:00')
ok = dat.source != 0
# Temps are reasonable for K
assert np.all((dat.vals[ok] > 280) & (dat.vals[ok] < 380))
@@ -154,7 +157,7 @@ def test_mups_valve():
assert len(dat.vals) == len(getattr(dat, attr))

# Check using default master branch
-    dat = fetch_eng.Msid('pm1thv2t_clean', '2020:001', '2020:010')
+    dat = fetch_eng.Msid('pm1thv2t_clean', '2020:001:12:00:00', '2020:010:12:00:00')
assert len(dat.vals) == 36240 # Some bad values
assert len(dat.source) == 36240 # Filtering applies to sources
assert dat.colnames == colnames
@@ -183,7 +186,7 @@ def test_cmd_states():

@pytest.mark.parametrize('stat', ['5min', 'daily'])
def test_stats(stat):
-    start, stop = '2020:001', '2020:010'
+    start, stop = '2020:001:12:00:00', '2020:010:12:00:00'

dat = fetch_eng.Msid('pitch', start, stop, stat=stat)
datc = fetch_eng.Msid('passthru_pitch', start, stop, stat=stat)
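The fully-qualified timestamps above sidestep a change in date parsing between Chandra.Time versions. A minimal sketch of the ambiguity, assuming (per the comment in the hunk above, not documented API) that 3.x and 4.0+ resolve a bare date to different times of day:

    # Sketch only; the version behaviors are an assumption based on the
    # diff comment ("allows these regression tests to pass with
    # Chandra.Time 3.x or 4.0+"): 3.x reads a bare date like '2020:001'
    # as noon, 4.0+ as the start of the day, so date-only fetches would
    # cover different spans under the two versions.
    from Chandra.Time import DateTime

    t_explicit = DateTime('2020:001:12:00:00')  # same meaning on any version
    t_bare = DateTime('2020:001')               # time of day depends on version
    print(t_explicit.date, t_bare.date)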
49 changes: 26 additions & 23 deletions Ska/engarchive/tests/test_fetch.py
@@ -40,7 +40,7 @@ def test_filter_bad_times_overlap():
OK to supply overlapping bad times
"""
msid_bad_times_cache = deepcopy(fetch.msid_bad_times)
-    dat = fetch.MSID('aogbias1', '2008:290', '2008:300', stat='daily')
+    dat = fetch.MSID('aogbias1', '2008:290:12:00:00', '2008:300:12:00:00', stat='daily')
fetch.read_bad_times(['aogbias1 2008:292:00:00:00 2008:297:00:00:00'])
fetch.read_bad_times(['aogbias1 2008:292:00:00:00 2008:297:00:00:00'])
dat.filter_bad_times()
@@ -54,7 +54,7 @@ def test_filter_bad_times_overlap():


def test_filter_bad_times_list():
-    dat = fetch.MSID('aogyrct1', '2008:291', '2008:298')
+    dat = fetch.MSID('aogyrct1', '2008:291:12:00:00', '2008:298:12:00:00')
# 2nd test of repr here where we have an MSID object handy
assert repr(dat) == ('<MSID start=2008:291:12:00:00.000 '
'stop=2008:298:12:00:00.000 len=2360195 dtype=int16>')
@@ -63,61 +63,61 @@ def test_filter_bad_times_list():
dates = DateTime(dat.times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)

-    dat = fetch.Msid('aogyrct1', '2008:291', '2008:298')
+    dat = fetch.Msid('aogyrct1', '2008:291:12:00:00', '2008:298:12:00:00')
dat.filter_bad_times(table=BAD_TIMES)
dates = DateTime(dat.times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)


def test_msidset_filter_bad_times_list():
-    dat = fetch.MSIDset(['aogyrct1'], '2008:291', '2008:298')
+    dat = fetch.MSIDset(['aogyrct1'], '2008:291:12:00:00', '2008:298:12:00:00')
dat.filter_bad_times(table=BAD_TIMES)
dates = DateTime(dat['aogyrct1'].times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)

-    dat = fetch.Msidset(['aogyrct1'], '2008:291', '2008:298')
+    dat = fetch.Msidset(['aogyrct1'], '2008:291:12:00:00', '2008:298:12:00:00')
dat.filter_bad_times(table=BAD_TIMES)
dates = DateTime(dat['aogyrct1'].times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)


def test_filter_bad_times_default():
"""Test bad times that come from msid_bad_times.dat"""
-    dat = fetch.MSID('aogbias1', '2008:291', '2008:298')
+    dat = fetch.MSID('aogbias1', '2008:291:12:00:00', '2008:298:12:00:00')
dat.filter_bad_times()
dates = DateTime(dat.times[42140:42150]).date
assert np.all(dates == DATES_EXPECT2)


def test_filter_bad_times_list_copy():
-    dat = fetch.MSID('aogyrct1', '2008:291', '2008:298')
+    dat = fetch.MSID('aogyrct1', '2008:291:12:00:00', '2008:298:12:00:00')
dat2 = dat.filter_bad_times(table=BAD_TIMES, copy=True)
dates = DateTime(dat2.times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)
assert len(dat.vals) != len(dat2.vals)

-    dat = fetch.Msid('aogyrct1', '2008:291', '2008:298')
+    dat = fetch.Msid('aogyrct1', '2008:291:12:00:00', '2008:298:12:00:00')
dat2 = dat.filter_bad_times(table=BAD_TIMES, copy=True)
dates = DateTime(dat2.times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)
assert len(dat.vals) != len(dat2.vals)


def test_msidset_filter_bad_times_list_copy():
-    dat = fetch.MSIDset(['aogyrct1'], '2008:291', '2008:298')
+    dat = fetch.MSIDset(['aogyrct1'], '2008:291:12:00:00', '2008:298:12:00:00')
dat2 = dat.filter_bad_times(table=BAD_TIMES, copy=True)
dates = DateTime(dat2['aogyrct1'].times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)

-    dat = fetch.Msidset(['aogyrct1'], '2008:291', '2008:298')
+    dat = fetch.Msidset(['aogyrct1'], '2008:291:12:00:00', '2008:298:12:00:00')
dat2 = dat.filter_bad_times(table=BAD_TIMES, copy=True)
dates = DateTime(dat2['aogyrct1'].times[168581:168588]).date
assert np.all(dates == DATES_EXPECT1)


def test_filter_bad_times_default_copy():
"""Test bad times that come from msid_bad_times.dat"""
-    dat = fetch.MSID('aogbias1', '2008:291', '2008:298')
+    dat = fetch.MSID('aogbias1', '2008:291:12:00:00', '2008:298:12:00:00')
dat2 = dat.filter_bad_times(copy=True)
dates = DateTime(dat2.times[42140:42150]).date
assert np.all(dates == DATES_EXPECT2)
@@ -239,7 +239,7 @@ def test_interpolate_time_precision():
"""
Check that floating point error is < 0.01 msec over 100 days
"""
-    dat = fetch.Msid('tephin', '2010:001', '2010:100')
+    dat = fetch.Msid('tephin', '2010:001:12:00:00', '2010:100:12:00:00')
dt = 60.06
times = dat.tstart + np.arange((dat.tstop - dat.tstart) // dt + 3) * dt

@@ -248,7 +248,7 @@ def test_interpolate_time_precision():
dt_frac = dt * 100 - round(dt * 100)
assert abs(dt_frac) > 0.001

-    dat = fetch.Msid('tephin', '2010:001', '2010:100')
+    dat = fetch.Msid('tephin', '2010:001:12:00:00', '2010:100:12:00:00')
dat.interpolate(times=times)
dt = dat.times[-1] - dat.times[0]
dt_frac = dt * 100 - round(dt * 100)
@@ -265,7 +265,7 @@ def _assert_msid_equal(msid1, msid2):

def test_msid_copy():
for MsidClass in (fetch.Msid, fetch.MSID):
-        msid1 = MsidClass('aogbias1', '2008:291', '2008:298')
+        msid1 = MsidClass('aogbias1', '2008:291:12:00:00', '2008:298:12:00:00')
msid2 = msid1.copy()
_assert_msid_equal(msid1, msid2)

@@ -276,7 +276,7 @@ def test_msid_copy():

def test_msidset_copy():
for MsidsetClass in (fetch.MSIDset, fetch.Msidset):
-        msidset1 = MsidsetClass(['aogbias1', 'aogbias2'], '2008:291', '2008:298')
+        msidset1 = MsidsetClass(['aogbias1', 'aogbias2'], '2008:291:12:00:00', '2008:298:12:00:00')
msidset2 = msidset1.copy()

for attr in ('tstart', 'tstop', 'datestart', 'datestop'):
@@ -359,18 +359,21 @@ def test_intervals_fetch_unit():
"""
Test that fetches with multiple intervals get the units right
"""
-    dat = fetch_eng.Msid('tephin', [('1999:350', '1999:355'), ('2000:010', '2000:015')])
+    dat = fetch_eng.Msid('tephin', [('1999:350:12:00:00', '1999:355:12:00:00'),
+                                    ('2000:010:12:00:00', '2000:015:12:00:00')])
assert np.allclose(np.mean(dat.vals), 41.713467)

-    dat = fetch_eng.Msid('tephin', [('1999:350', '1999:355'), ('2000:010', '2000:015')],
+    dat = fetch_eng.Msid('tephin', [('1999:350:12:00:00', '1999:355:12:00:00'),
+                                    ('2000:010:12:00:00', '2000:015:12:00:00')],
stat='5min')
assert np.allclose(np.mean(dat.vals), 40.290966)

-    dat = fetch_eng.Msid('tephin', [('1999:350', '1999:355'), ('2000:010', '2000:015')],
+    dat = fetch_eng.Msid('tephin', [('1999:350:12:00:00', '1999:355:12:00:00'),
+                                    ('2000:010:12:00:00', '2000:015:12:00:00')],
stat='daily')
assert np.allclose(np.mean(dat.vals), 40.303955)

-    dat = fetch_eng.Msid('tephin', '1999:350', '2000:010')
+    dat = fetch_eng.Msid('tephin', '1999:350:12:00:00', '2000:010:12:00:00')
assert np.allclose(np.mean(dat.vals), 41.646729)


@@ -379,23 +382,23 @@ def test_ctu_dwell_telem():
Ensure that bad values are filtered appropriately for dwell mode telem.
This
"""
-    dat = fetch_eng.Msid('dwell01', '2015:294', '2015:295')
+    dat = fetch_eng.Msid('dwell01', '2015:294:12:00:00', '2015:295:12:00:00')
assert np.all(dat.vals < 190)
assert np.all(dat.vals > 150)

-    dat = fetch_eng.Msid('airu1bt', '2015:294', '2015:295')
+    dat = fetch_eng.Msid('airu1bt', '2015:294:12:00:00', '2015:295:12:00:00')
assert np.all(dat.vals < -4.95)
assert np.all(dat.vals > -5.05)


def test_nonexistent_msids():
with pytest.raises(ValueError) as err:
-        fetch.Msid('asdfasdfasdfasdf', '2015:001', '2015:002')
+        fetch.Msid('asdfasdfasdfasdf', '2015:001:12:00:00', '2015:002:12:00:00')
assert "MSID 'asdfasdfasdfasdf' is not" in str(err.value)


def test_daily_state_bins():
-    dat = fetch.Msid('aoacaseq', '2016:232', '2016:235', stat='daily')
+    dat = fetch.Msid('aoacaseq', '2016:232:12:00:00', '2016:235:12:00:00', stat='daily')
for attr, val in (('n_BRITs', [0, 136, 0]),
('n_KALMs', [83994, 83812, 83996]),
('n_AQXNs', [159, 240, 113]),
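A note on the two spellings exercised throughout this file: fetch.MSID returns all samples plus a bads mask, while fetch.Msid filters bad samples at fetch time (likewise MSIDset vs. Msidset). A minimal sketch of the relationship, where the final assertion is an assumed invariant rather than something this PR tests:

    # Sketch: MSID keeps every sample and exposes .bads; Msid drops the
    # flagged samples up front.
    from Ska.engarchive import fetch

    raw = fetch.MSID('aogbias1', '2008:291:12:00:00', '2008:298:12:00:00')
    clean = fetch.Msid('aogbias1', '2008:291:12:00:00', '2008:298:12:00:00')

    # Assumed relation: the filtered fetch keeps only the unflagged samples.
    assert len(clean.vals) == len(raw.vals) - raw.bads.sum()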
5 changes: 3 additions & 2 deletions Ska/engarchive/tests/test_intervals.py
@@ -120,7 +120,7 @@ def test_remove_subclassed_eventquery_interval():

@pytest.mark.skipif("not HAS_EVENTS")
def test_remove_intervals_stat():
-    start, stop = '2012:002', '2012:003'
+    start, stop = '2012:002:12:00:00', '2012:003:12:00:00'
for stat in (None, '5min'):
intervals = kadi.events.dwells.intervals(start, stop)
for filt in (kadi.events.dwells, intervals):
@@ -175,7 +175,8 @@ def test_util_logical_intervals():
"""
Test utils.logical_intervals()
"""
-    dat = fetch.Msidset(['3tscmove', 'aorwbias', 'coradmen'], '2012:190', '2012:205')
+    dat = fetch.Msidset(['3tscmove', 'aorwbias', 'coradmen'],
+                        '2012:190:12:00:00', '2012:205:12:00:00')
dat.interpolate(32.8) # Sample MSIDs onto 32.8 second intervals (like 3TSCMOVE)
scs107 = ((dat['3tscmove'].vals == 'T')
& (dat['aorwbias'].vals == 'DISA')
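For orientation, the call that test_util_logical_intervals builds toward falls below the hunk shown. A hedged sketch of the idea, assuming the utils.logical_intervals(times, bool_array) signature and output columns implied by the docstring:

    # logical_intervals turns a boolean time series into a table of
    # contiguous True intervals; the signature and column names here are
    # assumptions, check Ska.engarchive.utils for the actual API.
    from Ska.engarchive import utils

    # dat and scs107 as built in the hunk above.
    intervals = utils.logical_intervals(dat['3tscmove'].times, scs107)
    print(intervals['datestart'][0], intervals['datestop'][0])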
6 changes: 6 additions & 0 deletions pytest.ini
@@ -0,0 +1,6 @@
+[pytest]
+filterwarnings =
+    # See https://github.com/numpy/numpy/issues/11788 for why this is benign
+    ignore:numpy.ufunc size changed:RuntimeWarning
+    ignore:the imp module is deprecated in favour of importlib:DeprecationWarning
+    ignore:parse functions are required to provide a named argument:PendingDeprecationWarning
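Each filterwarnings entry uses Python's standard warnings-filter syntax, action:message:category:module:lineno, where the message part matches against the start of the warning text. The first entry is equivalent to this plain-Python sketch:

    # Plain-Python equivalent of the first pytest.ini entry: silence
    # RuntimeWarnings whose message starts with "numpy.ufunc size changed".
    import warnings

    warnings.filterwarnings(
        'ignore',
        message='numpy.ufunc size changed',
        category=RuntimeWarning,
    )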
