Skip to content

Commit

Permalink
Merge pull request #205 from simpeg/fix_issue_204
Browse files Browse the repository at this point in the history
Issue #204 not complete but added one example of unittest
  • Loading branch information
kkappler authored Aug 8, 2022
2 parents 0efb86a + ce6c5e2 commit 6640986
Show file tree
Hide file tree
Showing 10 changed files with 213 additions and 229 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest"]
python-version: [3.8, 3.7, 3.6]
python-version: [3.8,]# 3.7, 3.6]

steps:
- uses: actions/checkout@v2
Expand Down
16 changes: 3 additions & 13 deletions aurora/config/metadata/channel_nomenclature.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,14 +38,6 @@
"ey": "e4",
}

THE_BEATLES = {
"hx": "john",
"hy": "paul",
"hz": "george",
"ex": "ringo",
"ey": "the fifth beatle",
}


# =============================================================================
class ChannelNomenclature(Base):
Expand Down Expand Up @@ -104,16 +96,14 @@ def keyword(self, keyword):
self._update_by_keyword(keyword)

def get_channel_map(self, keyword):
if keyword == "LEMI12":
if keyword == "default":
channel_map = DEFAULT_CHANNEL_MAP
elif keyword == "LEMI12":
channel_map = LEMI_CHANNEL_MAP_12
elif keyword == "LEMI34":
channel_map = LEMI_CHANNEL_MAP_34
elif keyword == "NIMS":
channel_map = DEFAULT_CHANNEL_MAP
elif keyword == "beatles":
channel_map = THE_BEATLES
elif keyword == "default":
channel_map = DEFAULT_CHANNEL_MAP
else:
print(f"whoops mt_system {keyword} unknown")
raise NotImplementedError
Expand Down
11 changes: 9 additions & 2 deletions aurora/pipelines/time_series_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,7 @@ def apply_prewhitening(decimation_obj, run_xrts_input):
run_xrts = run_xrts_input.differentiate("time")
else:
print(f"{decimation_obj.prewhitening_type} prehitening not yet implemented")
print("returning original time series")
run_xrts = run_xrts_input
raise NotImplementedError
return run_xrts


Expand Down Expand Up @@ -84,6 +83,14 @@ def apply_recoloring(decimation_obj, stft_obj):
if prewhitening_correction[0] == 0.0:
cond = stft_obj.frequency != 0.0
stft_obj = stft_obj.where(cond, complex(0.0))
# elif decimation_obj.prewhitening_type == "ARMA":
# from statsmodels.tsa.arima.model import ARIMA
# AR = 3 # add this to processing config
# MA = 4 # add this to processing config

else:
print(f"{decimation_obj.prewhitening_type} recoloring not yet implemented")
raise NotImplementedError

return stft_obj

Expand Down
7 changes: 2 additions & 5 deletions aurora/time_series/decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import xarray as xr


# <PATTERN>
# Here is the decorator pattern
def decorator(func):
@functools.wraps(func)
def wrapper_decorator(*args, **kwargs):
Expand All @@ -14,9 +14,6 @@ def wrapper_decorator(*args, **kwargs):
return wrapper_decorator


# </PATTERN>


def can_use_xr_dataarray(func):
"""
Intended as a decorator. Most of the windowed time series methods are
Expand Down Expand Up @@ -50,7 +47,7 @@ def wrapper_decorator(*args, **kwargs):
processed_obj = func(*args, **kwargs)

if input_was_dataarray:
processed_obj = processed_obj.to_dataarray()
processed_obj = processed_obj.to_array("channel")
return processed_obj

return wrapper_decorator
36 changes: 6 additions & 30 deletions aurora/time_series/windowed_time_series.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,30 +4,6 @@
from aurora.time_series.decorators import can_use_xr_dataarray


def schur_product_windowed_data(ensemblized_data, taper):
"""
The axes are set up so that each window is tapered
In particular, each "window" is a row of windowed_array. Thus taper
operates by multiplying, point-by-point (Schur) each row or windowed_array.
TODO: either take an argument for which axis the taper applies along or
make the calling function confirm that each row is a window and each
column is a window-advance-delta-t
Parameters
----------
data
Returns
-------
"""
tapered_windowed_data = ensemblized_data * taper # seems to do sparse diag mult
# time trial it against a few other methods
return tapered_windowed_data


def validate_coordinate_ordering_time_domain(dataset):
"""
Check that the data dimensions are what you expect. THis may evolve some
Expand Down Expand Up @@ -117,12 +93,12 @@ def apply_taper(data=None, taper=None, in_place=True):
@staticmethod
def detrend(data=None, detrend_axis=None, detrend_type=None, inplace=True):
"""
TODO: overwrite data=True probably best for most applications but
be careful with that. Do we want to avoid this in general?
could we be possibly overwriting stuff on MTH5 in future?
Also, is overwrite even working how I think it is here?
TODO: overwrite_data not working right in scipy.signal, dont use it
for now
Notes: overwrite data=True probably best for most applications but be careful
with that. Do we want to avoid this in general? Could we be possibly
overwriting stuff on MTH5 in future?
Also, is overwrite even working how I think it is here?
Overwrite_data not working right in scipy.signal, don't use it for now
Parameters
----------
data : xarray Dataset
Expand Down
42 changes: 18 additions & 24 deletions aurora/time_series/windowing_scheme.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@
"""
The windowing scheme defines the chunking and chopping of the time series for
the Short Time Fourier Transform. Often referred to as a "sliding window" or
a "striding window". It is basically a taper with a rule to say how far to
advance at each stride (or step).
a "striding window". In its most basic form it is a taper with a rule to
say how far to advance at each stride (or step).
To generate an array of data-windows from a data series we only need the
two parameters window_length (L) and window_overlap (V). The parameter
Expand All @@ -13,26 +13,22 @@
advance.
Choices L and V are usually made with some knowledge of time series sample
rate, duration, and the frequency band of interest. We can create a
module that "suggests" L, V, based on these metadata to make the default
processing configuration parameters.
Note: In general we will need one instance of this class per decimation level,
but in the current implementation we will probably leave the windowing scheme
the same for each decimation level.
This class is a key part of the "gateway" to frequency domain, so what
frequency domain considerations do we want to think about here.. certainly
the window length and the sampling rate define the frequency resolution, and as
such should be considered in context of the "band averaging scheme"
Indeed the frequencies come from this class if it has a sampling rate. While
sampling rate is a property of the data, and not the windowing scheme per se,
it is good for this class to be aware of the sampling rate. ... or should we
push the frequency stuffs to a combination of TS plus WindowingScheme?
The latter feels more appropriate.
<20210510>
rate, duration, and the frequency band of interest. In aurora because this is used
to prep for STFT, L is typically a power of 2.
In general we will need one instance of this class per decimation level,
but in practice we often leave the windowing scheme the same for each decimation level.
This class is a key part of the "gateway" to frequency domain, so it has been given
a sampling_rate attribute. While sampling rate is a property of the data, and not
the windowing scheme per se, it is good for this class to be aware of the sampling
rate.
Future modifications could involve:
- binding this class with a time series.
- Making a subclass with only L, V, and then having an extension with sample_rate
When 2D arrays are generated how should we index them?
[[ 0 1 2]
[ 2 3 4]
Expand All @@ -51,8 +47,6 @@
[0, dt, 2*dt] for that axis to keep it general. We can call this the
"within-window sample time axis"
</20210510>
TODO: Regarding the optional time_vector input to self.apply_sliding_window()
... this current implementation takes as input numpy array data. We need to
Expand Down
2 changes: 1 addition & 1 deletion docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": [
"../examples",
"../tutorials",
],
"gallery_dirs": [
"examples",
Expand Down
Loading

0 comments on commit 6640986

Please sign in to comment.