Skip to content

Commit

Permalink
update lemi424 from issue107 branch
Browse files Browse the repository at this point in the history
Needed to update the pre-commit configuration as well; not sure why —
it may have been triggered by the change to .flake8.

[Issue(s): #105, #107]
  • Loading branch information
kkappler committed Aug 12, 2022
1 parent df611a3 commit b81ee20
Show file tree
Hide file tree
Showing 3 changed files with 64 additions and 28 deletions.
2 changes: 1 addition & 1 deletion .flake8
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
[flake8]
ignore = E203, E266, E501, W503, F403, F401
ignore = E203, E266, E501, W503, F403, F401, W605
max-line-length = 79
max-complexity = 18
select = B,C,E,F,W,T4,B9
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
repos:
- repo: https://github.com/ambv/black
rev: stable
rev: 22.6.0
hooks:
- id: black
language_version: python3.6
- repo: https://gitlab.com/pycqa/flake8
rev: 3.7.9
rev: 3.9.2
hooks:
- id: flake8
86 changes: 61 additions & 25 deletions mth5/io/lemi424.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"""
Created on Tue May 11 15:31:31 2021
:copyright:
:copyright:
Jared Peacock (jpeacock@usgs.gov)
:license: MIT
Expand All @@ -25,7 +25,7 @@ class LEMI424:
"""

def __init__(self, fn=None):
def __init__(self, fn=[]):
self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
self.fn = fn
self._has_data = False
Expand Down Expand Up @@ -61,16 +61,44 @@ def __init__(self, fn=None):
if self.fn:
self.read()

@property
def num_source_files(self):
    """Number of entries in the source file list ``self.fn``."""
    file_list = self.fn
    return len(file_list)

@property
def fn(self):
    """The list of LEMI424 file paths currently assigned (``self._fn``)."""
    current = self._fn
    return current

@property
def validate_fn(self):
    """
    Placeholder validation hook for the source file list.

    TODO: verify that the assigned filenames are time-sequential;
    currently this always reports success.
    """
    return True

@fn.setter
def fn(self, value):
if value is not None:
value = Path(value)
if not value.exists():
raise IOError(f"Could not find {value}")
def fn(self, value, sort=True):
    """
    Set the source file list.

    Parameters
    ----------
    value : str, pathlib.Path, list of these, or None
        File(s) to read.  A single value is wrapped in a one-element
        list; ``None`` clears the file list.
    sort : bool, default True
        Sort the resulting list so multi-file reads stay in order.
        (NOTE(review): as a property setter this extra argument is not
        reachable through ``obj.fn = value``; kept for compatibility.)

    Raises
    ------
    IOError
        If any of the given paths does not exist.
    """
    if isinstance(value, list):
        value = [Path(x) for x in value]
        for single in value:
            if not single.exists():
                raise IOError(f"Could not find {single}")
    elif value is not None:
        value = [Path(value)]
        if not value[0].exists():
            raise IOError(f"Could not find {value[0]}")
    # Bug fix: the original called value.sort() even when value was None,
    # which raised AttributeError; skip sorting when clearing the list.
    if value is not None and sort:
        value.sort()
    self._fn = value

@property
Expand All @@ -80,16 +108,16 @@ def start(self):
[
"-".join(
[
f"{self._df.year.min()}",
f"{self._df.month.min():02d}",
f"{self._df.day.min():02d}",
f"{self._df.iloc[0].year}",
f"{self._df.iloc[0].month:02d}",
f"{self._df.iloc[0].day:02d}",
]
),
":".join(
[
f"{self._df.hour.min():02d}",
f"{self._df.minute.min():02d}",
f"{self._df.second.min():02d}",
f"{self._df.iloc[0].hour:02d}",
f"{self._df.iloc[0].minute:02d}",
f"{self._df.iloc[0].second:02d}",
]
),
]
Expand All @@ -102,16 +130,16 @@ def end(self):
[
"-".join(
[
f"{self._df.year.max()}",
f"{self._df.month.max():02d}",
f"{self._df.day.max():02d}",
f"{self._df.iloc[-1].year}",
f"{self._df.iloc[-1].month:02d}",
f"{self._df.iloc[-1].day:02d}",
]
),
":".join(
[
f"{self._df.hour.max():02d}",
f"{self._df.minute.max():02d}",
f"{self._df.second.max():02d}",
f"{self._df.iloc[-1].hour:02d}",
f"{self._df.iloc[-1].minute:02d}",
f"{self._df.iloc[-1].second:02d}",
]
),
]
Expand Down Expand Up @@ -160,7 +188,7 @@ def run_metadata(self):
r.time_period.start = self.start
r.time_period.end = self.end

def read(self, fn=None):
    """
    Read one or more LEMI424 files into a single pandas DataFrame.

    Parameters
    ----------
    fn : str, pathlib.Path, or list of these, optional
        File(s) to read; if given (non-empty), replaces ``self.fn``.

    Raises
    ------
    IOError
        If any file in ``self.fn`` cannot be found.
    """
    # Bug fix: the previous default was a mutable ``fn=[]``; ``None`` is
    # the safe equivalent (both are falsy, so callers see no change).
    if fn:
        self.fn = fn

    exists = [x.exists() for x in self.fn]
    if not all(exists):
        msg = "Could not find file %s"
        for i_fn, cond in enumerate(exists):
            if not cond:
                self.logger.error(msg, self.fn[i_fn])
                raise IOError(msg % self.fn[i_fn])

    # Read each source file and concatenate them in order into one frame.
    # r"\s+" avoids the invalid-escape-sequence warning (flake8 W605).
    dfs = [
        pd.read_csv(single_fn, delimiter=r"\s+", names=self.column_names)
        for single_fn in self.fn
    ]
    self._df = pd.concat(dfs)
    self._has_data = True

def to_run_ts(self, fn=None, e_channels=["e1", "e2"]):
Expand Down

0 comments on commit b81ee20

Please sign in to comment.