Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev #301

Merged
merged 12 commits into from
Oct 24, 2023
2 changes: 1 addition & 1 deletion .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ jobs:
- name: "Upload coverage to Codecov"
uses: codecov/codecov-action@v1
with:
fail_ci_if_error: true
fail_ci_if_error: false

- name: Build Doc
if: ${{ (github.ref == 'refs/heads/main') && (matrix.python-version == '3.8')}}
Expand Down
1 change: 1 addition & 0 deletions aurora/config/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from aurora.config.emtf_band_setup import (
BANDS_DEFAULT_FILE,
BANDS_TEST_FAST_FILE,
BANDS_256_26_FILE,
BANDS_256_29_FILE,
)
2 changes: 1 addition & 1 deletion aurora/config/emtf_band_setup/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from aurora.general_helper_functions import BAND_SETUP_PATH
import pathlib

BANDS_DEFAULT_FILE = BAND_SETUP_PATH.joinpath("bs_test.cfg")
BANDS_TEST_FAST_FILE = BAND_SETUP_PATH.joinpath("bs_test_fast.cfg")
BANDS_256_26_FILE = BAND_SETUP_PATH.joinpath("bs_256_26.cfg")
BANDS_256_29_FILE = BAND_SETUP_PATH.joinpath("bs_256_29.cfg")
9 changes: 9 additions & 0 deletions aurora/config/emtf_band_setup/bs_test_fast.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
8
1 6 7
1 5 5
2 6 6
2 5 5
3 6 6
3 5 5
4 7 9
4 5 6
32 changes: 25 additions & 7 deletions aurora/pipelines/process_mth5.py
Original file line number Diff line number Diff line change
Expand Up @@ -299,13 +299,22 @@
tfk.make_processing_summary()
tfk.validate()
# See Note #1
tfk.initialize_mth5s(mode="a")
tfk.check_if_fc_levels_already_exist() # populate the "fc" column of dataset_df
print(f"fc_levels_already_exist = {tfk.dataset_df['fc']}")
print(
f"Processing config indicates {len(tfk.config.decimations)} "
f"decimation levels "
)
if config.decimations[0].save_fcs:
mth5_mode = "a"
else:
mth5_mode = "r"
tfk.initialize_mth5s(mode=mth5_mode)
try:
tfk.check_if_fc_levels_already_exist() # populate the "fc" column of dataset_df
print(f"fc_levels_already_exist = {tfk.dataset_df['fc']}")
print(
f"Processing config indicates {len(tfk.config.decimations)} "
f"decimation levels "
)
except:
msg = "WARNING -- Unable to execute check for FC Levels"
msg = f"{msg} Possibly FCs not present at all (file from old MTH5 version)?"
print(f"{msg}")

tf_dict = {}

Expand Down Expand Up @@ -333,6 +342,15 @@

run_xrds = row["run_dataarray"].to_dataset("channel")
run_obj = row.mth5_obj.from_reference(row.run_reference)

# Musgraves workaround for old MT data
try:
assert row.run_id == run_obj.metadata.id
except AssertionError:
print("WARNING Run ID in dataset_df does not match run_obj")
print("WARNING Forcing run metadata to match dataset_df")
run_obj.metadata.id = row.run_id

Check warning on line 352 in aurora/pipelines/process_mth5.py

View check run for this annotation

Codecov / codecov/patch

aurora/pipelines/process_mth5.py#L349-L352

Added lines #L349 - L352 were not covered by tests

stft_obj = make_stft_objects(
tfk.config, i_dec_level, run_obj, run_xrds, units, row.station_id
)
Expand Down
19 changes: 7 additions & 12 deletions aurora/pipelines/run_summary.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,19 +23,14 @@
import copy
import pandas as pd


from mt_metadata.transfer_functions.processing.aurora.channel_nomenclature import ALLOWED_INPUT_CHANNELS
from mt_metadata.transfer_functions.processing.aurora.channel_nomenclature import ALLOWED_OUTPUT_CHANNELS
import mth5
from mth5.utils.helpers import initialize_mth5


INPUT_CHANNELS = [
"hx",
"hy",
]
OUTPUT_CHANNELS = [
"ex",
"ey",
"hz",
]

RUN_SUMMARY_COLUMNS = [
"survey",
"station_id",
Expand Down Expand Up @@ -149,8 +144,8 @@ def drop_invalid_rows(self):

def channel_summary_to_run_summary(
ch_summary,
allowed_input_channels=INPUT_CHANNELS,
allowed_output_channels=OUTPUT_CHANNELS,
allowed_input_channels=ALLOWED_INPUT_CHANNELS,
allowed_output_channels=ALLOWED_OUTPUT_CHANNELS,
sortby=["station_id", "start"],
):
"""
Expand Down Expand Up @@ -321,7 +316,7 @@ def extract_run_summaries_from_mth5s(mth5_list, summary_type="run", deduplicate=
if isinstance(mth5_elt, mth5.mth5.MTH5):
mth5_obj = mth5_elt
else: # mth5_elt is a path or a string
mth5_obj = initialize_mth5(mth5_elt, mode="a")
mth5_obj = initialize_mth5(mth5_elt, mode="r")

df = extract_run_summary_from_mth5(mth5_obj, summary_type=summary_type)

Expand Down
2 changes: 1 addition & 1 deletion aurora/pipelines/transfer_function_kernel.py
Original file line number Diff line number Diff line change
Expand Up @@ -483,7 +483,7 @@ def export_tf_collection(self, tf_collection):

def make_decimation_dict_for_tf(tf_collection, processing_config):
"""
Decimation dict is used by mt_metadata's TF class when it is writng z-files.
Decimation dict is used by mt_metadata's TF class when it is writing z-files.
If no z-files will be written this is not needed

sample element of decimation_dict:
Expand Down
28 changes: 17 additions & 11 deletions aurora/test_utils/synthetic/make_mth5_from_asc.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,18 @@ def create_run_ts_from_synthetic_run(run, df, channel_nomenclature="default"):
Loop over stations and make ChannelTS objects.
Need to add a tag in the channels
so that when you call a run it will get all the filters with it.

Parameters
----------
config: dict
one-off data structure used to hold information mth5 needs to initialize
run: aurora.test_utils.synthetic.station_config.SyntheticRun
One-off data structure with information mth5 needs to initialize
Specifically sample_rate, filters,
df : pandas.DataFrame
time series data in columns labelled from ["ex", "ey", "hx", "hy", "hz"]
channel_nomenclature : string
Keyword corresponding to channel nomenclature mapping in CHANNEL_MAPS variable
from channel_nomenclature.py module in mt_metadata.
Supported values are ['default', 'lemi12', 'lemi34', 'phoenix123']

Returns
-------
Expand Down Expand Up @@ -109,19 +114,20 @@ def create_mth5_synthetic_file(
Where the mth5 will be stored. This is generated by the station_config,
but may change in this method based on add_nan_values or channel_nomenclature
plot: bool
If true plots get made
Set to false unless you want to look at a plot of the time series
add_nan_values: bool
If true, some np.nan are sprinkled into the time series. Intended to be used for tests.
file_version: string
channel_nomenclature: string
One of ["0.1.0", "0.2.0"], corresponding to the version of mth5 to create
channel_nomenclature : string
Keyword corresponding to channel nomenclature mapping in CHANNEL_MAPS variable
from channel_nomenclature.py module in mt_metadata.
Supported values are ['default', 'lemi12', 'lemi34', 'phoenix123']
force_make_mth5: bool
If set to true, the file will be made, even if it already exists.
If false, and file already exists, skip the make job.


Returns
-------
mth5_path: pathlib.Path
Expand Down Expand Up @@ -192,7 +198,7 @@ def create_mth5_synthetic_file(
raise NotImplementedError

m.close_mth5()
# Following lines used to visually confirm start/end times were
# Following lines used to visually confirm start/end times were packed
# m.open_mth5(mth5_path, mode="a")
# channel_summary_df = m.channel_summary.to_dataframe()
# print(channel_summary_df[["start", "end"]])
Expand Down
8 changes: 7 additions & 1 deletion aurora/transfer_function/kernel_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,13 @@

@property
def local_survey_metadata(self):
    """Return the survey metadata for the local station's survey.

    Looks up ``self.local_survey_id`` in ``self.survey_metadata`` (a dict-like
    mapping of survey id -> metadata).  As a fallback for files written by old
    MTH5 versions — where the survey may be keyed as ``"0"`` — a missing key
    triggers a warning and a retry with key ``"0"``.

    Returns
    -------
    survey metadata object stored under ``self.local_survey_id`` (or ``"0"``).

    Raises
    ------
    KeyError
        If neither ``self.local_survey_id`` nor ``"0"`` is present.
    """
    try:
        return self.survey_metadata[self.local_survey_id]
    except KeyError:
        # Bug fix: the original did `msg += f"{msg} ..."`, which duplicated
        # the first sentence in the printed warning.  Append once instead.
        msg = f"Unexpected key {self.local_survey_id} not found in survey_metadata"
        msg = f"{msg} WARNING -- Maybe old MTH5 -- trying to use key '0'"
        print(msg)
        return self.survey_metadata["0"]

Check warning on line 197 in aurora/transfer_function/kernel_dataset.py

View check run for this annotation

Codecov / codecov/patch

aurora/transfer_function/kernel_dataset.py#L193-L197

Added lines #L193 - L197 were not covered by tests

def _add_duration_column(self):
""" """
Expand Down
133 changes: 64 additions & 69 deletions aurora/transfer_function/plot/comparison_plots.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,43 +52,47 @@ def compare_two_z_files(
print(f"scale_factor1: {scale_factor1}")
fig, axs = plt.subplots(nrows=2, dpi=300, sharex=True) # figsize=(8, 6.),
markersize = kwargs.get("markersize", 3)
# Make LaTeX symbol strings
rho_phi_strings = {}
rho_phi_strings["rho"] = {}
rho_phi_strings["phi"] = {}
for xy_or_yx in ["xy", "yx"]:
rho_phi_strings["rho"][xy_or_yx] = f"$\\rho_{{{xy_or_yx}}}$"
rho_phi_strings["phi"][xy_or_yx] = f"$\phi_{{{xy_or_yx}}}$"

markers = {}
markers["xy"] = "^"
markers["yx"] = "o"
file1_colors = {}
file2_colors = {}
file1_colors["xy"] = "black"
file1_colors["yx"] = "black"
file2_colors["xy"] = "red"
file2_colors["yx"] = "blue"

rho_or_phi = "rho"
for xy_or_yx in ["xy", "yx"]:
plot_rho(
axs[0],
zfile1.periods,
zfile1.rho(xy_or_yx) * scale_factor1,
label=f"{label1} {rho_phi_strings[rho_or_phi][xy_or_yx]}",
markersize=markersize,
marker=markers[xy_or_yx],
color=file1_colors[xy_or_yx],
)
plot_rho(
axs[0],
zfile2.periods,
zfile2.rho(xy_or_yx) * scale_factor2,
label=f"{label2} {rho_phi_strings[rho_or_phi][xy_or_yx]}",
markersize=markersize,
marker=markers[xy_or_yx],
color=file2_colors[xy_or_yx],
)

plot_rho(
axs[0],
zfile1.periods,
zfile1.rxy * scale_factor1,
label=f"{label1} rxy",
markersize=markersize,
marker="^",
color="red",
)
plot_rho(
axs[0],
zfile2.periods,
zfile2.rxy * scale_factor2,
label=f"{label2} rxy",
markersize=markersize,
marker="^",
color="black",
)
plot_rho(
axs[0],
zfile1.periods,
zfile1.ryx * scale_factor1,
label=f"{label1} ryx",
markersize=markersize,
color="blue",
)
plot_rho(
axs[0],
zfile2.periods,
zfile2.ryx,
label=f"{label2} ryx",
markersize=markersize,
color="black",
)
axs[0].legend(prop={"size": 6})
# axs[0].set_ylabel("$\rho_a$")
# axs[0].set_ylabel("$\\rho_a$")
axs[0].set_ylabel("Apparent Resistivity $\Omega$-m")
rho_ylims = kwargs.get("rho_ylims", [1, 1e3])
if use_ylims:
Expand All @@ -97,44 +101,35 @@ def compare_two_z_files(
if use_xlims:
axs[0].set_xlim(xlims[0], xlims[1])

plot_phi(
axs[1],
zfile1.periods,
zfile1.pxy,
label=f"{label1} pxy",
markersize=markersize,
marker="^",
color="red",
)
plot_phi(
axs[1],
zfile2.periods,
zfile2.pxy,
label=f"{label2} pxy",
markersize=markersize,
marker="^",
color="black",
)
plot_phi(
axs[1],
zfile1.periods,
zfile1.pyx,
label=f"{label1} pyx",
markersize=markersize,
color="blue",
)
plot_phi(
axs[1],
zfile2.periods,
zfile2.pyx,
label=f"{label1} pyx",
markersize=markersize,
color="black",
)
rho_or_phi = "phi"
for xy_or_yx in ["xy", "yx"]:
plot_phi(
axs[1],
zfile1.periods,
zfile1.phi(xy_or_yx) * scale_factor1,
label=f"{label1} {rho_phi_strings[rho_or_phi][xy_or_yx]}",
markersize=markersize,
marker=markers[xy_or_yx],
color=file1_colors[xy_or_yx],
)
plot_phi(
axs[1],
zfile2.periods,
zfile2.phi(xy_or_yx) * scale_factor2,
label=f"{label2} {rho_phi_strings[rho_or_phi][xy_or_yx]}",
markersize=markersize,
marker=markers[xy_or_yx],
color=file2_colors[xy_or_yx],
)

axs[1].legend(prop={"size": 6})
axs[1].set_xlabel("Period (s)")
axs[1].set_ylabel("Phase (degrees)")
phi_ylims = kwargs.get("phi_ylims", [0, 90])
axs[1].set_ylim(phi_ylims[0], phi_ylims[1])

axs[0].grid(which = 'both', axis = 'both',)
axs[1].grid(which='both', axis='both', )
if out_file:
# if out_file[-3:] != ".png":
# out_file+=".png"
Expand Down
Loading