Commit: pre-commit changes
zerafachris committed May 27, 2024
1 parent 85d67db commit 992adc5
Showing 11 changed files with 101 additions and 117 deletions.
12 changes: 6 additions & 6 deletions .github/workflows/requirements_precommit.txt
@@ -1,8 +1,8 @@
-pycln==2.4.0
-isort==5.12.0
-yapf==0.40.1
-docformatter==1.7.5
+docformatter==1.7.5
 flake8==7.0.0
-pylint==3.1.0
 interrogate==1.7.0
-pre-commit
+isort==5.12.0
+pre-commit
+pycln==2.4.0
+pylint==3.1.0
+yapf==0.40.1
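The net effect of this hunk is an alphabetical re-sort of the pinned hook versions; no pins change. A minimal Python sketch that reproduces the new ordering, assuming the path from the file header above:

```python
from pathlib import Path

# Hypothetical one-off helper: re-sort requirement lines case-insensitively.
req = Path(".github/workflows/requirements_precommit.txt")
lines = [ln for ln in req.read_text().splitlines() if ln.strip()]
req.write_text("\n".join(sorted(lines, key=str.lower)) + "\n")
```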
2 changes: 0 additions & 2 deletions docs/source/getting_started.rst
@@ -17,5 +17,3 @@ These series of notebook-based tutorials are designed to introduce the reader to
 getting_started/01_processing_03_processing_workflow.ipynb
 getting_started/02_analysis_01_dendrograms_waveforms.ipynb
 getting_started/02_analysis_02_timewindow_cluster_comparison.ipynb
-
-
10 changes: 5 additions & 5 deletions pre-commit-hooks/.pre-commit-config.yaml
@@ -22,12 +22,12 @@ repos:
   - id: pycln
     args: [--all]
 - repo: https://github.com/pycqa/isort
-  rev: 5.12.0
+  rev: 5.13.2
   hooks:
   - id: isort
     args: [-l 120]
 - repo: https://github.com/google/yapf
-  rev: v0.40.1
+  rev: v0.40.2
   hooks:
   - id: yapf
     args: [-vv, -r, "--style={based_on_style: pep8, column_limit: 120}", -i]
@@ -42,12 +42,12 @@ repos:
   - id: python-bandit-vulnerability-check
     args: [-ll, --skip, "B321,B402,B301", --recursive, .]
 - repo: https://github.com/Lucas-C/pre-commit-hooks-safety
-  rev: v1.3.1
+  rev: v1.3.3
   hooks:
   - id: python-safety-dependencies-check
     files: requirements.txt
 - repo: https://github.com/pylint-dev/pylint
-  rev: v3.1.0
+  rev: v3.2.2
   hooks:
   - id: pylint
     name: pylint
@@ -62,7 +62,7 @@ repos:
"--load-plugins=pylint.extensions.docparams", # Load an extension
]
- repo: https://github.com/MarcoGorelli/madforhooks # sorts environment.yml
rev: 0.3.0
rev: 0.4.1
hooks:
- id: conda-env-sorter
- repo: https://github.com/pryorda/dockerfilelint-precommit-hooks
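These rev bumps (isort 5.12.0 to 5.13.2, yapf v0.40.1 to v0.40.2, pre-commit-hooks-safety v1.3.1 to v1.3.3, pylint v3.1.0 to v3.2.2, madforhooks 0.3.0 to 0.4.1) are the kind of change `pre-commit autoupdate` produces. A minimal sketch of scripting that update, assuming the config lives at the path shown in the file header:

```python
import subprocess

# Let pre-commit rewrite each hook's `rev:` to the latest tag on its repo.
subprocess.run(
    ["pre-commit", "autoupdate",
     "--config", "pre-commit-hooks/.pre-commit-config.yaml"],
    check=True,  # raise CalledProcessError if the update fails
)
```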
2 changes: 1 addition & 1 deletion readme.MD
@@ -33,7 +33,7 @@ To help you get started with ScatCluster, we have created a set of notebooks loc
 
 The Processing section includes three notebooks that prepare your data for interpretation:
 
-- **01_processing_01_choose_network.ipynb**: This notebook offers an interactive means to determine the best network architecture for your dataset.
+- **01_processing_01_choose_network.ipynb**: This notebook offers an interactive means to determine the best network architecture for your dataset.
 
 - **01_processing_02_set_config.ipynb**: In this notebook, you will set the parameters for your experiment and specify the location where results will be stored. This step is crucial for ensuring that your data processing and analysis are properly configured.
 
2 changes: 1 addition & 1 deletion scatcluster/analysis/crosstab.py
@@ -103,7 +103,7 @@ def _build_crosstab_data(self,
         else:
             raise ValueError(
                 f"Provided Normalization is not valid. This can be 'None' or [{normalization_options}]. Kindly see "
-                f"https://pandas.pydata.org/docs/reference/api/pandas.crosstab.html for more info.")
+                f'https://pandas.pydata.org/docs/reference/api/pandas.crosstab.html for more info.')
         ct_data = ct_data.sort_index(ascending=False)
         return ct_data, factor_difference, fowlkes_mallows_score(df_preds.predictions_1, df_preds.predictions_2)

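The only change here swaps the outer quotes of the second f-string for consistency. Since the message points users at `pandas.crosstab`, here is a minimal sketch of the `normalize` options it refers to, using made-up prediction columns:

```python
import pandas as pd

df_preds = pd.DataFrame({"predictions_1": [0, 0, 1, 1],
                         "predictions_2": [0, 1, 1, 1]})

# normalize accepts False (raw counts), 'all', 'index' (rows), or 'columns'
ct = pd.crosstab(df_preds.predictions_1, df_preds.predictions_2, normalize="index")
print(ct)  # each row sums to 1.0
```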
2 changes: 1 addition & 1 deletion scatcluster/analysis/waveforms.py
@@ -31,7 +31,7 @@ def plot_waveforms_per_cluster(self, clusters: List[int] = None, waveforms_n_sam
         stream = self.load_data(starttime=UTCDateTime(self.data_sample_starttime),
                                 endtime=UTCDateTime(self.data_sample_endtime),
                                 channel=self.data_channel)
-        channel_list = sorted(list(set([trace.stats.channel for trace in stream])))
+        channel_list = sorted(trace.stats.channel for trace in stream)
 
         if clusters is not None:
             classes = clusters
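One behavioural note on this cleanup: dropping `set()` also drops de-duplication, so `channel_list` can now contain repeats when several traces share a channel code. A quick illustration with made-up channel codes:

```python
channels = ["HHZ", "HHN", "HHZ"]  # e.g. two traces on the same channel

print(sorted(set(channels)))  # ['HHN', 'HHZ']        (old form: unique)
print(sorted(channels))       # ['HHN', 'HHZ', 'HHZ'] (new form: repeats kept)
```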
2 changes: 1 addition & 1 deletion scatcluster/helper.py
@@ -52,7 +52,7 @@ def tqdm_importer():
     """
     A function that imports the tqdm module based on the current environment.
     """
-    if is_notebook:
+    if is_notebook():
         pass
     else:
         pass
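The fix here adds the missing call parentheses: a bare function object is always truthy, so `if is_notebook:` could never take the `else` branch. A minimal illustration, with `is_notebook` stubbed in place of the real helper:

```python
def is_notebook() -> bool:
    """Stub standing in for scatcluster.helper.is_notebook."""
    return False

print(bool(is_notebook))    # True  -- the function object itself
print(bool(is_notebook()))  # False -- the value it returns
```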
15 changes: 7 additions & 8 deletions scatcluster/processing/ica.py
@@ -13,7 +13,6 @@
 from scipy.signal import medfilt
 from sklearn.decomposition import FastICA
 from sklearn.metrics import explained_variance_score, mean_squared_error
-from sklearn.preprocessing import RobustScaler
 
 from scatcluster.helper import demad, list_of_strings
 
@@ -86,7 +85,8 @@ def process_ICA_single(self,
         # check if reduction exists already
         if os.path.exists(ica_model_path) and self.ica_overwrite_previous_models is False:
             print(' Using pre-calculated model')
-            model = pickle.load(open(ica_model_path, 'rb'))
+            with open(ica_model_path, 'rb') as f:
+                model = pickle.load(f)
         else:
             # else fit
             kwargs['max_iter'] = 1000 if kwargs.get('max_iter') is None else kwargs.get('max_iter')
@@ -180,14 +180,13 @@ def preload_ICA(self, num_ICA: int) -> None:
         if not os.path.exists(f'{self.data_savepath}ICA/{self.data_network}_{self.data_station}_{self.data_location}_'
                               f'{self.network_name}_dimension_{num_ICA}.pickle'):
             raise ValueError(
-                f"The supplied number of ICAs {num_ICA} has not been computed. Choose another number for the ICAs or "
+                f'The supplied number of ICAs {num_ICA} has not been computed. Choose another number for the ICAs or '
                 f"calcate using 'process_ICA_single'")
 
-        self.ica = pickle.load(
-            open(
-                f'{self.data_savepath}ICA/{self.data_network}_{self.data_station}_{self.data_location}_'
-                f'{self.network_name}_dimension_{num_ICA}.pickle', 'rb'))
-
+        ica_file = f'{self.data_savepath}ICA/{self.data_network}_{self.data_station}_{self.data_location}_' + \
+                   f'{self.network_name}_dimension_{num_ICA}.pickle'
+        with open(ica_file, 'rb') as f:
+            self.ica = pickle.load(f)
         ica_results = np.load(f'{self.data_savepath}data/{self.data_network}_{self.data_station}_{self.data_location}_'
                               f'{self.network_name}_features_{num_ICA}.npz')
         self.ica_features = ica_results['features']
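Both hunks in this file replace `pickle.load(open(...))` with a `with` block, which guarantees the file handle is closed even if unpickling raises; the bare `open()` form leaves the handle to the garbage collector. A minimal sketch of the pattern, with a placeholder path:

```python
import pickle

def load_model(path: str):
    # The context manager closes the handle on success and on exception alike.
    with open(path, "rb") as f:
        return pickle.load(f)

model = load_model("ICA/example_dimension_5.pickle")  # hypothetical path
```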
(3 more changed files not shown)
