Skip to content
This repository has been archived by the owner on Dec 7, 2021. It is now read-only.

Commit

Permalink
Merge branch 'develop'
Browse files Browse the repository at this point in the history
  • Loading branch information
AlessioZanga committed Sep 28, 2020
2 parents a88f62a + 3617781 commit 6d81ffa
Show file tree
Hide file tree
Showing 76 changed files with 1,444 additions and 1,285 deletions.
1 change: 1 addition & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ language: python
cache: pip
jobs:
allow_failures:
- os: windows
- os: osx
include:
- name: "Python 3.6.0 on Xenial Linux"
Expand Down
25 changes: 25 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed


## [0.10.0] - 2020-09-28
### Added
* Added new dataset versioning system,
you can work with multiple versions
of the same dataset during creation
* Added new in-Python download system,
you can download a specific version
of the dataset during initialization

### Changed
* Refactored dataset indexing system,
a dedicated directory will be created
for the index database during initialization
* Refactored preprocessing cache system,
a dedicated directory will be created
and a faster version for cache coherence
check has been implemented
* Refactored logging system
* Update package requirements
* Update README

### Removed
* Deprecated Makefile


## [0.9.3] - 2020-07-05

### Change
Expand Down
21 changes: 0 additions & 21 deletions Makefile

This file was deleted.

23 changes: 15 additions & 8 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ Here is a simple quickstart:

from pyeeglab import *
dataset = TUHEEGAbnormalDataset()
pipeline = Pipeline([
preprocessing = Pipeline([
CommonChannelSet(),
LowestFrequency(),
ToDataframe(),
Expand All @@ -35,10 +35,6 @@ PyEEGLab is distributed using the pip repository:

pip install PyEEGLab

If you use Python 3.6, the dataclasses package must be installed as backport of Python 3.7 dataclasses:

pip install dataclasses

If you need a bleeding edge version, you can install it directly from GitHub:

pip install git+https://github.com/AlessioZanga/PyEEGLab@develop
Expand All @@ -57,6 +53,8 @@ The following datasets will work upon downloading:

## How to Class Meaning - From the TUH Seizure docs

<div style="font-size: 85%;">

| **Class&nbsp;Code** | **Event&nbsp;Name** | **Description** |
| -------------- | -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ |
| _NULL_ | No Event | An unclassified event |
Expand Down Expand Up @@ -91,15 +89,24 @@ The following datasets will work upon downloading:
| _TRIP_ | Triphasic Wave | Large, three-phase waves frequently caused by an underlying metabolic condition. |
| _ELEC_ | Electrode Artifact | Electrode pop, Electrostatic artifacts, Lead artifacts. |

</div>

## How to Get a Dataset

> **WARNING**: Retrieving the TUH EEG datasets requires valid credentials, you can get your own at: https://www.isip.piconepress.com/projects/tuh_eeg/html/request_access.php.
In the root directory of this project there is a Makefile, by typing:
Given the dataset instance, trigger the download using the "download" method:

from pyeeglab import *
dataset = TUHEEGAbnormalDataset()
dataset.download(user='USER', password='PASSWORD')
dataset.index()

then index the newly downloaded files.

make tuh_eeg_abnormal
It should be noted that the download mechanism works on Unix-like systems, provided the following packages are installed:

you will trigger the dataset download.
sudo apt install sshpass rsync wget

## Documentation

Expand Down
30 changes: 17 additions & 13 deletions examples/tensorboard/example_tensorboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
import numpy as np
from random import shuffle
from itertools import product
from networkx import to_numpy_matrix
from sklearn.model_selection import train_test_split
from tensorflow.python.keras.utils.np_utils import to_categorical

Expand All @@ -38,8 +37,6 @@
from pyeeglab import *

def build_data(dataset):
dataset.set_cache_manager(PickleCache('../../export'))

preprocessing = Pipeline([
CommonChannelSet(),
LowestFrequency(),
Expand Down Expand Up @@ -96,7 +93,7 @@ def get_correlation_matrix(x, frame, N, F):

input_0 = tf.keras.Input((frames, N, F + N))

gans = []
layers = []
for frame in range(frames):
feature_matrix = tf.keras.layers.Lambda(
get_feature_matrix,
Expand All @@ -110,9 +107,9 @@ def get_correlation_matrix(x, frame, N, F):

x = sp.layers.GraphAttention(hparams['output_shape'])([feature_matrix, correlation_matrix])
x = tf.keras.layers.Flatten()(x)
gans.append(x)
layers.append(x)

combine = tf.keras.layers.Concatenate()(gans)
combine = tf.keras.layers.Concatenate()(layers)
reshape = tf.keras.layers.Reshape((frames, N * hparams['output_shape']))(combine)
lstm = tf.keras.layers.LSTM(hparams['hidden_units'])(reshape)
dropout = tf.keras.layers.Dropout(hparams['dropout'])(lstm)
Expand All @@ -125,28 +122,35 @@ def get_correlation_matrix(x, frame, N, F):
metrics=[
'accuracy',
Recall(class_id=0, name='recall'),
Specificity(class_id=0, name='specificity'),
Precision(class_id=0, name='precision'),
F1Score(class_id=0, name='f1score'),
]
)
model.summary()
model.save('logs/plot_gat.h5')
return model

def run_trial(path, step, model, hparams, x_train, y_train, x_val, y_val, x_test, y_test, epochs):
with tf.summary.create_file_writer(path).as_default():
hp.hparams(hparams)
model.fit(x_train, y_train, epochs=epochs, batch_size=32, shuffle=True, validation_data=(x_val, y_val))
loss, accuracy, recall, precision = model.evaluate(x_test, y_test)
loss, accuracy, recall, specificity, precision, f1score = model.evaluate(x_test, y_test)
tf.summary.scalar('accuracy', accuracy, step=step)
tf.summary.scalar('recall', recall, step=step)
tf.summary.scalar('specificity', specificity, step=step)
tf.summary.scalar('precision', precision, step=step)
tf.summary.scalar('f1score', f1score, step=step)

def hparams_combinations(hparams):
hp.hparams_config(
hparams=list(hparams.values()),
metrics=[
hp.Metric('accuracy', display_name='Accuracy'),
hp.Metric('recall', display_name='Recall'),
hp.Metric('specificity', display_name='Specificity'),
hp.Metric('precision', display_name='Precision'),
hp.Metric('f1score', display_name='F1Score'),
]
)
hparams_keys = list(hparams.keys())
Expand All @@ -162,7 +166,7 @@ def hparams_combinations(hparams):
return hparams

def tune_model(dataset_name, data):
LOGS_DIR = join('./logs/generic', dataset_name)
LOGS_DIR = join('./logs/gat', dataset_name)
os.makedirs(LOGS_DIR, exist_ok=True)
# Prepare the data
x_train, y_train, x_val, y_val, x_test, y_test = adapt_data(data)
Expand Down Expand Up @@ -206,18 +210,18 @@ def tune_model(dataset_name, data):
if __name__ == '__main__':
dataset = {}

dataset['tuh_eeg_abnormal'] = TUHEEGAbnormalDataset('../../data/tuh_eeg_abnormal/v2.0.0/edf')
dataset['tuh_eeg_abnormal'] = TUHEEGAbnormalDataset('../../data/tuh_eeg_abnormal/')

dataset['tuh_eeg_artifact'] = TUHEEGArtifactDataset('../../data/tuh_eeg_artifact/v1.0.0/edf')
dataset['tuh_eeg_artifact'] = TUHEEGArtifactDataset('../../data/tuh_eeg_artifact/')
dataset['tuh_eeg_artifact'].set_minimum_event_duration(4)

dataset['tuh_eeg_seizure'] = TUHEEGSeizureDataset('../../data/tuh_eeg_seizure/v1.5.2/edf')
dataset['tuh_eeg_seizure'] = TUHEEGSeizureDataset('../../data/tuh_eeg_seizure/')
dataset['tuh_eeg_seizure'].set_minimum_event_duration(4)

# dataset['eegmmidb'] = EEGMMIDBDataset('../../data/physionet.org/files/eegmmidb/1.0.0')
# dataset['eegmmidb'] = PhysioNetEEGMMIDBDataset('../../data/physionet.org/files/eegmmidb/')
# dataset['eegmmidb'].set_minimum_event_duration(4)

dataset['chbmit'] = CHBMITDataset('../../data/physionet.org/files/chbmit/1.0.0')
dataset['chbmit'] = PhysioNetCHBMITDataset('../../data/physionet.org/files/chbmit/')
dataset['chbmit'].set_minimum_event_duration(4)

"""
Expand Down
Loading

0 comments on commit 6d81ffa

Please sign in to comment.