diff --git a/.readthedocs.yml b/.readthedocs.yml index 2b5c655..0e5193d 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -17,10 +17,18 @@ sphinx: formats: - pdf +# Optionally set the version of Python and requirements required to build your docs +build: + os: ubuntu-22.04 + tools: + python: "3.6" + +sphinx: + configuration: docs/source/conf.py + # Optionally set the version of Python and requirements required to build your docs python: - version: 3.6 install: - requirements: docs/requirements.txt - method: pip - path: . + path: . \ No newline at end of file diff --git a/README.rst b/README.rst index 0ef316a..faaffa7 100644 --- a/README.rst +++ b/README.rst @@ -29,25 +29,47 @@ Installation Cite """"""""""""""""""""""""""" -When use whole, parts, or are inspired by, we appreciate you acknowledge and refer these journal papers: +This toolbox was developed during multiple projects we appreciate you acknowledge when using or inspired by this toolbox. +The toolbox as a whole was developed during the following projects: +| F. Mivalt et V. Kremen et al., “Electrical brain stimulation and continuous behavioral state tracking in ambulatory humans,” J. Neural Eng., vol. 19, no. 1, p. 016019, Feb. 2022, doi: 10.1088/1741-2552/ac4bfd. +| V. Sladky et al., “Distributed brain co-processor for tracking spikes, seizures and behaviour during electrical brain stimulation,” Brain Commun., vol. 4, no. 3, May 2022, doi: 10.1093/braincomms/fcac115. +For different modules, please see below. -| Mivalt F, Kremen V, Sladky V, Balzekas I, Nejedly P, Gregg N, Lundstrom BN, Lepkova K, Pridalova T, Brinkmann BH, et al. Electrical brain stimulation and continuous behavioral state tracking in ambulatory humans. J Neural Eng (2022) Available at: http://iopscience.iop.org/article/10.1088/1741-2552/ac4bfd +Sleep classification and feature extraction +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +| F. Mivalt et V. 
Kremen et al., “Electrical brain stimulation and continuous behavioral state tracking in ambulatory humans,” J. Neural Eng., vol. 19, no. 1, p. 016019, Feb. 2022, doi: 10.1088/1741-2552/ac4bfd. -| Gerla, V., Kremen, V., Macas, M., Dudysova, D., Mladek, A., Sos, P., & Lhotska, L. (2019). Iterative expert-in-the-loop classification of sleep PSG recordings using a hierarchical clustering. Journal of Neuroscience Methods, 317(February), 61?70. https://doi.org/10.1016/j.jneumeth.2019.01.013 +| F. Mivalt et V. Sladky et al., “Automated sleep classification with chronic neural implants in freely behaving canines,” J. Neural Eng., vol. 20, no. 4, p. 046025, Aug. 2023, doi: 10.1088/1741-2552/aced21. +| Gerla, V., Kremen, V., Macas, M., Dudysova, D., Mladek, A., Sos, P., & Lhotska, L. (2019). Iterative expert-in-the-loop classification of sleep PSG recordings using a hierarchical clustering. Journal of Neuroscience Methods, 317(February), 61?70. https://doi.org/10.1016/j.jneumeth.2019.01.013 | Kremen, V., Brinkmann, B. H., Van Gompel, J. J., Stead, S. (Matt) M., St Louis, E. K., & Worrell, G. A. (2018). Automated Unsupervised Behavioral State Classification using Intracranial Electrophysiology. Journal of Neural Engineering. https://doi.org/10.1088/1741-2552/aae5ab - | Kremen, V., Duque, J. J., Brinkmann, B. H., Berry, B. M., Kucewicz, M. T., Khadjevand, F., G.A. Worrell, G. A. (2017). Behavioral state classification in epileptic brain using intracranial electrophysiology. Journal of Neural Engineering, 14(2), 026001. https://doi.org/10.1088/1741-2552/aa5688 +Seizure detection +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +| V. Sladky et al., “Distributed brain co-processor for tracking spikes, seizures and behaviour during electrical brain stimulation,” Brain Commun., vol. 4, no. 3, May 2022, doi: 10.1093/braincomms/fcac115. + +Artificial Signal Generation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +| F. 
Mivalt et al., “Deep Generative Networks for Algorithm Development in Implantable Neural Technology,” in 2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC), Oct. 2022, pp. 1736–1741, doi: 10.1109/SMC53654.2022.9945379. + + +Evoked Response Potential Analysis +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +| K. J. Miller et al., “Canonical Response Parameterization: Quantifying the structure of responses to single-pulse intracranial electrical brain stimulation,” PLOS Comput. Biol., vol. 19, no. 5, p. e1011105, May 2023, doi: 10.1371/journal.pcbi.1011105. + + Acknowledgement -""""""""" +""""""""""""""""""" BEST was developed under projects supported by NIH Brain Initiative UH2&3 NS095495 Neurophysiologically-Based Brain State Tracking & Modulation in Focal Epilepsy, DARPA HR0011-20-2-0028 Manipulating and Optimizing Brain Rhythms for Enhancement of Sleep (Morpheus). Filip Mivalt was also partially supported by the grant FEKT-K-22-7649 realized within the project Quality Internal Grants of the Brno University of Technology (KInG BUT), Reg. No. CZ.02.2.69/0.0/0.0/19_073/0016948, which is financed from the OP RDE. diff --git a/best/__init__.py b/best/__init__.py index d4cfcd5..55c11f7 100644 --- a/best/__init__.py +++ b/best/__init__.py @@ -5,7 +5,7 @@ # LICENSE file in the root directory of this source tree. 
import os -__version__ = '0.0.6' +__version__ = '0.1.0a' diff --git a/best/hypnogram/CyberPSG.py b/best/annotations/CyberPSG.py similarity index 97% rename from best/hypnogram/CyberPSG.py rename to best/annotations/CyberPSG.py index 9feb8c9..9a9a641 100644 --- a/best/hypnogram/CyberPSG.py +++ b/best/annotations/CyberPSG.py @@ -200,6 +200,12 @@ class CyberPSG_XML_Writter: 'Sleep stage W': '00000001-898a-4b80-8ed8-000000000005', 'IED': '00000001-898a-4b80-8ed8-000000000010', 'IED_best': '00000001-898a-4b80-8ed8-000000000011', + 'seizure': '00000001-898a-4b80-8ed8-000000000012', + 'seizure_best': '00000001-898a-4b80-8ed8-000000000013', + 'seizure_05': '00000001-898a-4b80-8ed8-000000000014', + 'seizure_05_best': '00000001-898a-4b80-8ed8-000000000015', + 'seizure_08': '00000001-898a-4b80-8ed8-000000000016', + 'seizure_08_best': '00000001-898a-4b80-8ed8-000000000017', } diff --git a/best/hypnogram/NSRR.py b/best/annotations/NSRR.py similarity index 100% rename from best/hypnogram/NSRR.py rename to best/annotations/NSRR.py diff --git a/best/hypnogram/__init__.py b/best/annotations/__init__.py similarity index 67% rename from best/hypnogram/__init__.py rename to best/annotations/__init__.py index e63b9e4..a1c2ca3 100644 --- a/best/hypnogram/__init__.py +++ b/best/annotations/__init__.py @@ -8,7 +8,7 @@ """ -A package for a standardized manipulation with hypnogram annotations, saving, loading across different file formats (CyberP{SG, NSRR, ...) but also +A package for a standardized manipulation with annotations, saving, loading across different file formats (CyberPSG, NSRR, ...) but also basic analysis of hypnograms. The package is based on pandas.DataFrame. 
""" diff --git a/best/hypnogram/analysis.py b/best/annotations/analysis.py similarity index 98% rename from best/hypnogram/analysis.py rename to best/annotations/analysis.py index eab81fa..a33ed34 100644 --- a/best/hypnogram/analysis.py +++ b/best/annotations/analysis.py @@ -14,8 +14,8 @@ from copy import deepcopy from tqdm import tqdm -from best.hypnogram.visualisation import plot_hypnogram -from best.hypnogram.utils import merge_annotations, filter_by_key +from best.annotations.visualisation import plot_hypnogram +from best.annotations.utils import merge_annotations, filter_by_key """ diff --git a/best/hypnogram/correct.py b/best/annotations/correct.py similarity index 100% rename from best/hypnogram/correct.py rename to best/annotations/correct.py diff --git a/best/hypnogram/io.py b/best/annotations/io.py similarity index 94% rename from best/hypnogram/io.py rename to best/annotations/io.py index df0e0f5..aa55707 100644 --- a/best/hypnogram/io.py +++ b/best/annotations/io.py @@ -18,15 +18,15 @@ from mef_tools.io import MefReader -from best.hypnogram.CyberPSG import CyberPSGFile, CyberPSG_XML_Writter -from best.hypnogram.NSRR import NSRRSleepFile -from best.hypnogram.utils import time_to_utc, create_duration, tile_annotations +from best.annotations.CyberPSG import CyberPSGFile, CyberPSG_XML_Writter +from best.annotations.NSRR import NSRRSleepFile +from best.annotations.utils import time_to_utc, create_duration, tile_annotations from copy import deepcopy """ -Tools for loading and saving of hypnogram files. +Tools for loading and saving of annotations files. 
""" _hypnogram_colors = { @@ -141,7 +141,7 @@ def _load_CyberPSG(path, tile=None): def _load_CyberPSG_dataset(paths: list, tile=None, verbose=True): if verbose: - print('Loading hypnogram Dataset') + print('Loading annotations Dataset') return [_load_CyberPSG(pth, tile) for pth in tqdm(paths)] else: return [_load_CyberPSG(pth, tile) for pth in paths] diff --git a/best/hypnogram/utils.py b/best/annotations/utils.py similarity index 96% rename from best/hypnogram/utils.py rename to best/annotations/utils.py index f09cf0a..88f7643 100644 --- a/best/hypnogram/utils.py +++ b/best/annotations/utils.py @@ -64,7 +64,7 @@ def _convert_to_timezone(x, tzinfo): def time_to_local(dfHyp): """ - Converts the time into the local timezone. Default by python and PC. Does not enter the timezone explicitely. Cannot be used for creating a hypnogram figure. + Converts the time into the local timezone. Default by python and PC. Does not enter the timezone explicitely. Cannot be used for creating a annotations figure. """ def convert(x, col_key): return _convert_to_local(x[col_key]) @@ -90,7 +90,7 @@ def convert(x, col_key): def time_to_timezone(dfHyp, tzinfo): """ - Converts the hypnogram into a timezone. The timezone has to be from a python library dateutil + Converts the annotations into a timezone. The timezone has to be from a python library dateutil """ def convert(x, col_key): return _convert_to_timezone(x[col_key], tzinfo) @@ -102,7 +102,7 @@ def convert(x, col_key): def time_to_timestamp(dfHyp): """ - Converts the hypnogram time to timestamp. + Converts the annotations time to timestamp. """ def convert(x, col_key): return _convert_to_timestamp(x[col_key]) @@ -114,7 +114,7 @@ def convert(x, col_key): def create_duration(dfHyp): """ - Creates duration for each epoch within the hypnogram. (Faster on timestamp) + Creates duration for each epoch within the annotations. 
(Faster on timestamp) """ def duration(x): if type(x['start']) in (datetime, Timestamp): @@ -127,7 +127,7 @@ def duration(x): def create_day_indexes(dfHyp, hour=12, tzinfo=tz.tzlocal): """ - Creates a day index for each epoch within the hypnogram, given the day-time hour supplied as input parameter. + Creates a day index for each epoch within the annotations, given the day-time hour supplied as input parameter. The format of start and end has to be an integer or a float in a form representing a timestamp, or a timezone aware datetime or Timestamp object. If the start and end format do not include timezone, the local timezone will be used. """ diff --git a/best/hypnogram/visualisation.py b/best/annotations/visualisation.py similarity index 98% rename from best/hypnogram/visualisation.py rename to best/annotations/visualisation.py index 2ad1980..9df1fa7 100644 --- a/best/hypnogram/visualisation.py +++ b/best/annotations/visualisation.py @@ -18,11 +18,11 @@ def plot_hypnogram(orig_df, hypnogram_values=None, hypnogram_colors=None, fontsize=12, fig=None, night_start=22): """ - Creates a Matplotlib figure of spectrogram from the hypnogram. Time must be in a time-zone aware format. + Creates a Matplotlib figure of spectrogram from the annotations. Time must be in a time-zone aware format. 
Parameters ---------- - orig_df : hypnogram + orig_df : annotations hypnogram_values : dict dict of a y-axis values for each sleep state hypnogram_colors : dict diff --git a/best/cloud/db.py b/best/cloud/db.py index 5a27702..221b00e 100644 --- a/best/cloud/db.py +++ b/best/cloud/db.py @@ -18,7 +18,7 @@ from sqlalchemy.pool import NullPool from sshtunnel import SSHTunnelForwarder -from best.hypnogram.utils import create_day_indexes, time_to_timezone, time_to_timestamp, tile_annotations, create_duration +from best.annotations.utils import create_day_indexes, time_to_timezone, time_to_timestamp, tile_annotations, create_duration from best.cloud._db_connection_variables import * import pandas as pd @@ -451,8 +451,7 @@ def load_stimulation_day_info(self, patient_id, start_uutc, stop_uutc): freqs = [] ampls = [] pws = [] - for row in df_data.iterrows(): - row = row[1] + for _, row in df_data.iterrows(): ampl = [row['curr_Prog'+str(k)+'AmpInMilliamps'] for k in range(4) if not isinstance(row['curr_Prog'+str(k)+'AmpInMilliamps'], type(None))] freq = row['curr_rate_in_hz'] @@ -472,9 +471,9 @@ def load_stimulation_day_info(self, patient_id, start_uutc, stop_uutc): if ampls.__len__() == 0: ampls = None if pws.__len__() == 0: pws = None stim_info = { - 'freq': np.max(freqs), - 'ampl': np.max(ampls), - 'pulsewidth': np.max(pws) + 'freq': np.nanmax(freqs), + 'ampl': np.nanmax(ampls), + 'pulsewidth': np.nanmax(pws) } return stim_info diff --git a/best/erp/__init__.py b/best/erp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/best/erp/crp.py b/best/erp/crp.py new file mode 100755 index 0000000..890b55b --- /dev/null +++ b/best/erp/crp.py @@ -0,0 +1,240 @@ +import warnings +import numpy as np +from scipy.stats import ttest_1samp + + +def crp_method(v, t_win, prune_the_data): + """ + :param v: ndarray + Voltage matrix of shape (n_samples, n_channels). + :param t_win: ndarray + Time array of shape (n_samples,) representing the time window. 
+ :param prune_the_data: bool + Flag indicating whether to prune the data or not. + :return: tuple + Tuple containing the output parameters and projections. + + The function `crp_method` implements the Canonical Response Parametrization (CRP) method for ERP analysis of brain electrophysiology data. It takes in a voltage matrix `v`, a time array `t_win`, and a flag `prune_the_data + *`. It returns a tuple containing the output parameters and projections. Note that the methodology behind this is described in the manuscript: + "Canonical Response Parametrization: Quantifying the structure of responses to single-pulse intracranial electrical brain stimulation" + by Kai J. Miller, et al., 2022. Python implementation by Vaclav Kremen, 1/2024 version 1.0. + + The voltage matrix `v` is a 2D ndarray of shape `(n_samples, n_channels)`, where `n_samples` is the number of samples and `n_channels` is the number of channels. + + The time array `t_win` is a 1D ndarray of shape `(n_samples,)` representing the time window. + + The flag `prune_the_data` is a boolean indicating whether to prune the data or not. + + The function performs the following steps: + + 1. Calculates the sampling rate based on the time window. + 2. Calculates sets of normalized single stimulation cross-projection magnitudes using a specified time step. + 3. Parameterizes the trials by reducing the voltage matrix to the response duration, performing kernel trick PCA to capture structure, and calculating the first principal component and + * residual epsilon. + 4. Returns the projections data, including projection timepoints, mean and variance of projection profiles, response duration index, average and standard deviation of input traces. + 5. Calculates significance statistics, such as the t-value and p-value at the response duration and full time sent in. + 6. 
Returns the parameterization data, including the reduced voltage matrix, alpha coefficient weights, canonical shape, residual epsilon, response time, parameter timepoints, average + * and standard deviation of response traces, alpha coefficient weights normalized by the square root of the length of C, and the square root of the diagonal elements of ep.T @ ep. + 7. Calculates extracted single-trial quantities, such as signal-to-noise ratio (Vsnr) and explained variance (expl_var) for each trial. + 8. Optionally prunes the data if requested, by removing trials that are too far from the given template and outliers. + 9. Returns the final output parameters and projections. + + Example usage: + ```python + v = np.zeros((10, 1000)) + t_win = np.arange(0, 1000/fs, 1/fs) + prune_the_data = True + + crp_parameters, crp_projections = crp_method(v, t_win, prune_the_data) + ``` + """ + + # region For testing purposes + # Define the time range and sampling rate 1 kHz + # fs = 1000 + # t_win = np.arange(0, 1000/fs, 1/fs) + # fs = 1 / (t_win[1] - t_win[0]) + # # Create the ndarray 'v' with 1 sinusoid for testing + # v = np.zeros((10, 1000)) + # for i in range(10): + # v[i] = np.sin(2 * np.pi * t_win) + # v = np.transpose(v) + # plt.plot(t_win, v[:, 9]) + # plt.show() + # endregion For testing purposes + + # Initial housekeeping + sampling_rate = 1 / np.mean(np.diff(t_win)) # Get sampling rate + + # Calculate sets of normalized single stimulation cross-projection magnitudes + t_step = 5 # Timestep between timepoints (in samples) + proj_tpts = np.arange(10, v.shape[0], t_step) # Timepoints for calculation of profile (in samples) + m = [] # Mean projection magnitudes + v2 = [] # Variance of projection magnitudes + for k in proj_tpts: # Parse through time and perform projections for different data lengths + # Get projection magnitudes for this duration + s = ccep_proj(v[:k, :]) + # Change units from uV*sqrt(samples) to sqrt(seconds) + s = s / np.sqrt(sampling_rate) + # Calculate mean 
and variance of projections for this duration + m.append(np.mean(s)) + v2.append(np.var(s)) + try: + s_all + except NameError: + s_all = np.zeros((len(s), 1)) + s_all = np.append(s_all, s.reshape((-1, 1)), axis=1) # Store projection weights + else: + s_all = np.append(s_all, s.reshape((-1, 1)), axis=1) # Store projection weights + s_all = s_all[:, 1:] # Remove the first column of zeros + tt = np.argmax(m) # tt is the sample corresponding to response duration + + # Parameterize trials + v_t_r = v[:proj_tpts[tt], :] # Reduced length voltage matrix (to response duration) + e_t_r, _ = kt_pca(v_t_r) # Linear kernel trick PCA method to capture structure + # 1st PC, canonical shape, C(t) from paper + c = e_t_r[:, 0] + # Mean shape + # c = np.mean(v_tR, axis=1) + # c = c / np.linalg.norm(c) + al = np.dot(c, v_t_r) # Alpha coefficient weights for C into V + ep = v_t_r - np.outer(c, al) # Residual epsilon after removal of a form of CCEP + + # Output variables, package data out + # Projections data + crp_projections = {'proj_tpts': t_win[proj_tpts], 's_all': s_all, 'mean_proj_profile': m, 'var_proj_profile': v2, + 'tR_index': tt, 'avg_trace_input': np.mean(v, axis=1), 'std_trace_input': np.std(v, axis=1)} + + # Significance statistics - note that have to send in only non-overlapping trials. 
+ # Each trial is represented half of the time as the normalized projected, and half as non-normalized projected-into + stat_indices = get_stat_indices(v.shape[1]) + crp_projections['stat_indices'] = stat_indices + + # t-statistic at response duration \tau_R + crp_projections['t_value_tR'] = np.mean(s_all[stat_indices, tt]) / ( + np.std(s_all[stat_indices, tt]) / np.sqrt(len(s_all[stat_indices, tt]))) # Calculate t-statistic + + # p-value at response duration \tau_R (extraction significance) + _, crp_projections['p_value_tR'] = ttest_1samp(s_all[stat_indices, tt], 0, alternative='greater') + + # t-statistic at full time sent in + crp_projections['t_value_full'] = np.mean(s_all[stat_indices, -1]) / ( + np.std(s_all[stat_indices, -1]) / np.sqrt(len(s_all[stat_indices, -1]))) # Calculate t-statistic + + # p-value at full time sent in (extraction significance) + _, crp_projections['p_value_full'] = ttest_1samp(s_all[stat_indices, -1], 0, alternative='greater') + + # Parameterization + crp_parameters = {'V_tR': v_t_r, 'al': al, 'C': c, 'ep': ep, 'tR': t_win[proj_tpts[tt]], + 'parms_times': t_win[:proj_tpts[tt]], 'avg_trace_tR': np.mean(v_t_r, axis=1), + 'std_trace_tR': np.std(v_t_r, axis=1), 'al_p': al / (len(c) ** 0.5), + 'epep_root': np.sqrt(np.diag(ep.T @ ep))} + + # Extracted single-trial quantities (e.g. 
Table 1 in manuscript) + denominator = np.sqrt(np.diag(ep.T @ ep)) + denominator[denominator == 0] = 1 # Set the denominator to 1 if it is zero + crp_parameters['Vsnr'] = al / denominator # "signal-to-noise" for each trial + denominator = np.diag(np.dot(v_t_r.T, v_t_r)) + denominator = denominator.copy() + denominator[denominator == 0] = 1 # Set the denominator to 1 if it is zero + crp_parameters['expl_var'] = 1 - np.diag(np.dot(ep.T, ep)) / denominator + + # If the data pruning was requested, then prune the data that are too far from given template + # (just one more cycle and get rid of outliers) + # Aggregated epsilon per trials + eps = np.sum(np.abs(ep), axis=0) + # Select the indexes of eps that are higher than the 2*std + high_eps_indexes = np.where(eps < 2 * np.std(eps))[0] + if len(high_eps_indexes) > 12: + if prune_the_data and len(high_eps_indexes) > 6: + # Prune the data - rerun the CRP with only selected trials + [crp_parameters, crp_projections] = crp_method(v[:, high_eps_indexes], t_win, False) + + return crp_parameters, crp_projections + + +def ccep_proj(V): + """ + Perform projections of each trial onto all other trials, and return the internal projections. + + :param V: The input matrix of shape (M, N), collector of trials (each trial is a column). + :type V: numpy.ndarray + :return: The calculated internal projections vector after removing self-projections. 
+ :rtype: numpy.ndarray + """ + + # Normalize (L2 norm) each trial + denominator = np.sqrt(np.sum(V ** 2, axis=0))[np.newaxis, :] + denominator[denominator == 0] = 1 # Set the denominator to 1 if it is zero + V0 = V / denominator + V0[np.isnan(V0)] = 0 # Taking care of a divide-by-zero situation in normalization + + # Calculate internal projections (semi-normalized - optimal) + P = np.dot(V0.T, V) # Calculate internal projections (semi-normalized - optimal) + + # Get only off-diagonal elements of P (i.e., ignore self-projections) + p0 = P.copy() + np.fill_diagonal(p0, np.nan) + S0 = np.reshape(p0, (1, -1)) # Reshaping to 1D array + S0 = S0[~np.isnan(S0)] # Removing diagonal elements (self-projections) + return S0 + + +def kt_pca(X): + """ + This is an implementation of the linear kernel PCA method ("kernel trick") + described in "Kernel PCA Pattern Reconstruction via Approximate Pre-Images" + by Scholkopf et al., ICANN, 1998, pp 147-15. + + param: X - Matrix of data in. Only need this trick if T>>N + + :return: E, S - Eigenvectors and Eigenvalues of X in descending order + """ + + # Use the "kernel trick" to estimate eigenvectors of this cluster of pair groups + S2, F = np.linalg.eig(X.T @ X) # Eigenvector decomposition of (covariance of transpose) + + idx = np.argsort(S2)[::-1] # Indices to sort eigenvectors in descending order + S2 = S2[idx] # Sort eigenvalues in descending order + F = F[:, idx] # Sort eigenvectors in descending order + # Ignore warnings of the DeprecationWarning category + warnings.filterwarnings("ignore", category=DeprecationWarning) + # A statement that may raise a warning + # S = np.sqrt(S2) # Estimated eigenvalues of both X.T @ X and X @ X. + # Catch any remaining warnings + with warnings.catch_warnings(record=True) as w: + # Execute another statement that may raise a warning + # TODO: check with Kai or somewhere if all these exceptions handling + S = np.sqrt(np.abs(S2)) # Estimated eigenvalues of both X.T @ X and X @ X. 
+ # Print any warnings that were caught + for warning in w: + print(warning) + + ES = X @ F # Kernel trick + denominator = (np.ones((X.shape[0], 1)) @ S.reshape(1, -1)) # Denominator for normalization + denominator[denominator == 0] = 1 # Set the denominator to 1 if it is zero + E = ES / denominator # Divide through to obtain unit-normalized eigenvectors + + return E, S + + +def get_stat_indices(N): + """ + This function picks out the indices of S that can be used for statistical comparison. + For each trial, half of normalized projections to other trials are used, + and the other half of trials are the projected into ones. No overlapping comparison pairs are used. + + :param N: Scalar - number of trials + :return: stat_indices (N^2-N,1) - Vector of indices to be used for statistical comparison + """ + stat_indices = np.arange(1, N ** 2 - N + 1, 2) # Indices used for statistics + + if N % 2 == 1: # Odd number of trials - need to offset every other column in the original P matrix + b = np.zeros_like(stat_indices) # Initializes what is indexed + for k in range(1, N + 1): + if k % 2 == 0: # Offset what would have been every even column in the original matrix + b[((k - 1) * ((N - 1) // 2) + 1):(k * ((N - 1) // 2))] = 1 + + stat_indices = stat_indices + b + + return stat_indices diff --git a/best/files.py b/best/files.py index bdbe4e7..cfd4513 100644 --- a/best/files.py +++ b/best/files.py @@ -9,7 +9,6 @@ from shutil import rmtree from time import sleep - def create_folder(PATH_TO_CREATE): """ Creates a folder on PATH_TO_CREATE position. 
diff --git a/best/sleep_classification/MultiChannelClassifier.py b/best/sleep_classification/MultiChannelClassifier.py index 2c38ca6..729deb0 100644 --- a/best/sleep_classification/MultiChannelClassifier.py +++ b/best/sleep_classification/MultiChannelClassifier.py @@ -9,7 +9,7 @@ from tqdm import tqdm from best.files import create_folder, get_files, get_folders -from best.hypnogram.utils import time_to_timestamp +from best.annotations.utils import time_to_timestamp from best.cloud.mef import MefClient from best import DELIMITER @@ -24,14 +24,14 @@ from best.cloud.db import SessionFinder from best.dbs.artifact_removal.trainer import * from best.signal import get_datarate, buffer -from best.hypnogram.io import load_CyberPSG +from best.annotations.io import load_CyberPSG import pandas as pd from best.feature_extraction.FeatureExtractor import SleepSpectralFeatureExtractor from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.metrics import cohen_kappa_score, roc_curve, f1_score, accuracy_score from best.modules import ZScoreModule, PCAModule -from best.hypnogram.utils import merge_annotations +from best.annotations.utils import merge_annotations from best.feature_extraction.WaveDetector import WaveDetector @@ -1034,7 +1034,7 @@ def load_stim_info(self, hyp): hyp['ampl'] = 0 return hyp - # prepare for tiling the hypnogram based on stim + # prepare for tiling the annotations based on stim t = np.arange(stim_info.start.min(), stim_info.end.max(), 1) stim_ref = np.zeros_like(t) - 1 ampl_ref = np.zeros_like(t) - 1 @@ -1042,7 +1042,7 @@ def load_stim_info(self, hyp): stim_ref[(t >= row_['start']) & (t <= row_['end'])] = row_['frequency'] ampl_ref[(t >= row_['start']) & (t <= row_['end'])] = row_['amplitude'] - # tile hypnogram with stim annots + # tile annotations with stim annots for k, row_hyp in tqdm(list(hyp.iterrows())): t_ = t[(t >= row_hyp['start']) & (t <= row_hyp['end'])] @@ -1098,7 +1098,7 @@ def load_hypnogram_annotations(self, 
files_hyp, round_timestamps=True): SFndr = SessionFinder(self.db_id) hyp = [] for f in files_hyp: - # Load hypnogram + # Load annotations hyp_ = load_CyberPSG(f) hyp_ = time_to_timestamp(hyp_) if round_timestamps: diff --git a/best/sleep_classification/models.py b/best/sleep_classification/models.py index 7d39a2a..2d29018 100644 --- a/best/sleep_classification/models.py +++ b/best/sleep_classification/models.py @@ -25,7 +25,7 @@ from scipy.linalg import norm -from best.hypnogram.utils import time_to_timestamp, time_to_utc, merge_annotations +from best.annotations.utils import time_to_timestamp, time_to_utc, merge_annotations from best.feature import augment_features, balance_classes from best.signal import unify_sampling_frequency, get_datarate, buffer from best.stats import kl_divergence_nonparametric diff --git a/best/test/test_best_hypnogram.py b/best/test/test_best_hypnogram.py index 16a10f9..8a7ecee 100644 --- a/best/test/test_best_hypnogram.py +++ b/best/test/test_best_hypnogram.py @@ -14,17 +14,17 @@ import unittest from unittest import TestCase -from best.hypnogram.analysis import * -from best.hypnogram.visualisation import * -from best.hypnogram.utils import * -from best.hypnogram.io import * -from best.hypnogram.correct import * -from best.hypnogram.CyberPSG import * -from best.hypnogram.NSRR import * +from best.annotations.analysis import * +from best.annotations.visualisation import * +from best.annotations.utils import * +from best.annotations.io import * +from best.annotations.correct import * +from best.annotations.CyberPSG import * +from best.annotations.NSRR import * class TestHypnogram(TestCase): def test_import(self): - print("Testing import 'from best.hypnogram'" ) + print("Testing import 'from best.annotations'" ) diff --git a/docs/source/best.rst b/docs/source/best.rst index f238698..a362f44 100644 --- a/docs/source/best.rst +++ b/docs/source/best.rst @@ -22,6 +22,7 @@ modules signal_generation sleep_classification + erp diff --git 
a/docs/source/erp.crp.rst new file mode 100644 index 0000000..356f052 --- /dev/null +++ b/docs/source/erp.crp.rst @@ -0,0 +1,9 @@ +Evoked Response Potential (ERP) Analysis +========================================== + +.. automodule:: best.erp.crp + :members: + :undoc-members: + + + diff --git a/docs/source/erp.rst b/docs/source/erp.rst new file mode 100644 index 0000000..8d635a6 --- /dev/null +++ b/docs/source/erp.rst @@ -0,0 +1,13 @@ +ERP Analysis +============ + +.. automodule:: best.erp + :members: + :undoc-members: + + +.. toctree:: + + erp.crp + + diff --git a/setup.py b/setup.py index e45bd94..e856e2a 100644 --- a/setup.py +++ b/setup.py @@ -84,7 +84,8 @@ 'sshtunnel', 'torch', 'pyyaml', - 'mne' + 'mne', + 'epycom' ] )