From bece03d58b05b6962c04f7ef6d00b9d49f0e3955 Mon Sep 17 00:00:00 2001 From: CBroz1 Date: Fri, 20 Oct 2023 16:11:17 -0500 Subject: [PATCH] #630 - Reduce varchar approach --- src/spyglass/common/common_nwbfile.py | 4 +++- src/spyglass/figurl_views/SpikeSortingView.py | 6 ++---- src/spyglass/lfp/v1/lfp_artifact.py | 12 ++++++++---- src/spyglass/lock/file_lock.py | 2 ++ .../position_linearization/v1/__init__.py | 2 +- .../spikesorting/spikesorting_artifact.py | 6 +++++- .../spikesorting/spikesorting_curation.py | 9 +++++++-- .../spikesorting/spikesorting_recording.py | 12 +++++++++--- .../spikesorting/spikesorting_sorting.py | 17 ++++++++++------- 9 files changed, 47 insertions(+), 23 deletions(-) diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index 4458a1162..05ade6a44 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -42,7 +42,7 @@ class Nwbfile(dj.Manual): definition = """ # Table for holding the NWB files. - nwb_file_name: varchar(255) # name of the NWB file + nwb_file_name: varchar(64) # name of the NWB file --- nwb_file_abs_path: filepath@raw INDEX (nwb_file_abs_path) @@ -50,6 +50,8 @@ class Nwbfile(dj.Manual): # NOTE the INDEX above is implicit from filepath@... above but needs to be explicit # so that alter() can work + # NOTE: No existing entries impacted by varchar reduction from 255 to 64 + @classmethod def insert_from_relative_file_name(cls, nwb_file_name): """Insert a new session from an existing NWB file. 
diff --git a/src/spyglass/figurl_views/SpikeSortingView.py b/src/spyglass/figurl_views/SpikeSortingView.py index 7f460d767..027f24882 100644 --- a/src/spyglass/figurl_views/SpikeSortingView.py +++ b/src/spyglass/figurl_views/SpikeSortingView.py @@ -1,11 +1,9 @@ import datajoint as dj import kachery_client as kc import spikeinterface as si -from sortingview.SpikeSortingView import ( - SpikeSortingView as SortingViewSpikeSortingView, -) +from sortingview.SpikeSortingView import SpikeSortingView as SortingViewSpikeSortingView -from ..common.common_spikesorting import SpikeSorting, SpikeSortingRecording +from ..spikesorting import SpikeSorting, SpikeSortingRecording from .prepare_spikesortingview_data import prepare_spikesortingview_data schema = dj.schema("figurl_view_spike_sorting_recording") diff --git a/src/spyglass/lfp/v1/lfp_artifact.py b/src/spyglass/lfp/v1/lfp_artifact.py index 3c6389af4..b4648dad1 100644 --- a/src/spyglass/lfp/v1/lfp_artifact.py +++ b/src/spyglass/lfp/v1/lfp_artifact.py @@ -1,13 +1,13 @@ import datajoint as dj +import numpy as np +from spyglass.common import get_electrode_indices from spyglass.common.common_interval import IntervalList from spyglass.lfp.v1.lfp import LFPV1 from spyglass.lfp.v1.lfp_artifact_difference_detection import ( difference_artifact_detector, ) from spyglass.lfp.v1.lfp_artifact_MAD_detection import mad_artifact_detector -import numpy as np -from spyglass.common import get_electrode_indices schema = dj.schema("lfp_v1") @@ -182,7 +182,7 @@ def make(self, key): key["target_interval_list_name"], "LFP", key["artifact_params_name"], - "artifact_removed_valid_times", + # "artifact_removed_valid_times", ] ), ) @@ -204,9 +204,13 @@ class LFPArtifactRemovedIntervalList(dj.Manual): definition = """ # Stores intervals without detected artifacts. Entries can come from either # ArtifactDetection() or alternative artifact removal analyses. 
- artifact_removed_interval_list_name: varchar(200) + artifact_removed_interval_list_name: varchar(128) --- -> LFPArtifactDetectionSelection artifact_removed_valid_times: longblob artifact_times: longblob # np.array of artifact intervals """ + + # NOTE: 200 existing entries over this new limit. + # Existing names could be significantly cut by reducing redundancy. + # Removing final string above from existing entries means all below new 128 diff --git a/src/spyglass/lock/file_lock.py b/src/spyglass/lock/file_lock.py index 08fd278a3..079a4723d 100644 --- a/src/spyglass/lock/file_lock.py +++ b/src/spyglass/lock/file_lock.py @@ -4,6 +4,8 @@ schema = dj.schema("file_lock") +from ..common.common_nwbfile import AnalysisNwbfile, Nwbfile + @schema class NwbfileLock(dj.Manual): diff --git a/src/spyglass/position_linearization/v1/__init__.py b/src/spyglass/position_linearization/v1/__init__.py index ec3dffc65..46d287a45 100644 --- a/src/spyglass/position_linearization/v1/__init__.py +++ b/src/spyglass/position_linearization/v1/__init__.py @@ -1,7 +1,7 @@ +from spyglass.common.common_position import NodePicker from spyglass.position_linearization.v1.linearization import ( LinearizationParameters, LinearizationSelection, LinearizedPositionV1, - NodePicker, TrackGraph, ) diff --git a/src/spyglass/spikesorting/spikesorting_artifact.py b/src/spyglass/spikesorting/spikesorting_artifact.py index 0532357c1..fa291aa9f 100644 --- a/src/spyglass/spikesorting/spikesorting_artifact.py +++ b/src/spyglass/spikesorting/spikesorting_artifact.py @@ -140,13 +140,17 @@ class ArtifactRemovedIntervalList(dj.Manual): definition = """ # Stores intervals without detected artifacts. # Note that entries can come from either ArtifactDetection() or alternative artifact removal analyses. 
- artifact_removed_interval_list_name: varchar(200) + artifact_removed_interval_list_name: varchar(180) --- -> ArtifactDetectionSelection artifact_removed_valid_times: longblob artifact_times: longblob # np array of artifact intervals """ + # NOTE: current max is 165 + # Current entries are very messy concatenation that look like pks elsewhere + # Why name a list with 165 chars? When you could fk ref the data itself? + def _get_artifact_times( recording: si.BaseRecording, diff --git a/src/spyglass/spikesorting/spikesorting_curation.py b/src/spyglass/spikesorting/spikesorting_curation.py index b0699ba42..ac2790c86 100644 --- a/src/spyglass/spikesorting/spikesorting_curation.py +++ b/src/spyglass/spikesorting/spikesorting_curation.py @@ -385,10 +385,13 @@ def _get_waveform_extractor_name(self, key): class MetricParameters(dj.Manual): definition = """ # Parameters for computing quality metrics of sorted units - metric_params_name: varchar(200) + metric_params_name: varchar(64) --- metric_params: blob """ + + # NOTE: No existing entries impacted by this change + metric_default_params = { "snr": { "peak_sign": "neg", @@ -645,12 +648,14 @@ def _get_num_spikes( @schema class AutomaticCurationParameters(dj.Manual): definition = """ - auto_curation_params_name: varchar(200) # name of this parameter set + auto_curation_params_name: varchar(36) # name of this parameter set --- merge_params: blob # dictionary of params to merge units label_params: blob # dictionary params to label units """ + # NOTE: No existing entries impacted by this change + def insert1(self, key, **kwargs): # validate the labels and then insert # TODO: add validation for merge_params diff --git a/src/spyglass/spikesorting/spikesorting_recording.py b/src/spyglass/spikesorting/spikesorting_recording.py index cdad793e9..d4a63f184 100644 --- a/src/spyglass/spikesorting/spikesorting_recording.py +++ b/src/spyglass/spikesorting/spikesorting_recording.py @@ -20,8 +20,8 @@ from ..common.common_lab import LabTeam 
# noqa: F401 from ..common.common_nwbfile import Nwbfile from ..common.common_session import Session # noqa: F401 -from ..utils.dj_helper_fn import dj_replace from ..settings import recording_dir +from ..utils.dj_helper_fn import dj_replace schema = dj.schema("spikesorting_recording") @@ -321,19 +321,23 @@ def get_geometry(self, sort_group_id, nwb_file_name): class SortInterval(dj.Manual): definition = """ -> Session - sort_interval_name: varchar(200) # name for this interval + sort_interval_name: varchar(64) # name for this interval --- sort_interval: longblob # 1D numpy array with start and end time for a single interval to be used for spike sorting """ + # NOTE: Reduced key less than 2 existing entries + # All existing entries are below 69 @schema class SpikeSortingPreprocessingParameters(dj.Manual): definition = """ - preproc_params_name: varchar(200) + preproc_params_name: varchar(32) --- preproc_params: blob """ + # NOTE: Reduced key less than 2 existing entries + # All existing entries are below 48 def insert_default(self): # set up the default filter parameters @@ -365,6 +369,8 @@ class SpikeSortingRecordingSelection(dj.Manual): -> IntervalList """ + # NOTE: Too many pks? 
+ @schema class SpikeSortingRecording(dj.Computed): diff --git a/src/spyglass/spikesorting/spikesorting_sorting.py b/src/spyglass/spikesorting/spikesorting_sorting.py index 8178ef513..72cb8450d 100644 --- a/src/spyglass/spikesorting/spikesorting_sorting.py +++ b/src/spyglass/spikesorting/spikesorting_sorting.py @@ -14,7 +14,7 @@ from ..common.common_lab import LabMember, LabTeam from ..common.common_nwbfile import AnalysisNwbfile -from ..settings import temp_dir, sorting_dir +from ..settings import sorting_dir, temp_dir from .spikesorting_artifact import ArtifactRemovedIntervalList from .spikesorting_recording import ( SpikeSortingRecording, @@ -27,12 +27,14 @@ @schema class SpikeSorterParameters(dj.Manual): definition = """ - sorter: varchar(200) - sorter_params_name: varchar(200) + sorter: varchar(32) + sorter_params_name: varchar(64) --- sorter_params: blob """ + # NOTE no existing entries impacted by this change + def insert_default(self): """Default params from spike sorters available via spikeinterface""" sorters = sis.available_sorters() @@ -236,10 +238,11 @@ def make(self, key: dict): self.insert1(key) def delete(self): - """Extends the delete method of base class to implement permission checking. - Note that this is NOT a security feature, as anyone that has access to source code - can disable it; it just makes it less likely to accidentally delete entries. - """ + """Extends the delete method of base class to implement permission + checking. Note that this is NOT a security feature, as anyone that has + access to source code can disable it; it just makes it less likely to + accidentally delete entries.""" + current_user_name = dj.config["database.user"] entries = self.fetch() permission_bool = np.zeros((len(entries),))