diff --git a/.circleci/config.yml b/.circleci/config.yml
index 992391aa5..e1f39ce38 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -35,6 +35,7 @@ jobs:
       - run:
           name: Build the documentation
+          no_output_timeout: 30m
           command: |
             make build-doc
@@ -117,6 +118,7 @@ jobs:
       - run:
           name: Check links
+          no_output_timeout: 30m
           command: |
             make -C doc linkcheck
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 000000000..553fc4f78
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1 @@
+58d9cf0b3a916af3e48fbb63b85b699c998c7f7a
diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index b4c15c756..cad80130a 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -24,6 +24,7 @@ jobs:
         python-version: ["3.11"]
     steps:
       - uses: actions/checkout@v3
+      - uses: psf/black@stable
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
@@ -35,7 +36,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          pip install pydocstyle flake8 check-manifest
+          pip install pydocstyle flake8 check-manifest black
       - name: Display versions and environment information
         run: |
           python --version
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index be209dc89..612b1c041 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,7 +4,7 @@ Contributions are welcome in the form of feedback and discussion in issues,
 or pull requests for changes to the code.
 
 Once the implementation of a piece of functionality is considered to be bug
-free and properly documented (both API docs and an example script),
+free and properly documented (both in the API docs and with an example script),
 it can be incorporated into the `main` branch.
 
 To help develop `mne-bids`, you will need a few adjustments to your
@@ -24,10 +24,10 @@ To start with, you should install `mne-bids` as described in our
 [installation documentation](https://mne.tools/mne-bids/dev/install.html).
 For a development environment we recommend that you perform the installation in
 a dedicated Python environment,
-for example using `conda`.
+for example using `conda` (see: https://docs.conda.io/en/latest/miniconda.html).
 Afterwards, a few additional steps need to be performed.
-For all of the steps below we assume that you work in your dedicated `mne-bids`
-Python environment.
+
+**For all of the steps below we assume that you work in your dedicated `mne-bids` Python environment.**
 
 ### Clone MNE-Python and install it from the git repository
 
@@ -37,7 +37,7 @@ then navigate to the cloned repository using the `cd` command.
 Then from the `mne-python` root directory call:
 
 ```Shell
-pip uninstall mne
+pip uninstall mne --yes
 pip install -e .
 ```
 
@@ -54,8 +54,10 @@ Then, `git clone` your fork and install it in "editable" mode.
 git clone https://github.com/<your-GitHub-username>/mne-bids
 cd ./mne-bids
 pip install -e .[full]
+git config --local blame.ignoreRevsFile .git-blame-ignore-revs
 ```
 
+The last command is needed for `git blame` to work properly: it makes `git blame` skip the commits listed in `.git-blame-ignore-revs` (e.g., large style reformats).
 You should now have both the `mne` and `mne-bids` development versions available in your Python environment.
 
 ### Install additional Python packages required for development
@@ -67,7 +69,7 @@
 pip install -r test_requirements.txt
 pip install -r doc/requirements.txt
 ```
 
-This will install several packages to run tests, and build the documentation for `mne-bids`.
+This will install several packages for running tests and building the documentation for `mne-bids`.
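+
+As a quick, optional sanity check (a suggestion, not part of the required setup), you
+can confirm that both development installs are importable from your environment:
+
+```Shell
+python -c 'import mne, mne_bids; print(mne.__version__, mne_bids.__version__)'
+```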
 ### Install the BIDS validator
@@ -119,17 +121,29 @@ figure out how to run the commands without invoking `make`.
 
 We run several style checks on `mne-bids`.
 If you have accurately followed the steps to set up your `mne-bids` development version,
-you can simply call from the root of the `mne-bids` repository:
+you can simply use the following command from the root of the `mne-bids` repository:
 
 ```Shell
 make pep
 ```
 
+We use [Black](https://github.com/psf/black) to format our code.
+You can simply call `black .` from the root of the `mne-bids` repository
+to automatically reformat your code to follow the appropriate style.
+
 ## Running tests
 
 We run tests using `pytest`.
+
+First you will need to download the MNE-Python testing data.
+Use the following command:
+
+```Shell
+python -c 'import mne; mne.datasets.testing.data_path(verbose=True)'
+```
+
 If you have accurately followed the steps to set up your `mne-bids` development version,
-you can simply call from the root of the `mne-bids` repository:
+you can then simply use the following command from the root of the `mne-bids` repository:
 
 ```Shell
 make test
@@ -146,7 +160,7 @@ VALIDATOR_EXECUTABLE=../bids-validator/bids-validator/bin/bids-validator pytest
 
 The documentation can be built using [Sphinx](https://www.sphinx-doc.org).
 If you have accurately followed the steps to set up your `mne-bids` development version,
-you can simply call from the root of the `mne-bids` repository:
+you can simply use the following command from the root of the `mne-bids` repository:
 
 ```Shell
 make build-doc
diff --git a/MANIFEST.in b/MANIFEST.in
index ba04ee528..188f5408d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -23,6 +23,7 @@ exclude tools
 prune .circleci
 prune paper
+exclude .git-blame-ignore-revs
 exclude CITATION.cff
 exclude Makefile
 exclude CONTRIBUTING.md
diff --git a/Makefile b/Makefile
index ea33b4c38..c3deb90f9 100755
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all clean-pyc clean-so clean-build clean-ctags clean-cache clean-e clean inplace test check-manifest flake pydocstyle pep build-doc dist-build
+.PHONY: all clean-pyc clean-so clean-build clean-ctags clean-cache clean-e clean inplace test check-manifest flake black pydocstyle pep build-doc dist-build
 
 all: clean inplace pep test build-doc dist-build
 
@@ -45,11 +45,15 @@ flake:
 	@echo "Running flake8"
 	@flake8 --count mne_bids examples
 
+black:
+	@echo "Running black"
+	@black --check .
+
 pydocstyle:
 	@echo "Running pydocstyle"
 	@pydocstyle .
 
-pep: flake pydocstyle check-manifest
+pep: flake pydocstyle check-manifest black
 
 build-doc:
 	@echo "Building documentation"
diff --git a/doc/conf.py b/doc/conf.py
index 93820ba9d..e1b003df8 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -13,32 +13,32 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 curdir = os.path.dirname(__file__)
-sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))
-sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
+sys.path.append(os.path.abspath(os.path.join(curdir, "..", "mne_bids")))
+sys.path.append(os.path.abspath(os.path.join(curdir, "sphinxext")))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
 #
-needs_sphinx = '2.0'
+needs_sphinx = "2.0"
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'sphinx.ext.githubpages',
-    'sphinx.ext.autodoc',
-    'sphinx.ext.mathjax',
-    'sphinx.ext.viewcode',
-    'sphinx.ext.autosummary',
-    'sphinx.ext.doctest',
-    'sphinx.ext.intersphinx',
-    'sphinx_gallery.gen_gallery',
-    'numpydoc',
-    'sphinx_copybutton',
-    'gen_cli',  # custom extension, see ./sphinxext/gen_cli.py
-    'gh_substitutions',  # custom extension, see ./sphinxext/gh_substitutions.py
+    "sphinx.ext.githubpages",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.doctest",
+    "sphinx.ext.intersphinx",
+    "sphinx_gallery.gen_gallery",
+    "numpydoc",
+    "sphinx_copybutton",
+    "gen_cli",  # custom extension, see ./sphinxext/gen_cli.py
+    "gh_substitutions",  # custom extension, see ./sphinxext/gh_substitutions.py
 ]
 
 # configure sphinx-copybutton
@@ -50,51 +50,52 @@ numpydoc_class_members_toctree = False
 numpydoc_attributes_as_param_list = True
 numpydoc_xref_aliases = {
-    'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',
-    'path-like': ':term:`path-like <mne:path-like>`',
-    'array-like': ':term:`array_like <numpy:array_like>`',
-    'int': ':class:`int <python:int>`',
-    'bool': ':class:`bool <python:bool>`',
-    'float': ':class:`float <python:float>`',
-    'list': ':class:`list <python:list>`',
-    'tuple': ':class:`tuple <python:tuple>`',
-    'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',
+    "BIDSPath": ":class:`BIDSPath <mne_bids.BIDSPath>`",
+    "path-like": ":term:`path-like <mne:path-like>`",
+    "array-like": ":term:`array_like <numpy:array_like>`",
+    "int": ":class:`int <python:int>`",
+    "bool": ":class:`bool <python:bool>`",
+    "float": ":class:`float <python:float>`",
+    "list": ":class:`list <python:list>`",
+    "tuple": ":class:`tuple <python:tuple>`",
+    "NibabelImageObject": "nibabel.spatialimages.SpatialImage",
 }
 numpydoc_xref_ignore = {
     # words
-    'instance', 'instances', 'of'
+    "instance",
+    "instances",
+    "of",
 }
 
 # generate autosummary even if no references
 autosummary_generate = True
-autodoc_default_options = {'inherited-members': None}
-default_role = 'autolink'  # XXX silently allows bad syntax, someone should fix
+autodoc_default_options = {"inherited-members": None}
+default_role = "autolink"  # XXX silently allows bad syntax, someone should fix
 
 # configure linkcheck
 # https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder
 linkcheck_retries = 2
 linkcheck_rate_limit_timeout = 15.0
 linkcheck_ignore = [
-    r'https://www.researchgate.net/profile/.*',
+    r"https://www.researchgate.net/profile/.*",
 ]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'MNE-BIDS'
+project = "MNE-BIDS"
 td = date.today()
-copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,
-                                                              td.isoformat())
+copyright = "2017-%s, MNE Developers. Last updated on %s" % (td.year, td.isoformat())
 
-author = u'MNE Developers'
+author = "MNE Developers"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -108,39 +109,42 @@
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db', - '.DS_Store'] +exclude_patterns = ["auto_examples/index.rst", "_build", "Thumbs.db", ".DS_Store"] # HTML options (e.g., theme) html_show_sourcelink = False html_copy_source = False -html_theme = 'pydata_sphinx_theme' +html_theme = "pydata_sphinx_theme" # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] -html_static_path = ['_static'] -html_css_files = ['style.css'] +templates_path = ["_templates"] +html_static_path = ["_static"] +html_css_files = ["style.css"] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. switcher_version_match = "dev" if "dev" in release else version html_theme_options = { - 'icon_links': [ - dict(name='GitHub', - url='https://github.com/mne-tools/mne-bids', - icon='fab fa-github-square'), - dict(name='Discourse', - url='https://mne.discourse.group/tags/mne-bids', - icon='fab fa-discourse'), + "icon_links": [ + dict( + name="GitHub", + url="https://github.com/mne-tools/mne-bids", + icon="fab fa-github-square", + ), + dict( + name="Discourse", + url="https://mne.discourse.group/tags/mne-bids", + icon="fab fa-discourse", + ), ], - 'icon_links_label': 'Quick Links', # for screen reader - 'use_edit_page_button': False, - 'navigation_with_keys': False, - 'show_toc_level': 1, - 'navbar_end': ['theme-switcher', 'version-switcher', 'navbar-icon-links'], - 'analytics': dict(google_analytics_id='G-C8SH9E98QC'), + "icon_links_label": "Quick Links", # for screen reader + "use_edit_page_button": False, + "navigation_with_keys": False, + "show_toc_level": 1, + "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], + "analytics": dict(google_analytics_id="G-C8SH9E98QC"), "switcher": { "json_url": "https://raw.githubusercontent.com/mne-tools/mne-bids/main/doc/_static/versions.json", # noqa: E501 "version_match": switcher_version_match, @@ -156,15 +160,15 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), - 'mne': ('https://mne.tools/dev', None), - 'mne-gui-addons': ('https://mne.tools/mne-gui-addons', None), - 'numpy': ('https://numpy.org/devdocs', None), - 'scipy': ('https://scipy.github.io/devdocs', None), - 'matplotlib': ('https://matplotlib.org', None), - 'nilearn': ('http://nilearn.github.io/stable', None), - 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None), - 'nibabel': ('https://nipy.org/nibabel', None), + "python": ("https://docs.python.org/3", None), + "mne": ("https://mne.tools/dev", None), + "mne-gui-addons": ("https://mne.tools/mne-gui-addons", None), + "numpy": ("https://numpy.org/devdocs", None), + "scipy": ("https://scipy.github.io/devdocs", None), + "matplotlib": ("https://matplotlib.org/stable", None), + "nilearn": ("http://nilearn.github.io/stable", None), + "pandas": ("https://pandas.pydata.org/pandas-docs/dev", None), + "nibabel": ("https://nipy.org/nibabel", None), } intersphinx_timeout = 5 @@ -174,31 +178,31 @@ # instead of in the root." # we will store dev docs in a `dev` subdirectory and all other docs in a # directory "v" + version_str. 
E.g., "v0.3" -if 'dev' in version: - filepath_prefix = 'dev' +if "dev" in version: + filepath_prefix = "dev" else: - filepath_prefix = 'v{}'.format(version) + filepath_prefix = "v{}".format(version) sphinx_gallery_conf = { - 'doc_module': 'mne_bids', - 'reference_url': { - 'mne_bids': None, + "doc_module": "mne_bids", + "reference_url": { + "mne_bids": None, }, - 'backreferences_dir': 'generated', - 'examples_dirs': '../examples', - 'within_subsection_order': ExampleTitleSortKey, - 'gallery_dirs': 'auto_examples', - 'filename_pattern': '^((?!sgskip).)*$', - 'binder': { + "backreferences_dir": "generated", + "examples_dirs": "../examples", + "within_subsection_order": ExampleTitleSortKey, + "gallery_dirs": "auto_examples", + "filename_pattern": "^((?!sgskip).)*$", + "binder": { # Required keys - 'org': 'mne-tools', - 'repo': 'mne-bids', - 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs. - 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org). - 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links. - 'dependencies': [ - '../test_requirements.txt', - './requirements.txt', + "org": "mne-tools", + "repo": "mne-bids", + "branch": "gh-pages", # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs. + "binderhub_url": "https://mybinder.org", # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org). + "filepath_prefix": filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links. + "dependencies": [ + "../test_requirements.txt", + "./requirements.txt", ], - } + }, } diff --git a/doc/sphinxext/gen_cli.py b/doc/sphinxext/gen_cli.py index b8f2b4a50..d2a35c8f8 100644 --- a/doc/sphinxext/gen_cli.py +++ b/doc/sphinxext/gen_cli.py @@ -11,7 +11,6 @@ import os import glob from os import path as op -import subprocess import sys import sphinx.util @@ -20,7 +19,7 @@ def setup(app): """Set up the app.""" - app.connect('builder-inited', generate_cli_rst) + app.connect("builder-inited", generate_cli_rst) # Header markings go: @@ -58,25 +57,28 @@ def setup(app): def generate_cli_rst(app=None): """Generate the command line interface docs.""" - out_dir = op.abspath(op.join(op.dirname(__file__), '..', 'generated')) + out_dir = op.abspath(op.join(op.dirname(__file__), "..", "generated")) if not op.isdir(out_dir): os.mkdir(out_dir) - out_fname = op.join(out_dir, 'cli.rst.new') + out_fname = op.join(out_dir, "cli.rst.new") cli_path = op.abspath( - op.join(os.path.dirname(__file__), '..', '..', 'mne_bids', 'commands')) - fnames = sorted([ - op.basename(fname) - for fname in glob.glob(op.join(cli_path, 'mne_bids*.py'))]) - iterator = sphinx.util.status_iterator( - fnames, 'generating MNE-BIDS cli help ... ', length=len(fnames)) - with open(out_fname, 'w', encoding='utf-8') as f: + op.join(os.path.dirname(__file__), "..", "..", "mne_bids", "commands") + ) + fnames = sorted( + [op.basename(fname) for fname in glob.glob(op.join(cli_path, "mne_bids*.py"))] + ) + iterator = sphinx.util.display.status_iterator( + fnames, "generating MNE-BIDS cli help ... 
", length=len(fnames) + ) + with open(out_fname, "w", encoding="utf-8") as f: f.write(header) for fname in iterator: cmd_name = fname[:-3] run_name = op.join(cli_path, fname) - output, _ = run_subprocess([sys.executable, run_name, '--help'], - verbose=False) + output, _ = run_subprocess( + [sys.executable, run_name, "--help"], verbose=False + ) output = output.splitlines() # Swap usage and title lines @@ -84,31 +86,36 @@ def generate_cli_rst(app=None): # Add header marking for idx in (1, 0): - output.insert(idx, '-' * len(output[0])) + output.insert(idx, "-" * len(output[0])) # Add code styling for the "Usage: " line for li, line in enumerate(output): - if line.startswith('Usage: mne_bids '): - output[li] = 'Usage: ``%s``' % line[7:] + if line.startswith("Usage: mne_bids "): + output[li] = "Usage: ``%s``" % line[7:] break # Turn "Options:" into field list - if 'Options:' in output: - ii = output.index('Options:') - output[ii] = 'Options' - output.insert(ii + 1, '-------') - output.insert(ii + 2, '') - output.insert(ii + 3, '.. rst-class:: field-list cmd-list') - output.insert(ii + 4, '') - output = '\n'.join(output) - f.write(command_rst % (cmd_name, - cmd_name.replace('mne_bids_', 'mne_bids '), - '=' * len(cmd_name), - output)) + if "Options:" in output: + ii = output.index("Options:") + output[ii] = "Options" + output.insert(ii + 1, "-------") + output.insert(ii + 2, "") + output.insert(ii + 3, ".. rst-class:: field-list cmd-list") + output.insert(ii + 4, "") + output = "\n".join(output) + f.write( + command_rst + % ( + cmd_name, + cmd_name.replace("mne_bids_", "mne_bids "), + "=" * len(cmd_name), + output, + ) + ) _replace_md5(out_fname) - print('[Done]') + print("[Done]") # This is useful for testing/iterating to see what the result looks like -if __name__ == '__main__': +if __name__ == "__main__": generate_cli_rst() diff --git a/doc/sphinxext/gh_substitutions.py b/doc/sphinxext/gh_substitutions.py index 3d9842cec..93e4eb76e 100644 --- a/doc/sphinxext/gh_substitutions.py +++ b/doc/sphinxext/gh_substitutions.py @@ -19,9 +19,9 @@ def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # direct link mode slug = text else: - slug = 'issues/' + text - text = '#' + text - ref = 'https://github.com/mne-tools/mne-bids/' + slug + slug = "issues/" + text + text = "#" + text + ref = "https://github.com/mne-tools/mne-bids/" + slug set_classes(options) node = reference(rawtext, text, refuri=ref, **options) return [node], [] @@ -29,5 +29,5 @@ def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]): def setup(app): """Do setup.""" - app.add_role('gh', gh_role) + app.add_role("gh", gh_role) return diff --git a/examples/anonymize_dataset.py b/examples/anonymize_dataset.py index 814589116..4c0bf0481 100644 --- a/examples/anonymize_dataset.py +++ b/examples/anonymize_dataset.py @@ -26,23 +26,34 @@ from pathlib import Path import mne from mne_bids import ( - BIDSPath, write_raw_bids, write_anat, write_meg_calibration, - write_meg_crosstalk, anonymize_dataset, print_dir_tree + BIDSPath, + write_raw_bids, + write_anat, + write_meg_calibration, + write_meg_crosstalk, + anonymize_dataset, + print_dir_tree, ) data_path = Path(mne.datasets.sample.data_path()) -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, +} -raw_path = data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif' 
-raw_er_path = data_path / 'MEG' / 'sample' / 'ernoise_raw.fif' # empty-room -events_path = data_path / 'MEG' / 'sample' / 'sample_audvis_raw-eve.fif' -cal_path = data_path / 'SSS' / 'sss_cal_mgh.dat' -ct_path = data_path / 'SSS' / 'ct_sparse_mgh.fif' -t1w_path = data_path / 'subjects' / 'sample' / 'mri' / 'T1.mgz' +raw_path = data_path / "MEG" / "sample" / "sample_audvis_raw.fif" +raw_er_path = data_path / "MEG" / "sample" / "ernoise_raw.fif" # empty-room +events_path = data_path / "MEG" / "sample" / "sample_audvis_raw-eve.fif" +cal_path = data_path / "SSS" / "sss_cal_mgh.dat" +ct_path = data_path / "SSS" / "ct_sparse_mgh.fif" +t1w_path = data_path / "subjects" / "sample" / "mri" / "T1.mgz" -bids_root = data_path.parent / 'MNE-sample-data-bids' -bids_root_anon = data_path.parent / 'MNE-sample-data-bids-anon' +bids_root = data_path.parent / "MNE-sample-data-bids" +bids_root_anon = data_path.parent / "MNE-sample-data-bids-anon" # %% # To ensure the output paths don't contain any leftover files from previous @@ -58,25 +69,29 @@ # %% bids_path = BIDSPath( - subject='ABC123', task='audiovisual', root=bids_root, datatype='meg' + subject="ABC123", task="audiovisual", root=bids_root, datatype="meg" ) bids_path_er = bids_path.copy().update( - subject='emptyroom', task='noise', session='20021206' + subject="emptyroom", task="noise", session="20021206" ) raw = mne.io.read_raw_fif(raw_path, verbose=False) raw_er = mne.io.read_raw_fif(raw_er_path, verbose=False) # specify power line frequency as required by BIDS -raw.info['line_freq'] = 60 -raw_er.info['line_freq'] = 60 +raw.info["line_freq"] = 60 +raw_er.info["line_freq"] = 60 # Write empty-room data write_raw_bids(raw=raw_er, bids_path=bids_path_er, verbose=False) # Write experimental MEG data, fine-calibration and crosstalk files write_raw_bids( - raw=raw, bids_path=bids_path, events=events_path, event_id=event_id, - empty_room=bids_path_er, verbose=False + raw=raw, + bids_path=bids_path, + events=events_path, + event_id=event_id, + empty_room=bids_path_er, + verbose=False, ) write_meg_calibration(cal_path, bids_path=bids_path, verbose=False) write_meg_crosstalk(ct_path, bids_path=bids_path, verbose=False) @@ -88,12 +103,10 @@ lpa=[66.08580, 51.33362, 46.52982], nasion=[41.87363, 32.24694, 74.55314], rpa=[17.23812, 53.08294, 47.01789], - coord_frame='mri_voxel' -) -bids_path.datatype = 'anat' -write_anat( - image=t1w_path, bids_path=bids_path, landmarks=mri_landmarks, verbose=False + coord_frame="mri_voxel", ) +bids_path.datatype = "anat" +write_anat(image=t1w_path, bids_path=bids_path, landmarks=mri_landmarks, verbose=False) # %% # Basic anonymization @@ -126,7 +139,7 @@ anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='anat' # Only anatomical data + datatypes="anat", # Only anatomical data ) print_dir_tree(bids_root_anon) @@ -143,10 +156,10 @@ anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='meg', # Only MEG data - daysback=10 + datatypes="meg", # Only MEG data + daysback=10, ) -print_dir_tree(bids_root_anon / 'sub-emptyroom') # Easy to see effects here +print_dir_tree(bids_root_anon / "sub-emptyroom") # Easy to see effects here # %% # Specifying subject IDs @@ -160,16 +173,13 @@ shutil.rmtree(bids_root_anon) -subject_mapping = { - 'ABC123': 'anonymous', - 'emptyroom': 'emptyroom' -} +subject_mapping = {"ABC123": "anonymous", "emptyroom": "emptyroom"} anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='meg', - subject_mapping=subject_mapping 
+ datatypes="meg", + subject_mapping=subject_mapping, ) print_dir_tree(bids_root_anon) @@ -201,12 +211,12 @@ # anonymize the entire dataset again. for i in range(2): - print(f'\n\nRun {i+1}\n') + print(f"\n\nRun {i+1}\n") shutil.rmtree(bids_root_anon) anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='meg', - random_state=42 + datatypes="meg", + random_state=42, ) print_dir_tree(bids_root_anon) diff --git a/examples/bidspath.py b/examples/bidspath.py index 5029e9ad3..65587f609 100644 --- a/examples/bidspath.py +++ b/examples/bidspath.py @@ -36,7 +36,7 @@ # We are using a pathlib.Path object for convenience, but you could just use # a string to specify ``bids_root`` here. -bids_root = Path(mne_bids.__file__).parent / 'tests' / 'data' / 'tiny_bids' +bids_root = Path(mne_bids.__file__).parent / "tests" / "data" / "tiny_bids" # %% # This refers to a folder named ``my_bids_root`` in the current working @@ -51,7 +51,7 @@ # identifiers**. We can either create a new ``BIDSPath``, or update our # existing one. The value can be retrieved via the ``.subject`` attribute. -subject = '01' +subject = "01" # Option 1: Create an entirely new BIDSPath. bids_path_new = BIDSPath(subject=subject, root=bids_root) @@ -72,7 +72,7 @@ # information on our experimental session, and try to retrieve it again via # ``.session``. -session = 'eeg' +session = "eeg" bids_path.update(session=session) print(bids_path.session) @@ -84,7 +84,7 @@ # using `mne_bids.write_raw_bids`. For the sake of this example, however, we # are going to specify the data type explicitly. -datatype = 'eeg' +datatype = "eeg" bids_path.update(datatype=datatype) print(bids_path.datatype) @@ -128,7 +128,7 @@ # and implies that no value has been set. Let us add a ``run`` entity, and # remove the ``session``: -run = '01' +run = "01" session = None bids_path.update(run=run, session=session) bids_path @@ -151,7 +151,7 @@ # For now, let's revert to the last working iteration of our ``bids_path`` # instance. -bids_path.update(run=None, session='eeg') +bids_path.update(run=None, session="eeg") print(bids_path.fpath) # %% @@ -171,7 +171,7 @@ # ``.tsv``. # Let's put our new knowledge to use! -bids_path.update(suffix='eeg', extension='.vhdr') +bids_path.update(suffix="eeg", extension=".vhdr") print(bids_path.fpath) bids_path diff --git a/examples/convert_eeg_to_bids.py b/examples/convert_eeg_to_bids.py index 3509d366e..54019b3eb 100644 --- a/examples/convert_eeg_to_bids.py +++ b/examples/convert_eeg_to_bids.py @@ -68,8 +68,8 @@ # of the directory tree. 
# get MNE directory with example data -mne_data_dir = mne.get_config('MNE_DATASETS_EEGBCI_PATH') -data_dir = op.join(mne_data_dir, 'MNE-eegbci-data') +mne_data_dir = mne.get_config("MNE_DATASETS_EEGBCI_PATH") +data_dir = op.join(mne_data_dir, "MNE-eegbci-data") print_dir_tree(data_dir) @@ -98,7 +98,7 @@ # Load the data from "2 minutes eyes closed rest" edf_path = eegbci.load_data(subject=subject, runs=run)[0] raw = mne.io.read_raw_edf(edf_path, preload=False) -raw.info['line_freq'] = 50 # specify power line frequency as required by BIDS +raw.info["line_freq"] = 50 # specify power line frequency as required by BIDS # %% # For the sake of the example we will also pretend that we have the electrode @@ -116,7 +116,7 @@ # Get the electrode coordinates testing_data = mne.datasets.testing.data_path() -captrak_path = op.join(testing_data, 'montage', 'captrak_coords.bvct') +captrak_path = op.join(testing_data, "montage", "captrak_coords.bvct") montage = mne.channels.read_dig_captrak(captrak_path) # Rename the montage channel names only for this example, because as said @@ -154,11 +154,11 @@ # In the second run of the experiment, the task was to rest with closed eyes. # zero padding to account for >100 subjects in this dataset -subject_id = '001' +subject_id = "001" # define a task name and a directory where to save the data to -task = 'RestEyesClosed' -bids_root = op.join(mne_data_dir, 'eegmmidb_bids_eeg_example') +task = "RestEyesClosed" +bids_root = op.join(mne_data_dir, "eegmmidb_bids_eeg_example") # %% # To ensure the output path doesn't contain any leftover files from previous @@ -220,8 +220,8 @@ # # If you are preparing a manuscript, please make sure to also cite MNE-BIDS # there. -readme = op.join(bids_root, 'README') -with open(readme, 'r', encoding='utf-8-sig') as fid: +readme = op.join(bids_root, "README") +with open(readme, "r", encoding="utf-8-sig") as fid: text = fid.read() print(text) diff --git a/examples/convert_empty_room.py b/examples/convert_empty_room.py index 0b95802e9..5cca3d9d8 100644 --- a/examples/convert_empty_room.py +++ b/examples/convert_empty_room.py @@ -37,16 +37,15 @@ import mne from mne.datasets import sample -from mne_bids import (write_raw_bids, read_raw_bids, - BIDSPath, print_dir_tree) +from mne_bids import write_raw_bids, read_raw_bids, BIDSPath, print_dir_tree # %% # And define the paths and event_id dictionary. data_path = sample.data_path() -raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') +raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif") -bids_root = op.join(data_path, '..', 'MNE-sample-data-bids') +bids_root = op.join(data_path, "..", "MNE-sample-data-bids") # %% # To ensure the output path doesn't contain any leftover files from previous @@ -62,44 +61,46 @@ # Specify the raw file and write the BIDS data. raw = mne.io.read_raw_fif(raw_fname) -raw.info['line_freq'] = 60 # specify power line frequency as required by BIDS +raw.info["line_freq"] = 60 # specify power line frequency as required by BIDS -bids_path = BIDSPath(subject='01', session='01', - task='audiovisual', run='01', root=bids_root) +bids_path = BIDSPath( + subject="01", session="01", task="audiovisual", run="01", root=bids_root +) write_raw_bids(raw, bids_path, overwrite=True) # %% # Specify some empty room data and run BIDS conversion on it. 
-er_raw_fname = op.join(data_path, 'MEG', 'sample', 'ernoise_raw.fif') +er_raw_fname = op.join(data_path, "MEG", "sample", "ernoise_raw.fif") er_raw = mne.io.read_raw_fif(er_raw_fname) -er_raw.info['line_freq'] = 60 # specify power line frequency as req. by BIDS +er_raw.info["line_freq"] = 60 # specify power line frequency as req. by BIDS # For empty room data we need to specify the recording date in the format # YYYYMMDD for the session id. -er_date = er_raw.info['meas_date'].strftime('%Y%m%d') +er_date = er_raw.info["meas_date"].strftime("%Y%m%d") print(er_date) # %% # The measurement date is -raw_date = raw.info['meas_date'].strftime('%Y%m%d') +raw_date = raw.info["meas_date"].strftime("%Y%m%d") print(raw_date) # %% # We also need to specify that the subject ID is 'emptyroom', and that the # task is 'noise' (these are BIDS rules). -er_bids_path = BIDSPath(subject='emptyroom', session=er_date, - task='noise', root=bids_root) +er_bids_path = BIDSPath( + subject="emptyroom", session=er_date, task="noise", root=bids_root +) write_raw_bids(er_raw, er_bids_path, overwrite=True) # %% # Just to illustrate, we can save more than one empty room file for different # dates. Here, they will all contain the same data but in your study, they # will be different on different days. -dates = ['20021204', '20021201', '20021001'] +dates = ["20021204", "20021201", "20021001"] for date in dates: er_bids_path.update(session=date) - er_meas_date = datetime.strptime(date, '%Y%m%d') + er_meas_date = datetime.strptime(date, "%Y%m%d") er_raw.set_meas_date(er_meas_date.replace(tzinfo=timezone.utc)) write_raw_bids(er_raw, er_bids_path, overwrite=True) diff --git a/examples/convert_group_studies.py b/examples/convert_group_studies.py index e6404c8d4..37b6fa191 100644 --- a/examples/convert_group_studies.py +++ b/examples/convert_group_studies.py @@ -25,9 +25,13 @@ import mne from mne.datasets import eegbci -from mne_bids import (write_raw_bids, BIDSPath, - get_anonymization_daysback, make_report, - print_dir_tree) +from mne_bids import ( + write_raw_bids, + BIDSPath, + get_anonymization_daysback, + make_report, + print_dir_tree, +) from mne_bids.stats import count_events # %% @@ -39,8 +43,8 @@ # documentation to get the 1st, 2nd, and 3rd run of one of the the motor # imagery task runs = [ - 4, # This is run #1 of imagining to open/close left or right fist - 8, # ... run #2 + 4, # This is run #1 of imagining to open/close left or right fist + 8, # ... run #2 12, # ... 
run #3 ] @@ -51,14 +55,14 @@ eegbci.load_data(subject=subject_id, runs=runs, update_path=True) # get path to MNE directory with the downloaded example data -mne_data_dir = mne.get_config('MNE_DATASETS_EEGBCI_PATH') -data_dir = op.join(mne_data_dir, 'MNE-eegbci-data') +mne_data_dir = mne.get_config("MNE_DATASETS_EEGBCI_PATH") +data_dir = op.join(mne_data_dir, "MNE-eegbci-data") # %% # Let us loop over the subjects and create BIDS-compatible folder # Make a path where we can save the data to -bids_root = op.join(mne_data_dir, 'eegmmidb_bids_group_conversion') +bids_root = op.join(mne_data_dir, "eegmmidb_bids_group_conversion") # %% # To ensure the output path doesn't contain any leftover files from previous @@ -81,12 +85,15 @@ for run in runs: raw_fname = eegbci.load_data(subject=subject_id, runs=run)[0] raw = mne.io.read_raw_edf(raw_fname) - raw.info['line_freq'] = 50 # specify power line frequency + raw.info["line_freq"] = 50 # specify power line frequency raw_list.append(raw) - bids_path = BIDSPath(subject=f'{subject_id:03}', - session='01', task='MotorImagery', - run=f'{run_map[run]:02}', - root=bids_root) + bids_path = BIDSPath( + subject=f"{subject_id:03}", + session="01", + task="MotorImagery", + run=f"{run_map[run]:02}", + root=bids_root, + ) bids_list.append(bids_path) daysback_min, daysback_max = get_anonymization_daysback(raw_list) @@ -101,9 +108,9 @@ # Note that we do not need to pass any events, as the dataset is already # equipped with annotations, which will be converted to BIDS events # automatically. - write_raw_bids(raw, bids_path, - anonymize=dict(daysback=daysback_min + 2117), - overwrite=True) + write_raw_bids( + raw, bids_path, anonymize=dict(daysback=daysback_min + 2117), overwrite=True + ) # %% # Now let's see the structure of the BIDS folder we created. diff --git a/examples/convert_ieeg_to_bids.py b/examples/convert_ieeg_to_bids.py index b6ad366db..659dbd886 100644 --- a/examples/convert_ieeg_to_bids.py +++ b/examples/convert_ieeg_to_bids.py @@ -57,11 +57,18 @@ from nilearn.plotting import plot_anat import mne -from mne_bids import (BIDSPath, write_raw_bids, write_anat, - get_anat_landmarks, read_raw_bids, - search_folder_for_text, print_dir_tree, - template_to_head, convert_montage_to_ras, - convert_montage_to_mri) +from mne_bids import ( + BIDSPath, + write_raw_bids, + write_anat, + get_anat_landmarks, + read_raw_bids, + search_folder_for_text, + print_dir_tree, + template_to_head, + convert_montage_to_ras, + convert_montage_to_mri, +) # %% # Step 1: Download the data @@ -74,10 +81,9 @@ # The electrode coords data are in the tsv file format # which is easily read in using numpy -raw = mne.io.read_raw_fif(op.join( - misc_path, 'seeg', 'sample_seeg_ieeg.fif')) -raw.info['line_freq'] = 60 # specify power line frequency as required by BIDS -subjects_dir = op.join(misc_path, 'seeg') # Freesurfer recon-all directory +raw = mne.io.read_raw_fif(op.join(misc_path, "seeg", "sample_seeg_ieeg.fif")) +raw.info["line_freq"] = 60 # specify power line frequency as required by BIDS +subjects_dir = op.join(misc_path, "seeg") # Freesurfer recon-all directory # %% # When the locations of the channels in this dataset were found in @@ -95,13 +101,13 @@ # is the best option. 
# estimate the transformation from "head" to "mri" space -trans = mne.coreg.estimate_head_mri_t('sample_seeg', subjects_dir) +trans = mne.coreg.estimate_head_mri_t("sample_seeg", subjects_dir) # %% # Now let's convert the montage to "ras" montage = raw.get_montage() montage.apply_trans(trans) # head->mri -convert_montage_to_ras(montage, 'sample_seeg', subjects_dir) # mri->ras +convert_montage_to_ras(montage, "sample_seeg", subjects_dir) # mri->ras # %% # BIDS vs MNE-Python Coordinate Systems @@ -147,14 +153,14 @@ # Let us initialize some of the necessary data for the subject. # There is a subject, and specific task for the dataset. -subject_id = '1' -task = 'motor' +subject_id = "1" +task = "motor" # get MNE-Python directory w/ example data -mne_data_dir = mne.get_config('MNE_DATASETS_MISC_PATH') +mne_data_dir = mne.get_config("MNE_DATASETS_MISC_PATH") # There is the root directory for where we will write our data. -bids_root = op.join(mne_data_dir, 'ieeg_bids') +bids_root = op.join(mne_data_dir, "ieeg_bids") # %% # To ensure the output path doesn't contain any leftover files from previous @@ -180,18 +186,26 @@ # plot T1 to show that it is ACPC-aligned # note that the origin is centered on the anterior commissure (AC) # with the y-axis passing through the posterior commissure (PC) -T1_fname = op.join(subjects_dir, 'sample_seeg', 'mri', 'T1.mgz') +T1_fname = op.join(subjects_dir, "sample_seeg", "mri", "T1.mgz") fig = plot_anat(T1_fname, cut_coords=(0, 0, 0)) -fig.axes['x'].ax.annotate('AC', (2., -2.), (30., -40.), color='w', - arrowprops=dict(facecolor='w', alpha=0.5)) -fig.axes['x'].ax.annotate('PC', (-31., -2.), (-80., -40.), color='w', - arrowprops=dict(facecolor='w', alpha=0.5)) +fig.axes["x"].ax.annotate( + "AC", + (2.0, -2.0), + (30.0, -40.0), + color="w", + arrowprops=dict(facecolor="w", alpha=0.5), +) +fig.axes["x"].ax.annotate( + "PC", + (-31.0, -2.0), + (-80.0, -40.0), + color="w", + arrowprops=dict(facecolor="w", alpha=0.5), +) # write ACPC-aligned T1 -landmarks = get_anat_landmarks(T1_fname, raw.info, trans, - 'sample_seeg', subjects_dir) -T1_bids_path = write_anat(T1_fname, bids_path, deface=True, - landmarks=landmarks) +landmarks = get_anat_landmarks(T1_fname, raw.info, trans, "sample_seeg", subjects_dir) +T1_bids_path = write_anat(T1_fname, bids_path, deface=True, landmarks=landmarks) # write `raw` to BIDS and anonymize it (converts to BrainVision format) # @@ -200,8 +214,14 @@ # # `acpc_aligned=True` affirms that our MRI is aligned to ACPC # if this is not true, convert to `fsaverage` (see below)! -write_raw_bids(raw, bids_path, anonymize=dict(daysback=40000), - montage=montage, acpc_aligned=True, overwrite=True) +write_raw_bids( + raw, + bids_path, + anonymize=dict(daysback=40000), + montage=montage, + acpc_aligned=True, + overwrite=True, +) # check our output print_dir_tree(bids_root) @@ -216,7 +236,7 @@ # describe ``iEEGReference`` and ``iEEGGround`` yourself. # It's easy to find these by searching for ``"n/a"`` in the sidecar files. 
-search_folder_for_text('n/a', bids_root) +search_folder_for_text("n/a", bids_root) # %% # Remember that there is a convenient JavaScript tool to validate all your BIDS @@ -246,14 +266,13 @@ montage2 = raw2.get_montage() # we need to go from scanner RAS back to surface RAS (requires recon-all) -convert_montage_to_mri(montage2, 'sample_seeg', subjects_dir=subjects_dir) +convert_montage_to_mri(montage2, "sample_seeg", subjects_dir=subjects_dir) # this uses Freesurfer recon-all subject directory -montage2.add_estimated_fiducials('sample_seeg', subjects_dir=subjects_dir) +montage2.add_estimated_fiducials("sample_seeg", subjects_dir=subjects_dir) # get head->mri trans, invert from mri->head -trans2 = mne.transforms.invert_transform( - mne.channels.compute_native_head_t(montage2)) +trans2 = mne.transforms.invert_transform(mne.channels.compute_native_head_t(montage2)) # now the montage is properly in "head" and ready for analysis in MNE raw2.set_montage(montage2) @@ -269,10 +288,13 @@ montage2.apply_trans(trans2) # compare with standard -print('Recovered coordinate: {recovered}\n' - 'Saved coordinate: {saved}'.format( - recovered=montage2.get_positions()['ch_pos']['LENT 1'], - saved=montage.get_positions()['ch_pos']['LENT 1'])) +print( + "Recovered coordinate: {recovered}\n" + "Saved coordinate: {saved}".format( + recovered=montage2.get_positions()["ch_pos"]["LENT 1"], + saved=montage.get_positions()["ch_pos"]["LENT 1"], + ) +) # %% # Step 4: Cite mne-bids @@ -280,8 +302,8 @@ # We can see that the appropriate citations are already written in the README. # If you are preparing a manuscript, please make sure to also cite MNE-BIDS # there. -readme = op.join(bids_root, 'README') -with open(readme, 'r', encoding='utf-8-sig') as fid: +readme = op.join(bids_root, "README") +with open(readme, "r", encoding="utf-8-sig") as fid: text = fid.read() print(text) @@ -319,12 +341,11 @@ shutil.rmtree(bids_root) # load our raw data again -raw = mne.io.read_raw_fif(op.join( - misc_path, 'seeg', 'sample_seeg_ieeg.fif')) -raw.info['line_freq'] = 60 # specify power line frequency as required by BIDS +raw = mne.io.read_raw_fif(op.join(misc_path, "seeg", "sample_seeg_ieeg.fif")) +raw.info["line_freq"] = 60 # specify power line frequency as required by BIDS # get Talairach transform -mri_mni_t = mne.read_talxfm('sample_seeg', subjects_dir) +mri_mni_t = mne.read_talxfm("sample_seeg", subjects_dir) # %% # Now let's convert the montage to MNI Talairach ("mni_tal"). @@ -333,8 +354,9 @@ montage.apply_trans(mri_mni_t) # write to BIDS, this time with a template coordinate system -write_raw_bids(raw, bids_path, anonymize=dict(daysback=40000), - montage=montage, overwrite=True) +write_raw_bids( + raw, bids_path, anonymize=dict(daysback=40000), montage=montage, overwrite=True +) # read in the BIDS dataset raw2 = read_raw_bids(bids_path=bids_path) @@ -352,8 +374,7 @@ # use `coord_frame='mri'` to indicate that the montage is in surface RAS # and `unit='m'` to indicate that the units are in meters -trans2 = template_to_head( - raw2.info, space='fsaverage', coord_frame='mri', unit='m')[1] +trans2 = template_to_head(raw2.info, space="fsaverage", coord_frame="mri", unit="m")[1] # this a bit confusing since we transformed from mri->mni and now we're # saying we're back in 'mri' but that is because we were in the surface RAS # coordinate frame of `sample_seeg` and transformed to 'mni_tal', which is the @@ -367,28 +388,35 @@ # ``head -> mri`` ``trans`` which is the setup MNE-Python is designed around. 
# check that we can recover the coordinates -print('Recovered coordinate head: {recovered}\n' - 'Original coordinate head: {original}'.format( - recovered=raw2.info['chs'][0]['loc'][:3], - original=raw.info['chs'][0]['loc'][:3])) +print( + "Recovered coordinate head: {recovered}\n" + "Original coordinate head: {original}".format( + recovered=raw2.info["chs"][0]["loc"][:3], original=raw.info["chs"][0]["loc"][:3] + ) +) # check difference in trans -print('Recovered trans:\n{recovered}\n' - 'Original trans:\n{original}'.format( - recovered=trans2['trans'].round(3), - # combine head->mri with mri->mni to get head->mni - # and then invert to get mni->head - original=np.linalg.inv(np.dot(trans['trans'], mri_mni_t['trans']) - ).round(3))) +print( + "Recovered trans:\n{recovered}\n" + "Original trans:\n{original}".format( + recovered=trans2["trans"].round(3), + # combine head->mri with mri->mni to get head->mni + # and then invert to get mni->head + original=np.linalg.inv(np.dot(trans["trans"], mri_mni_t["trans"])).round(3), + ) +) # ensure that the data in MNI coordinates is exactly the same # (within computer precision) montage2 = raw2.get_montage() # get montage after transformed back to head montage2.apply_trans(trans2) -print('Recovered coordinate: {recovered}\n' - 'Original coordinate: {original}'.format( - recovered=montage2.get_positions()['ch_pos']['LENT 1'], - original=montage.get_positions()['ch_pos']['LENT 1'])) +print( + "Recovered coordinate: {recovered}\n" + "Original coordinate: {original}".format( + recovered=montage2.get_positions()["ch_pos"]["LENT 1"], + original=montage.get_positions()["ch_pos"]["LENT 1"], + ) +) # %% # As you can see the coordinates stored in the ``raw`` object are slightly off. @@ -447,40 +475,44 @@ shutil.rmtree(bids_root) # get a template mgz image to transform the montage to voxel coordinates -subjects_dir = op.join(mne.datasets.sample.data_path(), 'subjects') -template_T1 = nib.load(op.join(subjects_dir, 'fsaverage', 'mri', 'T1.mgz')) +subjects_dir = op.join(mne.datasets.sample.data_path(), "subjects") +template_T1 = nib.load(op.join(subjects_dir, "fsaverage", "mri", "T1.mgz")) # get voxels to surface RAS and scanner RAS transforms vox_mri_t = template_T1.header.get_vox2ras_tkr() # surface RAS vox_ras_t = template_T1.header.get_vox2ras() # scanner RAS -raw = mne.io.read_raw_fif(op.join( # load our raw data again - misc_path, 'seeg', 'sample_seeg_ieeg.fif')) +raw = mne.io.read_raw_fif( + op.join(misc_path, "seeg", "sample_seeg_ieeg.fif") # load our raw data again +) montage = raw.get_montage() # get the original montage montage.apply_trans(trans) # head->mri montage.apply_trans(mri_mni_t) # mri->mni pos = montage.get_positions() -ch_pos = np.array(list(pos['ch_pos'].values())) # get an array of positions +ch_pos = np.array(list(pos["ch_pos"].values())) # get an array of positions # mri -> vox and m -> mm ch_pos = mne.transforms.apply_trans(np.linalg.inv(vox_mri_t), ch_pos * 1000) ch_pos = mne.transforms.apply_trans(vox_ras_t, ch_pos) montage_ras = mne.channels.make_dig_montage( - ch_pos=dict(zip(pos['ch_pos'].keys(), ch_pos)), coord_frame='ras') + ch_pos=dict(zip(pos["ch_pos"].keys(), ch_pos)), coord_frame="ras" +) # specify our standard template coordinate system space -bids_path.update(datatype='ieeg', space='fsaverage') +bids_path.update(datatype="ieeg", space="fsaverage") # write to BIDS, this time with a template coordinate system in voxels -write_raw_bids(raw, bids_path, anonymize=dict(daysback=40000), - montage=montage_ras, overwrite=True) 
+write_raw_bids( + raw, bids_path, anonymize=dict(daysback=40000), montage=montage_ras, overwrite=True +) # %% # Now, let's load our data and convert our montage to ``head``. raw2 = read_raw_bids(bids_path=bids_path) trans2 = template_to_head( # unit='auto' automatically determines it's in mm - raw2.info, space='fsaverage', coord_frame='ras', unit='auto')[1] + raw2.info, space="fsaverage", coord_frame="ras", unit="auto" +)[1] # %% # Let's check to make sure again that the original coordinates from the BIDS @@ -488,10 +520,13 @@ montage2 = raw2.get_montage() # get montage after transformed back to head montage2.apply_trans(trans2) # apply trans to go back to 'mri' -print('Recovered coordinate: {recovered}\n' - 'Original coordinate: {original}'.format( - recovered=montage2.get_positions()['ch_pos']['LENT 1'], - original=montage.get_positions()['ch_pos']['LENT 1'])) +print( + "Recovered coordinate: {recovered}\n" + "Original coordinate: {original}".format( + recovered=montage2.get_positions()["ch_pos"]["LENT 1"], + original=montage.get_positions()["ch_pos"]["LENT 1"], + ) +) # %% # In summary, as we saw, these standard template spaces that are allowable by diff --git a/examples/convert_mne_sample.py b/examples/convert_mne_sample.py index c8841f952..5a58ff817 100644 --- a/examples/convert_mne_sample.py +++ b/examples/convert_mne_sample.py @@ -35,9 +35,15 @@ import mne from mne.datasets import sample -from mne_bids import (write_raw_bids, read_raw_bids, write_meg_calibration, - write_meg_crosstalk, BIDSPath, print_dir_tree, - make_dataset_description) +from mne_bids import ( + write_raw_bids, + read_raw_bids, + write_meg_calibration, + write_meg_crosstalk, + BIDSPath, + print_dir_tree, + make_dataset_description, +) from mne_bids.stats import count_events # %% @@ -48,13 +54,19 @@ # from. `output_path` determines where we will write the BIDS conversion to. data_path = sample.data_path() -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - -raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') -er_fname = op.join(data_path, 'MEG', 'sample', 'ernoise_raw.fif') # empty room -events_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif') -output_path = op.join(data_path, '..', 'MNE-sample-data-bids') +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, +} + +raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif") +er_fname = op.join(data_path, "MEG", "sample", "ernoise_raw.fif") # empty room +events_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw-eve.fif") +output_path = op.join(data_path, "..", "MNE-sample-data-bids") # %% # To ensure the output path doesn't contain any leftover files from previous @@ -85,17 +97,12 @@ raw_er = mne.io.read_raw(er_fname) # specify power line frequency as required by BIDS -raw.info['line_freq'] = 60 -raw_er.info['line_freq'] = 60 +raw.info["line_freq"] = 60 +raw_er.info["line_freq"] = 60 -task = 'audiovisual' +task = "audiovisual" bids_path = BIDSPath( - subject='01', - session='01', - task=task, - run='1', - datatype='meg', - root=output_path + subject="01", session="01", task=task, run="1", datatype="meg", root=output_path ) write_raw_bids( raw=raw, @@ -103,7 +110,7 @@ events=events_fname, event_id=event_id, empty_room=raw_er, - overwrite=True + overwrite=True, ) # %% @@ -111,12 +118,8 @@ # sidecar files that describe our data is correct. 
# Get the sidecar ``.json`` file -sidecar_json_bids_path = bids_path.copy().update( - suffix='meg', extension='.json' -) -sidecar_json_content = sidecar_json_bids_path.fpath.read_text( - encoding='utf-8-sig' -) +sidecar_json_bids_path = bids_path.copy().update(suffix="meg", extension=".json") +sidecar_json_content = sidecar_json_bids_path.fpath.read_text(encoding="utf-8-sig") print(sidecar_json_content) # %% @@ -124,8 +127,8 @@ # are required when processing Elekta/Neuromag/MEGIN data using MaxFilter®. # Let's store these data in appropriate places, too. -cal_fname = op.join(data_path, 'SSS', 'sss_cal_mgh.dat') -ct_fname = op.join(data_path, 'SSS', 'ct_sparse_mgh.fif') +cal_fname = op.join(data_path, "SSS", "sss_cal_mgh.dat") +ct_fname = op.join(data_path, "SSS", "ct_sparse_mgh.fif") write_meg_calibration(cal_fname, bids_path) write_meg_crosstalk(ct_fname, bids_path) @@ -154,7 +157,7 @@ events, event_id = mne.events_from_annotations(raw) epochs = mne.Epochs(raw, events, event_id) -epochs['Auditory'].average().plot() +epochs["Auditory"].average().plot() # %% # We can easily get the :class:`mne_bids.BIDSPath` of the empty-room recording @@ -176,8 +179,8 @@ # The README created by :func:`write_raw_bids` also takes care of the citation # for mne-bids. If you are preparing a manuscript, please make sure to also # cite MNE-BIDS there. -readme = op.join(output_path, 'README') -with open(readme, 'r', encoding='utf-8-sig') as fid: +readme = op.join(output_path, "README") +with open(readme, "r", encoding="utf-8-sig") as fid: text = fid.read() print(text) @@ -205,8 +208,10 @@ acknowledgements="""\ Alexandre Gramfort, Mainak Jas, and Stefan Appelhoff prepared and updated the \ data in BIDS format.""", - data_license='CC0', - ethics_approvals=['Human Subjects Division at the University of Washington'], # noqa: E501 + data_license="CC0", + ethics_approvals=[ + "Human Subjects Division at the University of Washington" + ], # noqa: E501 funding=[ "NIH 5R01EB009048", "NIH 1R01EB009048", @@ -218,18 +223,18 @@ "ANR-11-IDEX-0003-02", "ERC-StG-263584", "ERC-StG-676943", - "ANR-14-NEUC-0002-01" + "ANR-14-NEUC-0002-01", ], references_and_links=[ "https://doi.org/10.1016/j.neuroimage.2014.02.017", "https://doi.org/10.3389/fnins.2013.00267", - "https://mne.tools/stable/overview/datasets_index.html#sample" + "https://mne.tools/stable/overview/datasets_index.html#sample", ], doi="doi:10.18112/openneuro.ds000248.v1.2.4", - overwrite=True + overwrite=True, ) -desc_json_path = bids_path.root / 'dataset_description.json' -with open(desc_json_path, 'r', encoding='utf-8-sig') as fid: +desc_json_path = bids_path.root / "dataset_description.json" +with open(desc_json_path, "r", encoding="utf-8-sig") as fid: pprint(json.loads(fid.read())) # %% diff --git a/examples/convert_mri_and_trans.py b/examples/convert_mri_and_trans.py index 10bd21d36..d65685840 100644 --- a/examples/convert_mri_and_trans.py +++ b/examples/convert_mri_and_trans.py @@ -49,8 +49,14 @@ from mne.datasets import sample from mne import head_to_mri -from mne_bids import (write_raw_bids, BIDSPath, write_anat, get_anat_landmarks, - get_head_mri_trans, print_dir_tree) +from mne_bids import ( + write_raw_bids, + BIDSPath, + write_anat, + get_anat_landmarks, + get_head_mri_trans, + print_dir_tree, +) # %% # We will be using the `MNE sample data `_ and write a basic @@ -58,12 +64,18 @@ # :ref:`example `. 
data_path = sample.data_path() -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} -raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') -events_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif') -output_path = op.abspath(op.join(data_path, '..', 'MNE-sample-data-bids')) -fs_subjects_dir = op.join(data_path, 'subjects') # FreeSurfer subjects dir +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, +} +raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif") +events_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw-eve.fif") +output_path = op.abspath(op.join(data_path, "..", "MNE-sample-data-bids")) +fs_subjects_dir = op.join(data_path, "subjects") # FreeSurfer subjects dir # %% # To ensure the output path doesn't contain any leftover files from previous @@ -79,16 +91,14 @@ # Read the input data and store it as BIDS data. raw = mne.io.read_raw_fif(raw_fname) -raw.info['line_freq'] = 60 # specify power line frequency as required by BIDS +raw.info["line_freq"] = 60 # specify power line frequency as required by BIDS -sub = '01' -ses = '01' -task = 'audiovisual' -run = '01' -bids_path = BIDSPath(subject=sub, session=ses, task=task, - run=run, root=output_path) -write_raw_bids(raw, bids_path, events=events_fname, - event_id=event_id, overwrite=True) +sub = "01" +ses = "01" +task = "audiovisual" +run = "01" +bids_path = BIDSPath(subject=sub, session=ses, task=task, run=run, root=output_path) +write_raw_bids(raw, bids_path, events=events_fname, event_id=event_id, overwrite=True) # %% # Print the directory tree @@ -104,11 +114,10 @@ # matrix :code:`trans`. # Get the path to our MRI scan -t1_fname = op.join(fs_subjects_dir, 'sample', 'mri', 'T1.mgz') +t1_fname = op.join(fs_subjects_dir, "sample", "mri", "T1.mgz") # Load the transformation matrix and show what it looks like -trans_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_raw-trans.fif') +trans_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw-trans.fif") trans = mne.read_trans(trans_fname) print(trans) @@ -121,8 +130,7 @@ # w.r.t. the T1 image. # First create the BIDSPath object. -t1w_bids_path = BIDSPath(subject=sub, session=ses, root=output_path, - suffix='T1w') +t1w_bids_path = BIDSPath(subject=sub, session=ses, root=output_path, suffix="T1w") # use ``trans`` to transform landmarks from the ``raw`` file to # the voxel space of the image @@ -130,7 +138,7 @@ t1_fname, # path to the MRI scan info=raw.info, # the MEG data file info from the same subject as the MRI trans=trans, # our transformation matrix - fs_subject='sample', # FreeSurfer subject + fs_subject="sample", # FreeSurfer subject fs_subjects_dir=fs_subjects_dir, # FreeSurfer subjects directory ) @@ -139,7 +147,7 @@ image=t1_fname, # path to the MRI scan bids_path=t1w_bids_path, landmarks=landmarks, # the landmarks in MRI voxel space - verbose=True # this will print out the sidecar file + verbose=True, # this will print out the sidecar file ) anat_dir = t1w_bids_path.directory @@ -159,8 +167,9 @@ # .. note:: If this dataset were shared with you, you would first have to use # the T1 image as input for the FreeSurfer recon-all, see # :ref:`tut-freesurfer-mne`. 
-estim_trans = get_head_mri_trans(bids_path=bids_path, fs_subject='sample',
-                                 fs_subjects_dir=fs_subjects_dir)
+estim_trans = get_head_mri_trans(
+    bids_path=bids_path, fs_subject="sample", fs_subjects_dir=fs_subjects_dir
+)
 
 # %%
 # Finally, let's use the T1 weighted MRI image and plot the anatomical
@@ -171,29 +180,32 @@
 # Get Landmarks from MEG file, 0, 1, and 2 correspond to LPA, NAS, RPA
 # and the 'r' key will provide us with the xyz coordinates. The coordinates
 # are expressed here in MEG Head coordinate system.
-pos = np.asarray((raw.info['dig'][0]['r'],
-                  raw.info['dig'][1]['r'],
-                  raw.info['dig'][2]['r']))
+pos = np.asarray(
+    (raw.info["dig"][0]["r"], raw.info["dig"][1]["r"], raw.info["dig"][2]["r"])
+)
 
 # We now use the ``head_to_mri`` function from MNE-Python to convert MEG
 # coordinates to MRI scanner RAS space. For the conversion we use our
 # estimated transformation matrix and the MEG coordinates extracted from the
 # raw file. `subjects` and `subjects_dir` are used internally, to point to
 # the T1-weighted MRI file: `t1_mgh_fname`. Coordinates are in mm.
-mri_pos = head_to_mri(pos=pos,
-                      subject='sample',
-                      mri_head_t=estim_trans,
-                      subjects_dir=fs_subjects_dir)
+mri_pos = head_to_mri(
+    pos=pos, subject="sample", mri_head_t=estim_trans, subjects_dir=fs_subjects_dir
+)
 
 # Our MRI written to BIDS, we got `anat_dir` from our `write_anat` function
-t1_nii_fname = op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz')
+t1_nii_fname = op.join(anat_dir, "sub-01_ses-01_T1w.nii.gz")
 
 # Plot it
-fig, axs = plt.subplots(3, 1, figsize=(7, 7), facecolor='k')
-for point_idx, label in enumerate(('LPA', 'NAS', 'RPA')):
-    plot_anat(t1_nii_fname, axes=axs[point_idx],
-              cut_coords=mri_pos[point_idx, :],
-              title=label, vmax=160)
+fig, axs = plt.subplots(3, 1, figsize=(7, 7), facecolor="k")
+for point_idx, label in enumerate(("LPA", "NAS", "RPA")):
+    plot_anat(
+        t1_nii_fname,
+        axes=axs[point_idx],
+        cut_coords=mri_pos[point_idx, :],
+        title=label,
+        vmax=160,
+    )
 plt.show()
 
 # %%
@@ -202,16 +214,11 @@
 #
 # We can write other types of MRI data such as FLASH images for BEM models
 
-flash_fname = op.join(fs_subjects_dir, 'sample', 'mri', 'flash', 'mef05.mgz')
+flash_fname = op.join(fs_subjects_dir, "sample", "mri", "flash", "mef05.mgz")
 
-flash_bids_path = \
-    BIDSPath(subject=sub, session=ses, root=output_path, suffix='FLASH')
+flash_bids_path = BIDSPath(subject=sub, session=ses, root=output_path, suffix="FLASH")
 
-write_anat(
-    image=flash_fname,
-    bids_path=flash_bids_path,
-    verbose=True
-)
+write_anat(image=flash_fname, bids_path=flash_bids_path, verbose=True)
 
 # %%
 # Writing defaced and anonymized T1 image
@@ -224,16 +231,16 @@
     landmarks=landmarks,
     deface=True,
     overwrite=True,
-    verbose=True  # this will print out the sidecar file
+    verbose=True,  # this will print out the sidecar file
 )
 anat_dir = t1w_bids_path.directory
 
 # Our MRI written to BIDS, we got `anat_dir` from our `write_anat` function
-t1_nii_fname = op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz')
+t1_nii_fname = op.join(anat_dir, "sub-01_ses-01_T1w.nii.gz")
 
 # Plot it
 fig, ax = plt.subplots()
-plot_anat(t1_nii_fname, axes=ax, title='Defaced', vmax=160)
+plot_anat(t1_nii_fname, axes=ax, title="Defaced", vmax=160)
 plt.show()
 
 # %%
@@ -248,7 +255,7 @@
     flash_fname,  # path to the FLASH scan
     info=raw.info,  # the MEG data file info from the same subject as the MRI
     trans=trans,  # our transformation matrix
-    fs_subject='sample',  # freesurfer subject
+    fs_subject="sample",  # freesurfer subject
     fs_subjects_dir=fs_subjects_dir,  # freesurfer subjects directory
 )
@@ -258,15 +265,15 @@ landmarks=landmarks, deface=True, overwrite=True, - verbose=True # this will print out the sidecar file + verbose=True, # this will print out the sidecar file ) # Our MRI written to BIDS, we got `anat_dir` from our `write_anat` function -flash_nii_fname = op.join(anat_dir, 'sub-01_ses-01_FLASH.nii.gz') +flash_nii_fname = op.join(anat_dir, "sub-01_ses-01_FLASH.nii.gz") # Plot it fig, ax = plt.subplots() -plot_anat(flash_nii_fname, axes=ax, title='Defaced', vmax=700) +plot_anat(flash_nii_fname, axes=ax, title="Defaced", vmax=700) plt.show() # %% @@ -280,16 +287,22 @@ # .. note:: In FreeView, you need to use "RAS" and not "TkReg RAS" for this. # You can also use voxel coordinates but, in FreeView, they # are integers and so not as precise as the "RAS" decimal numbers. -flash_ras_landmarks = \ - np.array([[-74.53102838, 19.62854953, -52.2888194], - [-1.89454315, 103.69850925, 4.97120376], - [72.01200673, 21.09274883, -57.53678375]]) / 1e3 # mm -> m +flash_ras_landmarks = ( + np.array( + [ + [-74.53102838, 19.62854953, -52.2888194], + [-1.89454315, 103.69850925, 4.97120376], + [72.01200673, 21.09274883, -57.53678375], + ] + ) + / 1e3 +) # mm -> m landmarks = mne.channels.make_dig_montage( lpa=flash_ras_landmarks[0], nasion=flash_ras_landmarks[1], rpa=flash_ras_landmarks[2], - coord_frame='ras' + coord_frame="ras", ) flash_bids_path = write_anat( @@ -298,12 +311,12 @@ landmarks=landmarks, deface=True, overwrite=True, - verbose=True # this will print out the sidecar file + verbose=True, # this will print out the sidecar file ) # Plot it fig, ax = plt.subplots() -plot_anat(flash_nii_fname, axes=ax, title='Defaced', vmax=700) +plot_anat(flash_nii_fname, axes=ax, title="Defaced", vmax=700) plt.show() # %% diff --git a/examples/convert_nirs_to_bids.py b/examples/convert_nirs_to_bids.py index 8a50e238e..2b80e5c5c 100644 --- a/examples/convert_nirs_to_bids.py +++ b/examples/convert_nirs_to_bids.py @@ -71,7 +71,7 @@ # Load the data raw = mne.io.read_raw_snirf(file_path, preload=False) -raw.info['line_freq'] = 50 # specify power line frequency as required by BIDS +raw.info["line_freq"] = 50 # specify power line frequency as required by BIDS # Sanity check, show the optode positions raw.plot_sensors() @@ -80,9 +80,7 @@ # I also like to rename the annotations to something meaningful and # set the duration of each stimulus -trigger_info = {'1.0': 'Control', - '2.0': 'Tapping/Left', - '3.0': 'Tapping/Right'} +trigger_info = {"1.0": "Control", "2.0": "Tapping/Left", "3.0": "Tapping/Right"} raw.annotations.rename(trigger_info) raw.annotations.set_durations(5.0) @@ -106,11 +104,11 @@ print(write_raw_bids.__doc__) # zero padding to account for >100 subjects in this dataset -subject_id = '01' +subject_id = "01" # define a task name and a directory where to save the data to -task = 'Tapping' -bids_root = data_dir.with_name(data_dir.name + '-bids') +task = "Tapping" +bids_root = data_dir.with_name(data_dir.name + "-bids") print(bids_root) # %% @@ -173,8 +171,8 @@ # # If you are preparing a manuscript, please make sure to also cite MNE-BIDS # there. 
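The hunks above only touch the set-up of this example; the conversion call itself sits between them and is presumably untouched by the patch. As a rough sketch of how the pieces defined above fit together (``subject_id``, ``task``, ``bids_root``, and the annotated ``raw`` are the example's variables; the real call may pass more arguments):

```Python
from mne_bids import BIDSPath, write_raw_bids

# Combine the entities defined above into an output location and write the
# annotated SNIRF recording into the BIDS folder.
bids_path = BIDSPath(subject=subject_id, task=task, root=bids_root)
write_raw_bids(raw, bids_path, overwrite=True)
```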
-readme = op.join(bids_root, 'README') -with open(readme, 'r', encoding='utf-8-sig') as fid: +readme = op.join(bids_root, "README") +with open(readme, "r", encoding="utf-8-sig") as fid: text = fid.read() print(text) diff --git a/examples/create_bids_folder.py b/examples/create_bids_folder.py index 13929970b..ba3c53220 100644 --- a/examples/create_bids_folder.py +++ b/examples/create_bids_folder.py @@ -31,15 +31,16 @@ # pieces of metadata, ensuring that they are in the correct order in the # final file path. Omitted keys will not be included in the file path. -bids_path = BIDSPath(subject='test', session='two', task='mytask', - suffix='events', extension='.tsv') +bids_path = BIDSPath( + subject="test", session="two", task="mytask", suffix="events", extension=".tsv" +) print(bids_path) # %% # You may also omit the suffix, which will result in *only* a prefix for a # file name. This could then prepended to many more files. -bids_path = BIDSPath(subject='test', task='mytask') +bids_path = BIDSPath(subject="test", task="mytask") print(bids_path) # %% @@ -48,6 +49,7 @@ # # You can also use MNE-BIDS to create folder hierarchies. -bids_path = BIDSPath(subject='01', session='mysession', - datatype='meg', root='path/to/project').mkdir() +bids_path = BIDSPath( + subject="01", session="mysession", datatype="meg", root="path/to/project" +).mkdir() print(bids_path.directory) diff --git a/examples/mark_bad_channels.py b/examples/mark_bad_channels.py index 24ece4cfa..5d65de70e 100644 --- a/examples/mark_bad_channels.py +++ b/examples/mark_bad_channels.py @@ -30,17 +30,29 @@ import shutil import mne -from mne_bids import (BIDSPath, write_raw_bids, read_raw_bids, - inspect_dataset, mark_channels) +from mne_bids import ( + BIDSPath, + write_raw_bids, + read_raw_bids, + inspect_dataset, + mark_channels, +) data_path = mne.datasets.sample.data_path() -raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') -events_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif') -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} -bids_root = op.join(data_path, '..', 'MNE-sample-data-bids') -bids_path = BIDSPath(subject='01', session='01', task='audiovisual', run='01', - root=bids_root) +raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif") +events_fname = op.join(data_path, "MEG", "sample", "sample_audvis_raw-eve.fif") +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, +} +bids_root = op.join(data_path, "..", "MNE-sample-data-bids") +bids_path = BIDSPath( + subject="01", session="01", task="audiovisual", run="01", root=bids_root +) # %% # To ensure the output path doesn't contain any leftover files from previous @@ -56,9 +68,15 @@ # Now write the raw data to BIDS. raw = mne.io.read_raw_fif(raw_fname, verbose=False) -raw.info['line_freq'] = 60 # Specify power line frequency as required by BIDS. -write_raw_bids(raw, bids_path=bids_path, events=events_fname, - event_id=event_id, overwrite=True, verbose=False) +raw.info["line_freq"] = 60 # Specify power line frequency as required by BIDS. +write_raw_bids( + raw, + bids_path=bids_path, + events=events_fname, + event_id=event_id, + overwrite=True, + verbose=False, +) # %% # Interactive use @@ -81,7 +99,7 @@ # high-frequency artifacts. This can make visual inspection easier. 
Let's # apply filters with a 1-Hz high-pass cutoff, and a 30-Hz low-pass cutoff: -inspect_dataset(bids_path, l_freq=1., h_freq=30.) +inspect_dataset(bids_path, l_freq=1.0, h_freq=30.0) # %% # By pressing the ``A`` key, you can toggle annotation mode to add, edit, or @@ -98,8 +116,10 @@ # marked as bad. raw = read_raw_bids(bids_path=bids_path, verbose=False) -print(f'The following channels are currently marked as bad:\n' - f' {", ".join(raw.info["bads"])}\n') +print( + f"The following channels are currently marked as bad:\n" + f' {", ".join(raw.info["bads"])}\n' +) # %% # So currently, two channels are marked as bad: ``EEG 053`` and ``MEG 2443``. @@ -108,16 +128,17 @@ # To do that, we simply add them to a list, which we then pass to # :func:`mne_bids.mark_channels`: -bads = ['MEG 0112', 'MEG 0131'] -mark_channels(bids_path=bids_path, ch_names=bads, status='bad', - verbose=False) +bads = ["MEG 0112", "MEG 0131"] +mark_channels(bids_path=bids_path, ch_names=bads, status="bad", verbose=False) # %% # That's it! Let's verify the result. raw = read_raw_bids(bids_path=bids_path, verbose=False) -print(f'After marking MEG 0112 and MEG 0131 as bad, the following channels ' - f'are now marked as bad:\n {", ".join(raw.info["bads"])}\n') +print( + f"After marking MEG 0112 and MEG 0131 as bad, the following channels " + f'are now marked as bad:\n {", ".join(raw.info["bads"])}\n' +) # %% # As you can see, now a total of **four** channels is marked as bad: the ones @@ -131,21 +152,25 @@ # If you instead would like to **replace** the collection of bad channels # entirely, pass the argument ``overwrite=True``: -bads = ['MEG 0112', 'MEG 0131'] -mark_channels(bids_path=bids_path, ch_names=bads, status='bad', verbose=False) +bads = ["MEG 0112", "MEG 0131"] +mark_channels(bids_path=bids_path, ch_names=bads, status="bad", verbose=False) raw = read_raw_bids(bids_path=bids_path, verbose=False) -print(f'After marking MEG 0112 and MEG 0131 as bad and passing ' - f'`overwrite=True`, the following channels ' - f'are now marked as bad:\n {", ".join(raw.info["bads"])}\n') +print( + f"After marking MEG 0112 and MEG 0131 as bad and passing " + f"`overwrite=True`, the following channels " + f'are now marked as bad:\n {", ".join(raw.info["bads"])}\n' +) # %% # Lastly, if you're looking for a way to mark all channels as good, simply # pass an empty list as ``ch_names``, combined with ``overwrite=True``: bads = [] -mark_channels(bids_path=bids_path, ch_names=bads, status='bad', verbose=False) +mark_channels(bids_path=bids_path, ch_names=bads, status="bad", verbose=False) raw = read_raw_bids(bids_path=bids_path, verbose=False) -print(f'After passing `ch_names=[]` and `overwrite=True`, the following ' - f'channels are now marked as bad:\n {", ".join(raw.info["bads"])}\n') +print( + f"After passing `ch_names=[]` and `overwrite=True`, the following " + f'channels are now marked as bad:\n {", ".join(raw.info["bads"])}\n' +) diff --git a/examples/read_bids_datasets.py b/examples/read_bids_datasets.py index 809ece6a7..8ab48b0d4 100644 --- a/examples/read_bids_datasets.py +++ b/examples/read_bids_datasets.py @@ -39,8 +39,14 @@ import openneuro from mne.datasets import sample -from mne_bids import (BIDSPath, read_raw_bids, print_dir_tree, make_report, - find_matching_paths, get_entity_vals) +from mne_bids import ( + BIDSPath, + read_raw_bids, + print_dir_tree, + make_report, + find_matching_paths, + get_entity_vals, +) # %% # Download a subject's data from an OpenNeuro BIDS dataset @@ -57,16 +63,15 @@ # We're just using data from one 
subject to reduce the time # it takes to run the example. -dataset = 'ds002778' -subject = 'pd6' +dataset = "ds002778" +subject = "pd6" # Download one subject's data from each dataset bids_root = op.join(op.dirname(sample.data_path()), dataset) if not op.isdir(bids_root): os.makedirs(bids_root) -openneuro.download(dataset=dataset, target_dir=bids_root, - include=[f'sub-{subject}']) +openneuro.download(dataset=dataset, target_dir=bids_root, include=[f"sub-{subject}"]) # %% # Explore the dataset contents @@ -96,11 +101,12 @@ # one where they abstained for more than twelve hours. For now, we are # not interested in the on-medication session. -sessions = get_entity_vals(bids_root, 'session', ignore_sessions='on') -datatype = 'eeg' +sessions = get_entity_vals(bids_root, "session", ignore_sessions="on") +datatype = "eeg" extensions = [".bdf", ".tsv"] # ignore .json files -bids_paths = find_matching_paths(bids_root, datatypes=datatype, - sessions=sessions, extensions=extensions) +bids_paths = find_matching_paths( + bids_root, datatypes=datatype, sessions=sessions, extensions=extensions +) # %% # We can now retrieve a list of all MEG-related files in the dataset: @@ -108,7 +114,7 @@ # %% # Note that this is the same as running: -session = 'off' +session = "off" bids_path = BIDSPath(root=bids_root, session=session, datatype=datatype) print(bids_path.match(ignore_json=True)) @@ -132,8 +138,8 @@ # our example, and ``'eeg'`` for EEG raw data. For MEG and EEG raw data, the # suffix is identical to the datatype, so don't let yourself be confused here! -task = 'rest' -suffix = 'eeg' +task = "rest" +suffix = "eeg" bids_path = bids_path.update(subject=subject, task=task, suffix=suffix) @@ -190,17 +196,17 @@ # # Basic subject metadata is here. -print(raw.info['subject_info']) +print(raw.info["subject_info"]) # %% # Power line frequency is here. -print(raw.info['line_freq']) +print(raw.info["line_freq"]) # %% # Sampling frequency is here. -print(raw.info['sfreq']) +print(raw.info["sfreq"]) # %% # Events are now Annotations diff --git a/examples/rename_brainvision_files.py b/examples/rename_brainvision_files.py index e10d05271..9a4ee4629 100644 --- a/examples/rename_brainvision_files.py +++ b/examples/rename_brainvision_files.py @@ -51,7 +51,7 @@ # .. warning:: This will download 1.6 GB of data! 
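One hedged aside on the warning above: MNE's dataset fetchers accept a ``download`` switch, so you can probe whether the testing data is already on disk without starting the 1.6 GB download. A sketch, assuming a recent MNE-Python:

```Python
import mne

# With download=False nothing is fetched; the call returns the local path
# if the testing dataset is already present, and an empty path otherwise.
path = mne.datasets.testing.data_path(download=False)
print(path)
```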
data_path = mne.datasets.testing.data_path() -examples_dir = op.join(data_path, 'Brainvision') +examples_dir = op.join(data_path, "Brainvision") # %% # Rename the recording @@ -73,8 +73,8 @@ # Here, we rename a test file name: # Rename the file -vhdr_file = op.join(examples_dir, 'Analyzer_nV_Export.vhdr') -vhdr_file_renamed = op.join(examples_dir, 'test_renamed.vhdr') +vhdr_file = op.join(examples_dir, "Analyzer_nV_Export.vhdr") +vhdr_file_renamed = op.join(examples_dir, "test_renamed.vhdr") copyfile_brainvision(vhdr_file, vhdr_file_renamed, verbose=True) # Check that MNE-Python can read in both, the original as well as the renamed diff --git a/examples/update_bids_datasets.py b/examples/update_bids_datasets.py index 0a6446c42..6b0ae077d 100644 --- a/examples/update_bids_datasets.py +++ b/examples/update_bids_datasets.py @@ -21,8 +21,13 @@ # We are importing everything we need for this example: from mne.datasets import somato -from mne_bids import (read_raw_bids, find_matching_paths, - print_dir_tree, make_report, update_sidecar_json) +from mne_bids import ( + read_raw_bids, + find_matching_paths, + print_dir_tree, + make_report, + update_sidecar_json, +) # %% # We will be using the `MNE somato data `_, which @@ -62,19 +67,18 @@ # Search for all matching BIDSPaths in the root directory bids_root = somato.data_path() -suffix = 'meg' -extension = '.fif' +suffix = "meg" +extension = ".fif" -bids_paths = find_matching_paths(bids_root, suffixes=suffix, - extensions=extension) +bids_paths = find_matching_paths(bids_root, suffixes=suffix, extensions=extension) # We can now retrieve a list of all MEG-related files in the dataset: print(bids_paths) # Define a sidecar update as a dictionary entries = { - 'PowerLineFrequency': 60, - 'Manufacturer': "MEGIN", - 'InstitutionName': "Martinos Center" + "PowerLineFrequency": 60, + "Manufacturer": "MEGIN", + "InstitutionName": "Martinos Center", } # Note: ``update_sidecar_json`` will perform essentially a @@ -86,7 +90,7 @@ # # Now update all sidecar fields according to our updating dictionary bids_path = bids_paths[0] -sidecar_path = bids_path.copy().update(extension='.json') +sidecar_path = bids_path.copy().update(extension=".json") update_sidecar_json(bids_path=sidecar_path, entries=entries) # %% @@ -95,7 +99,7 @@ # new line frequency is now 60 Hz raw = read_raw_bids(bids_path=bids_path) -print(raw.info['line_freq']) +print(raw.info["line_freq"]) # %% # Generate a new report based on the updated metadata. @@ -107,8 +111,8 @@ # We can revert the changes by updating the sidecar again. 
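Before the revert that follows, it can be instructive to look at what actually landed on disk. A small sketch (editorial, reusing the ``sidecar_path`` defined earlier in this example) that reads the updated sidecar directly:

```Python
import json

# BIDSPath.fpath is the full filename of the sidecar we just updated.
with open(sidecar_path.fpath, "r", encoding="utf-8") as fin:
    sidecar = json.load(fin)

# update_sidecar_json() essentially performs a dict update, so keys that
# were not listed in `entries` are still present in the file.
print(sidecar["PowerLineFrequency"])  # expected: 60
```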
# update the sidecar data to have a new PowerLineFrequency -entries['Manufacturer'] = "Elekta" -entries['PowerLineFrequency'] = 50 +entries["Manufacturer"] = "Elekta" +entries["PowerLineFrequency"] = 50 update_sidecar_json(bids_path=sidecar_path, entries=entries) # %% @@ -117,7 +121,7 @@ # The power line frequency should now change back to 50 Hz raw = read_raw_bids(bids_path=bids_path) -print(raw.info['line_freq']) +print(raw.info["line_freq"]) # Generate the report with updated fields print(make_report(bids_root)) diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py index 215fbb2d7..6ad19b988 100644 --- a/mne_bids/__init__.py +++ b/mne_bids/__init__.py @@ -1,19 +1,34 @@ """MNE software for easily interacting with BIDS compatible datasets.""" -__version__ = '0.13.dev0' +__version__ = "0.13.dev0" from mne_bids import commands from mne_bids.report import make_report -from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals, - print_dir_tree, get_entities_from_fname, - search_folder_for_text, get_bids_path_from_fname, - find_matching_paths) +from mne_bids.path import ( + BIDSPath, + get_datatypes, + get_entity_vals, + print_dir_tree, + get_entities_from_fname, + search_folder_for_text, + get_bids_path_from_fname, + find_matching_paths, +) from mne_bids.read import get_head_mri_trans, read_raw_bids from mne_bids.utils import get_anonymization_daysback -from mne_bids.write import (make_dataset_description, write_anat, - write_raw_bids, mark_channels, - write_meg_calibration, write_meg_crosstalk, - get_anat_landmarks, anonymize_dataset) +from mne_bids.write import ( + make_dataset_description, + write_anat, + write_raw_bids, + mark_channels, + write_meg_calibration, + write_meg_crosstalk, + get_anat_landmarks, + anonymize_dataset, +) from mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks from mne_bids.inspect import inspect_dataset -from mne_bids.dig import (template_to_head, convert_montage_to_ras, - convert_montage_to_mri) +from mne_bids.dig import ( + template_to_head, + convert_montage_to_ras, + convert_montage_to_mri, +) diff --git a/mne_bids/commands/mne_bids_calibration_to_bids.py b/mne_bids/commands/mne_bids_calibration_to_bids.py index 5f4ec7f49..100dc6ee5 100644 --- a/mne_bids/commands/mne_bids_calibration_to_bids.py +++ b/mne_bids/commands/mne_bids_calibration_to_bids.py @@ -19,42 +19,49 @@ def run(): """Run the calibration_to_bids command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) - - parser.add_option('--bids_root', dest='bids_root', - help='The path of the folder containing the BIDS ' - 'dataset') - parser.add_option('--subject_id', dest='subject', - help=('Subject name')) - parser.add_option('--session_id', dest='session', - help='Session name') - parser.add_option('--file', dest='fname', - help='The path of the crosstalk file') - parser.add_option('--verbose', dest='verbose', action='store_true', - help='Whether do generate additional diagnostic output') + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) + + parser.add_option( + "--bids_root", + dest="bids_root", + help="The path of the folder containing the BIDS " "dataset", + ) + parser.add_option("--subject_id", dest="subject", help=("Subject name")) + parser.add_option("--session_id", dest="session", help="Session name") + parser.add_option("--file", dest="fname", 
help="The path of the crosstalk file") + parser.add_option( + "--verbose", + dest="verbose", + action="store_true", + help="Whether do generate additional diagnostic output", + ) opt, args = parser.parse_args() if args: parser.print_help() - parser.error(f'Please do not specify arguments without flags. ' - f'Got: {args}.\n') + parser.error( + f"Please do not specify arguments without flags. " f"Got: {args}.\n" + ) if opt.bids_root is None: parser.print_help() - parser.error('You must specify bids_root') + parser.error("You must specify bids_root") if opt.subject is None: parser.print_help() - parser.error('You must specify a subject') + parser.error("You must specify a subject") - bids_path = BIDSPath(subject=opt.subject, session=opt.session, - root=opt.bids_root) + bids_path = BIDSPath(subject=opt.subject, session=opt.session, root=opt.bids_root) - logger.info(f'Writing fine-calibration file {bids_path.basename} …') - write_meg_calibration(calibration=opt.fname, bids_path=bids_path, - verbose=opt.verbose) + logger.info(f"Writing fine-calibration file {bids_path.basename} …") + write_meg_calibration( + calibration=opt.fname, bids_path=bids_path, verbose=opt.verbose + ) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_count_events.py b/mne_bids/commands/mne_bids_count_events.py index 261435222..d3cc64689 100644 --- a/mne_bids/commands/mne_bids_count_events.py +++ b/mne_bids/commands/mne_bids_count_events.py @@ -17,43 +17,65 @@ def run(): import pandas as pd from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) - - parser.add_option('--bids_root', dest='bids_root', - help='The path of the BIDS compatible folder.') - - parser.add_option('--datatype', dest='datatype', default='auto', - help='The datatype to consider.') - - parser.add_option('--describe', dest='describe', action="store_true", - help=('If set print the descriptive statistics ' - '(min, max, etc.).')) - - parser.add_option('--output', dest='output', default=None, - help='Path to the CSV file where to store the results.') - - parser.add_option('--overwrite', dest='overwrite', action='store_true', - help='If set, overwrite an existing output file.') - - parser.add_option('--silent', dest='silent', action='store_true', - help='Whether to print the event counts on the screen.') + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) + + parser.add_option( + "--bids_root", dest="bids_root", help="The path of the BIDS compatible folder." + ) + + parser.add_option( + "--datatype", dest="datatype", default="auto", help="The datatype to consider." + ) + + parser.add_option( + "--describe", + dest="describe", + action="store_true", + help=("If set print the descriptive statistics " "(min, max, etc.)."), + ) + + parser.add_option( + "--output", + dest="output", + default=None, + help="Path to the CSV file where to store the results.", + ) + + parser.add_option( + "--overwrite", + dest="overwrite", + action="store_true", + help="If set, overwrite an existing output file.", + ) + + parser.add_option( + "--silent", + dest="silent", + action="store_true", + help="Whether to print the event counts on the screen.", + ) opt, args = parser.parse_args() if len(args) > 0: parser.print_help() - parser.error('Do not specify arguments without flags. 
Found: "{}".\n' - .format(args)) + parser.error( + 'Do not specify arguments without flags. Found: "{}".\n'.format(args) + ) if not all([opt.bids_root]): parser.print_help() - parser.error('Arguments missing. You need to specify the ' - '--bids_root parameter.') + parser.error( + "Arguments missing. You need to specify the " "--bids_root parameter." + ) if opt.output and Path(opt.output).exists() and not opt.overwrite: - parser.error('Output file exists. To overwrite, pass --overwrite') + parser.error("Output file exists. To overwrite, pass --overwrite") counts = count_events(opt.bids_root, datatype=opt.datatype) @@ -62,16 +84,14 @@ def run(): if not opt.silent: with pd.option_context( - 'display.max_rows', 1000, - 'display.max_columns', 80, - 'display.width', 1000 + "display.max_rows", 1000, "display.max_columns", 80, "display.width", 1000 ): print(counts) if opt.output: counts.to_csv(opt.output) - print(f'\nOutput stored in {opt.output}') + print(f"\nOutput stored in {opt.output}") -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_cp.py b/mne_bids/commands/mne_bids_cp.py index 76cd21a18..ffb5e984e 100644 --- a/mne_bids/commands/mne_bids_cp.py +++ b/mne_bids/commands/mne_bids_cp.py @@ -6,31 +6,47 @@ # # License: BSD-3-Clause import mne_bids -from mne_bids.copyfiles import (copyfile_brainvision, copyfile_eeglab, - copyfile_ctf) +from mne_bids.copyfiles import copyfile_brainvision, copyfile_eeglab, copyfile_ctf def run(): """Run the cp command.""" from mne.commands.utils import get_optparser - accepted_formats_msg = ('(accepted formats: BrainVision .vhdr, ' - 'EEGLAB .set, CTF .ds)') + accepted_formats_msg = ( + "(accepted formats: BrainVision .vhdr, " "EEGLAB .set, CTF .ds)" + ) - parser = get_optparser(__file__, usage="usage: %prog -i INPUT -o OUTPUT", - prog_prefix='mne_bids', - version=mne_bids.__version__) + parser = get_optparser( + __file__, + usage="usage: %prog -i INPUT -o OUTPUT", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) - parser.add_option('-i', '--input', dest='input', - help=('path to the input file. {}' - .format(accepted_formats_msg)), metavar='INPUT') + parser.add_option( + "-i", + "--input", + dest="input", + help=("path to the input file. {}".format(accepted_formats_msg)), + metavar="INPUT", + ) - parser.add_option('-o', '--output', dest='output', - help=('path to the output file (MUST be same format ' - 'as input file)'), metavar='OUTPUT') + parser.add_option( + "-o", + "--output", + dest="output", + help=("path to the output file (MUST be same format " "as input file)"), + metavar="OUTPUT", + ) - parser.add_option('-v', '--verbose', dest="verbose", - help='set logging level to verbose', action="store_true") + parser.add_option( + "-v", + "--verbose", + dest="verbose", + help="set logging level to verbose", + action="store_true", + ) opt, args = parser.parse_args() opt_dict = vars(opt) @@ -38,26 +54,29 @@ def run(): # Check the usage and raise error if invalid if len(args) > 0: parser.print_help() - parser.error('Do not specify arguments without flags. Found: "{}".\n' - 'Did you forget to provide -i and -o?' - .format(args)) + parser.error( + 'Do not specify arguments without flags. Found: "{}".\n' + "Did you forget to provide -i and -o?".format(args) + ) - if not opt_dict.get('input') or not opt_dict.get('output'): + if not opt_dict.get("input") or not opt_dict.get("output"): parser.print_help() - parser.error('Incorrect number of arguments. Supply one input and one ' - 'output file. 
You supplied: "{}"'.format(opt)) + parser.error( + "Incorrect number of arguments. Supply one input and one " + 'output file. You supplied: "{}"'.format(opt) + ) # Attempt to do the copying. Errors will be raised by the copyfile # functions if there are issues with the file formats - if opt.input.endswith('.vhdr'): + if opt.input.endswith(".vhdr"): copyfile_brainvision(opt.input, opt.output, opt.verbose) - elif opt.input.endswith('.set'): + elif opt.input.endswith(".set"): copyfile_eeglab(opt.input, opt.output) - elif opt.input.endswith('.ds'): + elif opt.input.endswith(".ds"): copyfile_ctf(opt.input, opt.output) else: parser.error('{} You supplied: "{}"'.format(accepted_formats_msg, opt)) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_crosstalk_to_bids.py b/mne_bids/commands/mne_bids_crosstalk_to_bids.py index 4ff306a47..d91886255 100644 --- a/mne_bids/commands/mne_bids_crosstalk_to_bids.py +++ b/mne_bids/commands/mne_bids_crosstalk_to_bids.py @@ -19,42 +19,47 @@ def run(): """Run the crosstalk_to_bids command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) - - parser.add_option('--bids_root', dest='bids_root', - help='The path of the folder containing the BIDS ' - 'dataset') - parser.add_option('--subject_id', dest='subject', - help=('Subject name')) - parser.add_option('--session_id', dest='session', - help='Session name') - parser.add_option('--file', dest='fname', - help='The path of the crosstalk file') - parser.add_option('--verbose', dest='verbose', action='store_true', - help='Whether do generate additional diagnostic output') + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) + + parser.add_option( + "--bids_root", + dest="bids_root", + help="The path of the folder containing the BIDS " "dataset", + ) + parser.add_option("--subject_id", dest="subject", help=("Subject name")) + parser.add_option("--session_id", dest="session", help="Session name") + parser.add_option("--file", dest="fname", help="The path of the crosstalk file") + parser.add_option( + "--verbose", + dest="verbose", + action="store_true", + help="Whether do generate additional diagnostic output", + ) opt, args = parser.parse_args() if args: parser.print_help() - parser.error(f'Please do not specify arguments without flags. ' - f'Got: {args}.\n') + parser.error( + f"Please do not specify arguments without flags. 
" f"Got: {args}.\n" + ) if opt.bids_root is None: parser.print_help() - parser.error('You must specify bids_root') + parser.error("You must specify bids_root") if opt.subject is None: parser.print_help() - parser.error('You must specify a subject') + parser.error("You must specify a subject") - bids_path = BIDSPath(subject=opt.subject, session=opt.session, - root=opt.bids_root) + bids_path = BIDSPath(subject=opt.subject, session=opt.session, root=opt.bids_root) - logger.info(f'Writing crosstalk file {bids_path.basename} …') - write_meg_crosstalk(fname=opt.fname, bids_path=bids_path, - verbose=opt.verbose) + logger.info(f"Writing crosstalk file {bids_path.basename} …") + write_meg_crosstalk(fname=opt.fname, bids_path=bids_path, verbose=opt.verbose) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_inspect.py b/mne_bids/commands/mne_bids_inspect.py index 231ce6001..6e7d187e3 100644 --- a/mne_bids/commands/mne_bids_inspect.py +++ b/mne_bids/commands/mne_bids_inspect.py @@ -19,71 +19,94 @@ def run(): """Run the mark_channels command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) - parser.add_option('--bids_root', dest='bids_root', - help='The path of the folder containing the BIDS ' - 'dataset') - parser.add_option('--subject_id', dest='subject', - help=('Subject name')) - parser.add_option('--session_id', dest='session', - help='Session name') - parser.add_option('--task', dest='task', - help='Task name') - parser.add_option('--acq', dest='acquisition', - help='Acquisition parameter') - parser.add_option('--run', dest='run', - help='Run number') - parser.add_option('--proc', dest='processing', - help='Processing label.') - parser.add_option('--rec', dest='recording', - help='Recording name') - parser.add_option('--type', dest='datatype', - help='Recording data type, e.g. meg, ieeg or eeg') - parser.add_option('--suffix', dest='suffix', - help='The filename suffix, i.e. the last part before ' - 'the extension') - parser.add_option('--ext', dest='extension', - help='The filename extension, including the leading ' - 'period, e.g. .fif') - parser.add_option('--find_flat', dest='find_flat', - help='Whether to auto-detect flat channels and time ' - 'segments') - parser.add_option('--l_freq', dest='l_freq', - help='The high-pass filter cutoff frequency') - parser.add_option('--h_freq', dest='h_freq', - help='The low-pass filter cutoff frequency') - parser.add_option('--verbose', dest='verbose', action='store_true', - help='Whether do generate additional diagnostic output') + parser.add_option( + "--bids_root", + dest="bids_root", + help="The path of the folder containing the BIDS " "dataset", + ) + parser.add_option("--subject_id", dest="subject", help=("Subject name")) + parser.add_option("--session_id", dest="session", help="Session name") + parser.add_option("--task", dest="task", help="Task name") + parser.add_option("--acq", dest="acquisition", help="Acquisition parameter") + parser.add_option("--run", dest="run", help="Run number") + parser.add_option("--proc", dest="processing", help="Processing label.") + parser.add_option("--rec", dest="recording", help="Recording name") + parser.add_option( + "--type", dest="datatype", help="Recording data type, e.g. 
meg, ieeg or eeg" + ) + parser.add_option( + "--suffix", + dest="suffix", + help="The filename suffix, i.e. the last part before " "the extension", + ) + parser.add_option( + "--ext", + dest="extension", + help="The filename extension, including the leading " "period, e.g. .fif", + ) + parser.add_option( + "--find_flat", + dest="find_flat", + help="Whether to auto-detect flat channels and time " "segments", + ) + parser.add_option( + "--l_freq", dest="l_freq", help="The high-pass filter cutoff frequency" + ) + parser.add_option( + "--h_freq", dest="h_freq", help="The low-pass filter cutoff frequency" + ) + parser.add_option( + "--verbose", + dest="verbose", + action="store_true", + help="Whether do generate additional diagnostic output", + ) opt, args = parser.parse_args() if args: parser.print_help() - parser.error(f'Please do not specify arguments without flags. ' - f'Got: {args}.\n') + parser.error( + f"Please do not specify arguments without flags. " f"Got: {args}.\n" + ) if opt.bids_root is None: parser.print_help() - parser.error('You must specify bids_root') + parser.error("You must specify bids_root") - bids_path = BIDSPath(subject=opt.subject, session=opt.session, - task=opt.task, acquisition=opt.acquisition, - run=opt.run, processing=opt.processing, - recording=opt.recording, datatype=opt.datatype, - suffix=opt.suffix, extension=opt.extension, - root=opt.bids_root) + bids_path = BIDSPath( + subject=opt.subject, + session=opt.session, + task=opt.task, + acquisition=opt.acquisition, + run=opt.run, + processing=opt.processing, + recording=opt.recording, + datatype=opt.datatype, + suffix=opt.suffix, + extension=opt.extension, + root=opt.bids_root, + ) find_flat = True if opt.find_flat is None else bool(opt.find_flat) l_freq = None if opt.l_freq is None else float(opt.l_freq) h_freq = None if opt.h_freq is None else float(opt.h_freq) - logger.info(f'Inspecting {bids_path.basename} …') - inspect_dataset(bids_path=bids_path, find_flat=find_flat, - l_freq=l_freq, h_freq=h_freq, - verbose=opt.verbose) + logger.info(f"Inspecting {bids_path.basename} …") + inspect_dataset( + bids_path=bids_path, + find_flat=find_flat, + l_freq=l_freq, + h_freq=h_freq, + verbose=opt.verbose, + ) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_mark_channels.py b/mne_bids/commands/mne_bids_mark_channels.py index 5a22a3fb6..b213ec39c 100644 --- a/mne_bids/commands/mne_bids_mark_channels.py +++ b/mne_bids/commands/mne_bids_mark_channels.py @@ -22,93 +22,125 @@ def run(): """Run the mark_channels command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) - - parser.add_option('--ch_name', dest='ch_names', action='append', - default=[], - help='The names of the bad channels. If multiple ' - 'channels are bad, pass the --ch_name parameter ' - 'multiple times.') - parser.add_option('--status', - default='bad', - help='Status of the channels (Either "good", or "bad").') - parser.add_option('--description', dest='descriptions', action='append', - default=[], - help='Descriptions as to why the channels are bad. ' - 'Must match the number of bad channels provided. 
' - 'Pass multiple times to supply more than one ' - 'value in that case.') - parser.add_option('--bids_root', dest='bids_root', - help='The path of the folder containing the BIDS ' - 'dataset') - parser.add_option('--subject_id', dest='subject', - help=('Subject name')) - parser.add_option('--session_id', dest='session', - help='Session name') - parser.add_option('--task', dest='task', - help='Task name') - parser.add_option('--acq', dest='acquisition', - help='Acquisition parameter') - parser.add_option('--run', dest='run', - help='Run number') - parser.add_option('--proc', dest='processing', - help='Processing label.') - parser.add_option('--rec', dest='recording', - help='Recording name') - parser.add_option('--type', dest='datatype', - help='Recording data type, e.g. meg, ieeg or eeg') - parser.add_option('--suffix', dest='suffix', - help='The filename suffix, i.e. the last part before ' - 'the extension') - parser.add_option('--ext', dest='extension', - help='The filename extension, including the leading ' - 'period, e.g. .fif') - parser.add_option('--verbose', dest='verbose', action='store_true', - help='Whether do generate additional diagnostic output') + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) + + parser.add_option( + "--ch_name", + dest="ch_names", + action="append", + default=[], + help="The names of the bad channels. If multiple " + "channels are bad, pass the --ch_name parameter " + "multiple times.", + ) + parser.add_option( + "--status", + default="bad", + help='Status of the channels (Either "good", or "bad").', + ) + parser.add_option( + "--description", + dest="descriptions", + action="append", + default=[], + help="Descriptions as to why the channels are bad. " + "Must match the number of bad channels provided. " + "Pass multiple times to supply more than one " + "value in that case.", + ) + parser.add_option( + "--bids_root", + dest="bids_root", + help="The path of the folder containing the BIDS " "dataset", + ) + parser.add_option("--subject_id", dest="subject", help=("Subject name")) + parser.add_option("--session_id", dest="session", help="Session name") + parser.add_option("--task", dest="task", help="Task name") + parser.add_option("--acq", dest="acquisition", help="Acquisition parameter") + parser.add_option("--run", dest="run", help="Run number") + parser.add_option("--proc", dest="processing", help="Processing label.") + parser.add_option("--rec", dest="recording", help="Recording name") + parser.add_option( + "--type", dest="datatype", help="Recording data type, e.g. meg, ieeg or eeg" + ) + parser.add_option( + "--suffix", + dest="suffix", + help="The filename suffix, i.e. the last part before " "the extension", + ) + parser.add_option( + "--ext", + dest="extension", + help="The filename extension, including the leading " "period, e.g. .fif", + ) + parser.add_option( + "--verbose", + dest="verbose", + action="store_true", + help="Whether do generate additional diagnostic output", + ) opt, args = parser.parse_args() if args: parser.print_help() - parser.error(f'Please do not specify arguments without flags. ' - f'Got: {args}.\n') + parser.error( + f"Please do not specify arguments without flags. 
" f"Got: {args}.\n" + ) if opt.bids_root is None: parser.print_help() - parser.error('You must specify bids_root') + parser.error("You must specify bids_root") if opt.ch_names is None: parser.print_help() - parser.error('You must specify some --ch_name parameters.') + parser.error("You must specify some --ch_name parameters.") status = opt.status - ch_names = [] if opt.ch_names == [''] else opt.ch_names - bids_path = BIDSPath(subject=opt.subject, session=opt.session, - task=opt.task, acquisition=opt.acquisition, - run=opt.run, processing=opt.processing, - recording=opt.recording, datatype=opt.datatype, - suffix=opt.suffix, extension=opt.extension, - root=opt.bids_root) + ch_names = [] if opt.ch_names == [""] else opt.ch_names + bids_path = BIDSPath( + subject=opt.subject, + session=opt.session, + task=opt.task, + acquisition=opt.acquisition, + run=opt.run, + processing=opt.processing, + recording=opt.recording, + datatype=opt.datatype, + suffix=opt.suffix, + extension=opt.extension, + root=opt.bids_root, + ) bids_paths = bids_path.match() # Only keep data we can actually read & write. allowed_extensions = list(reader.keys()) - bids_paths = [p for p in bids_paths - if p.extension in allowed_extensions] + bids_paths = [p for p in bids_paths if p.extension in allowed_extensions] if not bids_paths: - logger.info('No matching files found. Please consider using a less ' - 'restrictive set of entities to broaden the search.') + logger.info( + "No matching files found. Please consider using a less " + "restrictive set of entities to broaden the search." + ) return # XXX should be return with an error code? - logger.info(f'Marking channels {", ".join(ch_names)} as bad in ' - f'{len(bids_paths)} recording(s) …') + logger.info( + f'Marking channels {", ".join(ch_names)} as bad in ' + f"{len(bids_paths)} recording(s) …" + ) for bids_path in bids_paths: - logger.info(f'Processing: {bids_path.basename}') - mark_channels(bids_path=bids_path, ch_names=ch_names, - status=status, descriptions=opt.descriptions, - verbose=opt.verbose) + logger.info(f"Processing: {bids_path.basename}") + mark_channels( + bids_path=bids_path, + ch_names=ch_names, + status=status, + descriptions=opt.descriptions, + verbose=opt.verbose, + ) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_raw_to_bids.py b/mne_bids/commands/mne_bids_raw_to_bids.py index 012cad6bb..0132e4c07 100644 --- a/mne_bids/commands/mne_bids_raw_to_bids.py +++ b/mne_bids/commands/mne_bids_raw_to_bids.py @@ -19,83 +19,113 @@ def run(): """Run the raw_to_bids command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) - - parser.add_option('--subject_id', dest='subject_id', - help=('subject name in BIDS compatible format ' - '(01, 02, etc.)')) - parser.add_option('--task', dest='task', - help='name of the task the data is based on') - parser.add_option('--raw', dest='raw_fname', - help='path to the raw MEG file') - parser.add_option('--bids_root', dest='bids_root', - help='The path of the BIDS compatible folder.') - parser.add_option('--session_id', dest='session_id', - help='session name in BIDS compatible format') - parser.add_option('--run', dest='run', - help='run number for this dataset') - parser.add_option('--acq', dest='acq', - help='acquisition parameter for this dataset') - parser.add_option('--events_data', dest='events_data', - help='Deprecated. 
Pass --events instead.') - parser.add_option('--events', dest='events', - help='events file (events.tsv)') - parser.add_option('--event_id', dest='event_id', - help='event id dict', metavar='eid') - parser.add_option('--hpi', dest='hpi', - help='path to the MEG marker points') - parser.add_option('--electrode', dest='electrode', - help='path to head-native digitizer points') - parser.add_option('--hsp', dest='hsp', - help='path to headshape points') - parser.add_option('--config', dest='config', - help='path to the configuration file') - parser.add_option('--overwrite', dest='overwrite', - help="whether to overwrite existing data (BOOLEAN)") - parser.add_option('--line_freq', dest='line_freq', - help="The frequency of the line noise in Hz " - "(e.g. 50 or 60). If unknown, pass None") + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) + + parser.add_option( + "--subject_id", + dest="subject_id", + help=("subject name in BIDS compatible format " "(01, 02, etc.)"), + ) + parser.add_option( + "--task", dest="task", help="name of the task the data is based on" + ) + parser.add_option("--raw", dest="raw_fname", help="path to the raw MEG file") + parser.add_option( + "--bids_root", dest="bids_root", help="The path of the BIDS compatible folder." + ) + parser.add_option( + "--session_id", dest="session_id", help="session name in BIDS compatible format" + ) + parser.add_option("--run", dest="run", help="run number for this dataset") + parser.add_option( + "--acq", dest="acq", help="acquisition parameter for this dataset" + ) + parser.add_option( + "--events_data", dest="events_data", help="Deprecated. Pass --events instead." + ) + parser.add_option("--events", dest="events", help="events file (events.tsv)") + parser.add_option( + "--event_id", dest="event_id", help="event id dict", metavar="eid" + ) + parser.add_option("--hpi", dest="hpi", help="path to the MEG marker points") + parser.add_option( + "--electrode", dest="electrode", help="path to head-native digitizer points" + ) + parser.add_option("--hsp", dest="hsp", help="path to headshape points") + parser.add_option("--config", dest="config", help="path to the configuration file") + parser.add_option( + "--overwrite", + dest="overwrite", + help="whether to overwrite existing data (BOOLEAN)", + ) + parser.add_option( + "--line_freq", + dest="line_freq", + help="The frequency of the line noise in Hz " + "(e.g. 50 or 60). If unknown, pass None", + ) opt, args = parser.parse_args() if len(args) > 0: parser.print_help() - parser.error('Do not specify arguments without flags. Found: "{}".\n' - .format(args)) + parser.error( + 'Do not specify arguments without flags. Found: "{}".\n'.format(args) + ) if not all([opt.subject_id, opt.task, opt.raw_fname, opt.bids_root]): parser.print_help() - parser.error('Arguments missing. You need to specify at least the' - 'following: --subject_id, --task, --raw, --bids_root.') + parser.error( + "Arguments missing. You need to specify at least the" + "following: --subject_id, --task, --raw, --bids_root." 
+ ) bids_path = BIDSPath( - subject=opt.subject_id, session=opt.session_id, run=opt.run, - acquisition=opt.acq, task=opt.task, root=opt.bids_root) + subject=opt.subject_id, + session=opt.session_id, + run=opt.run, + acquisition=opt.acq, + task=opt.task, + root=opt.bids_root, + ) allow_maxshield = False - if opt.raw_fname.endswith('.fif'): - allow_maxshield = 'yes' - - raw = _read_raw(opt.raw_fname, hpi=opt.hpi, electrode=opt.electrode, - hsp=opt.hsp, config_path=opt.config, - allow_maxshield=allow_maxshield) + if opt.raw_fname.endswith(".fif"): + allow_maxshield = "yes" + + raw = _read_raw( + opt.raw_fname, + hpi=opt.hpi, + electrode=opt.electrode, + hsp=opt.hsp, + config_path=opt.config, + allow_maxshield=allow_maxshield, + ) if opt.line_freq is not None: line_freq = None if opt.line_freq == "None" else float(opt.line_freq) - raw.info['line_freq'] = line_freq + raw.info["line_freq"] = line_freq if opt.overwrite is not None: truthy = [1, "True", "true"] falsy = [0, "False", "false"] - bool_mapping = dict( - chain(zip(truthy, repeat(True)), zip(falsy, repeat(False)))) + bool_mapping = dict(chain(zip(truthy, repeat(True)), zip(falsy, repeat(False)))) opt.overwrite = bool_mapping[opt.overwrite] - write_raw_bids(raw, bids_path, event_id=opt.event_id, - events=opt.events, overwrite=opt.overwrite, - events_data=opt.events_data, verbose=True) + write_raw_bids( + raw, + bids_path, + event_id=opt.event_id, + events=opt.events, + overwrite=opt.overwrite, + events_data=opt.events_data, + verbose=True, + ) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/mne_bids_report.py b/mne_bids/commands/mne_bids_report.py index cd317ad28..ff8dbc99c 100644 --- a/mne_bids/commands/mne_bids_report.py +++ b/mne_bids/commands/mne_bids_report.py @@ -14,30 +14,36 @@ def run(): """Run the raw_to_bids command.""" from mne.commands.utils import get_optparser - parser = get_optparser(__file__, usage="usage: %prog options args", - prog_prefix='mne_bids', - version=mne_bids.__version__) + parser = get_optparser( + __file__, + usage="usage: %prog options args", + prog_prefix="mne_bids", + version=mne_bids.__version__, + ) - parser.add_option('--bids_root', dest='bids_root', - help='The path of the BIDS compatible folder.') + parser.add_option( + "--bids_root", dest="bids_root", help="The path of the BIDS compatible folder." + ) opt, args = parser.parse_args() if len(args) > 0: parser.print_help() - parser.error('Do not specify arguments without flags. Found: "{}".\n' - .format(args)) + parser.error( + 'Do not specify arguments without flags. Found: "{}".\n'.format(args) + ) if not all([opt.bids_root]): parser.print_help() - parser.error('Arguments missing. You need to specify the ' - '--bids_root parameter.') + parser.error( + "Arguments missing. You need to specify the " "--bids_root parameter." 
+ ) report = make_report(opt.bids_root) - print('-' * 36 + ' REPORT ' + '-' * 36) + print("-" * 36 + " REPORT " + "-" * 36) print(report) - assert ' ' not in report + assert " " not in report -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/mne_bids/commands/run.py b/mne_bids/commands/run.py index 002bf9a54..3e80bbc04 100755 --- a/mne_bids/commands/run.py +++ b/mne_bids/commands/run.py @@ -11,8 +11,7 @@ import mne_bids mne_bin_dir = op.abspath(op.dirname(mne_bids.__file__)) -valid_commands = sorted(glob.glob(op.join(mne_bin_dir, - 'commands', 'mne_bids_*.py'))) +valid_commands = sorted(glob.glob(op.join(mne_bin_dir, "commands", "mne_bids_*.py"))) valid_commands = [c.split(op.sep)[-1][9:-3] for c in valid_commands] @@ -22,8 +21,10 @@ def print_help(): print("Accepted commands :\n") for c in valid_commands: print("\t- %s" % c) - print("\nExample : mne_bids raw_to_bids --subject_id sub01 --task rest", - "--raw_file data.edf --bids_root new_path") + print( + "\nExample : mne_bids raw_to_bids --subject_id sub01 --task rest", + "--raw_file data.edf --bids_root new_path", + ) sys.exit(0) @@ -31,7 +32,7 @@ def main(): """Run main command.""" if len(sys.argv) == 1: print_help() - elif ("help" in sys.argv[1] or "-h" in sys.argv[1]): + elif "help" in sys.argv[1] or "-h" in sys.argv[1]: print_help() elif sys.argv[1] == "--version": print("MNE-BIDS %s" % mne_bids.__version__) @@ -41,5 +42,5 @@ def main(): sys.exit(0) else: cmd = sys.argv[1] - cmd_path = op.join(mne_bin_dir, 'commands', 'mne_bids_%s.py' % cmd) + cmd_path = op.join(mne_bin_dir, "commands", "mne_bids_%s.py" % cmd) sys.exit(subprocess.call([sys.executable, cmd_path] + sys.argv[2:])) diff --git a/mne_bids/commands/tests/test_cli.py b/mne_bids/commands/tests/test_cli.py index 73fb656c1..28b05dec4 100644 --- a/mne_bids/commands/tests/test_cli.py +++ b/mne_bids/commands/tests/test_cli.py @@ -11,82 +11,129 @@ # This is here to handle mne-python <0.20 import warnings + with warnings.catch_warnings(): - warnings.filterwarnings(action='ignore', - message="can't resolve package", - category=ImportWarning) + warnings.filterwarnings( + action="ignore", message="can't resolve package", category=ImportWarning + ) import mne from mne.datasets import testing from mne.utils import ArgvSetter, requires_pandas from mne.utils._testing import requires_module -from mne_bids.commands import (mne_bids_raw_to_bids, - mne_bids_cp, - mne_bids_mark_channels, - mne_bids_calibration_to_bids, - mne_bids_crosstalk_to_bids, - mne_bids_count_events, - mne_bids_inspect) +from mne_bids.commands import ( + mne_bids_raw_to_bids, + mne_bids_cp, + mne_bids_mark_channels, + mne_bids_calibration_to_bids, + mne_bids_crosstalk_to_bids, + mne_bids_count_events, + mne_bids_inspect, +) from mne_bids import BIDSPath, read_raw_bids, write_raw_bids -requires_matplotlib = partial(requires_module, name='matplotlib', - call='import matplotlib') +requires_matplotlib = partial( + requires_module, name="matplotlib", call="import matplotlib" +) data_path = testing.data_path(download=False) -base_path = op.join(op.dirname(mne.__file__), 'io') -subject_id = '01' -task = 'testing' -datatype = 'meg' - -event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32, 'Nothing': 0} +base_path = op.join(op.dirname(mne.__file__), "io") +subject_id = "01" +task = "testing" +datatype = "meg" + +event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + 
"Nothing": 0, +} def check_usage(module, force_help=False): """Ensure we print usage.""" - args = ('--help',) if force_help else () + args = ("--help",) if force_help else () with ArgvSetter(args) as out: try: module.run() except SystemExit: pass - assert 'Usage: ' in out.stdout.getvalue() + assert "Usage: " in out.stdout.getvalue() @testing.requires_testing_data def test_raw_to_bids(tmp_path): """Test mne_bids raw_to_bids.""" output_path = str(tmp_path) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") # Check that help is printed check_usage(mne_bids_raw_to_bids) # Should work - with ArgvSetter(('--subject_id', subject_id, '--task', task, '--raw', - raw_fname, '--bids_root', output_path, - '--line_freq', "60")): + with ArgvSetter( + ( + "--subject_id", + subject_id, + "--task", + task, + "--raw", + raw_fname, + "--bids_root", + output_path, + "--line_freq", + "60", + ) + ): mne_bids_raw_to_bids.run() # Test line_freq == 'None' - with ArgvSetter(('--subject_id', subject_id, '--task', task, '--raw', - raw_fname, '--bids_root', output_path, - '--line_freq', 'None', '--overwrite', 1)): + with ArgvSetter( + ( + "--subject_id", + subject_id, + "--task", + task, + "--raw", + raw_fname, + "--bids_root", + output_path, + "--line_freq", + "None", + "--overwrite", + 1, + ) + ): mne_bids_raw_to_bids.run() # Test EDF files as well - edf_data_path = op.join(base_path, 'edf', 'tests', 'data') - edf_fname = op.join(edf_data_path, 'test.edf') - with ArgvSetter(('--subject_id', subject_id, '--task', task, '--raw', - edf_fname, '--bids_root', output_path, - '--overwrite', "false", '--line_freq', 60)): + edf_data_path = op.join(base_path, "edf", "tests", "data") + edf_fname = op.join(edf_data_path, "test.edf") + with ArgvSetter( + ( + "--subject_id", + subject_id, + "--task", + task, + "--raw", + edf_fname, + "--bids_root", + output_path, + "--overwrite", + "false", + "--line_freq", + 60, + ) + ): mne_bids_raw_to_bids.run() # Too few input args with pytest.raises(SystemExit): - with ArgvSetter(('--subject_id', subject_id)): + with ArgvSetter(("--subject_id", subject_id)): mne_bids_cp.run() @@ -94,20 +141,20 @@ def test_raw_to_bids(tmp_path): def test_cp(tmp_path): """Test mne_bids cp.""" output_path = str(tmp_path) - data_path = op.join(base_path, 'brainvision', 'tests', 'data') - raw_fname = op.join(data_path, 'test.vhdr') - outname = op.join(output_path, 'test2.vhdr') + data_path = op.join(base_path, "brainvision", "tests", "data") + raw_fname = op.join(data_path, "test.vhdr") + outname = op.join(output_path, "test2.vhdr") # Check that help is printed check_usage(mne_bids_cp) # Should work - with ArgvSetter(('--input', raw_fname, '--output', outname)): + with ArgvSetter(("--input", raw_fname, "--output", outname)): mne_bids_cp.run() # Too few input args with pytest.raises(SystemExit): - with ArgvSetter(('--input', raw_fname)): + with ArgvSetter(("--input", raw_fname)): mne_bids_cp.run() @@ -119,29 +166,52 @@ def test_mark_bad_channels_single_file(tmp_path): # Create test dataset. 
output_path = str(tmp_path) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') - old_bads = mne.io.read_raw_fif(raw_fname).info['bads'] - bids_path = BIDSPath(subject=subject_id, task=task, root=output_path, - datatype=datatype) - - with ArgvSetter(('--subject_id', subject_id, '--task', task, - '--raw', raw_fname, '--bids_root', output_path, - '--line_freq', 60, - '--events', events_fname, '--event_id', event_id)): + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) + old_bads = mne.io.read_raw_fif(raw_fname).info["bads"] + bids_path = BIDSPath( + subject=subject_id, task=task, root=output_path, datatype=datatype + ) + + with ArgvSetter( + ( + "--subject_id", + subject_id, + "--task", + task, + "--raw", + raw_fname, + "--bids_root", + output_path, + "--line_freq", + 60, + "--events", + events_fname, + "--event_id", + event_id, + ) + ): mne_bids_raw_to_bids.run() # Update the dataset. - ch_names = ['MEG 0112', 'MEG 0131'] - descriptions = ['Really bad!', 'Even worse.'] - - args = ['--subject_id', subject_id, '--task', task, - '--bids_root', output_path, '--type', datatype] + ch_names = ["MEG 0112", "MEG 0131"] + descriptions = ["Really bad!", "Even worse."] + + args = [ + "--subject_id", + subject_id, + "--task", + task, + "--bids_root", + output_path, + "--type", + datatype, + ] for ch_name, description in zip(ch_names, descriptions): - args.extend(['--ch_name', ch_name]) - args.extend(['--description', description]) + args.extend(["--ch_name", ch_name]) + args.extend(["--description", description]) args = tuple(args) with ArgvSetter(args): @@ -149,19 +219,30 @@ def test_mark_bad_channels_single_file(tmp_path): # Check the data was properly written raw = read_raw_bids(bids_path=bids_path, verbose=False) - assert set(old_bads + ch_names) == set(raw.info['bads']) + assert set(old_bads + ch_names) == set(raw.info["bads"]) # Test resetting bad channels. - args = ('--subject_id', subject_id, '--task', task, - '--bids_root', output_path, '--type', datatype, - '--status', 'good', '--ch_name', '') + args = ( + "--subject_id", + subject_id, + "--task", + task, + "--bids_root", + output_path, + "--type", + datatype, + "--status", + "good", + "--ch_name", + "", + ) with ArgvSetter(args): mne_bids_mark_channels.run() - print('Finished running the reset...') + print("Finished running the reset...") # Check the data was properly written raw = read_raw_bids(bids_path=bids_path) - assert raw.info['bads'] == [] + assert raw.info["bads"] == [] @testing.requires_testing_data @@ -172,29 +253,43 @@ def test_mark_bad_channels_multiple_files(tmp_path): # Create test dataset. 
output_path = str(tmp_path)
-    raw_fname = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_trunc_raw.fif')
-    events_fname = op.join(data_path, 'MEG', 'sample',
-                           'sample_audvis_trunc_raw-eve.fif')
-    old_bads = mne.io.read_raw_fif(raw_fname).info['bads']
+    raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif")
+    events_fname = op.join(
+        data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif"
+    )
+    old_bads = mne.io.read_raw_fif(raw_fname).info["bads"]
     bids_path = BIDSPath(task=task, root=output_path, datatype=datatype)

-    subjects = ['01', '02', '03']
+    subjects = ["01", "02", "03"]
     for subject in subjects:
-        with ArgvSetter(('--subject_id', subject, '--task', task,
-                         '--raw', raw_fname, '--bids_root', output_path,
-                         '--line_freq', 60,
-                         '--events', events_fname, '--event_id', event_id)):
+        with ArgvSetter(
+            (
+                "--subject_id",
+                subject,
+                "--task",
+                task,
+                "--raw",
+                raw_fname,
+                "--bids_root",
+                output_path,
+                "--line_freq",
+                60,
+                "--events",
+                events_fname,
+                "--event_id",
+                event_id,
+            )
+        ):
             mne_bids_raw_to_bids.run()

     # Update the dataset.
-    ch_names = ['MEG 0112', 'MEG 0131']
-    descriptions = ['Really bad!', 'Even worse.']
+    ch_names = ["MEG 0112", "MEG 0131"]
+    descriptions = ["Really bad!", "Even worse."]

-    args = ['--task', task, '--bids_root', output_path, '--type', datatype]
+    args = ["--task", task, "--bids_root", output_path, "--type", datatype]
     for ch_name, description in zip(ch_names, descriptions):
-        args.extend(['--ch_name', ch_name])
-        args.extend(['--description', description])
+        args.extend(["--ch_name", ch_name])
+        args.extend(["--description", description])
     args = tuple(args)

     with ArgvSetter(args):
@@ -203,7 +298,7 @@ def test_mark_bad_channels_multiple_files(tmp_path):
     # Check the data was properly written
     for subject in subjects:
         raw = read_raw_bids(bids_path=bids_path.copy().update(subject=subject))
-        assert set(old_bads + ch_names) == set(raw.info['bads'])
+        assert set(old_bads + ch_names) == set(raw.info["bads"])


 @testing.requires_testing_data
@@ -213,12 +308,18 @@ def test_calibration_to_bids(tmp_path):
     check_usage(mne_bids_calibration_to_bids)

     output_path = str(tmp_path)
-    fine_cal_fname = data_path / 'SSS' / 'sss_cal_mgh.dat'
+    fine_cal_fname = data_path / "SSS" / "sss_cal_mgh.dat"
     bids_path = BIDSPath(subject=subject_id, root=output_path)

     # Write fine-calibration file and check that it was actually written.
-    args = ('--file', fine_cal_fname, '--subject', subject_id,
-            '--bids_root', output_path)
+    args = (
+        "--file",
+        fine_cal_fname,
+        "--subject",
+        subject_id,
+        "--bids_root",
+        output_path,
+    )
     with ArgvSetter(args):
         mne_bids_calibration_to_bids.run()

@@ -232,13 +333,19 @@ def test_crosstalk_to_bids(tmp_path):
     check_usage(mne_bids_crosstalk_to_bids)

     output_path = str(tmp_path)
-    crosstalk_fname = data_path / 'SSS' / 'ct_sparse.fif'
+    crosstalk_fname = data_path / "SSS" / "ct_sparse.fif"
     bids_path = BIDSPath(subject=subject_id, root=output_path)

     # Write crosstalk file and check that it was actually written.
-    args = ('--file', crosstalk_fname, '--subject', subject_id,
-            '--bids_root', output_path)
+    args = (
+        "--file",
+        crosstalk_fname,
+        "--subject",
+        subject_id,
+        "--bids_root",
+        output_path,
+    )
     with ArgvSetter(args):
         mne_bids_crosstalk_to_bids.run()
     assert bids_path.meg_crosstalk_fpath.exists()
@@ -253,34 +360,39 @@ def test_count_events(tmp_path):
     # Create test dataset.
output_path = str(tmp_path) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = mne.io.read_raw(raw_fname) - raw.info['line_freq'] = 60. + raw.info["line_freq"] = 60.0 events = mne.find_events(raw) - bids_path = BIDSPath(subject='01', root=output_path, task='foo') - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - overwrite=True, verbose=False) + bids_path = BIDSPath(subject="01", root=output_path, task="foo") + write_raw_bids( + raw, bids_path, events=events, event_id=event_id, overwrite=True, verbose=False + ) - with ArgvSetter(('--bids_root', output_path)): + with ArgvSetter(("--bids_root", output_path)): mne_bids_count_events.run() - with ArgvSetter(('--bids_root', output_path, '--describe')): + with ArgvSetter(("--bids_root", output_path, "--describe")): mne_bids_count_events.run() - with ArgvSetter(('--bids_root', output_path, '--silent')): + with ArgvSetter(("--bids_root", output_path, "--silent")): mne_bids_count_events.run() - with ArgvSetter(('--bids_root', output_path, - '--output', str(Path(output_path) / 'counts.csv'))): + with ArgvSetter( + ("--bids_root", output_path, "--output", str(Path(output_path) / "counts.csv")) + ): mne_bids_count_events.run() with ArgvSetter( - ('--bids_root', output_path, - '--output', str(Path(output_path) / 'counts.csv'), - '--overwrite') + ( + "--bids_root", + output_path, + "--output", + str(Path(output_path) / "counts.csv"), + "--overwrite", + ) ): mne_bids_count_events.run() @@ -294,25 +406,23 @@ def test_inspect(tmp_path): # Create test dataset. bids_root = str(tmp_path) - subject = '01' - task = 'test' - datatype = 'meg' - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + subject = "01" + task = "test" + datatype = "meg" + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = mne.io.read_raw(raw_fname) - raw.info['line_freq'] = 60. 
+ raw.info["line_freq"] = 60.0 - bids_path = BIDSPath(subject=subject, task=task, datatype=datatype, - root=bids_root) + bids_path = BIDSPath(subject=subject, task=task, datatype=datatype, root=bids_root) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) import matplotlib - matplotlib.use('agg') - h_freqs = (30.0, 30, '30') + matplotlib.use("agg") + + h_freqs = (30.0, 30, "30") for h_freq in h_freqs: - args = ('--bids_root', bids_root, '--h_freq', h_freq, - '--find_flat', 0) + args = ("--bids_root", bids_root, "--h_freq", h_freq, "--find_flat", 0) with ArgvSetter(args): mne_bids_inspect.run() diff --git a/mne_bids/config.py b/mne_bids/config.py index e291a90f0..b101e4f2a 100644 --- a/mne_bids/config.py +++ b/mne_bids/config.py @@ -9,81 +9,99 @@ DOI = """https://doi.org/10.21105/joss.01896""" -EPHY_ALLOWED_DATATYPES = ['meg', 'eeg', 'ieeg', 'nirs'] +EPHY_ALLOWED_DATATYPES = ["meg", "eeg", "ieeg", "nirs"] -ALLOWED_DATATYPES = EPHY_ALLOWED_DATATYPES + ['anat', 'beh'] +ALLOWED_DATATYPES = EPHY_ALLOWED_DATATYPES + ["anat", "beh"] -MEG_CONVERT_FORMATS = ['FIF', 'auto'] -EEG_CONVERT_FORMATS = ['BrainVision', 'auto'] -IEEG_CONVERT_FORMATS = ['BrainVision', 'auto'] -NIRS_CONVERT_FORMATS = ['auto'] +MEG_CONVERT_FORMATS = ["FIF", "auto"] +EEG_CONVERT_FORMATS = ["BrainVision", "auto"] +IEEG_CONVERT_FORMATS = ["BrainVision", "auto"] +NIRS_CONVERT_FORMATS = ["auto"] CONVERT_FORMATS = { - 'meg': MEG_CONVERT_FORMATS, - 'eeg': EEG_CONVERT_FORMATS, - 'ieeg': IEEG_CONVERT_FORMATS, - 'nirs': NIRS_CONVERT_FORMATS, + "meg": MEG_CONVERT_FORMATS, + "eeg": EEG_CONVERT_FORMATS, + "ieeg": IEEG_CONVERT_FORMATS, + "nirs": NIRS_CONVERT_FORMATS, } # Orientation of the coordinate system dependent on manufacturer ORIENTATION = { - '.con': 'KitYokogawa', - '.ds': 'CTF', - '.fif': 'ElektaNeuromag', - '.pdf': '4DBti', - '.sqd': 'KitYokogawa', + ".con": "KitYokogawa", + ".ds": "CTF", + ".fif": "ElektaNeuromag", + ".pdf": "4DBti", + ".sqd": "KitYokogawa", } -EXT_TO_UNIT_MAP = { - '.con': 'm', - '.ds': 'cm', - '.fif': 'm', - '.pdf': 'm', - '.sqd': 'm' -} +EXT_TO_UNIT_MAP = {".con": "m", ".ds": "cm", ".fif": "m", ".pdf": "m", ".sqd": "m"} UNITS_MNE_TO_BIDS_MAP = { - 'C': 'oC', # temperature in deg. C + "C": "oC", # temperature in deg. 
C } meg_manufacturers = { - '.con': 'KIT/Yokogawa', - '.ds': 'CTF', - '.fif': 'Elekta', - '.meg4': 'CTF', - '.pdf': '4D Magnes', - '.sqd': 'KIT/Yokogawa' + ".con": "KIT/Yokogawa", + ".ds": "CTF", + ".fif": "Elekta", + ".meg4": "CTF", + ".pdf": "4D Magnes", + ".sqd": "KIT/Yokogawa", } -eeg_manufacturers = {'.vhdr': 'Brain Products', '.eeg': 'Brain Products', - '.edf': 'n/a', '.EDF': 'n/a', '.bdf': 'Biosemi', - '.BDF': 'Biosemi', - '.set': 'n/a', '.fdt': 'n/a', - '.lay': 'Persyst', '.dat': 'Persyst', - '.EEG': 'Nihon Kohden', - '.cnt': 'Neuroscan', '.CNT': 'Neuroscan', - '.bin': 'EGI', - '.cdt': 'Curry'} +eeg_manufacturers = { + ".vhdr": "Brain Products", + ".eeg": "Brain Products", + ".edf": "n/a", + ".EDF": "n/a", + ".bdf": "Biosemi", + ".BDF": "Biosemi", + ".set": "n/a", + ".fdt": "n/a", + ".lay": "Persyst", + ".dat": "Persyst", + ".EEG": "Nihon Kohden", + ".cnt": "Neuroscan", + ".CNT": "Neuroscan", + ".bin": "EGI", + ".cdt": "Curry", +} -ieeg_manufacturers = {'.vhdr': 'Brain Products', '.eeg': 'Brain Products', - '.edf': 'n/a', '.EDF': 'n/a', '.set': 'n/a', - '.fdt': 'n/a', '.mef': 'n/a', '.nwb': 'n/a', - '.lay': 'Persyst', '.dat': 'Persyst', - '.EEG': 'Nihon Kohden'} +ieeg_manufacturers = { + ".vhdr": "Brain Products", + ".eeg": "Brain Products", + ".edf": "n/a", + ".EDF": "n/a", + ".set": "n/a", + ".fdt": "n/a", + ".mef": "n/a", + ".nwb": "n/a", + ".lay": "Persyst", + ".dat": "Persyst", + ".EEG": "Nihon Kohden", +} -nirs_manufacturers = {'.snirf': 'SNIRF'} +nirs_manufacturers = {".snirf": "SNIRF"} # file-extension map to mne-python readers -reader = {'.con': io.read_raw_kit, '.sqd': io.read_raw_kit, - '.fif': io.read_raw_fif, '.pdf': io.read_raw_bti, - '.ds': io.read_raw_ctf, '.vhdr': io.read_raw_brainvision, - '.edf': io.read_raw_edf, '.EDF': io.read_raw_edf, - '.bdf': io.read_raw_bdf, - '.set': io.read_raw_eeglab, '.lay': io.read_raw_persyst, - '.EEG': io.read_raw_nihon, - '.cnt': io.read_raw_cnt, '.CNT': io.read_raw_cnt, - '.bin': io.read_raw_egi, - '.snirf': io.read_raw_snirf, - '.cdt': io.read_raw_curry} +reader = { + ".con": io.read_raw_kit, + ".sqd": io.read_raw_kit, + ".fif": io.read_raw_fif, + ".pdf": io.read_raw_bti, + ".ds": io.read_raw_ctf, + ".vhdr": io.read_raw_brainvision, + ".edf": io.read_raw_edf, + ".EDF": io.read_raw_edf, + ".bdf": io.read_raw_bdf, + ".set": io.read_raw_eeglab, + ".lay": io.read_raw_persyst, + ".EEG": io.read_raw_nihon, + ".cnt": io.read_raw_cnt, + ".CNT": io.read_raw_cnt, + ".bin": io.read_raw_egi, + ".snirf": io.read_raw_snirf, + ".cdt": io.read_raw_curry, +} # Merge the manufacturer dictionaries in a python2 / python3 compatible way @@ -98,189 +116,236 @@ # List of synthetic channels by manufacturer that are to be excluded from the # channel list. Currently this is only for stimulus channels. 
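# ("STI 014" is the default stimulus/trigger channel name in MNE-Python,
# which is why it appears for every manufacturer below.)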
-IGNORED_CHANNELS = {'KIT/Yokogawa': ['STI 014'], - 'BrainProducts': ['STI 014'], - 'n/a': ['STI 014'], # for unknown manufacturers, ignore it - 'Biosemi': ['STI 014']} - -allowed_extensions_meg = ['.con', '.sqd', '.fif', '.pdf', '.ds'] -allowed_extensions_eeg = ['.vhdr', # BrainVision, accompanied by .vmrk, .eeg - '.edf', # European Data Format - '.bdf', # Biosemi - '.set', # EEGLAB, potentially accompanied by .fdt - ] - -allowed_extensions_ieeg = ['.vhdr', # BrainVision, accompanied by .vmrk, .eeg - '.edf', # European Data Format - '.set', # EEGLAB, potentially accompanied by .fdt - '.mef', # MEF: Multiscale Electrophysiology File - '.nwb', # Neurodata without borders - ] - -allowed_extensions_nirs = ['.snirf', # SNIRF - ] +IGNORED_CHANNELS = { + "KIT/Yokogawa": ["STI 014"], + "BrainProducts": ["STI 014"], + "n/a": ["STI 014"], # for unknown manufacturers, ignore it + "Biosemi": ["STI 014"], +} + +allowed_extensions_meg = [".con", ".sqd", ".fif", ".pdf", ".ds"] +allowed_extensions_eeg = [ + ".vhdr", # BrainVision, accompanied by .vmrk, .eeg + ".edf", # European Data Format + ".bdf", # Biosemi + ".set", # EEGLAB, potentially accompanied by .fdt +] + +allowed_extensions_ieeg = [ + ".vhdr", # BrainVision, accompanied by .vmrk, .eeg + ".edf", # European Data Format + ".set", # EEGLAB, potentially accompanied by .fdt + ".mef", # MEF: Multiscale Electrophysiology File + ".nwb", # Neurodata without borders +] + +allowed_extensions_nirs = [ + ".snirf", # SNIRF +] # allowed extensions (data formats) in BIDS spec ALLOWED_DATATYPE_EXTENSIONS = { - 'meg': allowed_extensions_meg, - 'eeg': allowed_extensions_eeg, - 'ieeg': allowed_extensions_ieeg, - 'nirs': allowed_extensions_nirs + "meg": allowed_extensions_meg, + "eeg": allowed_extensions_eeg, + "ieeg": allowed_extensions_ieeg, + "nirs": allowed_extensions_nirs, } # allow additional extensions that are not BIDS # compliant, but we will convert to the # recommended formats -ALLOWED_INPUT_EXTENSIONS = \ - allowed_extensions_meg + allowed_extensions_eeg + \ - allowed_extensions_ieeg + allowed_extensions_nirs + \ - ['.lay', '.EEG', '.cnt', '.CNT', '.bin', '.cdt'] +ALLOWED_INPUT_EXTENSIONS = ( + allowed_extensions_meg + + allowed_extensions_eeg + + allowed_extensions_ieeg + + allowed_extensions_nirs + + [".lay", ".EEG", ".cnt", ".CNT", ".bin", ".cdt"] +) # allowed suffixes (i.e. 
last "_" delimiter in the BIDS filenames before
# the extension)
ALLOWED_FILENAME_SUFFIX = [
-    'meg', 'markers', 'eeg', 'ieeg', 'T1w', 'FLASH',  # datatype
-    'participants', 'scans', 'sessions',
-    'electrodes', 'optodes', 'channels', 'coordsystem', 'events',  # sidecars
-    'headshape', 'digitizer',  # meg-specific sidecars
-    'beh', 'physio', 'stim',  # behavioral
-    'nirs'
+    "meg",
+    "markers",
+    "eeg",
+    "ieeg",
+    "T1w",
+    "FLASH",  # datatype
+    "participants",
+    "scans",
+    "sessions",
+    "electrodes",
+    "optodes",
+    "channels",
+    "coordsystem",
+    "events",  # sidecars
+    "headshape",
+    "digitizer",  # meg-specific sidecars
+    "beh",
+    "physio",
+    "stim",  # behavioral
+    "nirs",
 ]

 # converts suffix to known path modalities
 SUFFIX_TO_DATATYPE = {
-    'meg': 'meg', 'headshape': 'meg', 'digitizer': 'meg', 'markers': 'meg',
-    'eeg': 'eeg', 'ieeg': 'ieeg',
-    'T1w': 'anat', 'FLASH': 'anat'
+    "meg": "meg",
+    "headshape": "meg",
+    "digitizer": "meg",
+    "markers": "meg",
+    "eeg": "eeg",
+    "ieeg": "ieeg",
+    "T1w": "anat",
+    "FLASH": "anat",
 }

 # allowed BIDS extensions (extension in the BIDS filename)
 ALLOWED_FILENAME_EXTENSIONS = (
-    ALLOWED_INPUT_EXTENSIONS +
-    ['.json', '.tsv', '.tsv.gz', '.nii', '.nii.gz'] +
-    ['.pos', '.eeg', '.vmrk'] +  # extra datatype-specific metadata files.
-    ['.dat', '.EEG'] +  # extra eeg extensions
-    ['.mrk']  # KIT/Yokogawa/Ricoh marker coil
+    ALLOWED_INPUT_EXTENSIONS
+    + [".json", ".tsv", ".tsv.gz", ".nii", ".nii.gz"]
+    + [".pos", ".eeg", ".vmrk"]  # extra datatype-specific metadata files.
+    + [".dat", ".EEG"]  # extra eeg extensions
+    + [".mrk"]  # KIT/Yokogawa/Ricoh marker coil
 )

 # allowed BIDSPath entities
-ALLOWED_PATH_ENTITIES = ('subject', 'session', 'task', 'run',
-                         'processing', 'recording', 'space',
-                         'acquisition', 'split', 'description',
-                         'suffix', 'extension')
-ALLOWED_PATH_ENTITIES_SHORT = {'sub': 'subject', 'ses': 'session',
-                               'task': 'task', 'acq': 'acquisition',
-                               'run': 'run', 'proc': 'processing',
-                               'space': 'space', 'rec': 'recording',
-                               'split': 'split', 'desc': 'description'}
+ALLOWED_PATH_ENTITIES = (
+    "subject",
+    "session",
+    "task",
+    "run",
+    "processing",
+    "recording",
+    "space",
+    "acquisition",
+    "split",
+    "description",
+    "suffix",
+    "extension",
+)
+ALLOWED_PATH_ENTITIES_SHORT = {
+    "sub": "subject",
+    "ses": "session",
+    "task": "task",
+    "acq": "acquisition",
+    "run": "run",
+    "proc": "processing",
+    "space": "space",
+    "rec": "recording",
+    "split": "split",
+    "desc": "description",
+}

 # Annotations to never remove during reading or writing
-ANNOTATIONS_TO_KEEP = ('BAD_ACQ_SKIP',)
+ANNOTATIONS_TO_KEEP = ("BAD_ACQ_SKIP",)

 BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS = [
-    'ICBM452AirSpace',
-    'ICBM452Warp5Space',
-    'IXI549Space',
-    'fsaverage',
-    'fsaverageSym',
-    'fsLR',
-    'MNIColin27',
-    'MNI152Lin',
-    'MNI152NLin2009aSym',
-    'MNI152NLin2009bSym',
-    'MNI152NLin2009cSym',
-    'MNI152NLin2009aAsym',
-    'MNI152NLin2009bAsym',
-    'MNI152NLin2009cAsym',
-    'MNI152NLin6Sym',
-    'MNI152NLin6ASym',
-    'MNI305',
-    'NIHPD',
-    'OASIS30AntsOASISAnts',
-    'OASIS30Atropos',
-    'Talairach',
-    'UNCInfant',
+    "ICBM452AirSpace",
+    "ICBM452Warp5Space",
+    "IXI549Space",
+    "fsaverage",
+    "fsaverageSym",
+    "fsLR",
+    "MNIColin27",
+    "MNI152Lin",
+    "MNI152NLin2009aSym",
+    "MNI152NLin2009bSym",
+    "MNI152NLin2009cSym",
+    "MNI152NLin2009aAsym",
+    "MNI152NLin2009bAsym",
+    "MNI152NLin2009cAsym",
+    "MNI152NLin6Sym",
+    "MNI152NLin6ASym",
+    "MNI305",
+    "NIHPD",
+    "OASIS30AntsOASISAnts",
+    "OASIS30Atropos",
+    "Talairach",
+    "UNCInfant",
 ]

 coordsys_standard_template_deprecated = [
-
'fsaverage3',
-    'fsaverage4',
-    'fsaverage5',
-    'fsaverage6',
-    'fsaveragesym',
-    'UNCInfant0V21',
-    'UNCInfant1V21',
-    'UNCInfant2V21',
-    'UNCInfant0V22',
-    'UNCInfant1V22',
-    'UNCInfant2V22',
-    'UNCInfant0V23',
-    'UNCInfant1V23',
-    'UNCInfant2V23',
+    "fsaverage3",
+    "fsaverage4",
+    "fsaverage5",
+    "fsaverage6",
+    "fsaveragesym",
+    "UNCInfant0V21",
+    "UNCInfant1V21",
+    "UNCInfant2V21",
+    "UNCInfant0V22",
+    "UNCInfant1V22",
+    "UNCInfant2V22",
+    "UNCInfant0V23",
+    "UNCInfant1V23",
+    "UNCInfant2V23",
 ]

 # accepted BIDS formats, which may be subject to change
 # depending on the specification
-BIDS_IEEG_COORDINATE_FRAMES = ['ACPC', 'Pixels']
-BIDS_MEG_COORDINATE_FRAMES = ['CTF', 'ElektaNeuromag',
-                              '4DBti', 'KitYokogawa',
-                              'ChietiItab']
-BIDS_EEG_COORDINATE_FRAMES = ['CapTrak']
+BIDS_IEEG_COORDINATE_FRAMES = ["ACPC", "Pixels"]
+BIDS_MEG_COORDINATE_FRAMES = [
+    "CTF",
+    "ElektaNeuromag",
+    "4DBti",
+    "KitYokogawa",
+    "ChietiItab",
+]
+BIDS_EEG_COORDINATE_FRAMES = ["CapTrak"]

 # accepted coordinate SI units
-BIDS_COORDINATE_UNITS = ['m', 'cm', 'mm']
-coordsys_wildcard = ['Other']
-BIDS_SHARED_COORDINATE_FRAMES = (BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS +
-                                 coordsys_standard_template_deprecated +
-                                 coordsys_wildcard)
+BIDS_COORDINATE_UNITS = ["m", "cm", "mm"]
+coordsys_wildcard = ["Other"]
+BIDS_SHARED_COORDINATE_FRAMES = (
+    BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS
+    + coordsys_standard_template_deprecated
+    + coordsys_wildcard
+)

 ALLOWED_SPACES = dict()
-ALLOWED_SPACES['meg'] = ALLOWED_SPACES['eeg'] = \
-    BIDS_SHARED_COORDINATE_FRAMES + BIDS_MEG_COORDINATE_FRAMES + \
-    BIDS_EEG_COORDINATE_FRAMES
-ALLOWED_SPACES['ieeg'] = \
-    BIDS_SHARED_COORDINATE_FRAMES + BIDS_IEEG_COORDINATE_FRAMES
-ALLOWED_SPACES['anat'] = None
-ALLOWED_SPACES['beh'] = None
+ALLOWED_SPACES["meg"] = ALLOWED_SPACES["eeg"] = (
+    BIDS_SHARED_COORDINATE_FRAMES
+    + BIDS_MEG_COORDINATE_FRAMES
+    + BIDS_EEG_COORDINATE_FRAMES
+)
+ALLOWED_SPACES["ieeg"] = BIDS_SHARED_COORDINATE_FRAMES + BIDS_IEEG_COORDINATE_FRAMES
+ALLOWED_SPACES["anat"] = None
+ALLOWED_SPACES["beh"] = None

 # See: https://bids-specification.readthedocs.io/en/latest/appendices/entity-table.html#encephalography-eeg-ieeg-and-meg  # noqa
 ENTITY_VALUE_TYPE = {
-    'subject': 'label',
-    'session': 'label',
-    'task': 'label',
-    'run': 'index',
-    'processing': 'label',
-    'recording': 'label',
-    'space': 'label',
-    'acquisition': 'label',
-    'split': 'index',
-    'description': 'label',
-    'suffix': 'label',
-    'extension': 'label',
+    "subject": "label",
+    "session": "label",
+    "task": "label",
+    "run": "index",
+    "processing": "label",
+    "recording": "label",
+    "space": "label",
+    "acquisition": "label",
+    "split": "index",
+    "description": "label",
+    "suffix": "label",
+    "extension": "label",
 }

 # mapping from supported BIDS coordinate frames -> MNE
 BIDS_TO_MNE_FRAMES = {
-    'CTF': 'ctf_head',
-    '4DBti': 'ctf_head',
-    'KitYokogawa': 'ctf_head',
-    'ElektaNeuromag': 'head',
-    'ChietiItab': 'head',
-    'CapTrak': 'head',
-    'ACPC': 'ras',  # assumes T1 is ACPC-aligned, if not the coordinates are lost  # noqa
-    'fsaverage': 'mni_tal',  # XXX: note fsaverage and MNI305 are the same  # noqa
-    'MNI305': 'mni_tal'
+    "CTF": "ctf_head",
+    "4DBti": "ctf_head",
+    "KitYokogawa": "ctf_head",
+    "ElektaNeuromag": "head",
+    "ChietiItab": "head",
+    "CapTrak": "head",
+    "ACPC": "ras",  # assumes T1 is ACPC-aligned; if not, the coordinates are lost  # noqa
+    "fsaverage": "mni_tal",  # XXX: note fsaverage and MNI305 are the same  # noqa
+    "MNI305": "mni_tal",
 }

 # mapping from supported MNE coordinate frames -> BIDS
# XXX: note that there are a lot fewer MNE available coordinate # systems so the range of BIDS supported coordinate systems we # can write is limited. -MNE_TO_BIDS_FRAMES = { - 'ctf_head': 'CTF', - 'head': 'CapTrak', - 'mni_tal': 'fsaverage' -} +MNE_TO_BIDS_FRAMES = {"ctf_head": "CTF", "head": "CapTrak", "mni_tal": "fsaverage"} # these coordinate frames in mne-python are related to scalp/meg # 'meg', 'ctf_head', 'ctf_meg', 'head', 'unknown' @@ -295,171 +360,170 @@ fs_tal=FIFF.FIFFV_MNE_COORD_FS_TAL, ctf_head=FIFF.FIFFV_MNE_COORD_CTF_HEAD, ctf_meg=FIFF.FIFFV_MNE_COORD_CTF_DEVICE, - unknown=FIFF.FIFFV_COORD_UNKNOWN + unknown=FIFF.FIFFV_COORD_UNKNOWN, ) MNE_FRAME_TO_STR = {val: key for key, val in MNE_STR_TO_FRAME.items()} # see BIDS specification for description we copied over from each BIDS_COORD_FRAME_DESCRIPTIONS = { - 'acpc': 'The origin of the coordinate system is at the Anterior ' - 'Commissure and the negative y-axis is passing through the ' - 'Posterior Commissure. The positive z-axis is passing through ' - 'a mid-hemispheric point in the superior direction.', - 'pixels': 'If electrodes are localized in 2D space (only x and y are ' - 'specified and z is n/a), then the positions in this file ' - 'must correspond to the locations expressed in pixels on ' - 'the photo/drawing/rendering of the electrodes on the brain. ' - 'In this case, coordinates must be (row,column) pairs, with ' - '(0,0) corresponding to the upper left pixel and (N,0) ' - 'corresponding to the lower left pixel.', - 'ctf': 'ALS orientation and the origin between the ears', - 'elektaneuromag': 'RAS orientation and the origin between the ears', - '4dbti': 'ALS orientation and the origin between the ears', - 'kityokogawa': 'ALS orientation and the origin between the ears', - 'chietiitab': 'RAS orientation and the origin between the ears', - 'captrak': ( - 'The X-axis goes from the left preauricular point (LPA) through ' - 'the right preauricular point (RPA). ' - 'The Y-axis goes orthogonally to the X-axis through the nasion (NAS). ' - 'The Z-axis goes orthogonally to the XY-plane through the vertex of ' - 'the head. ' + "acpc": "The origin of the coordinate system is at the Anterior " + "Commissure and the negative y-axis is passing through the " + "Posterior Commissure. The positive z-axis is passing through " + "a mid-hemispheric point in the superior direction.", + "pixels": "If electrodes are localized in 2D space (only x and y are " + "specified and z is n/a), then the positions in this file " + "must correspond to the locations expressed in pixels on " + "the photo/drawing/rendering of the electrodes on the brain. " + "In this case, coordinates must be (row,column) pairs, with " + "(0,0) corresponding to the upper left pixel and (N,0) " + "corresponding to the lower left pixel.", + "ctf": "ALS orientation and the origin between the ears", + "elektaneuromag": "RAS orientation and the origin between the ears", + "4dbti": "ALS orientation and the origin between the ears", + "kityokogawa": "ALS orientation and the origin between the ears", + "chietiitab": "RAS orientation and the origin between the ears", + "captrak": ( + "The X-axis goes from the left preauricular point (LPA) through " + "the right preauricular point (RPA). " + "The Y-axis goes orthogonally to the X-axis through the nasion (NAS). " + "The Z-axis goes orthogonally to the XY-plane through the vertex of " + "the head. " 'This corresponds to a "RAS" orientation with the origin of the ' - 'coordinate system approximately between the ears. 
' - 'See Appendix VIII in the BIDS specification.'), - 'fsaverage': 'Defined by FreeSurfer, the MRI (surface RAS) origin is ' - 'at the center of a 256×256×256 mm^3 anisotropic volume ' - '(may not be in the center of the head).', - 'icbm452airspace': 'Reference space defined by the "average of 452 ' - 'T1-weighted MRIs of normal young adult brains" ' - 'with "linear transforms of the subjects into the ' - 'atlas space using a 12-parameter affine ' - 'transformation"', - 'icbm452warp5space': 'Reference space defined by the "average of 452 ' - 'T1-weighted MRIs of normal young adult brains" ' - '"based on a 5th order polynomial transformation ' - 'into the atlas space"', - 'ixi549space': 'Reference space defined by the average of the "549 (...) ' - 'subjects from the IXI dataset" linearly transformed to ' - 'ICBM MNI 452.', - 'fsaveragesym': 'The fsaverage is a dual template providing both ' - 'volumetric and surface coordinates references. The ' - 'volumetric template corresponds to a FreeSurfer variant ' - 'of MNI305 space. The fsaverageSym atlas also defines a ' - 'symmetric surface reference system (formerly described ' - 'as fsaveragesym).', - 'fslr': 'The fsLR is a dual template providing both volumetric and ' - 'surface coordinates references. The volumetric template ' - 'corresponds to MNI152NLin6Asym. Surface templates are given ' - 'at several sampling densities: 164k (used by HCP pipelines ' - 'for 3T and 7T anatomical analysis), 59k (used by HCP pipelines ' - 'for 7T MRI bold and DWI analysis), 32k (used by HCP pipelines ' - 'for 3T MRI bold and DWI analysis), or 4k (used by HCP ' - 'pipelines for MEG analysis) fsaverage_LR surface ' - 'reconstructed from the T1w image.', - 'mnicolin27': 'Average of 27 T1 scans of a single subject.', - 'mni152lin': 'Also known as ICBM (version with linear coregistration).', - 'mni152nlin6sym': 'Also known as symmetric ICBM 6th generation ' - '(non-linear coregistration).', - 'mni152nlin6asym': 'A variation of MNI152NLin6Sym built by A. Janke that ' - 'is released as the MNI template of FSL. Volumetric ' - 'templates included with HCP-Pipelines correspond to ' - 'this template too.', - 'mni305': 'Also known as avg305.', - 'nihpd': 'Pediatric templates generated from the NIHPD sample. Available ' - 'for different age groups (4.5–18.5 y.o., 4.5–8.5 y.o., ' - '7–11 y.o., 7.5–13.5 y.o., 10–14 y.o., 13–18.5 y.o. This ' - 'template also comes in either -symmetric or -asymmetric flavor.', - 'oasis30antsoasisants': - 'See https://figshare.com/articles/ANTs_ANTsR_Brain_Templates/915436', - 'oasis30atropos': - 'See https://mindboggle.info/data.html', - 'talairach': 'Piecewise linear scaling of the brain is implemented as ' - 'described in TT88.', - 'uncinfant': 'Infant Brain Atlases from Neonates to 1- and 2-year-olds.' + "coordinate system approximately between the ears. " + "See Appendix VIII in the BIDS specification." 
+    ),
+    "fsaverage": "Defined by FreeSurfer, the MRI (surface RAS) origin is "
+    "at the center of a 256×256×256 mm^3 anisotropic volume "
+    "(may not be in the center of the head).",
+    "icbm452airspace": 'Reference space defined by the "average of 452 '
+    'T1-weighted MRIs of normal young adult brains" '
+    'with "linear transforms of the subjects into the '
+    "atlas space using a 12-parameter affine "
+    'transformation"',
+    "icbm452warp5space": 'Reference space defined by the "average of 452 '
+    'T1-weighted MRIs of normal young adult brains" '
+    '"based on a 5th order polynomial transformation '
+    'into the atlas space"',
+    "ixi549space": 'Reference space defined by the average of the "549 (...) '
+    'subjects from the IXI dataset" linearly transformed to '
+    "ICBM MNI 452.",
+    "fsaveragesym": "The fsaverage is a dual template providing both "
+    "volumetric and surface coordinates references. The "
+    "volumetric template corresponds to a FreeSurfer variant "
+    "of MNI305 space. The fsaverageSym atlas also defines a "
+    "symmetric surface reference system (formerly described "
+    "as fsaveragesym).",
+    "fslr": "The fsLR is a dual template providing both volumetric and "
+    "surface coordinates references. The volumetric template "
+    "corresponds to MNI152NLin6Asym. Surface templates are given "
+    "at several sampling densities: 164k (used by HCP pipelines "
+    "for 3T and 7T anatomical analysis), 59k (used by HCP pipelines "
+    "for 7T MRI bold and DWI analysis), 32k (used by HCP pipelines "
+    "for 3T MRI bold and DWI analysis), or 4k (used by HCP "
+    "pipelines for MEG analysis) fsaverage_LR surface "
+    "reconstructed from the T1w image.",
+    "mnicolin27": "Average of 27 T1 scans of a single subject.",
+    "mni152lin": "Also known as ICBM (version with linear coregistration).",
+    "mni152nlin6sym": "Also known as symmetric ICBM 6th generation "
+    "(non-linear coregistration).",
+    "mni152nlin6asym": "A variation of MNI152NLin6Sym built by A. Janke that "
+    "is released as the MNI template of FSL. Volumetric "
+    "templates included with HCP-Pipelines correspond to "
+    "this template too.",
+    "mni305": "Also known as avg305.",
+    "nihpd": "Pediatric templates generated from the NIHPD sample. Available "
+    "for different age groups (4.5–18.5 y.o., 4.5–8.5 y.o., "
+    "7–11 y.o., 7.5–13.5 y.o., 10–14 y.o., 13–18.5 y.o.). This "
+    "template also comes in either -symmetric or -asymmetric flavor.",
+    "oasis30antsoasisants": "See https://figshare.com/articles/ANTs_ANTsR_Brain_Templates/915436",  # noqa: E501
+    "oasis30atropos": "See https://mindboggle.info/data.html",
+    "talairach": "Piecewise linear scaling of the brain is implemented as "
+    "described in TT88.",
+    "uncinfant": "Infant Brain Atlases from Neonates to 1- and 2-year-olds.",
 }

-for letter in ('a', 'b', 'c'):
-    for sym in ('Sym', 'Asym'):
-        BIDS_COORD_FRAME_DESCRIPTIONS[f'mni152nlin2009{letter}{sym}'] = \
-            'Also known as ICBM (non-linear coregistration with 40 iterations,'
-            ' released in 2009). It comes in either three different flavours '
-            'each in symmetric or asymmetric version.'
-
-REFERENCES = {'mne-bids':
-              'Appelhoff, S., Sanderson, M., Brooks, T., Vliet, M., '
-              'Quentin, R., Holdgraf, C., Chaumon, M., Mikulan, E., '
-              'Tavabi, K., Höchenberger, R., Welke, D., Brunner, C., '
-              'Rockhill, A., Larson, E., Gramfort, A. and Jas, M. (2019). '
-              'MNE-BIDS: Organizing electrophysiological data into the '
-              'BIDS format and facilitating their analysis. Journal of '
-              'Open Source Software 4: (1896). '
-              'https://doi.org/10.21105/joss.01896',
-              'meg':
-              'Niso, G., Gorgolewski, K. J., Bock, E., Brooks, T. L., '
-              'Flandin, G., Gramfort, A., Henson, R. N., Jas, M., Litvak, '
-              'V., Moreau, J., Oostenveld, R., Schoffelen, J., Tadel, F., '
-              'Wexler, J., Baillet, S. (2018). MEG-BIDS, the brain '
-              'imaging data structure extended to magnetoencephalography. '
-              'Scientific Data, 5, 180110. '
-              'https://doi.org/10.1038/sdata.2018.110',
-              'eeg':
-              'Pernet, C. R., Appelhoff, S., Gorgolewski, K. J., '
-              'Flandin, G., Phillips, C., Delorme, A., Oostenveld, R. (2019). '
-              'EEG-BIDS, an extension to the brain imaging data structure '
-              'for electroencephalography. Scientific Data, 6, 103. '
-              'https://doi.org/10.1038/s41597-019-0104-8',
-              'ieeg':
-              'Holdgraf, C., Appelhoff, S., Bickel, S., Bouchard, K., '
-              'D\'Ambrosio, S., David, O., … Hermes, D. (2019). iEEG-BIDS, '
-              'extending the Brain Imaging Data Structure specification '
-              'to human intracranial electrophysiology. Scientific Data, '
-              '6, 102. https://doi.org/10.1038/s41597-019-0105-7',
-              'nirs':
-              'In preperation'}
+for letter in ("a", "b", "c"):
+    for sym in ("Sym", "Asym"):
+        BIDS_COORD_FRAME_DESCRIPTIONS[f"mni152nlin2009{letter}{sym}"] = (
+            "Also known as ICBM (non-linear coregistration with 40 iterations,"
+            " released in 2009). It comes in three different flavours, "
+            "each in a symmetric or asymmetric version."
+        )
+
+REFERENCES = {
+    "mne-bids": "Appelhoff, S., Sanderson, M., Brooks, T., Vliet, M., "
+    "Quentin, R., Holdgraf, C., Chaumon, M., Mikulan, E., "
+    "Tavabi, K., Höchenberger, R., Welke, D., Brunner, C., "
+    "Rockhill, A., Larson, E., Gramfort, A. and Jas, M. (2019). "
+    "MNE-BIDS: Organizing electrophysiological data into the "
+    "BIDS format and facilitating their analysis. Journal of "
+    "Open Source Software 4: (1896). "
+    "https://doi.org/10.21105/joss.01896",
+    "meg": "Niso, G., Gorgolewski, K. J., Bock, E., Brooks, T. L., "
+    "Flandin, G., Gramfort, A., Henson, R. N., Jas, M., Litvak, "
+    "V., Moreau, J., Oostenveld, R., Schoffelen, J., Tadel, F., "
+    "Wexler, J., Baillet, S. (2018). MEG-BIDS, the brain "
+    "imaging data structure extended to magnetoencephalography. "
+    "Scientific Data, 5, 180110. "
+    "https://doi.org/10.1038/sdata.2018.110",
+    "eeg": "Pernet, C. R., Appelhoff, S., Gorgolewski, K. J., "
+    "Flandin, G., Phillips, C., Delorme, A., Oostenveld, R. (2019). "
+    "EEG-BIDS, an extension to the brain imaging data structure "
+    "for electroencephalography. Scientific Data, 6, 103. "
+    "https://doi.org/10.1038/s41597-019-0104-8",
+    "ieeg": "Holdgraf, C., Appelhoff, S., Bickel, S., Bouchard, K., "
+    "D'Ambrosio, S., David, O., … Hermes, D. (2019). iEEG-BIDS, "
+    "extending the Brain Imaging Data Structure specification "
+    "to human intracranial electrophysiology. Scientific Data, "
+    "6, 102. https://doi.org/10.1038/s41597-019-0105-7",
+    "nirs": "In preparation",
+}


 # Mapping subject information between MNE-BIDS and MNE-Python.
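# As a quick illustration of the many-to-one scheme implemented by
# _map_options() further below (hypothetical calls, shown only for clarity):
#     _map_options(what="hand", key="l", fro="bids", to="mne")  # -> 2
#     _map_options(what="sex", key=1, fro="mne", to="bids")  # -> "M"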
HAND_BIDS_TO_MNE = { - ('n/a',): 0, - ('right', 'r', 'R', 'RIGHT', 'Right'): 1, - ('left', 'l', 'L', 'LEFT', 'Left'): 2, - ('ambidextrous', 'a', 'A', 'AMBIDEXTROUS', 'Ambidextrous'): 3 + ("n/a",): 0, + ("right", "r", "R", "RIGHT", "Right"): 1, + ("left", "l", "L", "LEFT", "Left"): 2, + ("ambidextrous", "a", "A", "AMBIDEXTROUS", "Ambidextrous"): 3, } -HAND_MNE_TO_BIDS = {0: 'n/a', 1: 'R', 2: 'L', 3: 'A'} +HAND_MNE_TO_BIDS = {0: "n/a", 1: "R", 2: "L", 3: "A"} SEX_BIDS_TO_MNE = { - ('n/a', 'other', 'o', 'O', 'OTHER', 'Other'): 0, - ('male', 'm', 'M', 'MALE', 'Male'): 1, - ('female', 'f', 'F', 'FEMALE', 'Female'): 2 + ("n/a", "other", "o", "O", "OTHER", "Other"): 0, + ("male", "m", "M", "MALE", "Male"): 1, + ("female", "f", "F", "FEMALE", "Female"): 2, } -SEX_MNE_TO_BIDS = {0: 'n/a', 1: 'M', 2: 'F'} +SEX_MNE_TO_BIDS = {0: "n/a", 1: "M", 2: "F"} def _map_options(what, key, fro, to): - if what == 'sex': + if what == "sex": mapping_bids_mne = SEX_BIDS_TO_MNE mapping_mne_bids = SEX_MNE_TO_BIDS - elif what == 'hand': + elif what == "hand": mapping_bids_mne = HAND_BIDS_TO_MNE mapping_mne_bids = HAND_MNE_TO_BIDS else: - raise ValueError('Can only map `sex` and `hand`.') + raise ValueError("Can only map `sex` and `hand`.") - if fro == 'bids' and to == 'mne': + if fro == "bids" and to == "mne": # Many-to-one mapping mapped_option = None for bids_keys, mne_option in mapping_bids_mne.items(): if key in bids_keys: mapped_option = mne_option break - elif fro == 'mne' and to == 'bids': + elif fro == "mne" and to == "bids": # One-to-one mapping mapped_option = mapping_mne_bids.get(key, None) else: - raise RuntimeError("fro value {} and to value {} are not " - "accepted. Use 'mne', or 'bids'.".format(fro, to)) + raise RuntimeError( + "fro value {} and to value {} are not " + "accepted. Use 'mne', or 'bids'.".format(fro, to) + ) return mapped_option @@ -469,84 +533,84 @@ def _map_options(what, key, fro, to): # information ANONYMIZED_JSON_KEY_WHITELIST = [ # Common - 'Manufacturer', - 'ManufacturersModelName', - 'InstitutionName', - 'InstitutionalDepartmentName', - 'InstitutionAddress', - 'DeviceSerialNumber', + "Manufacturer", + "ManufacturersModelName", + "InstitutionName", + "InstitutionalDepartmentName", + "InstitutionAddress", + "DeviceSerialNumber", # MRI # Many of these are not standardized, but produced by dcm2niix. 
- 'Modality', - 'MagneticFieldStrength', - 'ImagingFrequency', - 'StationName', - 'SeriesInstanceUID', - 'StudyInstanceUID', - 'StudyID', - 'BodyPartExamined', - 'PatientPosition', - 'ProcedureStepDescription', - 'SoftwareVersions', - 'MRAcquisitionType', - 'SeriesDescription', - 'ProtocolName', - 'ScanningSequence', - 'SequenceVariant', - 'ScanOptions', - 'SequenceName', - 'ImageType', - 'SeriesNumber', - 'AcquisitionNumber', - 'SliceThickness', - 'SAR', - 'EchoTime', - 'RepetitionTime', - 'InversionTime', - 'FlipAngle', - 'PartialFourier', - 'BaseResolution', - 'ShimSetting', - 'TxRefAmp', - 'PhaseResolution', - 'ReceiveCoilName', - 'ReceiveCoilActiveElements', - 'PulseSequenceDetails', - 'ConsistencyInfo', - 'PercentPhaseFOV', - 'PercentSampling', - 'PhaseEncodingSteps', - 'AcquisitionMatrixPE', - 'PixelBandwidth', - 'DwellTime', - 'ImageOrientationPatientDICOM', - 'InPlanePhaseEncodingDirectionDICOM', - 'ConversionSoftware', - 'ConversionSoftwareVersion', + "Modality", + "MagneticFieldStrength", + "ImagingFrequency", + "StationName", + "SeriesInstanceUID", + "StudyInstanceUID", + "StudyID", + "BodyPartExamined", + "PatientPosition", + "ProcedureStepDescription", + "SoftwareVersions", + "MRAcquisitionType", + "SeriesDescription", + "ProtocolName", + "ScanningSequence", + "SequenceVariant", + "ScanOptions", + "SequenceName", + "ImageType", + "SeriesNumber", + "AcquisitionNumber", + "SliceThickness", + "SAR", + "EchoTime", + "RepetitionTime", + "InversionTime", + "FlipAngle", + "PartialFourier", + "BaseResolution", + "ShimSetting", + "TxRefAmp", + "PhaseResolution", + "ReceiveCoilName", + "ReceiveCoilActiveElements", + "PulseSequenceDetails", + "ConsistencyInfo", + "PercentPhaseFOV", + "PercentSampling", + "PhaseEncodingSteps", + "AcquisitionMatrixPE", + "PixelBandwidth", + "DwellTime", + "ImageOrientationPatientDICOM", + "InPlanePhaseEncodingDirectionDICOM", + "ConversionSoftware", + "ConversionSoftwareVersion", # Electrophys common - 'TaskName', - 'TaskDescription', - 'Instructions', - 'PowerLineFrequency', - 'SamplingFrequency', - 'SoftwareFilters', - 'RecordingType', - 'EEGChannelCount', - 'EOGChannelCount', - 'ECGChannelCount', - 'EMGChannelCount', - 'MiscChannelCount', - 'TriggerChannelCount', - 'RecordingDuration', + "TaskName", + "TaskDescription", + "Instructions", + "PowerLineFrequency", + "SamplingFrequency", + "SoftwareFilters", + "RecordingType", + "EEGChannelCount", + "EOGChannelCount", + "ECGChannelCount", + "EMGChannelCount", + "MiscChannelCount", + "TriggerChannelCount", + "RecordingDuration", # EEG - 'EEGReference', - 'EEGPlacementScheme', + "EEGReference", + "EEGPlacementScheme", # MEG - 'DewarPosition', - 'DigitizedLandmarks', - 'DigitizedHeadPoints', - 'MEGChannelCount', - 'MEGREFChannelCount', - 'ContinuousHeadLocalization', - 'HeadCoilFrequency' + "DewarPosition", + "DigitizedLandmarks", + "DigitizedHeadPoints", + "MEGChannelCount", + "MEGREFChannelCount", + "ContinuousHeadLocalization", + "HeadCoilFrequency", ] diff --git a/mne_bids/conftest.py b/mne_bids/conftest.py index d6e524330..0cbcfd5e2 100644 --- a/mne_bids/conftest.py +++ b/mne_bids/conftest.py @@ -6,11 +6,13 @@ def pytest_configure(config): """Configure pytest options.""" # Fixtures - config.addinivalue_line('usefixtures', 'monkeypatch_mne') + config.addinivalue_line("usefixtures", "monkeypatch_mne") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def monkeypatch_mne(): """Monkeypatch MNE to ensure we have download=False everywhere in tests.""" - 
mne.datasets.utils._MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS = \ - ('mne', 'mne_bids') + mne.datasets.utils._MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS = ( + "mne", + "mne_bids", + ) diff --git a/mne_bids/copyfiles.py b/mne_bids/copyfiles.py index ed11eefed..cf441c2c5 100644 --- a/mne_bids/copyfiles.py +++ b/mne_bids/copyfiles.py @@ -23,8 +23,7 @@ from scipy.io import loadmat, savemat import mne -from mne.io import (read_raw_brainvision, read_raw_edf, read_raw_bdf, - anonymize_info) +from mne.io import read_raw_brainvision, read_raw_edf, read_raw_bdf, anonymize_info from mne.utils import logger, verbose from mne_bids.path import BIDSPath, _parse_ext, _mkdir_p @@ -39,7 +38,7 @@ def _copytree(src, dst, **kwargs): except sh.Error as error: # `copytree` throws an error if copying to + from NFS even though # the copy is successful (see https://bugs.python.org/issue24564) - if '[Errno 22]' not in str(error) or not op.exists(dst): + if "[Errno 22]" not in str(error) or not op.exists(dst): raise @@ -59,17 +58,17 @@ def _get_brainvision_encoding(vhdr_file): in the header. """ - with open(vhdr_file, 'rb') as ef: + with open(vhdr_file, "rb") as ef: enc = ef.read() - if enc.find(b'Codepage=') != -1: - enc = enc[enc.find(b'Codepage=') + 9:] + if enc.find(b"Codepage=") != -1: + enc = enc[enc.find(b"Codepage=") + 9 :] enc = enc.split()[0] enc = enc.decode() - src = '(read from header)' + src = "(read from header)" else: - enc = 'UTF-8' - src = '(default)' - logger.debug(f'Detected file encoding: {enc} {src}.') + enc = "UTF-8" + src = "(default)" + logger.debug(f"Detected file encoding: {enc} {src}.") return enc @@ -89,32 +88,29 @@ def _get_brainvision_paths(vhdr_path): """ fname, ext = _parse_ext(vhdr_path) - if ext != '.vhdr': - raise ValueError(f'Expecting file ending in ".vhdr",' - f' but got {ext}') + if ext != ".vhdr": + raise ValueError(f'Expecting file ending in ".vhdr",' f" but got {ext}") # Header file seems fine # extract encoding from brainvision header file, or default to utf-8 enc = _get_brainvision_encoding(vhdr_path) # ..and read it - with open(vhdr_path, 'r', encoding=enc) as f: + with open(vhdr_path, "r", encoding=enc) as f: lines = f.readlines() # Try to find data file .eeg/.dat - eeg_file_match = re.search(r'DataFile=(.*\.(eeg|dat))', ' '.join(lines)) + eeg_file_match = re.search(r"DataFile=(.*\.(eeg|dat))", " ".join(lines)) if not eeg_file_match: - raise ValueError('Could not find a .eeg or .dat file link in' - f' {vhdr_path}') + raise ValueError("Could not find a .eeg or .dat file link in" f" {vhdr_path}") else: eeg_file = eeg_file_match.groups()[0] # Try to find marker file .vmrk - vmrk_file_match = re.search(r'MarkerFile=(.*\.vmrk)', ' '.join(lines)) + vmrk_file_match = re.search(r"MarkerFile=(.*\.vmrk)", " ".join(lines)) if not vmrk_file_match: - raise ValueError('Could not find a .vmrk file link in' - f' {vhdr_path}') + raise ValueError("Could not find a .vmrk file link in" f" {vhdr_path}") else: vmrk_file = vmrk_file_match.groups()[0] @@ -156,19 +152,27 @@ def copyfile_ctf(src, dest): """ _copytree(src, dest) # list of file types to rename - file_types = ('.acq', '.eeg', '.dat', '.hc', '.hist', '.infods', '.bak', - '.meg4', '.newds', '.res4') + file_types = ( + ".acq", + ".eeg", + ".dat", + ".hc", + ".hist", + ".infods", + ".bak", + ".meg4", + ".newds", + ".res4", + ) # Rename files in dest with the name of the dest directory fnames = [f for f in os.listdir(dest) if f.endswith(file_types)] bids_folder_name = op.splitext(op.split(dest)[-1])[0] for fname in fnames: ext = 
op.splitext(fname)[-1] - os.replace(op.join(dest, fname), - op.join(dest, bids_folder_name + ext)) + os.replace(op.join(dest, fname), op.join(dest, bids_folder_name + ext)) -def copyfile_kit(src, dest, subject_id, session_id, - task, run, _init_kwargs): +def copyfile_kit(src, dest, subject_id, session_id, task, run, _init_kwargs): """Copy and rename KIT files to a new location. Parameters @@ -203,49 +207,60 @@ def copyfile_kit(src, dest, subject_id, session_id, # KIT data requires the marker file to be copied over too sh.copyfile(src, dest) data_path = op.split(dest)[0] - datatype = 'meg' + datatype = "meg" - if 'mrk' in _init_kwargs and _init_kwargs['mrk'] is not None: - hpi = _init_kwargs['mrk'] + if "mrk" in _init_kwargs and _init_kwargs["mrk"] is not None: + hpi = _init_kwargs["mrk"] acq_map = dict() if isinstance(hpi, list): if _get_mrk_meas_date(hpi[0]) > _get_mrk_meas_date(hpi[1]): - raise ValueError('Markers provided in incorrect order.') + raise ValueError("Markers provided in incorrect order.") _, marker_ext = _parse_ext(hpi[0]) - acq_map = dict(zip(['pre', 'post'], hpi)) + acq_map = dict(zip(["pre", "post"], hpi)) else: _, marker_ext = _parse_ext(hpi) acq_map[None] = hpi for key, value in acq_map.items(): marker_path = BIDSPath( - subject=subject_id, session=session_id, task=task, run=run, - acquisition=key, suffix='markers', extension=marker_ext, - datatype=datatype) + subject=subject_id, + session=session_id, + task=task, + run=run, + acquisition=key, + suffix="markers", + extension=marker_ext, + datatype=datatype, + ) sh.copyfile(value, op.join(data_path, marker_path.basename)) - for acq in ['elp', 'hsp']: + for acq in ["elp", "hsp"]: if acq in _init_kwargs and _init_kwargs[acq] is not None: position_file = _init_kwargs[acq] task, run, acq = None, None, acq.upper() - position_ext = '.pos' + position_ext = ".pos" position_path = BIDSPath( - subject=subject_id, session=session_id, task=task, run=run, - acquisition=acq, suffix='headshape', extension=position_ext, - datatype=datatype) - sh.copyfile(position_file, - op.join(data_path, position_path.basename)) + subject=subject_id, + session=session_id, + task=task, + run=run, + acquisition=acq, + suffix="headshape", + extension=position_ext, + datatype=datatype, + ) + sh.copyfile(position_file, op.join(data_path, position_path.basename)) def _replace_file(fname, pattern, replace): """Overwrite file, replacing end of lines matching pattern with replace.""" new_content = [] - for line in open(fname, 'r'): + for line in open(fname, "r"): match = re.match(pattern, line) if match: - line = match.group()[:-len(replace)] + replace + '\n' + line = match.group()[: -len(replace)] + replace + "\n" new_content.append(line) - with open(fname, 'w', encoding='utf-8') as fout: + with open(fname, "w", encoding="utf-8") as fout: fout.writelines(new_content) @@ -254,12 +269,12 @@ def _anonymize_brainvision(vhdr_file, date): _, vmrk_file = _get_brainvision_paths(vhdr_file) # Go through VMRK - pattern = re.compile(r'^Mk\d+=New Segment,.*,\d+,\d+,\d+,\d{20}$') - replace = date.strftime('%Y%m%d%H%M%S%f') + pattern = re.compile(r"^Mk\d+=New Segment,.*,\d+,\d+,\d+,\d{20}$") + replace = date.strftime("%Y%m%d%H%M%S%f") _replace_file(vmrk_file, pattern, replace) # Go through VHDR - pattern = re.compile(r'^Impedance \[kOhm\] at \d\d:\d\d:\d\d :$') + pattern = re.compile(r"^Impedance \[kOhm\] at \d\d:\d\d:\d\d :$") replace = f'at {date.strftime("%H:%M:%S")} :' _replace_file(vhdr_file, pattern, replace) @@ -314,8 +329,10 @@ def copyfile_brainvision(vhdr_src, 
vhdr_dest, anonymize=None, verbose=None): fname_src, ext_src = _parse_ext(vhdr_src) fname_dest, ext_dest = _parse_ext(vhdr_dest) if ext_src != ext_dest: - raise ValueError(f'Need to move data with same extension, ' - f' but got "{ext_src}" and "{ext_dest}"') + raise ValueError( + f"Need to move data with same extension, " + f' but got "{ext_src}" and "{ext_dest}"' + ) eeg_file_path, vmrk_file_path = _get_brainvision_paths(vhdr_src) @@ -323,37 +340,40 @@ def copyfile_brainvision(vhdr_src, vhdr_dest, anonymize=None, verbose=None): enc = _get_brainvision_encoding(vhdr_src) # raise warning if binary file has .dat extension - if '.dat' in eeg_file_path: - warn("The file extension of your binary EEG data file is .dat, while " - "the expected extension for raw data is .eeg. " - "This might imply it's preprocessed or processed data: " - "We copied the files and changed the extension to .eeg, " - "but please ensure that this is actually BIDS compatible data!") + if ".dat" in eeg_file_path: + warn( + "The file extension of your binary EEG data file is .dat, while " + "the expected extension for raw data is .eeg. " + "This might imply it's preprocessed or processed data: " + "We copied the files and changed the extension to .eeg, " + "but please ensure that this is actually BIDS compatible data!" + ) # Copy data .eeg/.dat ... no links to repair - sh.copyfile(eeg_file_path, fname_dest + '.eeg') + sh.copyfile(eeg_file_path, fname_dest + ".eeg") # Write new header and marker files, fixing the file pointer links # For that, we need to replace an old "basename" with a new one # assuming that all .eeg/.dat, .vhdr, .vmrk share one basename __, basename_src = op.split(fname_src) - assert op.split(eeg_file_path)[-1] in [ - basename_src + '.eeg', basename_src + '.dat'] - assert basename_src + '.vmrk' == op.split(vmrk_file_path)[-1] + assert op.split(eeg_file_path)[-1] in [basename_src + ".eeg", basename_src + ".dat"] + assert basename_src + ".vmrk" == op.split(vmrk_file_path)[-1] __, basename_dest = op.split(fname_dest) - search_lines = ['DataFile=' + basename_src + '.eeg', - 'DataFile=' + basename_src + '.dat', - 'MarkerFile=' + basename_src + '.vmrk'] - - with open(vhdr_src, 'r', encoding=enc) as fin: - with open(vhdr_dest, 'w', encoding=enc) as fout: + search_lines = [ + "DataFile=" + basename_src + ".eeg", + "DataFile=" + basename_src + ".dat", + "MarkerFile=" + basename_src + ".vmrk", + ] + + with open(vhdr_src, "r", encoding=enc) as fin: + with open(vhdr_dest, "w", encoding=enc) as fout: for line in fin.readlines(): if line.strip() in search_lines: line = line.replace(basename_src, basename_dest) fout.write(line) - with open(vmrk_file_path, 'r', encoding=enc) as fin: - with open(fname_dest + '.vmrk', 'w', encoding=enc) as fout: + with open(vmrk_file_path, "r", encoding=enc) as fin: + with open(fname_dest + ".vmrk", "w", encoding=enc) as fout: for line in fin.readlines(): if line.strip() in search_lines: line = line.replace(basename_src, basename_dest) @@ -361,18 +381,16 @@ def copyfile_brainvision(vhdr_src, vhdr_dest, anonymize=None, verbose=None): if anonymize is not None: raw = read_raw_brainvision(vhdr_src, preload=False, verbose=0) - daysback, keep_his, _ = _check_anonymize(anonymize, raw, '.vhdr') - raw.info = anonymize_info(raw.info, daysback=daysback, - keep_his=keep_his) - _anonymize_brainvision(fname_dest + '.vhdr', - date=raw.info['meas_date']) + daysback, keep_his, _ = _check_anonymize(anonymize, raw, ".vhdr") + raw.info = anonymize_info(raw.info, daysback=daysback, keep_his=keep_his) + 
_anonymize_brainvision(fname_dest + ".vhdr", date=raw.info["meas_date"]) - for ext in ['.eeg', '.vhdr', '.vmrk']: + for ext in [".eeg", ".vhdr", ".vmrk"]: _, fname = os.path.split(fname_dest + ext) dirname = op.dirname(op.realpath(vhdr_dest)) logger.info(f'Created "{fname}" in "{dirname}".') if anonymize: - logger.info('Anonymized all dates in VHDR and VMRK.') + logger.info("Anonymized all dates in VHDR and VMRK.") def copyfile_edf(src, dest, anonymize=None): @@ -438,12 +456,16 @@ def copyfile_edf(src, dest, anonymize=None): fname_dest, ext_dest = _parse_ext(dest) if ext_src.lower() != ext_dest.lower(): - raise ValueError(f'Need to move data with same extension, ' - f' but got "{ext_src}" and "{ext_dest}"') - - if ext_dest in ['.EDF', '.BDF']: - warn('Upper-case extension for EDF/BDF files is not supported ' - 'in BIDS. Converting destination extension to lower-case.') + raise ValueError( + f"Need to move data with same extension, " + f' but got "{ext_src}" and "{ext_dest}"' + ) + + if ext_dest in [".EDF", ".BDF"]: + warn( + "Upper-case extension for EDF/BDF files is not supported " + "in BIDS. Converting destination extension to lower-case." + ) ext_dest = ext_dest.lower() dest = Path(dest).with_suffix(ext_dest) @@ -452,32 +474,32 @@ def copyfile_edf(src, dest, anonymize=None): # Anonymize EDF/BDF data, if requested if anonymize is not None: - if ext_src in ['.bdf', '.BDF']: + if ext_src in [".bdf", ".BDF"]: raw = read_raw_bdf(dest, preload=False, verbose=0) - elif ext_src in ['.edf', '.EDF']: + elif ext_src in [".edf", ".EDF"]: raw = read_raw_edf(dest, preload=False, verbose=0) else: - raise ValueError('Unsupported file type ({0})'.format(ext_src)) + raise ValueError("Unsupported file type ({0})".format(ext_src)) # Get subject info, recording info, and recording date - with open(dest, 'rb') as f: + with open(dest, "rb") as f: f.seek(8) # id_info field starts 8 bytes in - id_info = f.read(80).decode('ascii').rstrip() - rec_info = f.read(80).decode('ascii').rstrip() + id_info = f.read(80).decode("ascii").rstrip() + rec_info = f.read(80).decode("ascii").rstrip() # Parse metadata from file - if len(id_info) == 0 or len(id_info.split(' ')) != 4: + if len(id_info) == 0 or len(id_info.split(" ")) != 4: id_info = "X X X X" - if len(rec_info) == 0 or len(rec_info.split(' ')) != 5: + if len(rec_info) == 0 or len(rec_info.split(" ")) != 5: rec_info = "Startdate X X X X" - pid, sex, birthdate, name = id_info.split(' ') - start_date, admin_code, tech, equip = rec_info.split(' ')[1:5] + pid, sex, birthdate, name = id_info.split(" ") + start_date, admin_code, tech, equip = rec_info.split(" ")[1:5] # Try to anonymize the recording date - daysback, keep_his, _ = _check_anonymize(anonymize, raw, '.edf') + daysback, keep_his, _ = _check_anonymize(anonymize, raw, ".edf") anonymize_info(raw.info, daysback=daysback, keep_his=keep_his) - start_date = '01-JAN-1985' - meas_date = '01.01.85' + start_date = "01-JAN-1985" + meas_date = "01.01.85" # Anonymize ID info and write to file if keep_his: @@ -486,13 +508,12 @@ def copyfile_edf(src, dest, anonymize=None): rec_info = ["Startdate", start_date, admin_code, tech, equip] else: id_info = ["0", "X", "X", "X"] - rec_info = ["Startdate", start_date, "X", - "mne-bids_anonymize", "X"] - with open(dest, 'r+b') as f: + rec_info = ["Startdate", start_date, "X", "mne-bids_anonymize", "X"] + with open(dest, "r+b") as f: f.seek(8) # id_info field starts 8 bytes in - f.write(bytes(" ".join(id_info).ljust(80), 'ascii')) - f.write(bytes(" ".join(rec_info).ljust(80), 'ascii')) - 
f.write(bytes(meas_date, 'ascii'))
+        f.write(bytes(" ".join(id_info).ljust(80), "ascii"))
+        f.write(bytes(" ".join(rec_info).ljust(80), "ascii"))
+        f.write(bytes(meas_date, "ascii"))


 def copyfile_eeglab(src, dest):
@@ -518,44 +539,50 @@ def copyfile_eeglab(src, dest):
     copyfile_kit

     """
-    if not mne.utils.check_version('scipy', '1.5.0'):  # pragma: no cover
-        raise ImportError('SciPy >=1.5.0 is required handling EEGLAB data.')
+    if not mne.utils.check_version("scipy", "1.5.0"):  # pragma: no cover
+        raise ImportError("SciPy >=1.5.0 is required for handling EEGLAB data.")

     # Get extension of the EEGLAB file
     _, ext_src = _parse_ext(src)
     fname_dest, ext_dest = _parse_ext(dest)
     if ext_src != ext_dest:
-        raise ValueError(f'Need to move data with same extension'
-                         f' but got {ext_src}, {ext_dest}')
+        raise ValueError(
+            f"Need to move data with same extension" f" but got {ext_src}, {ext_dest}"
+        )

     # Load the EEG struct
     # NOTE: do *not* simplify cells, because this changes the underlying
     # structure and potentially breaks re-reading of the file
     uint16_codec = None
-    eeg = loadmat(file_name=src, simplify_cells=False,
-                  appendmat=False, uint16_codec=uint16_codec, mat_dtype=True)
+    eeg = loadmat(
+        file_name=src,
+        simplify_cells=False,
+        appendmat=False,
+        uint16_codec=uint16_codec,
+        mat_dtype=True,
+    )
     oldstyle = False
-    if 'EEG' in eeg:
-        eeg = eeg['EEG']
+    if "EEG" in eeg:
+        eeg = eeg["EEG"]
         oldstyle = True

     has_fdt_link = False
     try:
         # If the data field is a string, it points to a .fdt file in src dir
-        if isinstance(eeg['data'][0, 0][0], str):
+        if isinstance(eeg["data"][0, 0][0], str):
             has_fdt_link = True
     except IndexError:
         pass

     if has_fdt_link:
-        fdt_fname = eeg['data'][0, 0][0]
+        fdt_fname = eeg["data"][0, 0][0]

-        assert fdt_fname.endswith('.fdt'), f'Unexpected fdt name: {fdt_fname}'
+        assert fdt_fname.endswith(".fdt"), f"Unexpected fdt name: {fdt_fname}"

         head, _ = op.split(src)
         fdt_path = op.join(head, fdt_fname)

         # Copy the .fdt file and give it a new name
-        fdt_name_new = fname_dest + '.fdt'
+        fdt_name_new = fname_dest + ".fdt"
         sh.copyfile(fdt_path, fdt_name_new)

         # Now adjust the pointer in the .set file
@@ -563,7 +590,7 @@ def copyfile_eeglab(src, dest):
         _, tail = op.split(fdt_name_new)
         new_value = np.empty((1, 1), dtype=object)
         new_value[0, 0] = np.atleast_1d(np.array(tail))
-        eeg['data'] = new_value
+        eeg["data"] = new_value

         # Save the EEG dictionary as a Matlab struct again
         mdict = dict(EEG=eeg) if oldstyle else eeg
@@ -593,12 +620,9 @@ def copyfile_bti(raw, dest):
     copyfile_kit

     """
-    pdf_fname = 'c,rfDC'
-    if raw.info['highpass'] is not None:
-        pdf_fname = 'c,rf%0.1fHz' % raw.info['highpass']
-    sh.copyfile(raw._init_kwargs['pdf_fname'],
-                op.join(dest, pdf_fname))
-    sh.copyfile(raw._init_kwargs['config_fname'],
-                op.join(dest, 'config'))
-    sh.copyfile(raw._init_kwargs['head_shape_fname'],
-                op.join(dest, 'hs_file'))
+    pdf_fname = "c,rfDC"
+    if raw.info["highpass"] is not None:
+        pdf_fname = "c,rf%0.1fHz" % raw.info["highpass"]
+    sh.copyfile(raw._init_kwargs["pdf_fname"], op.join(dest, pdf_fname))
+    sh.copyfile(raw._init_kwargs["config_fname"], op.join(dest, "config"))
+    sh.copyfile(raw._init_kwargs["head_shape_fname"], op.join(dest, "hs_file"))
diff --git a/mne_bids/dig.py b/mne_bids/dig.py
index 2416927a7..56ffcd3e0 100644
--- a/mne_bids/dig.py
+++ b/mne_bids/dig.py
@@ -14,33 +14,41 @@
 import mne
 import numpy as np
 from mne.io.constants import FIFF
-from mne.utils import (logger, _validate_type, _check_option,
-                       get_subjects_dir)
+from mne.utils import logger, _validate_type, _check_option,
get_subjects_dir from mne.io.pick import _picks_to_idx -from mne_bids.config import (ALLOWED_SPACES, BIDS_COORDINATE_UNITS, - MNE_TO_BIDS_FRAMES, BIDS_TO_MNE_FRAMES, - MNE_FRAME_TO_STR, MNE_STR_TO_FRAME, - BIDS_COORD_FRAME_DESCRIPTIONS, - BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS) +from mne_bids.config import ( + ALLOWED_SPACES, + BIDS_COORDINATE_UNITS, + MNE_TO_BIDS_FRAMES, + BIDS_TO_MNE_FRAMES, + MNE_FRAME_TO_STR, + MNE_STR_TO_FRAME, + BIDS_COORD_FRAME_DESCRIPTIONS, + BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS, +) from mne_bids.tsv_handler import _from_tsv -from mne_bids.utils import (_scale_coord_to_meters, _write_json, _write_tsv, - verbose, warn, _import_nibabel) +from mne_bids.utils import ( + _scale_coord_to_meters, + _write_json, + _write_tsv, + verbose, + warn, + _import_nibabel, +) from mne_bids.path import BIDSPath -data_dir = Path(__file__).parent / 'data' +data_dir = Path(__file__).parent / "data" -def _handle_electrodes_reading(electrodes_fname, coord_frame, - coord_unit): +def _handle_electrodes_reading(electrodes_fname, coord_frame, coord_unit): """Read associated electrodes.tsv and populate raw. Handle xyz coordinates and coordinate frame of each channel. """ - logger.info('Reading electrode ' - 'coords from {}.'.format(electrodes_fname)) + logger.info("Reading electrode " "coords from {}.".format(electrodes_fname)) electrodes_dict = _from_tsv(electrodes_fname) - ch_names_tsv = electrodes_dict['name'] + ch_names_tsv = electrodes_dict["name"] def _float_or_nan(val): if val == "n/a": @@ -49,22 +57,20 @@ def _float_or_nan(val): return float(val) # convert coordinates to float and create list of tuples - electrodes_dict['x'] = [_float_or_nan(x) for x in electrodes_dict['x']] - electrodes_dict['y'] = [_float_or_nan(x) for x in electrodes_dict['y']] - electrodes_dict['z'] = [_float_or_nan(x) for x in electrodes_dict['z']] - ch_names_raw = [x for i, x in enumerate(ch_names_tsv) - if electrodes_dict['x'][i] != "n/a"] - ch_locs = np.c_[electrodes_dict['x'], - electrodes_dict['y'], - electrodes_dict['z']] + electrodes_dict["x"] = [_float_or_nan(x) for x in electrodes_dict["x"]] + electrodes_dict["y"] = [_float_or_nan(x) for x in electrodes_dict["y"]] + electrodes_dict["z"] = [_float_or_nan(x) for x in electrodes_dict["z"]] + ch_names_raw = [ + x for i, x in enumerate(ch_names_tsv) if electrodes_dict["x"][i] != "n/a" + ] + ch_locs = np.c_[electrodes_dict["x"], electrodes_dict["y"], electrodes_dict["z"]] # convert coordinates to meters ch_locs = _scale_coord_to_meters(ch_locs, coord_unit) # create mne.DigMontage ch_pos = dict(zip(ch_names_raw, ch_locs)) - montage = mne.channels.make_dig_montage(ch_pos=ch_pos, - coord_frame=coord_frame) + montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame=coord_frame) return montage @@ -74,42 +80,39 @@ def _handle_coordsystem_reading(coordsystem_fpath, datatype): Handle reading the coordinate frame and coordinate unit of each electrode. 
""" - with open(coordsystem_fpath, 'r', encoding='utf-8-sig') as fin: + with open(coordsystem_fpath, "r", encoding="utf-8-sig") as fin: coordsystem_json = json.load(fin) - if datatype == 'meg': - coord_frame = coordsystem_json['MEGCoordinateSystem'] - coord_unit = coordsystem_json['MEGCoordinateUnits'] - coord_frame_desc = coordsystem_json.get('MEGCoordinateDescription', - None) - elif datatype == 'eeg': - coord_frame = coordsystem_json['EEGCoordinateSystem'] - coord_unit = coordsystem_json['EEGCoordinateUnits'] - coord_frame_desc = coordsystem_json.get('EEGCoordinateDescription', - None) - elif datatype == 'ieeg': - coord_frame = coordsystem_json['iEEGCoordinateSystem'] - coord_unit = coordsystem_json['iEEGCoordinateUnits'] - coord_frame_desc = coordsystem_json.get('iEEGCoordinateDescription', - None) - - msg = f'Reading coordinate system frame {coord_frame}' + if datatype == "meg": + coord_frame = coordsystem_json["MEGCoordinateSystem"] + coord_unit = coordsystem_json["MEGCoordinateUnits"] + coord_frame_desc = coordsystem_json.get("MEGCoordinateDescription", None) + elif datatype == "eeg": + coord_frame = coordsystem_json["EEGCoordinateSystem"] + coord_unit = coordsystem_json["EEGCoordinateUnits"] + coord_frame_desc = coordsystem_json.get("EEGCoordinateDescription", None) + elif datatype == "ieeg": + coord_frame = coordsystem_json["iEEGCoordinateSystem"] + coord_unit = coordsystem_json["iEEGCoordinateUnits"] + coord_frame_desc = coordsystem_json.get("iEEGCoordinateDescription", None) + + msg = f"Reading coordinate system frame {coord_frame}" if coord_frame_desc: - msg += f': {coord_frame_desc}' + msg += f": {coord_frame_desc}" return coord_frame, coord_unit def _get_impedances(raw, names): """Get the impedance values in kOhm from raw.impedances.""" - if not hasattr(raw, 'impedances'): # pragma: no cover - return ['n/a'] * len(names) - no_info = {'imp': np.nan, 'imp_unit': 'kOhm'} + if not hasattr(raw, "impedances"): # pragma: no cover + return ["n/a"] * len(names) + no_info = {"imp": np.nan, "imp_unit": "kOhm"} impedance_dicts = [raw.impedances.get(name, no_info) for name in names] # If we encounter a unit not defined in `scalings`, return NaN - scalings = {'kOhm': 1, 'Ohm': 0.001} + scalings = {"kOhm": 1, "Ohm": 0.001} impedances = [ - imp_dict['imp'] * scalings.get(imp_dict['imp_unit'], np.nan) + imp_dict["imp"] * scalings.get(imp_dict["imp_unit"], np.nan) for imp_dict in impedance_dicts ] # replace np.nan with BIDS 'n/a' representation @@ -138,47 +141,49 @@ def _write_electrodes_tsv(raw, fname, datatype, overwrite=False): """ # create list of channel coordinates and names x, y, z, names = list(), list(), list(), list() - for ch in raw.info['chs']: - if ch['kind'] == FIFF.FIFFV_STIM_CH: - logger.debug(f"Not writing stim chan {ch['ch_name']} " - f"to electrodes.tsv") + for ch in raw.info["chs"]: + if ch["kind"] == FIFF.FIFFV_STIM_CH: + logger.debug(f"Not writing stim chan {ch['ch_name']} " f"to electrodes.tsv") continue - elif ( - np.isnan(ch['loc'][:3]).any() or - np.allclose(ch['loc'][:3], 0) - ): - x.append('n/a') - y.append('n/a') - z.append('n/a') + elif np.isnan(ch["loc"][:3]).any() or np.allclose(ch["loc"][:3], 0): + x.append("n/a") + y.append("n/a") + z.append("n/a") else: - x.append(ch['loc'][0]) - y.append(ch['loc'][1]) - z.append(ch['loc'][2]) - names.append(ch['ch_name']) + x.append(ch["loc"][0]) + y.append(ch["loc"][1]) + z.append(ch["loc"][2]) + names.append(ch["ch_name"]) # create OrderedDict to write to tsv file if datatype == "ieeg": # XXX: size should be included in 
the future - sizes = ['n/a'] * len(names) - data = OrderedDict([('name', names), - ('x', x), - ('y', y), - ('z', z), - ('size', sizes), - ]) - elif datatype == 'eeg': - data = OrderedDict([('name', names), - ('x', x), - ('y', y), - ('z', z), - ]) + sizes = ["n/a"] * len(names) + data = OrderedDict( + [ + ("name", names), + ("x", x), + ("y", y), + ("z", z), + ("size", sizes), + ] + ) + elif datatype == "eeg": + data = OrderedDict( + [ + ("name", names), + ("x", x), + ("y", y), + ("z", z), + ] + ) else: # pragma: no cover raise RuntimeError("datatype {} not supported.".format(datatype)) # Add impedance values if available, currently only BrainVision: # https://github.com/mne-tools/mne-python/pull/7974 - if hasattr(raw, 'impedances'): - data['impedance'] = _get_impedances(raw, names) + if hasattr(raw, "impedances"): + data["impedance"] = _get_impedances(raw, names) # note that any coordsystem.json file shared within sessions # will be the same across all runs (currently). So @@ -189,13 +194,18 @@ def _write_electrodes_tsv(raw, fname, datatype, overwrite=False): electrodes_tsv = _from_tsv(fname) # cast values to str to make equality check work - if any([list(map(str, vals1)) != list(vals2) for vals1, vals2 in - zip(data.values(), electrodes_tsv.values())]): + if any( + [ + list(map(str, vals1)) != list(vals2) + for vals1, vals2 in zip(data.values(), electrodes_tsv.values()) + ] + ): raise RuntimeError( - f'Trying to write electrodes.tsv, but it already ' - f'exists at {fname} and the contents do not match. ' - f'You must differentiate this electrodes.tsv file ' - f'from the existing one, or set "overwrite" to True.') + f"Trying to write electrodes.tsv, but it already " + f"exists at {fname} and the contents do not match. " + f"You must differentiate this electrodes.tsv file " + f'from the existing one, or set "overwrite" to True.' + ) _write_tsv(fname, data, overwrite=True) @@ -214,7 +224,7 @@ def _write_optodes_tsv(raw, fname, overwrite=False, verbose=True): verbose : bool Set verbose output to True or False. """ - picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True) + picks = _picks_to_idx(raw.info, "fnirs", exclude=[], allow_empty=True) sources = np.zeros(picks.shape) detectors = np.zeros(picks.shape) for ii in picks: @@ -223,16 +233,18 @@ def _write_optodes_tsv(raw, fname, overwrite=False, verbose=True): # pair, followed by the wavelength frequency. # The following code extracts the source and detector # numbers from the channel name. 
-        ch1_name_info = re.match(r'S(\d+)_D(\d+) (\d+)',
-                                 raw.info['chs'][ii]['ch_name'])
+        ch1_name_info = re.match(r"S(\d+)_D(\d+) (\d+)", raw.info["chs"][ii]["ch_name"])
         sources[ii] = ch1_name_info.groups()[0]
         detectors[ii] = ch1_name_info.groups()[1]
     unique_sources = np.unique(sources)
     n_sources = len(unique_sources)
     unique_detectors = np.unique(detectors)
-    names = np.concatenate((
-        ["S" + str(s) for s in unique_sources.astype(int)],
-        ["D" + str(d) for d in unique_detectors.astype(int)]))
+    names = np.concatenate(
+        (
+            ["S" + str(s) for s in unique_sources.astype(int)],
+            ["D" + str(d) for d in unique_detectors.astype(int)],
+        )
+    )

     xs = np.zeros(names.shape)
     ys = np.zeros(names.shape)
@@ -249,21 +261,30 @@ def _write_optodes_tsv(raw, fname, overwrite=False, verbose=True):
         zs[i + n_sources] = raw.info["chs"][d_idx]["loc"][8]

     ch_data = {
-        'name': names,
-        'type': np.concatenate(
-            (np.full(len(unique_sources), 'source'),
-             np.full(len(unique_detectors), 'detector'))
+        "name": names,
+        "type": np.concatenate(
+            (
+                np.full(len(unique_sources), "source"),
+                np.full(len(unique_detectors), "detector"),
+            )
         ),
-        'x': xs,
-        'y': ys,
-        'z': zs,
+        "x": xs,
+        "y": ys,
+        "z": zs,
     }
     _write_tsv(fname, ch_data, overwrite, verbose)


-def _write_coordsystem_json(*, raw, unit, hpi_coord_system,
-                            sensor_coord_system, fname, datatype,
-                            overwrite=False):
+def _write_coordsystem_json(
+    *,
+    raw,
+    unit,
+    hpi_coord_system,
+    sensor_coord_system,
+    fname,
+    datatype,
+    overwrite=False,
+):
     """Create a coordsystem.json file and save it.

     Parameters
@@ -297,53 +318,55 @@ def _write_coordsystem_json(*, raw, unit, hpi_coord_system,
     pos = montage.get_positions()
     dig = list() if montage.dig is None else montage.dig
     coords = dict(
-        NAS=list() if pos['nasion'] is None else pos['nasion'].tolist(),
-        LPA=list() if pos['lpa'] is None else pos['lpa'].tolist(),
-        RPA=list() if pos['rpa'] is None else pos['rpa'].tolist())
+        NAS=list() if pos["nasion"] is None else pos["nasion"].tolist(),
+        LPA=list() if pos["lpa"] is None else pos["lpa"].tolist(),
+        RPA=list() if pos["rpa"] is None else pos["rpa"].tolist(),
+    )

     # get the coordinate frame description
-    sensor_coord_system_descr = (BIDS_COORD_FRAME_DESCRIPTIONS
-                                 .get(sensor_coord_system.lower(), "n/a"))
+    sensor_coord_system_descr = BIDS_COORD_FRAME_DESCRIPTIONS.get(
+        sensor_coord_system.lower(), "n/a"
+    )

     # create the coordinate json data structure based on 'datatype'
-    if datatype == 'meg':
-        hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
+    if datatype == "meg":
+        hpi = {d["ident"]: d for d in dig if d["kind"] == FIFF.FIFFV_POINT_HPI}
         if hpi:
             for ident in hpi.keys():
-                coords['coil%d' % ident] = hpi[ident]['r'].tolist()
+                coords["coil%d" % ident] = hpi[ident]["r"].tolist()

         fid_json = {
-            'MEGCoordinateSystem': sensor_coord_system,
-            'MEGCoordinateUnits': unit,  # XXX validate this
-            'MEGCoordinateSystemDescription': sensor_coord_system_descr,
-            'HeadCoilCoordinates': coords,
-            'HeadCoilCoordinateSystem': hpi_coord_system,
-            'HeadCoilCoordinateUnits': unit,  # XXX validate this
-            'AnatomicalLandmarkCoordinates': coords,
-            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
-            'AnatomicalLandmarkCoordinateUnits': unit
+            "MEGCoordinateSystem": sensor_coord_system,
+            "MEGCoordinateUnits": unit,  # XXX validate this
+            "MEGCoordinateSystemDescription": sensor_coord_system_descr,
+            "HeadCoilCoordinates": coords,
+            "HeadCoilCoordinateSystem": hpi_coord_system,
+            "HeadCoilCoordinateUnits": unit,  # XXX validate this
+            "AnatomicalLandmarkCoordinates": coords,
+            "AnatomicalLandmarkCoordinateSystem": sensor_coord_system,
+            "AnatomicalLandmarkCoordinateUnits": unit,
         }
-    elif datatype == 'eeg':
+    elif datatype == "eeg":
         fid_json = {
-            'EEGCoordinateSystem': sensor_coord_system,
-            'EEGCoordinateUnits': unit,
-            'EEGCoordinateSystemDescription': sensor_coord_system_descr,
-            'AnatomicalLandmarkCoordinates': coords,
-            'AnatomicalLandmarkCoordinateSystem': sensor_coord_system,
-            'AnatomicalLandmarkCoordinateUnits': unit,
+            "EEGCoordinateSystem": sensor_coord_system,
+            "EEGCoordinateUnits": unit,
+            "EEGCoordinateSystemDescription": sensor_coord_system_descr,
+            "AnatomicalLandmarkCoordinates": coords,
+            "AnatomicalLandmarkCoordinateSystem": sensor_coord_system,
+            "AnatomicalLandmarkCoordinateUnits": unit,
         }
     elif datatype == "ieeg":
         fid_json = {
             # (Other, Pixels, ACPC)
-            'iEEGCoordinateSystem': sensor_coord_system,
-            'iEEGCoordinateSystemDescription': sensor_coord_system_descr,
-            'iEEGCoordinateUnits': unit,  # m (MNE), mm, cm , or pixels
+            "iEEGCoordinateSystem": sensor_coord_system,
+            "iEEGCoordinateSystemDescription": sensor_coord_system_descr,
+            "iEEGCoordinateUnits": unit,  # m (MNE), mm, cm, or pixels
         }
     elif datatype == "nirs":
         fid_json = {
-            'NIRSCoordinateSystem': sensor_coord_system,
-            'NIRSCoordinateSystemDescription': sensor_coord_system_descr,
-            'NIRSCoordinateUnits': unit,
+            "NIRSCoordinateSystem": sensor_coord_system,
+            "NIRSCoordinateSystemDescription": sensor_coord_system_descr,
+            "NIRSCoordinateUnits": unit,
         }

     # note that any coordsystem.json file shared within sessions
@@ -352,19 +375,19 @@ def _write_coordsystem_json(*, raw, unit, hpi_coord_system,
     # XXX: improve later when BIDS is updated
     # check that there already exists a coordsystem.json
     if Path(fname).exists() and not overwrite:
-        with open(fname, 'r', encoding='utf-8-sig') as fin:
+        with open(fname, "r", encoding="utf-8-sig") as fin:
             coordsystem_dict = json.load(fin)
         if fid_json != coordsystem_dict:
             raise RuntimeError(
-                f'Trying to write coordsystem.json, but it already '
-                f'exists at {fname} and the contents do not match. '
-                f'You must differentiate this coordsystem.json file '
-                f'from the existing one, or set "overwrite" to True.')
+                f"Trying to write coordsystem.json, but it already "
+                f"exists at {fname} and the contents do not match. "
+                f"You must differentiate this coordsystem.json file "
+                f'from the existing one, or set "overwrite" to True.'
+            )

     _write_json(fname, fid_json, overwrite=True)


-def _write_dig_bids(bids_path, raw, montage=None, acpc_aligned=False,
-                    overwrite=False):
+def _write_dig_bids(bids_path, raw, montage=None, acpc_aligned=False, overwrite=False):
     """Write BIDS formatted DigMontage from Raw instance.

     Handles coordsystem.json and electrodes.tsv writing
@@ -396,92 +419,114 @@ def _write_dig_bids(bids_path, raw, montage=None, acpc_aligned=False,
         montage = raw.get_montage()
     else:  # assign montage to raw but suppress any coordinate transforms
         montage = montage.copy()  # don't modify original
-        montage_coord_frame = montage.get_positions()['coord_frame']
-        fids = [d for d in montage.dig  # save to add back
-                if d['kind'] == FIFF.FIFFV_POINT_CARDINAL]
+        montage_coord_frame = montage.get_positions()["coord_frame"]
+        fids = [
+            d
+            for d in montage.dig  # save to add back
+            if d["kind"] == FIFF.FIFFV_POINT_CARDINAL
+        ]
         montage.remove_fiducials()  # prevent coordinate transform
         with warnings.catch_warnings():
-            warnings.filterwarnings(action='ignore', category=RuntimeWarning,
-                                    message='.*nasion not found', module='mne')
+            warnings.filterwarnings(
+                action="ignore",
+                category=RuntimeWarning,
+                message=".*nasion not found",
+                module="mne",
+            )
             raw.set_montage(montage)
-        for ch in raw.info['chs']:
-            ch['coord_frame'] = MNE_STR_TO_FRAME[montage_coord_frame]
-        for d in raw.info['dig']:
-            d['coord_frame'] = MNE_STR_TO_FRAME[montage_coord_frame]
+        for ch in raw.info["chs"]:
+            ch["coord_frame"] = MNE_STR_TO_FRAME[montage_coord_frame]
+        for d in raw.info["dig"]:
+            d["coord_frame"] = MNE_STR_TO_FRAME[montage_coord_frame]
         with raw.info._unlock():  # add back fiducials
-            raw.info['dig'] = fids + raw.info['dig']
+            raw.info["dig"] = fids + raw.info["dig"]

     # get the accepted mne-python coordinate frames
-    coord_frame_int = int(montage.dig[0]['coord_frame'])
+    coord_frame_int = int(montage.dig[0]["coord_frame"])
     mne_coord_frame = MNE_FRAME_TO_STR.get(coord_frame_int, None)
     coord_frame = MNE_TO_BIDS_FRAMES.get(mne_coord_frame, None)

-    if coord_frame == 'CapTrak' and bids_path.datatype in ('eeg', 'nirs'):
+    if coord_frame == "CapTrak" and bids_path.datatype in ("eeg", "nirs"):
         pos = raw.get_montage().get_positions()
-        if any([pos[fid_key] is None for fid_key in ('nasion', 'lpa', 'rpa')]):
-            raise RuntimeError("'head' coordinate frame must contain nasion "
-                               "and left and right pre-auricular point "
-                               "landmarks")
-
-    if bids_path.datatype == 'ieeg' and bids_path.space in (None, 'ACPC') and \
-            mne_coord_frame == 'ras':
+        if any([pos[fid_key] is None for fid_key in ("nasion", "lpa", "rpa")]):
+            raise RuntimeError(
+                "'head' coordinate frame must contain nasion "
+                "and left and right pre-auricular point "
+                "landmarks"
+            )
+
+    if (
+        bids_path.datatype == "ieeg"
+        and bids_path.space in (None, "ACPC")
+        and mne_coord_frame == "ras"
+    ):
         if not acpc_aligned:
             raise RuntimeError(
-                '`acpc_aligned` is False, if your T1 is not aligned '
-                'to ACPC and the coordinates are in fact in ACPC '
-                'space there will be no way to relate the coordinates '
-                'to the T1. If the T1 is ACPC-aligned, use '
-                '`acpc_aligned=True`')
-        coord_frame = 'ACPC'
+                "`acpc_aligned` is False; if your T1 is not aligned "
+                "to ACPC and the coordinates are in fact in ACPC "
+                "space, there will be no way to relate the coordinates "
+                "to the T1. If the T1 is ACPC-aligned, use "
+                "`acpc_aligned=True`"
+            )
+        coord_frame = "ACPC"

     if bids_path.space is None:  # no space, use MNE coord frame
         if coord_frame is None:  # if no MNE coord frame, skip
-            warn("Coordinate frame could not be inferred from the raw object "
-                 "and the BIDSPath.space was none, skipping the writing of "
-                 "channel positions")
+            warn(
+                "Coordinate frame could not be inferred from the raw object "
+                "and the BIDSPath.space was none, skipping the writing of "
+                "channel positions"
+            )
             return
     else:  # either a space and an MNE coord frame or just a space
         if coord_frame is None:  # just a space, use that
             coord_frame = bids_path.space
         else:  # space and raw have coordinate frame, check match
             if bids_path.space != coord_frame and not (
-                    coord_frame == 'fsaverage' and
-                    bids_path.space == 'MNI305'):  # fsaverage == MNI305
-                raise ValueError('Coordinates in the raw object or montage '
-                                 f'are in the {coord_frame} coordinate '
-                                 'frame but BIDSPath.space is '
-                                 f'{bids_path.space}')
+                coord_frame == "fsaverage" and bids_path.space == "MNI305"
+            ):  # fsaverage == MNI305
+                raise ValueError(
+                    "Coordinates in the raw object or montage "
+                    f"are in the {coord_frame} coordinate "
+                    "frame but BIDSPath.space is "
+                    f"{bids_path.space}"
+                )

     # create electrodes/coordsystem files using a subset of entities
     # that are specified for these files in the specification
     coord_file_entities = {
-        'root': bids_path.root,
-        'datatype': bids_path.datatype,
-        'subject': bids_path.subject,
-        'session': bids_path.session,
-        'acquisition': bids_path.acquisition,
-        'space': None if bids_path.datatype == 'nirs' else coord_frame
+        "root": bids_path.root,
+        "datatype": bids_path.datatype,
+        "subject": bids_path.subject,
+        "session": bids_path.session,
+        "acquisition": bids_path.acquisition,
+        "space": None if bids_path.datatype == "nirs" else coord_frame,
     }
-    channels_suffix = \
-        'optodes' if bids_path.datatype == 'nirs' else 'electrodes'
-    _channels_fun = _write_optodes_tsv if bids_path.datatype == 'nirs' else \
-        _write_electrodes_tsv
-    channels_path = BIDSPath(**coord_file_entities, suffix=channels_suffix,
-                             extension='.tsv')
-    coordsystem_path = BIDSPath(**coord_file_entities, suffix='coordsystem',
-                                extension='.json')
+    channels_suffix = "optodes" if bids_path.datatype == "nirs" else "electrodes"
+    _channels_fun = (
+        _write_optodes_tsv if bids_path.datatype == "nirs" else _write_electrodes_tsv
+    )
+    channels_path = BIDSPath(
+        **coord_file_entities, suffix=channels_suffix, extension=".tsv"
+    )
+    coordsystem_path = BIDSPath(
+        **coord_file_entities, suffix="coordsystem", extension=".json"
+    )

     # Now write the data to the elec coords and the coordsystem
     _channels_fun(raw, channels_path, bids_path.datatype, overwrite)
-    _write_coordsystem_json(raw=raw, unit=unit, hpi_coord_system='n/a',
-                            sensor_coord_system=coord_frame,
-                            fname=coordsystem_path,
-                            datatype=bids_path.datatype,
-                            overwrite=overwrite)
-
-
-def _read_dig_bids(electrodes_fpath, coordsystem_fpath,
-                   datatype, raw):
+    _write_coordsystem_json(
+        raw=raw,
+        unit=unit,
+        hpi_coord_system="n/a",
+        sensor_coord_system=coord_frame,
+        fname=coordsystem_path,
+        datatype=bids_path.datatype,
+        overwrite=overwrite,
+    )
+
+
+def _read_dig_bids(electrodes_fpath, coordsystem_fpath, datatype, raw):
     """Read MNE-Python formatted DigMontage from BIDS files.

     Handles coordsystem.json and electrodes.tsv reading
@@ -501,32 +546,40 @@ def _read_dig_bids(electrodes_fpath, coordsystem_fpath,
         will be set in place.
""" bids_coord_frame, bids_coord_unit = _handle_coordsystem_reading( - coordsystem_fpath, datatype) + coordsystem_fpath, datatype + ) if bids_coord_frame not in ALLOWED_SPACES[datatype]: - warn(f'"{bids_coord_frame}" is not a BIDS-acceptable coordinate frame ' - f'for {datatype.upper()}. The supported coordinate frames are: ' - '{}'.format(ALLOWED_SPACES[datatype])) + warn( + f'"{bids_coord_frame}" is not a BIDS-acceptable coordinate frame ' + f"for {datatype.upper()}. The supported coordinate frames are: " + "{}".format(ALLOWED_SPACES[datatype]) + ) coord_frame = None elif bids_coord_frame in BIDS_TO_MNE_FRAMES: coord_frame = BIDS_TO_MNE_FRAMES.get(bids_coord_frame, None) else: - warn(f"{bids_coord_frame} is not an MNE-Python coordinate frame " - f"for {datatype.upper()} data and so will be set to 'unknown'") - coord_frame = 'unknown' + warn( + f"{bids_coord_frame} is not an MNE-Python coordinate frame " + f"for {datatype.upper()} data and so will be set to 'unknown'" + ) + coord_frame = "unknown" # check coordinate units if bids_coord_unit not in BIDS_COORDINATE_UNITS: - warn(f"Coordinate unit is not an accepted BIDS unit for " - f"{electrodes_fpath}. Please specify to be one of " - f"{BIDS_COORDINATE_UNITS}. Skipping electrodes.tsv reading...") + warn( + f"Coordinate unit is not an accepted BIDS unit for " + f"{electrodes_fpath}. Please specify to be one of " + f"{BIDS_COORDINATE_UNITS}. Skipping electrodes.tsv reading..." + ) coord_frame = None # montage is interpretable only if coordinate frame was properly parsed if coord_frame is not None: # read in electrode coordinates as a DigMontage object - montage = _handle_electrodes_reading(electrodes_fpath, coord_frame, - bids_coord_unit) + montage = _handle_electrodes_reading( + electrodes_fpath, coord_frame, bids_coord_unit + ) else: montage = None @@ -535,32 +588,37 @@ def _read_dig_bids(electrodes_fpath, coordsystem_fpath, ch_pos = montage._get_ch_pos() nan_chs = [] for ch_name, ch_coord in ch_pos.items(): - if any(np.isnan(ch_coord)) and ch_name not in raw.info['bads']: + if any(np.isnan(ch_coord)) and ch_name not in raw.info["bads"]: nan_chs.append(ch_name) if len(nan_chs) > 0: - warn(f"There are channels without locations " - f"(n/a) that are not marked as bad: {nan_chs}") + warn( + f"There are channels without locations " + f"(n/a) that are not marked as bad: {nan_chs}" + ) # add montage to Raw object # XXX: Starting with mne 0.24, this will raise a RuntimeWarning # if channel types are included outside of # (EEG/sEEG/ECoG/DBS/fNIRS). Probably needs a fix in the future. 
    with warnings.catch_warnings():
-        warnings.filterwarnings(action='ignore', category=RuntimeWarning,
-                                message='.*nasion not found', module='mne')
-        raw.set_montage(montage, on_missing='warn')
+        warnings.filterwarnings(
+            action="ignore",
+            category=RuntimeWarning,
+            message=".*nasion not found",
+            module="mne",
+        )
+        raw.set_montage(montage, on_missing="warn")

     # put back in unknown for unknown coordinate frame
-    if coord_frame == 'unknown':
-        for ch in raw.info['chs']:
-            ch['coord_frame'] = MNE_STR_TO_FRAME['unknown']
-        for d in raw.info['dig']:
-            d['coord_frame'] = MNE_STR_TO_FRAME['unknown']
+    if coord_frame == "unknown":
+        for ch in raw.info["chs"]:
+            ch["coord_frame"] = MNE_STR_TO_FRAME["unknown"]
+        for d in raw.info["dig"]:
+            d["coord_frame"] = MNE_STR_TO_FRAME["unknown"]


 @verbose
-def template_to_head(info, space, coord_frame='auto', unit='auto',
-                     verbose=None):
+def template_to_head(info, space, coord_frame="auto", unit="auto", verbose=None):
     """Transform a BIDS standard template montage to the head coordinate frame.

     Parameters
@@ -601,58 +659,53 @@ def template_to_head(info, space, coord_frame='auto', unit='auto',
     """
     _validate_type(info, mne.io.Info)
-    _check_option('space', space, BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS)
-    _check_option('coord_frame', coord_frame,
-                  ('auto', 'mri', 'mri_voxel', 'ras'))
-    _check_option('unit', unit, ('auto', 'm', 'mm'))
+    _check_option("space", space, BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS)
+    _check_option("coord_frame", coord_frame, ("auto", "mri", "mri_voxel", "ras"))
+    _check_option("unit", unit, ("auto", "m", "mm"))
     montage = info.get_montage()
     if montage is None:
-        raise RuntimeError('No montage found in the `raw` object')
+        raise RuntimeError("No montage found in the `raw` object")
     montage.remove_fiducials()  # we will add fiducials so remove any
     pos = montage.get_positions()
-    if pos['coord_frame'] not in ('mni_tal', 'unknown'):
+    if pos["coord_frame"] not in ("mni_tal", "unknown"):
         raise RuntimeError(
             "Montage coordinate frame '{}' not expected for a template "
-            "montage, should be 'unknown' or 'mni_tal'".format(
-                pos['coord_frame']))
-    locs = np.array(list(pos['ch_pos'].values()))
+            "montage, should be 'unknown' or 'mni_tal'".format(pos["coord_frame"])
+        )
+    locs = np.array(list(pos["ch_pos"].values()))
     locs = locs[~np.any(np.isnan(locs), axis=1)]  # only channels with loc
     if locs.size == 0:
-        raise RuntimeError('No channel locations found in the montage')
-    if unit == 'auto':
-        unit = 'm' if abs(locs - locs.mean(axis=0)).max() < 1 else 'mm'
-    if coord_frame == 'auto':
-        coord_frame = 'mri_voxel' if locs.min() >= 0 else 'ras'
+        raise RuntimeError("No channel locations found in the montage")
+    if unit == "auto":
+        unit = "m" if abs(locs - locs.mean(axis=0)).max() < 1 else "mm"
+    if coord_frame == "auto":
+        coord_frame = "mri_voxel" if locs.min() >= 0 else "ras"
     # transform montage to head
     # set to the right coordinate frame as specified by the user
     for d in montage.dig:  # ensure same coordinate frame
-        d['coord_frame'] = MNE_STR_TO_FRAME[coord_frame]
+        d["coord_frame"] = MNE_STR_TO_FRAME[coord_frame]
     # do the transforms, first ras -> vox if needed
-    if montage.get_positions()['coord_frame'] == 'ras':
-        ras_vox_trans = mne.read_trans(
-            data_dir / f'space-{space}_ras-vox_trans.fif')
-        if unit == 'm':  # must be in mm here
+    if montage.get_positions()["coord_frame"] == "ras":
+        ras_vox_trans = mne.read_trans(data_dir / f"space-{space}_ras-vox_trans.fif")
+        if unit == "m":  # must be in mm here
             for d in montage.dig:
-                d['r'] *= 1000
+                d["r"] *= 1000
         montage.apply_trans(ras_vox_trans)
-    if montage.get_positions()['coord_frame'] == 'mri_voxel':
-        vox_mri_trans = mne.read_trans(
-            data_dir / f'space-{space}_vox-mri_trans.fif')
+    if montage.get_positions()["coord_frame"] == "mri_voxel":
+        vox_mri_trans = mne.read_trans(data_dir / f"space-{space}_vox-mri_trans.fif")
         montage.apply_trans(vox_mri_trans)
-    assert montage.get_positions()['coord_frame'] == 'mri'
-    if not (unit == 'm' and coord_frame == 'mri'):  # if so, already in m
+    assert montage.get_positions()["coord_frame"] == "mri"
+    if not (unit == "m" and coord_frame == "mri"):  # if so, already in m
         for d in montage.dig:
-            d['r'] /= 1000  # mm -> m
+            d["r"] /= 1000  # mm -> m
     # now add fiducials (in mri coordinates)
-    fids = mne.io.read_fiducials(
-        data_dir / f'space-{space}_fiducials.fif'
-    )[0]
+    fids = mne.io.read_fiducials(data_dir / f"space-{space}_fiducials.fif")[0]
     montage.dig = fids + montage.dig  # add fiducials
     for fid in fids:  # ensure also in mri
-        fid['coord_frame'] = MNE_STR_TO_FRAME['mri']
+        fid["coord_frame"] = MNE_STR_TO_FRAME["mri"]
     info.set_montage(montage)  # transform to head
     # finally return montage
-    return info, mne.read_trans(data_dir / f'space-{space}_trans.fif')
+    return info, mne.read_trans(data_dir / f"space-{space}_trans.fif")


 @verbose
@@ -667,29 +720,31 @@ def convert_montage_to_ras(montage, subject, subjects_dir=None, verbose=None):
     %(subjects_dir)s
     %(verbose)s
     """
-    nib = _import_nibabel('converting a montage to RAS')
+    nib = _import_nibabel("converting a montage to RAS")
     subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
-    T1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
+    T1_fname = op.join(subjects_dir, subject, "mri", "T1.mgz")
     if not op.isfile(T1_fname):
-        raise RuntimeError(f'Freesurfer subject ({subject}) and/or '
-                           f'subjects_dir ({subjects_dir}, incorrectly '
-                           'formatted, T1.mgz not found')
+        raise RuntimeError(
+            f"Freesurfer subject ({subject}) and/or "
+            f"subjects_dir ({subjects_dir}) incorrectly "
+            "formatted, T1.mgz not found"
+        )
     T1 = nib.load(T1_fname)

     # transform from "mri" (Freesurfer surface RAS) to "ras" (scanner RAS)
     mri_vox_t = np.linalg.inv(T1.header.get_vox2ras_tkr())
     mri_vox_t[:3, :3] *= 1000  # scale from mm to m
-    mri_vox_trans = mne.transforms.Transform(
-        fro='mri', to='mri_voxel', trans=mri_vox_t)
+    mri_vox_trans = mne.transforms.Transform(fro="mri", to="mri_voxel", trans=mri_vox_t)

     vox_ras_t = T1.header.get_vox2ras()
     vox_ras_t[:3] /= 1000  # scale from mm to m
-    vox_ras_trans = mne.transforms.Transform(
-        fro='mri_voxel', to='ras', trans=vox_ras_t)
+    vox_ras_trans = mne.transforms.Transform(fro="mri_voxel", to="ras", trans=vox_ras_t)
     montage.apply_trans(  # mri->vox + vox->ras = mri->ras
-        mne.transforms.combine_transforms(mri_vox_trans, vox_ras_trans,
-                                          fro='mri', to='ras'))
+        mne.transforms.combine_transforms(
+            mri_vox_trans, vox_ras_trans, fro="mri", to="ras"
+        )
+    )


 @verbose
@@ -710,26 +765,28 @@ def convert_montage_to_mri(montage, subject, subjects_dir=None, verbose=None):
         The transformation matrix from ``'ras'`` (``scanner RAS``)
         to ``'mri'`` (``surface RAS``).
""" - nib = _import_nibabel('converting a montage to MRI') + nib = _import_nibabel("converting a montage to MRI") subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) - T1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz') + T1_fname = op.join(subjects_dir, subject, "mri", "T1.mgz") if not op.isfile(T1_fname): - raise RuntimeError(f'Freesurfer subject ({subject}) and/or ' - f'subjects_dir ({subjects_dir}, incorrectly ' - 'formatted, T1.mgz not found') + raise RuntimeError( + f"Freesurfer subject ({subject}) and/or " + f"subjects_dir ({subjects_dir}, incorrectly " + "formatted, T1.mgz not found" + ) T1 = nib.load(T1_fname) # transform from "ras" (scanner RAS) to "mri" (Freesurfer surface RAS) ras_vox_t = T1.header.get_ras2vox() ras_vox_t[:3, :3] *= 1000 # scale from mm to m - ras_vox_trans = mne.transforms.Transform( - fro='ras', to='mri_voxel', trans=ras_vox_t) + ras_vox_trans = mne.transforms.Transform(fro="ras", to="mri_voxel", trans=ras_vox_t) vox_mri_t = T1.header.get_vox2ras_tkr() vox_mri_t[:3] /= 1000 # scale from mm to m - vox_mri_trans = mne.transforms.Transform( - fro='mri_voxel', to='mri', trans=vox_mri_t) + vox_mri_trans = mne.transforms.Transform(fro="mri_voxel", to="mri", trans=vox_mri_t) montage.apply_trans( # ras->vox + vox->mri = ras->mri - mne.transforms.combine_transforms(ras_vox_trans, vox_mri_trans, - fro='ras', to='mri')) + mne.transforms.combine_transforms( + ras_vox_trans, vox_mri_trans, fro="ras", to="mri" + ) + ) diff --git a/mne_bids/inspect.py b/mne_bids/inspect.py index 6ef2a8d9c..4559d3634 100644 --- a/mne_bids/inspect.py +++ b/mne_bids/inspect.py @@ -14,11 +14,13 @@ from mne.fixes import _compare_version from mne.viz import use_browser_backend -if _compare_version(mne.__version__, '<', '1.0.dev0'): # pragma: no cover +if _compare_version(mne.__version__, "<", "1.0.dev0"): # pragma: no cover from mne.preprocessing import annotate_flat + _annotate_flat_func = annotate_flat else: from mne.preprocessing import annotate_amplitude + _annotate_flat_func = annotate_amplitude from mne_bids import read_raw_bids, mark_channels @@ -28,8 +30,14 @@ @verbose -def inspect_dataset(bids_path, find_flat=True, l_freq=None, h_freq=None, - show_annotations=True, verbose=None): +def inspect_dataset( + bids_path, + find_flat=True, + l_freq=None, + h_freq=None, + show_annotations=True, + verbose=None, +): """Inspect and annotate BIDS raw data. This function allows you to browse MEG, EEG, and iEEG raw data stored in a @@ -101,24 +109,31 @@ def inspect_dataset(bids_path, find_flat=True, l_freq=None, h_freq=None, >>> inspect_dataset(bids_path=bids_path, find_flat=False, # doctest: +SKIP ... 
l_freq=1, h_freq=30) """ - allowed_extensions = set(ALLOWED_DATATYPE_EXTENSIONS['meg'] + - ALLOWED_DATATYPE_EXTENSIONS['eeg'] + - ALLOWED_DATATYPE_EXTENSIONS['ieeg']) - - bids_paths = [p for p in bids_path.match(check=True) - if (p.extension is None or - p.extension in allowed_extensions) and - p.acquisition != 'crosstalk'] + allowed_extensions = set( + ALLOWED_DATATYPE_EXTENSIONS["meg"] + + ALLOWED_DATATYPE_EXTENSIONS["eeg"] + + ALLOWED_DATATYPE_EXTENSIONS["ieeg"] + ) + + bids_paths = [ + p + for p in bids_path.match(check=True) + if (p.extension is None or p.extension in allowed_extensions) + and p.acquisition != "crosstalk" + ] for bids_path_ in bids_paths: - _inspect_raw(bids_path=bids_path_, l_freq=l_freq, h_freq=h_freq, - find_flat=find_flat, show_annotations=show_annotations) + _inspect_raw( + bids_path=bids_path_, + l_freq=l_freq, + h_freq=h_freq, + find_flat=find_flat, + show_annotations=show_annotations, + ) # XXX This this should probably be refactored into a class attribute someday. -_global_vars = dict(raw_fig=None, - dialog_fig=None, - mne_close_key=None) +_global_vars = dict(raw_fig=None, dialog_fig=None, mne_close_key=None) def _inspect_raw(*, bids_path, l_freq, h_freq, find_flat, show_annotations): @@ -128,41 +143,38 @@ def _inspect_raw(*, bids_path, l_freq, h_freq, find_flat, show_annotations): import matplotlib.pyplot as plt extra_params = dict() - if bids_path.extension == '.fif': - extra_params['allow_maxshield'] = 'yes' - raw = read_raw_bids(bids_path, extra_params=extra_params, verbose='error') - old_bads = raw.info['bads'].copy() + if bids_path.extension == ".fif": + extra_params["allow_maxshield"] = "yes" + raw = read_raw_bids(bids_path, extra_params=extra_params, verbose="error") + old_bads = raw.info["bads"].copy() old_annotations = raw.annotations.copy() if find_flat: raw.load_data() # Speeds up processing dramatically - if _annotate_flat_func.__name__ == 'annotate_amplitude': + if _annotate_flat_func.__name__ == "annotate_amplitude": flat_annot, flat_chans = annotate_amplitude( - raw=raw, - flat=0, - min_duration=0.05, - bad_percent=5 + raw=raw, flat=0, min_duration=0.05, bad_percent=5 ) else: # pragma: no cover - flat_annot, flat_chans = annotate_flat( - raw=raw, - min_duration=0.05 - ) + flat_annot, flat_chans = annotate_flat(raw=raw, min_duration=0.05) new_annot = raw.annotations + flat_annot raw.set_annotations(new_annot) - raw.info['bads'] = list(set(raw.info['bads'] + flat_chans)) + raw.info["bads"] = list(set(raw.info["bads"] + flat_chans)) del new_annot, flat_annot else: flat_chans = [] - show_options = bids_path.datatype == 'meg' + show_options = bids_path.datatype == "meg" - with use_browser_backend('matplotlib'): + with use_browser_backend("matplotlib"): fig = raw.plot( - title=f'{bids_path.root.name}: {bids_path.basename}', - highpass=l_freq, lowpass=h_freq, + title=f"{bids_path.root.name}: {bids_path.basename}", + highpass=l_freq, + lowpass=h_freq, show_options=show_options, - block=False, show=False, verbose='warning' + block=False, + show=False, + verbose="warning", ) # Add our own event handlers so that when the MNE Raw Browser is being @@ -171,29 +183,33 @@ def _handle_close(event): mne_raw_fig = event.canvas.figure # Bads alterations are only transferred to `inst` once the figure is # closed; Annotation changes are immediately reflected in `inst` - new_bads = mne_raw_fig.mne.info['bads'].copy() + new_bads = mne_raw_fig.mne.info["bads"].copy() new_annotations = mne_raw_fig.mne.inst.annotations.copy() if not new_annotations: # Ensure it's not an 
empty list, but an empty set of Annotations. new_annotations = mne.Annotations( - onset=[], duration=[], description=[], - orig_time=mne_raw_fig.mne.info['meas_date'] + onset=[], + duration=[], + description=[], + orig_time=mne_raw_fig.mne.info["meas_date"], ) - _save_raw_if_changed(old_bads=old_bads, - new_bads=new_bads, - flat_chans=flat_chans, - old_annotations=old_annotations, - new_annotations=new_annotations, - bids_path=bids_path) - _global_vars['raw_fig'] = None + _save_raw_if_changed( + old_bads=old_bads, + new_bads=new_bads, + flat_chans=flat_chans, + old_annotations=old_annotations, + new_annotations=new_annotations, + bids_path=bids_path, + ) + _global_vars["raw_fig"] = None def _keypress_callback(event): - if event.key == _global_vars['mne_close_key']: + if event.key == _global_vars["mne_close_key"]: _handle_close(event) - fig.canvas.mpl_connect('close_event', _handle_close) - fig.canvas.mpl_connect('key_press_event', _keypress_callback) + fig.canvas.mpl_connect("close_event", _handle_close) + fig.canvas.mpl_connect("key_press_event", _keypress_callback) if not show_annotations: # Remove annotations and kill `_toggle_annotation_fig` method, since @@ -203,27 +219,24 @@ def _keypress_callback(event): fig._toggle_annotation_fig = lambda: None # Ensure it's not an empty list, but an empty set of Annotations. old_annotations = mne.Annotations( - onset=[], duration=[], description=[], - orig_time=raw.info['meas_date'] + onset=[], duration=[], description=[], orig_time=raw.info["meas_date"] ) - if matplotlib.get_backend() != 'agg': + if matplotlib.get_backend() != "agg": plt.show(block=True) - _global_vars['raw_fig'] = fig - _global_vars['mne_close_key'] = fig.mne.close_key + _global_vars["raw_fig"] = fig + _global_vars["mne_close_key"] = fig.mne.close_key def _annotations_almost_equal(old_annotations, new_annotations): """Allow for a tiny bit of floating point precision loss.""" - if (np.array_equal(old_annotations.description, - new_annotations.description) and - np.array_equal(old_annotations.orig_time, - new_annotations.orig_time) and - np.allclose(old_annotations.onset, - new_annotations.onset) and - np.allclose(old_annotations.duration, - new_annotations.duration)): + if ( + np.array_equal(old_annotations.description, new_annotations.description) + and np.array_equal(old_annotations.orig_time, new_annotations.orig_time) + and np.allclose(old_annotations.onset, new_annotations.onset) + and np.allclose(old_annotations.duration, new_annotations.duration) + ): return True else: return False @@ -233,34 +246,40 @@ def _save_annotations(*, annotations, bids_path): # Attach the new Annotations to our raw data so we can easily convert them # to events, which will be stored in the *_events.tsv sidecar. extra_params = dict() - if bids_path.extension == '.fif': - extra_params['allow_maxshield'] = 'yes' + if bids_path.extension == ".fif": + extra_params["allow_maxshield"] = "yes" - raw = read_raw_bids(bids_path=bids_path, extra_params=extra_params, - verbose='warning') + raw = read_raw_bids( + bids_path=bids_path, extra_params=extra_params, verbose="warning" + ) raw.set_annotations(annotations) - events, durs, descrs = _read_events(events=None, event_id=None, - bids_path=bids_path, raw=raw) + events, durs, descrs = _read_events( + events=None, event_id=None, bids_path=bids_path, raw=raw + ) # Write sidecar – or remove it if no events are left. 
-    events_tsv_fname = (bids_path.copy()
-                        .update(suffix='events',
-                                extension='.tsv')
-                        .fpath)
+    events_tsv_fname = bids_path.copy().update(suffix="events", extension=".tsv").fpath
     if len(events) > 0:
-        _events_tsv(events=events, durations=durs, raw=raw,
-                    fname=events_tsv_fname, trial_type=descrs,
-                    overwrite=True)
+        _events_tsv(
+            events=events,
+            durations=durs,
+            raw=raw,
+            fname=events_tsv_fname,
+            trial_type=descrs,
+            overwrite=True,
+        )
     elif events_tsv_fname.exists():
-        logger.info(f'No events remaining after interactive inspection, '
-                    f'removing {events_tsv_fname.name}')
+        logger.info(
+            f"No events remaining after interactive inspection, "
+            f"removing {events_tsv_fname.name}"
+        )
         events_tsv_fname.unlink()


-def _save_raw_if_changed(*, old_bads, new_bads, flat_chans,
-                         old_annotations, new_annotations,
-                         bids_path):
+def _save_raw_if_changed(
+    *, old_bads, new_bads, flat_chans, old_annotations, new_annotations, bids_path
+):
     """Save bad channel selection if it has been changed.

     Parameters
@@ -288,27 +307,30 @@ def _save_raw_if_changed(*, old_bads, new_bads, flat_chans,
         bad_descriptions = []

     # Generate entries for the `status_description` column.
-    channels_tsv_fname = (bids_path.copy()
-                          .update(suffix='channels', extension='.tsv')
-                          .fpath)
+    channels_tsv_fname = (
+        bids_path.copy().update(suffix="channels", extension=".tsv").fpath
+    )
     channels_tsv_data = _from_tsv(channels_tsv_fname)

     for ch_name in bads:
-        idx = channels_tsv_data['name'].index(ch_name)
-        if channels_tsv_data['status'][idx] == 'bad':
+        idx = channels_tsv_data["name"].index(ch_name)
+        if channels_tsv_data["status"][idx] == "bad":
             # Channel was already marked as bad in the data, so retain
             # existing description.
-            description = channels_tsv_data['status_description'][idx]
+            description = channels_tsv_data["status_description"][idx]
         elif ch_name in flat_chans:
-            description = 'Flat channel, auto-detected via MNE-BIDS'
+            description = "Flat channel, auto-detected via MNE-BIDS"
         else:
             # Channel has been manually marked as bad during inspection
-            description = 'Interactive inspection via MNE-BIDS'
+            description = "Interactive inspection via MNE-BIDS"

         bad_descriptions.append(description)
         del ch_name, description

-    del channels_tsv_data, channels_tsv_fname,
+    del (
+        channels_tsv_data,
+        channels_tsv_fname,
+    )

     if _annotations_almost_equal(old_annotations, new_annotations):
         annotations = None
@@ -319,10 +341,12 @@ def _save_raw_if_changed(*, old_bads, new_bads, flat_chans,
         # Nothing has changed, so we can just exit.
         return None

-    return _save_raw_dialog_box(bads=bads,
-                                bad_descriptions=bad_descriptions,
-                                annotations=annotations,
-                                bids_path=bids_path)
+    return _save_raw_dialog_box(
+        bads=bads,
+        bad_descriptions=bad_descriptions,
+        annotations=annotations,
+        bids_path=bids_path,
+    )


 def _save_raw_dialog_box(*, bads, bad_descriptions, annotations, bids_path):
@@ -333,54 +357,62 @@ def _save_raw_dialog_box(*, bads, bad_descriptions, annotations, bids_path):
     from matplotlib.widgets import Button
     from mne.viz.utils import figure_nobar

-    title = 'Save changes?'
-    message = 'You have modified '
+    title = "Save changes?"
+ message = "You have modified " if bads is not None and annotations is None: - message += 'the bad channel selection ' + message += "the bad channel selection " figsize = (7.5, 2.5) elif bads is None and annotations is not None: - message += 'the bad segments selection ' + message += "the bad segments selection " figsize = (7.5, 2.5) else: - message += 'the bad channel and\nannotations selection ' + message += "the bad channel and\nannotations selection " figsize = (8.5, 3) - message += (f'of\n' - f'{bids_path.basename}.\n\n' - f'Would you like to save these changes to the\n' - f'BIDS dataset?') - icon_fname = str(Path(__file__).parent / 'assets' / 'help-128px.png') + message += ( + f"of\n" + f"{bids_path.basename}.\n\n" + f"Would you like to save these changes to the\n" + f"BIDS dataset?" + ) + icon_fname = str(Path(__file__).parent / "assets" / "help-128px.png") icon = plt.imread(icon_fname) fig = figure_nobar(figsize=figsize) - fig.canvas.manager.set_window_title('MNE-BIDS Inspector') - fig.suptitle(title, y=0.95, fontsize='xx-large', fontweight='bold') + fig.canvas.manager.set_window_title("MNE-BIDS Inspector") + fig.suptitle(title, y=0.95, fontsize="xx-large", fontweight="bold") gs = fig.add_gridspec(1, 2, width_ratios=(1.5, 5)) # The dialog box tet. ax_msg = fig.add_subplot(gs[0, 1]) - ax_msg.text(x=0, y=0.8, s=message, fontsize='large', - verticalalignment='top', horizontalalignment='left', - multialignment='left') - ax_msg.axis('off') + ax_msg.text( + x=0, + y=0.8, + s=message, + fontsize="large", + verticalalignment="top", + horizontalalignment="left", + multialignment="left", + ) + ax_msg.axis("off") # The help icon. ax_icon = fig.add_subplot(gs[0, 0]) ax_icon.imshow(icon) - ax_icon.axis('off') + ax_icon.axis("off") # Buttons. ax_save = fig.add_axes([0.6, 0.05, 0.3, 0.1]) ax_dont_save = fig.add_axes([0.1, 0.05, 0.3, 0.1]) - save_button = Button(ax=ax_save, label='Save') - save_button.label.set_fontsize('medium') - save_button.label.set_fontweight('bold') + save_button = Button(ax=ax_save, label="Save") + save_button.label.set_fontsize("medium") + save_button.label.set_fontweight("bold") dont_save_button = Button(ax=ax_dont_save, label="Don't save") - dont_save_button.label.set_fontsize('medium') - dont_save_button.label.set_fontweight('bold') + dont_save_button.label.set_fontsize("medium") + dont_save_button.label.set_fontweight("bold") # Store references to keep buttons alive. fig.save_button = save_button @@ -389,34 +421,33 @@ def _save_raw_dialog_box(*, bads, bad_descriptions, annotations, bids_path): # Define callback functions. def _save_callback(event): plt.close(event.canvas.figure) # Close dialog - _global_vars['dialog_fig'] = None + _global_vars["dialog_fig"] = None if bads is not None: - _save_bads(bads=bads, descriptions=bad_descriptions, - bids_path=bids_path) + _save_bads(bads=bads, descriptions=bad_descriptions, bids_path=bids_path) if annotations is not None: _save_annotations(annotations=annotations, bids_path=bids_path) def _dont_save_callback(event): plt.close(event.canvas.figure) # Close dialog - _global_vars['dialog_fig'] = None + _global_vars["dialog_fig"] = None def _keypress_callback(event): - if event.key in ['enter', 'return']: + if event.key in ["enter", "return"]: _save_callback(event) - elif event.key == _global_vars['mne_close_key']: + elif event.key == _global_vars["mne_close_key"]: _dont_save_callback(event) # Connect events to callback functions. 
    save_button.on_clicked(_save_callback)
     dont_save_button.on_clicked(_dont_save_callback)
-    fig.canvas.mpl_connect('close_event', _dont_save_callback)
-    fig.canvas.mpl_connect('key_press_event', _keypress_callback)
+    fig.canvas.mpl_connect("close_event", _dont_save_callback)
+    fig.canvas.mpl_connect("key_press_event", _keypress_callback)

-    if matplotlib.get_backend() != 'agg':
+    if matplotlib.get_backend() != "agg":
         fig.show()

-    _global_vars['dialog_fig'] = fig
+    _global_vars["dialog_fig"] = fig


 def _save_bads(*, bads, descriptions, bids_path):
@@ -430,6 +461,7 @@ def _save_bads(*, bads, descriptions, bids_path):
         The values to be written to the `status_description` column.
     """
     # We first make all channels not passed as bad here to be marked as good.
-    mark_channels(bids_path=bids_path, ch_names=[], status='good')
-    mark_channels(bids_path=bids_path, ch_names=bads, status='bad',
-                  descriptions=descriptions)
+    mark_channels(bids_path=bids_path, ch_names=[], status="good")
+    mark_channels(
+        bids_path=bids_path, ch_names=bads, status="bad", descriptions=descriptions
+    )
diff --git a/mne_bids/path.py b/mne_bids/path.py
index 890ded288..ac0b433de 100644
--- a/mne_bids/path.py
+++ b/mne_bids/path.py
@@ -20,13 +20,23 @@
 from mne.utils import logger, _validate_type, verbose, _check_fname

 from mne_bids.config import (
-    ALLOWED_PATH_ENTITIES, ALLOWED_FILENAME_EXTENSIONS,
-    ALLOWED_FILENAME_SUFFIX, ALLOWED_PATH_ENTITIES_SHORT,
-    ALLOWED_DATATYPES, ALLOWED_DATATYPE_EXTENSIONS,
+    ALLOWED_PATH_ENTITIES,
+    ALLOWED_FILENAME_EXTENSIONS,
+    ALLOWED_FILENAME_SUFFIX,
+    ALLOWED_PATH_ENTITIES_SHORT,
+    ALLOWED_DATATYPES,
+    ALLOWED_DATATYPE_EXTENSIONS,
     ALLOWED_SPACES,
-    reader, ENTITY_VALUE_TYPE)
-from mne_bids.utils import (_check_key_val, _check_empty_room_basename,
-                            param_regex, _ensure_tuple, warn)
+    reader,
+    ENTITY_VALUE_TYPE,
+)
+from mne_bids.utils import (
+    _check_key_val,
+    _check_empty_room_basename,
+    param_regex,
+    _ensure_tuple,
+    warn,
+)


 def _find_empty_room_candidates(bids_path):
@@ -34,23 +44,28 @@ def _find_empty_room_candidates(bids_path):
     # Check whether we have a BIDS root.
     bids_root = bids_path.root
     if bids_root is None:
-        raise ValueError('The root of the "bids_path" must be set. '
-                         'Please use `bids_path.update(root="<root>")` '
-                         'to set the root of the BIDS folder to read.')
+        raise ValueError(
+            'The root of the "bids_path" must be set. '
+            'Please use `bids_path.update(root="<root>")` '
+            "to set the root of the BIDS folder to read."
+        )

     bids_path = bids_path.copy()

-    datatype = 'meg'  # We're only concerned about MEG data here
+    datatype = "meg"  # We're only concerned about MEG data here
     bids_fname = bids_path.update(suffix=datatype).fpath
     _, ext = _parse_ext(bids_fname)
-    emptyroom_dir = BIDSPath(root=bids_root, subject='emptyroom').directory
+    emptyroom_dir = BIDSPath(root=bids_root, subject="emptyroom").directory

     if not emptyroom_dir.exists():
         return list()

     # Find the empty-room recording sessions.
-    emptyroom_session_dirs = [x for x in emptyroom_dir.iterdir()
-                              if x.is_dir() and str(x.name).startswith('ses-')]
+    emptyroom_session_dirs = [
+        x
+        for x in emptyroom_dir.iterdir()
+        if x.is_dir() and str(x.name).startswith("ses-")
+    ]
     if not emptyroom_session_dirs:  # No session sub-directories found
         emptyroom_session_dirs = [emptyroom_dir]

@@ -59,25 +74,27 @@ def _find_empty_room_candidates(bids_path):
     allowed_extensions = list(reader.keys())
     # `.pdf` is just a "virtual" extension for BTi data (which is stored inside
     # a dedicated directory that doesn't have an extension)
-    del allowed_extensions[allowed_extensions.index('.pdf')]
+    del allowed_extensions[allowed_extensions.index(".pdf")]

     candidate_er_fnames = []
     for session_dir in emptyroom_session_dirs:
-        dir_contents = glob.glob(op.join(session_dir, datatype,
-                                         f'sub-emptyroom_*_{datatype}*'))
+        dir_contents = glob.glob(
+            op.join(session_dir, datatype, f"sub-emptyroom_*_{datatype}*")
+        )
         for item in dir_contents:
             item = Path(item)
-            if ((item.suffix in allowed_extensions) or
-                    (not item.suffix and item.is_dir())):  # Hopefully BTi?
+            if (item.suffix in allowed_extensions) or (
+                not item.suffix and item.is_dir()
+            ):  # Hopefully BTi?
                 candidate_er_fnames.append(item.name)

     candidates = list()
     for er_fname in candidate_er_fnames:
         # get entities from filename
         er_bids_path = get_bids_path_from_fname(er_fname, check=False)
-        er_bids_path.subject = 'emptyroom'  # er subject entity is different
+        er_bids_path.subject = "emptyroom"  # er subject entity is different
         er_bids_path.root = bids_root
-        er_bids_path.datatype = 'meg'
+        er_bids_path.datatype = "meg"
         candidates.append(er_bids_path)

     return candidates
@@ -85,6 +102,7 @@ def _find_empty_room_candidates(bids_path):

 def _find_matched_empty_room(bids_path):
     from mne_bids import read_raw_bids  # avoid circular import.
+
     candidates = _find_empty_room_candidates(bids_path)

     # Walk through recordings, trying to extract the recording date:
@@ -93,12 +111,14 @@ def _find_matched_empty_room(bids_path):
     min_delta_t = np.inf
     date_tie = False
     failed_to_get_er_date_count = 0
-    bids_path = bids_path.copy().update(datatype='meg')
+    bids_path = bids_path.copy().update(datatype="meg")
     raw = read_raw_bids(bids_path=bids_path)
-    if raw.info['meas_date'] is None:
-        raise ValueError('The provided recording does not have a measurement '
-                         'date set. Cannot get matching empty-room file.')
-    ref_date = raw.info['meas_date']
+    if raw.info["meas_date"] is None:
+        raise ValueError(
+            "The provided recording does not have a measurement "
+            "date set. Cannot get matching empty-room file."
+        )
+    ref_date = raw.info["meas_date"]
     del bids_path, raw
     for er_bids_path in candidates:
         # get entities from filename
@@ -107,8 +127,7 @@ def _find_matched_empty_room(bids_path):
         # Try to extract date from filename.
         if er_bids_path.session is not None:
             try:
-                er_meas_date = datetime.strptime(
-                    er_bids_path.session, '%Y%m%d')
+                er_meas_date = datetime.strptime(er_bids_path.session, "%Y%m%d")
             except (ValueError, TypeError):
                 # There is a session in the filename, but it doesn't encode a
                 # valid date.
@@ -117,13 +136,12 @@ def _find_matched_empty_room(bids_path):
         if er_meas_date is None:  # No luck so far! Check info['meas_date']
             _, ext = _parse_ext(er_bids_path.fpath)
             extra_params = None
-            if ext == '.fif':
-                extra_params = dict(allow_maxshield='yes')
+            if ext == ".fif":
+                extra_params = dict(allow_maxshield="yes")

-            er_raw = read_raw_bids(bids_path=er_bids_path,
-                                   extra_params=extra_params)
+            er_raw = read_raw_bids(bids_path=er_bids_path, extra_params=extra_params)

-            er_meas_date = er_raw.info['meas_date']
+            er_meas_date = er_raw.info["meas_date"]
             if er_meas_date is None:  # There's nothing we can do.
                 failed_to_get_er_date_count += 1
                 continue
@@ -139,13 +157,17 @@ def _find_matched_empty_room(bids_path):
             date_tie = False

     if failed_to_get_er_date_count > 0:
-        msg = (f'Could not retrieve the empty-room measurement date from '
-               f'a total of {failed_to_get_er_date_count} recording(s).')
+        msg = (
+            f"Could not retrieve the empty-room measurement date from "
+            f"a total of {failed_to_get_er_date_count} recording(s)."
+        )
         warn(msg)

     if date_tie:
-        msg = ('Found more than one matching empty-room measurement with the '
-               'same recording date. Selecting the first match.')
+        msg = (
+            "Found more than one matching empty-room measurement with the "
+            "same recording date. Selecting the first match."
+        )
         warn(msg)

     return best_er_bids_path
@@ -302,39 +324,76 @@ class BIDSPath(object):
     functional MRI paths.
     """

-    def __init__(self, subject=None, session=None,
-                 task=None, acquisition=None, run=None, processing=None,
-                 recording=None, space=None, split=None, description=None,
-                 root=None, suffix=None, extension=None,
-                 datatype=None, check=True):
-        if all(ii is None for ii in [subject, session, task,
-                                     acquisition, run, processing,
-                                     recording, space, description,
-                                     root, suffix, extension]):
+    def __init__(
+        self,
+        subject=None,
+        session=None,
+        task=None,
+        acquisition=None,
+        run=None,
+        processing=None,
+        recording=None,
+        space=None,
+        split=None,
+        description=None,
+        root=None,
+        suffix=None,
+        extension=None,
+        datatype=None,
+        check=True,
+    ):
+        if all(
+            ii is None
+            for ii in [
+                subject,
+                session,
+                task,
+                acquisition,
+                run,
+                processing,
+                recording,
+                space,
+                description,
+                root,
+                suffix,
+                extension,
+            ]
+        ):
             raise ValueError("At least one parameter must be given.")

         self.check = check

-        self.update(subject=subject, session=session, task=task,
-                    acquisition=acquisition, run=run, processing=processing,
-                    recording=recording, space=space, split=split,
-                    description=description, root=root, datatype=datatype,
-                    suffix=suffix, extension=extension)
+        self.update(
+            subject=subject,
+            session=session,
+            task=task,
+            acquisition=acquisition,
+            run=run,
+            processing=processing,
+            recording=recording,
+            space=space,
+            split=split,
+            description=description,
+            root=root,
+            datatype=datatype,
+            suffix=suffix,
+            extension=extension,
+        )

     @property
     def entities(self):
         """Return dictionary of the BIDS entities."""
         return {
-            'subject': self.subject,
-            'session': self.session,
-            'task': self.task,
-            'acquisition': self.acquisition,
-            'run': self.run,
-            'processing': self.processing,
-            'space': self.space,
-            'recording': self.recording,
-            'split': self.split,
-            'description': self.description,
+            "subject": self.subject,
+            "session": self.session,
+            "task": self.task,
+            "acquisition": self.acquisition,
+            "run": self.run,
+            "processing": self.processing,
+            "space": self.space,
+            "recording": self.recording,
+            "split": self.split,
+            "description": self.description,
         }

     @property
@@ -342,22 +401,21 @@ def basename(self):
         """Path basename."""
         basename = []
         for key, val in self.entities.items():
-            if val is not None and key != 'datatype':
+            if val is not None and key != "datatype":
                 # convert certain keys to shorthand
                 long_to_short_entity = {
-                    val: key for key, val
-                    in ALLOWED_PATH_ENTITIES_SHORT.items()
+                    val: key for key, val in ALLOWED_PATH_ENTITIES_SHORT.items()
                 }
                 key = long_to_short_entity[key]
-                basename.append(f'{key}-{val}')
+                basename.append(f"{key}-{val}")

         if self.suffix is not None:
             if self.extension is not None:
-                basename.append(f'{self.suffix}{self.extension}')
+                basename.append(f"{self.suffix}{self.extension}")
             else:
                 basename.append(self.suffix)

-        basename = '_'.join(basename)
+        basename = "_".join(basename)
         return basename

     @property
@@ -378,11 +436,11 @@ def directory(self):
         """
         # Create the data path based on the available entities:
         # root, subject, session, and datatype
-        data_path = '' if self.root is None else self.root
+        data_path = "" if self.root is None else self.root
         if self.subject is not None:
-            data_path = op.join(data_path, f'sub-{self.subject}')
+            data_path = op.join(data_path, f"sub-{self.subject}")
         if self.session is not None:
-            data_path = op.join(data_path, f'ses-{self.session}')
+            data_path = op.join(data_path, f"ses-{self.session}")
         # datatype will allow 'meg', 'eeg', 'ieeg', 'anat'
         if self.datatype is not None:
             data_path = op.join(data_path, self.datatype)
@@ -522,10 +580,12 @@ def __repr__(self):
         """Representation in the style of `pathlib.Path`."""
         root = self.root.as_posix() if self.root is not None else None

-        return f'{self.__class__.__name__}(\n' \
-               f'root: {root}\n' \
-               f'datatype: {self.datatype}\n' \
-               f'basename: {self.basename})'
+        return (
+            f"{self.__class__.__name__}(\n"
+            f"root: {root}\n"
+            f"datatype: {self.datatype}\n"
+            f"basename: {self.basename})"
+        )

     def __fspath__(self):
         """Return the string representation for any fs functions."""
@@ -584,20 +644,19 @@ def fpath(self):

         # account for MEG data that are directory-based
         # else, all other file paths attempt to match
-        if self.suffix == 'meg' and self.extension == '.ds':
+        if self.suffix == "meg" and self.extension == ".ds":
             bids_fpath = op.join(data_path, self.basename)
-        elif self.suffix == 'meg' and self.extension == '.pdf':
-            bids_fpath = op.join(data_path,
-                                 op.splitext(self.basename)[0])
+        elif self.suffix == "meg" and self.extension == ".pdf":
+            bids_fpath = op.join(data_path, op.splitext(self.basename)[0])
         else:
             # if suffix and/or extension is missing, and root is
             # not None, then BIDSPath will infer the dataset
             # else, return the relative path with the basename
-            if (self.suffix is None or self.extension is None) and \
-                    self.root is not None:
+            if (
+                self.suffix is None or self.extension is None
+            ) and self.root is not None:
                 # get matching BIDSPaths inside the bids root
-                matching_paths = \
-                    _get_matching_bidspaths_from_filesystem(self)
+                matching_paths = _get_matching_bidspaths_from_filesystem(self)

                 # FIXME This will break
                 # FIXME e.g. with FIFF data split across multiple files.
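To make the `basename` property reworked above concrete: each non-empty entity is rendered as a `<shorthand>-<value>` pair, then the suffix and extension are appended. A minimal sketch of the resulting behavior (the entity values here are hypothetical, chosen only for illustration):

```Python
from mne_bids import BIDSPath

# Entities are joined in a fixed order; the integer `run` is zero-padded
# to "01" by the update() validation shown in the path.py hunks.
bp = BIDSPath(
    subject="01",
    session="eeg",
    task="rest",
    run=1,
    suffix="eeg",
    extension=".vhdr",
    check=False,  # skip entity validation for this standalone sketch
)
print(bp.basename)  # sub-01_ses-eeg_task-rest_run-01_eeg.vhdr
```
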
@@ -606,20 +665,19 @@ def fpath(self): if self.suffix is None or self.suffix in ALLOWED_DATATYPES: # now only use valid datatype extension if self.extension is None: - valid_exts = \ - sum(ALLOWED_DATATYPE_EXTENSIONS.values(), []) + valid_exts = sum(ALLOWED_DATATYPE_EXTENSIONS.values(), []) else: valid_exts = [self.extension] - matching_paths = [p for p in matching_paths - if _parse_ext(p)[1] in valid_exts] + matching_paths = [ + p for p in matching_paths if _parse_ext(p)[1] in valid_exts + ] - if (self.split is None and - (not matching_paths or - '_split-' in matching_paths[0])): + if self.split is None and ( + not matching_paths or "_split-" in matching_paths[0] + ): # try finding FIF split files (only first one) - this_self = self.copy().update(split='01') - matching_paths = \ - _get_matching_bidspaths_from_filesystem(this_self) + this_self = self.copy().update(split="01") + matching_paths = _get_matching_bidspaths_from_filesystem(this_self) # found no matching paths if not matching_paths: @@ -627,15 +685,17 @@ def fpath(self): # if paths still cannot be resolved, then there is an error elif len(matching_paths) > 1: matching_paths_str = "\n".join(sorted(matching_paths)) - msg = ('Found more than one matching data file for the ' - 'requested recording. While searching:\n' - f'{indent(repr(self), " ")}\n' - f'Found {len(matching_paths)} paths:\n' - f'{indent(matching_paths_str, " ")}\n' - 'Cannot proceed due to the ' - 'ambiguity. This is likely a problem with your ' - 'BIDS dataset. Please run the BIDS validator on ' - 'your data.') + msg = ( + "Found more than one matching data file for the " + "requested recording. While searching:\n" + f'{indent(repr(self), " ")}\n' + f"Found {len(matching_paths)} paths:\n" + f'{indent(matching_paths_str, " ")}\n' + "Cannot proceed due to the " + "ambiguity. This is likely a problem with your " + "BIDS dataset. Please run the BIDS validator on " + "your data." + ) raise RuntimeError(msg) else: bids_fpath = matching_paths[0] @@ -712,44 +772,45 @@ def update(self, *, check=None, **kwargs): self.check = check for key, val in kwargs.items(): - if key == 'root': - _validate_type(val, types=('path-like', None), item_name=key) + if key == "root": + _validate_type(val, types=("path-like", None), item_name=key) continue - if key == 'datatype': - if val is not None and val not in ALLOWED_DATATYPES \ - and self.check: - raise ValueError(f'datatype ({val}) is not valid. ' - f'Should be one of ' - f'{ALLOWED_DATATYPES}') + if key == "datatype": + if val is not None and val not in ALLOWED_DATATYPES and self.check: + raise ValueError( + f"datatype ({val}) is not valid. " + f"Should be one of " + f"{ALLOWED_DATATYPES}" + ) else: continue if key not in ENTITY_VALUE_TYPE: - raise ValueError(f'Key must be one of ' - f'{ALLOWED_PATH_ENTITIES}, got {key}') + raise ValueError( + f"Key must be one of " f"{ALLOWED_PATH_ENTITIES}, got {key}" + ) - if ENTITY_VALUE_TYPE[key] == 'label': - _validate_type(val, types=(None, str), - item_name=key) + if ENTITY_VALUE_TYPE[key] == "label": + _validate_type(val, types=(None, str), item_name=key) else: - assert ENTITY_VALUE_TYPE[key] == 'index' + assert ENTITY_VALUE_TYPE[key] == "index" _validate_type(val, types=(int, str, None), item_name=key) if isinstance(val, str) and not val.isdigit(): - raise ValueError(f'{key} is not an index (Got {val})') + raise ValueError(f"{key} is not an index (Got {val})") elif isinstance(val, int): - kwargs[key] = '{:02}'.format(val) + kwargs[key] = "{:02}".format(val) # ensure extension starts with a '.' 
- extension = kwargs.get('extension') - if extension is not None and not extension.startswith('.'): + extension = kwargs.get("extension") + if extension is not None and not extension.startswith("."): warn( f'extension should start with a period ".", but got: ' f'"{extension}". Prepending "." to form: ".{extension}". ' - f'This will raise an exception starting with MNE-BIDS 0.12.', - category=FutureWarning + f"This will raise an exception starting with MNE-BIDS 0.12.", + category=FutureWarning, ) - kwargs['extension'] = f'.{extension}' + kwargs["extension"] = f".{extension}" # Uncomment in 0.12, and remove above code: # # raise ValueError( @@ -761,10 +822,9 @@ def update(self, *, check=None, **kwargs): # error check entities old_kwargs = dict() for key, val in kwargs.items(): - # check if there are any characters not allowed - if val is not None and key != 'root': - if key == 'suffix' and not self.check: + if val is not None and key != "root": + if key == "suffix" and not self.check: # suffix may skip a check if check=False to allow # things like "dataset_description.json" pass @@ -772,11 +832,12 @@ def update(self, *, check=None, **kwargs): _check_key_val(key, val) # set entity value, ensuring `root` is a Path - if val is not None and key == 'root': + if val is not None and key == "root": val = Path(val).expanduser() - old_kwargs[key] = \ - getattr(self, f'{key}') if hasattr(self, f'_{key}') else None - setattr(self, f'_{key}', val) + old_kwargs[key] = ( + getattr(self, f"{key}") if hasattr(self, f"_{key}") else None + ) + setattr(self, f"_{key}", val) # Perform a check of the entities and revert changes if check fails try: @@ -811,17 +872,20 @@ def match(self, ignore_json=True, check=False): The matching paths. """ if self.root is None: - raise RuntimeError('Cannot match basenames if `root` ' - 'attribute is not set. Please set the' - 'BIDS root directory path to `root` via ' - 'BIDSPath.update().') + raise RuntimeError( + "Cannot match basenames if `root` " + "attribute is not set. Please set the" + "BIDS root directory path to `root` via " + "BIDSPath.update()." + ) - paths = _return_root_paths(self.root, datatype=self.datatype, - ignore_json=ignore_json) + paths = _return_root_paths( + self.root, datatype=self.datatype, ignore_json=ignore_json + ) - fnames = _filter_fnames(paths, suffix=self.suffix, - extension=self.extension, - **self.entities) + fnames = _filter_fnames( + paths, suffix=self.suffix, extension=self.extension, **self.entities + ) bids_paths = _fnames_to_bidspaths(fnames, self.root, check=check) return bids_paths @@ -831,15 +895,18 @@ def _check(self): self.basename # run basename to check validity of arguments # perform error check on scans - if (self.suffix == 'scans' and self.extension == '.tsv') \ - and _check_non_sub_ses_entity(self): - raise ValueError('scans.tsv file name can only contain ' - 'subject and session entities. BIDSPath ' - f'currently contains {self.entities}.') + if ( + self.suffix == "scans" and self.extension == ".tsv" + ) and _check_non_sub_ses_entity(self): + raise ValueError( + "scans.tsv file name can only contain " + "subject and session entities. BIDSPath " + f"currently contains {self.entities}." + ) # perform deeper check if user has it turned on if self.check: - if self.subject == 'emptyroom': + if self.subject == "emptyroom": _check_empty_room_basename(self) # ensure extension starts with a '.' 
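To make the `update()` semantics reformatted above concrete, here is a short sketch under assumed placeholder values; the behavior shown follows the validation logic in this hunk:

```python
# Sketch of BIDSPath.update() index and extension handling; values are
# hypothetical. Integer indices are zero-padded to strings, and an
# extension passed without a leading period currently only triggers a
# FutureWarning (it will raise starting with MNE-BIDS 0.12), so it is
# safest to pass the period explicitly.
from mne_bids import BIDSPath

bids_path = BIDSPath(subject="01", task="rest", root="/data/my_bids_dataset")
bids_path.update(run=2)
assert bids_path.run == "02"        # stored as a zero-padded string
bids_path.update(extension=".fif")  # leading "." passed explicitly
# bids_path.update(run="abc")       # would raise ValueError: not an index
```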
@@ -847,37 +914,45 @@ def _check(self): if extension is not None: # check validity of the extension if extension not in ALLOWED_FILENAME_EXTENSIONS: - raise ValueError(f'Extension {extension} is not ' - f'allowed. Use one of these extensions ' - f'{ALLOWED_FILENAME_EXTENSIONS}.') + raise ValueError( + f"Extension {extension} is not " + f"allowed. Use one of these extensions " + f"{ALLOWED_FILENAME_EXTENSIONS}." + ) # labels from space entity must come from list (appendix VIII) space = self.space if space is not None: - datatype = getattr(self, 'datatype', None) + datatype = getattr(self, "datatype", None) if datatype is None: - raise ValueError('You must define datatype if you want to ' - 'use space in your BIDSPath.') + raise ValueError( + "You must define datatype if you want to " + "use space in your BIDSPath." + ) allowed_spaces_for_dtype = ALLOWED_SPACES.get(datatype, None) if allowed_spaces_for_dtype is None: - raise ValueError(f'space entity is not valid for datatype ' - f'{self.datatype}') + raise ValueError( + f"space entity is not valid for datatype " f"{self.datatype}" + ) elif space not in allowed_spaces_for_dtype: - raise ValueError(f'space ({space}) is not valid for ' - f'datatype ({self.datatype}).\n' - f'Should be one of ' - f'{allowed_spaces_for_dtype}') + raise ValueError( + f"space ({space}) is not valid for " + f"datatype ({self.datatype}).\n" + f"Should be one of " + f"{allowed_spaces_for_dtype}" + ) else: pass # error check suffix suffix = self.suffix - if suffix is not None and \ - suffix not in ALLOWED_FILENAME_SUFFIX: - raise ValueError(f'Suffix {suffix} is not allowed. ' - f'Use one of these suffixes ' - f'{ALLOWED_FILENAME_SUFFIX}.') + if suffix is not None and suffix not in ALLOWED_FILENAME_SUFFIX: + raise ValueError( + f"Suffix {suffix} is not allowed. " + f"Use one of these suffixes " + f"{ALLOWED_FILENAME_SUFFIX}." + ) @verbose def find_empty_room(self, use_sidecar_only=False, *, verbose=None): @@ -901,49 +976,55 @@ def find_empty_room(self, use_sidecar_only=False, *, verbose=None): The path corresponding to the best-matching empty-room measurement. Returns ``None`` if none was found. """ - if self.datatype not in ('meg', None): - raise ValueError('Empty-room data is only supported for MEG ' - 'datasets') + if self.datatype not in ("meg", None): + raise ValueError("Empty-room data is only supported for MEG " "datasets") if self.root is None: - raise ValueError('The root of the "bids_path" must be set. ' - 'Please use `bids_path.update(root="")` ' - 'to set the root of the BIDS folder to read.') + raise ValueError( + 'The root of the "bids_path" must be set. ' + 'Please use `bids_path.update(root="")` ' + "to set the root of the BIDS folder to read." + ) # needed to deal with inheritance principle - sidecar_fname = self.copy().update( - datatype=None, suffix='meg' - ).find_matching_sidecar(extension='.json') - with open(sidecar_fname, 'r', encoding='utf-8') as f: + sidecar_fname = ( + self.copy() + .update(datatype=None, suffix="meg") + .find_matching_sidecar(extension=".json") + ) + with open(sidecar_fname, "r", encoding="utf-8") as f: sidecar_json = json.load(f) - if 'AssociatedEmptyRoom' in sidecar_json: - logger.info('Using "AssociatedEmptyRoom" entry from MEG sidecar ' - 'file to retrieve empty-room path.') - emptytoom_path = sidecar_json['AssociatedEmptyRoom'] + if "AssociatedEmptyRoom" in sidecar_json: + logger.info( + 'Using "AssociatedEmptyRoom" entry from MEG sidecar ' + "file to retrieve empty-room path." 
+ ) + emptytoom_path = sidecar_json["AssociatedEmptyRoom"] er_bids_path = get_bids_path_from_fname(emptytoom_path) er_bids_path.root = self.root - er_bids_path.datatype = 'meg' + er_bids_path.datatype = "meg" elif use_sidecar_only: logger.info( - 'The MEG sidecar file does not contain an ' + "The MEG sidecar file does not contain an " '"AssociatedEmptyRoom" entry. Aborting search for an ' - 'empty-room recording, as you passed use_sidecar_only=True' + "empty-room recording, as you passed use_sidecar_only=True" ) return None else: logger.info( - 'The MEG sidecar file does not contain an ' + "The MEG sidecar file does not contain an " '"AssociatedEmptyRoom" entry. Will try to find a matching ' - 'empty-room recording based on the measurement date …' + "empty-room recording based on the measurement date …" ) er_bids_path = _find_matched_empty_room(self) if er_bids_path is not None and not er_bids_path.fpath.exists(): raise FileNotFoundError( - f'Empty-room BIDS path resolved but not found:\n' - f'{er_bids_path}\n' - 'Check your BIDS dataset for completeness.') + f"Empty-room BIDS path resolved but not found:\n" + f"{er_bids_path}\n" + "Check your BIDS dataset for completeness." + ) return er_bids_path @@ -962,8 +1043,7 @@ def get_empty_room_candidates(self): """ return _find_empty_room_candidates(self) - def find_matching_sidecar(self, suffix=None, extension=None, *, - on_error='raise'): + def find_matching_sidecar(self, suffix=None, extension=None, *, on_error="raise"): """Get the matching sidecar JSON path. Parameters @@ -1005,14 +1085,19 @@ def meg_calibration_fpath(self): be found. """ if self.root is None or self.subject is None: - raise ValueError('root and subject must be set.') - if self.datatype not in (None, 'meg'): - raise ValueError('Can only find fine-calibration file for MEG ' - 'datasets.') - - path = BIDSPath(subject=self.subject, session=self.session, - acquisition='calibration', suffix='meg', - extension='.dat', datatype='meg', root=self.root).fpath + raise ValueError("root and subject must be set.") + if self.datatype not in (None, "meg"): + raise ValueError("Can only find fine-calibration file for MEG " "datasets.") + + path = BIDSPath( + subject=self.subject, + session=self.session, + acquisition="calibration", + suffix="meg", + extension=".dat", + datatype="meg", + root=self.root, + ).fpath if not path.exists(): path = None @@ -1032,13 +1117,19 @@ def meg_crosstalk_fpath(self): found. 
""" if self.root is None or self.subject is None: - raise ValueError('root and subject must be set.') - if self.datatype not in (None, 'meg'): - raise ValueError('Can only find crosstalk file for MEG datasets.') - - path = BIDSPath(subject=self.subject, session=self.session, - acquisition='crosstalk', suffix='meg', - extension='.fif', datatype='meg', root=self.root).fpath + raise ValueError("root and subject must be set.") + if self.datatype not in (None, "meg"): + raise ValueError("Can only find crosstalk file for MEG datasets.") + + path = BIDSPath( + subject=self.subject, + session=self.session, + acquisition="crosstalk", + suffix="meg", + extension=".fif", + datatype="meg", + root=self.root, + ).fpath if not path.exists(): path = None @@ -1056,8 +1147,7 @@ def _get_matching_bidspaths_from_filesystem(bids_path): basename, bids_root = bids_path.basename, bids_path.root if datatype is None: - datatype = _infer_datatype(root=bids_root, - sub=sub, ses=ses) + datatype = _infer_datatype(root=bids_root, sub=sub, ses=ses) data_dir = BIDSPath( subject=sub, session=ses, datatype=datatype, root=bids_root @@ -1065,44 +1155,47 @@ def _get_matching_bidspaths_from_filesystem(bids_path): # For BTI data, just return the directory with a '.pdf' extension # to facilitate reading in mne-bids - bti_dir = op.join(data_dir, f'{basename}') + bti_dir = op.join(data_dir, f"{basename}") if op.isdir(bti_dir): - logger.info(f'Assuming BTi data in {bti_dir}') - matching_paths = [f'{bti_dir}.pdf'] + logger.info(f"Assuming BTi data in {bti_dir}") + matching_paths = [f"{bti_dir}.pdf"] # otherwise, search for valid file paths else: search_str = bids_root # parse down the BIDS directory structure if sub is not None: - search_str = op.join(search_str, f'sub-{sub}') + search_str = op.join(search_str, f"sub-{sub}") if ses is not None: - search_str = op.join(search_str, f'ses-{ses}') + search_str = op.join(search_str, f"ses-{ses}") if datatype is not None: search_str = op.join(search_str, datatype) else: - search_str = op.join(search_str, '**') - search_str = op.join(search_str, f'{basename}*') + search_str = op.join(search_str, "**") + search_str = op.join(search_str, f"{basename}*") # Find all matching files in all supported formats. valid_exts = ALLOWED_FILENAME_EXTENSIONS matching_paths = glob.glob(search_str) - matching_paths = [p for p in matching_paths - if _parse_ext(p)[1] in valid_exts] + matching_paths = [p for p in matching_paths if _parse_ext(p)[1] in valid_exts] return matching_paths def _check_non_sub_ses_entity(bids_path): """Check existence of non subject/session entities in BIDSPath.""" - if bids_path.task or bids_path.acquisition or \ - bids_path.run or bids_path.space or \ - bids_path.recording or bids_path.split or \ - bids_path.processing: + if ( + bids_path.task + or bids_path.acquisition + or bids_path.run + or bids_path.space + or bids_path.recording + or bids_path.split + or bids_path.processing + ): return True return False -def _print_lines_with_entry(file, entry, folder, is_tsv, line_numbers, - outfile): +def _print_lines_with_entry(file, entry, folder, is_tsv, line_numbers, outfile): """Print the lines that contain the entry. Parameters @@ -1122,39 +1215,43 @@ def _print_lines_with_entry(file, entry, folder, is_tsv, line_numbers, prints to the console, else a string is printed to. 
""" entry_lines = list() - with open(file, 'r', encoding='utf-8-sig') as fid: + with open(file, "r", encoding="utf-8-sig") as fid: if is_tsv: # format tsv files nicely header = _truncate_tsv_line(fid.readline()) if line_numbers: - header = f'1 {header}' + header = f"1 {header}" header = header.rstrip() for i, line in enumerate(fid): if entry in line: if is_tsv: line = _truncate_tsv_line(line) if line_numbers: - line = str(i + 2) + (5 - len(str(i + 2))) * ' ' + line + line = str(i + 2) + (5 - len(str(i + 2))) * " " + line entry_lines.append(line.rstrip()) if entry_lines: print(op.relpath(file, folder), file=outfile) if is_tsv: - print(f' {header}', file=outfile) + print(f" {header}", file=outfile) if len(entry_lines) > 10: entry_lines = entry_lines[:10] - entry_lines.append('...') + entry_lines.append("...") for line in entry_lines: - print(f' {line}', file=outfile) + print(f" {line}", file=outfile) def _truncate_tsv_line(line, lim=10): """Truncate a line to the specified number of characters.""" - return ''.join([str(val) + (lim - len(val)) * ' ' if - len(val) < lim else f'{val[:lim - 1]} ' - for val in line.split('\t')]) + return "".join( + [ + str(val) + (lim - len(val)) * " " if len(val) < lim else f"{val[:lim - 1]} " + for val in line.split("\t") + ] + ) -def search_folder_for_text(entry, folder, extensions=('.json', '.tsv'), - line_numbers=True, return_str=False): +def search_folder_for_text( + entry, folder, extensions=(".json", ".tsv"), line_numbers=True, return_str=False +): """Find any particular string entry in the text files of a folder. .. note:: This is a search function like `grep @@ -1182,21 +1279,22 @@ def search_folder_for_text(entry, folder, extensions=('.json', '.tsv'), If `return_str` is ``True``, the fields are returned as a string. Else, ``None`` is returned and the fields are printed. """ - _validate_type(entry, str, 'entry') + _validate_type(entry, str, "entry") if not op.isdir(folder): - raise ValueError('{folder} is not a directory') + raise ValueError("{folder} is not a directory") folder = Path(folder) # ensure pathlib.Path extensions = (extensions,) if isinstance(extensions, str) else extensions _validate_type(extensions, (tuple, list)) - _validate_type(line_numbers, bool, 'line_numbers') - _validate_type(return_str, bool, 'return_str') + _validate_type(line_numbers, bool, "line_numbers") + _validate_type(return_str, bool, "return_str") outfile = StringIO() if return_str else None for extension in extensions: - for file in folder.rglob('*' + extension): - _print_lines_with_entry(file, entry, folder, extension == '.tsv', - line_numbers, outfile) + for file in folder.rglob("*" + extension): + _print_lines_with_entry( + file, entry, folder, extension == ".tsv", line_numbers, outfile + ) if outfile is not None: return outfile.getvalue() @@ -1204,11 +1302,11 @@ def search_folder_for_text(entry, folder, extensions=('.json', '.tsv'), def _check_max_depth(max_depth): """Check that max depth is a proper input.""" - msg = '`max_depth` must be a positive integer or None' + msg = "`max_depth` must be a positive integer or None" if not isinstance(max_depth, (int, type(None))): raise ValueError(msg) if max_depth is None: - max_depth = float('inf') + max_depth = float("inf") if max_depth < 0: raise ValueError(msg) # Use max_depth same as the -L param in the unix `tree` command @@ -1237,15 +1335,11 @@ def print_dir_tree(folder, max_depth=None, return_str=False): string. Else, ``None`` is returned and the directory tree is printed. 
""" folder = _check_fname( - fname=folder, - overwrite='read', - must_exist=True, - name='Folder', - need_dir=True + fname=folder, overwrite="read", must_exist=True, name="Folder", need_dir=True ) max_depth = _check_max_depth(max_depth) - _validate_type(return_str, bool, 'return_str') + _validate_type(return_str, bool, "return_str") outfile = StringIO() if return_str else None # Base length of a tree branch, to normalize each tree's start to 0 @@ -1267,17 +1361,19 @@ def print_dir_tree(folder, max_depth=None, return_str=False): # Only print if this is up to the depth we asked if branchlen <= max_depth: if branchlen <= 1: - print('|{}'.format(op.basename(root) + os.sep), file=outfile) + print("|{}".format(op.basename(root) + os.sep), file=outfile) else: - print('|{} {}'.format((branchlen - 1) * '---', - op.basename(root) + os.sep), - file=outfile) + print( + "|{} {}".format( + (branchlen - 1) * "---", op.basename(root) + os.sep + ), + file=outfile, + ) # Only print files if we are NOT yet up to max_depth or beyond if branchlen < max_depth: for file in files: - print('|{} {}'.format(branchlen * '---', file), - file=outfile) + print("|{} {}".format(branchlen * "---", file), file=outfile) if outfile is not None: return outfile.getvalue() @@ -1288,13 +1384,15 @@ def _parse_ext(raw_fname): raw_fname = str(raw_fname) fname, ext = os.path.splitext(raw_fname) # BTi data is the only file format that does not have a file extension - if ext == '' or 'c,rf' in fname: - logger.info('Found no extension for raw file, assuming "BTi" format ' - 'and appending extension .pdf') - ext = '.pdf' + if ext == "" or "c,rf" in fname: + logger.info( + 'Found no extension for raw file, assuming "BTi" format ' + "and appending extension .pdf" + ) + ext = ".pdf" # If ending on .gz, check whether it is an .nii.gz file - elif ext == '.gz' and raw_fname.endswith('.nii.gz'): - ext = '.nii.gz' + elif ext == ".gz" and raw_fname.endswith(".nii.gz"): + ext = ".nii.gz" fname = fname[:-4] # cut off the .nii return fname, ext @@ -1303,10 +1401,10 @@ def _infer_datatype_from_path(fname: Path): # get the parent if fname.exists(): datatype = fname.parent.name - if any([datatype.startswith(entity) for entity in ['sub', 'ses']]): + if any([datatype.startswith(entity) for entity in ["sub", "ses"]]): datatype = None - elif fname.stem.split('_')[-1] in ('meg', 'eeg', 'ieeg'): - datatype = fname.stem.split('_')[-1] + elif fname.stem.split("_")[-1] in ("meg", "eeg", "ieeg"): + datatype = fname.stem.split("_")[-1] else: datatype = None @@ -1338,32 +1436,32 @@ def get_bids_path_from_fname(fname, check=True, verbose=None): entities = get_entities_from_fname(fname) # parse suffix and extension - last_entity = fname.split('-')[-1] - if '_' in last_entity: - suffix = last_entity.split('_')[-1] + last_entity = fname.split("-")[-1] + if "_" in last_entity: + suffix = last_entity.split("_")[-1] suffix, extension = _get_bids_suffix_and_ext(suffix) else: suffix = None extension = Path(fname).suffix # already starts with a period - if extension == '': + if extension == "": extension = None if extension is not None: - assert extension.startswith('.') # better safe than sorry + assert extension.startswith(".") # better safe than sorry datatype = _infer_datatype_from_path(fpath) # find root and datatype if it exists - if fpath.parent == '': + if fpath.parent == "": root = None else: root_level = 0 # determine root if it's there - if entities['subject'] is not None: + if entities["subject"] is not None: root_level += 1 - if entities['session'] is not None: 
+ if entities["session"] is not None: root_level += 1 - if suffix != 'scans': + if suffix != "scans": root_level += 1 if root_level: @@ -1371,15 +1469,21 @@ def get_bids_path_from_fname(fname, check=True, verbose=None): for _ in range(root_level): root = root.parent - bids_path = BIDSPath(root=root, datatype=datatype, suffix=suffix, - extension=extension, **entities, check=check) + bids_path = BIDSPath( + root=root, + datatype=datatype, + suffix=suffix, + extension=extension, + **entities, + check=check, + ) if verbose: - logger.info(f'From {fpath}, formed a BIDSPath: {bids_path}.') + logger.info(f"From {fpath}, formed a BIDSPath: {bids_path}.") return bids_path @verbose -def get_entities_from_fname(fname, on_error='raise', verbose=None): +def get_entities_from_fname(fname, on_error="raise", verbose=None): """Retrieve a dictionary of BIDS entities from a filename. Entities not present in ``fname`` will be assigned the value of ``None``. @@ -1420,9 +1524,11 @@ def get_entities_from_fname(fname, on_error='raise', verbose=None): 'split': None, \ 'description': None} """ - if on_error not in ('warn', 'raise', 'ignore'): - raise ValueError(f'Acceptable values for on_error are: warn, raise, ' - f'ignore, but got: {on_error}') + if on_error not in ("warn", "raise", "ignore"): + raise ValueError( + f"Acceptable values for on_error are: warn, raise, " + f"ignore, but got: {on_error}" + ) fname = str(fname) # to accept also BIDSPath or Path instances @@ -1435,19 +1541,20 @@ def get_entities_from_fname(fname, on_error='raise', verbose=None): for match in re.finditer(param_regex, op.basename(fname)): key, value = match.groups() - if on_error in ('raise', 'warn'): + if on_error in ("raise", "warn"): if key not in fname_vals: - msg = (f'Unexpected entity "{key}" found in ' - f'filename "{fname}"') - if on_error == 'raise': + msg = f'Unexpected entity "{key}" found in ' f'filename "{fname}"' + if on_error == "raise": raise KeyError(msg) - elif on_error == 'warn': + elif on_error == "warn": warn(msg) continue if fname_vals.index(key) < idx_key: - msg = (f'Entities in filename not ordered correctly.' - f' "{key}" should have occurred earlier in the ' - f'filename "{fname}"') + msg = ( + f"Entities in filename not ordered correctly." + f' "{key}" should have occurred earlier in the ' + f'filename "{fname}"' + ) raise ValueError(msg) idx_key = fname_vals.index(key) @@ -1456,8 +1563,7 @@ def get_entities_from_fname(fname, on_error='raise', verbose=None): return params -def _find_matching_sidecar(bids_path, suffix=None, - extension=None, on_error='raise'): +def _find_matching_sidecar(bids_path, suffix=None, extension=None, on_error="raise"): """Try to find a sidecar file with a given suffix for a data file. Parameters @@ -1482,14 +1588,16 @@ def _find_matching_sidecar(bids_path, suffix=None, and ``on_error`` was set to ``'warn'`` or ``'ignore'``. 
""" - if on_error not in ('warn', 'raise', 'ignore'): - raise ValueError(f'Acceptable values for on_error are: warn, raise, ' - f'ignore, but got: {on_error}') + if on_error not in ("warn", "raise", "ignore"): + raise ValueError( + f"Acceptable values for on_error are: warn, raise, " + f"ignore, but got: {on_error}" + ) bids_root = bids_path.root # search suffix is BIDS-suffix and extension - search_suffix = '' + search_suffix = "" if suffix is None and bids_path.suffix is not None: search_suffix = bids_path.suffix elif suffix is not None: @@ -1512,26 +1620,23 @@ def _find_matching_sidecar(bids_path, suffix=None, # We only use subject and session as identifier, because all other # parameters are potentially not binding for metadata sidecar files - search_str_filename = f'sub-{bids_path.subject}' + search_str_filename = f"sub-{bids_path.subject}" if bids_path.session is not None: - search_str_filename += f'_ses-{bids_path.session}' + search_str_filename += f"_ses-{bids_path.session}" # Find all potential sidecar files, doing a recursive glob # from bids_root/sub-*, potentially taking into account the data type - search_dir = Path(bids_root) / f'sub-{bids_path.subject}' + search_dir = Path(bids_root) / f"sub-{bids_path.subject}" # ** -> don't forget about potentially present session directories if bids_path.datatype is None: - search_dir = search_dir / '**' + search_dir = search_dir / "**" else: - search_dir = search_dir / '**' / bids_path.datatype + search_dir = search_dir / "**" / bids_path.datatype - search_str_complete = str( - search_dir / f'{search_str_filename}*{search_suffix}' - ) + search_str_complete = str(search_dir / f"{search_str_filename}*{search_suffix}") candidate_list = glob.glob(search_str_complete, recursive=True) - best_candidates = _find_best_candidates(bids_path.entities, - candidate_list) + best_candidates = _find_best_candidates(bids_path.entities, candidate_list) if len(best_candidates) == 1: # Success return Path(best_candidates[0]) @@ -1540,18 +1645,21 @@ def _find_matching_sidecar(bids_path, suffix=None, # If this was expected, simply return None, otherwise, raise an exception. msg = None if len(best_candidates) == 0: - msg = (f'Did not find any {search_suffix} ' - f'associated with {bids_path.basename}.') + msg = ( + f"Did not find any {search_suffix} " + f"associated with {bids_path.basename}." + ) elif len(best_candidates) > 1: # More than one candidates were tied for best match - msg = (f'Expected to find a single {search_suffix} file ' - f'associated with {bids_path.basename}, ' - f'but found {len(candidate_list)}:\n\n' + - "\n".join(candidate_list)) + msg = ( + f"Expected to find a single {search_suffix} file " + f"associated with {bids_path.basename}, " + f"but found {len(candidate_list)}:\n\n" + "\n".join(candidate_list) + ) msg += f'\n\nThe search_str was "{search_str_complete}"' - if on_error == 'raise': + if on_error == "raise": raise RuntimeError(msg) - elif on_error == 'warn': + elif on_error == "warn": warn(msg) return None @@ -1562,12 +1670,12 @@ def _get_bids_suffix_and_ext(str_suffix): # no matter what the suffix is, suffix and extension are last suffix = str_suffix ext = None - if '.' in str_suffix: + if "." in str_suffix: # handle case of multiple '.' 
in extension - split_str = str_suffix.split('.') + split_str = str_suffix.split(".") suffix = split_str[0] - ext = '.'.join(split_str[1:]) - ext = f'.{ext}' # prepend period + ext = ".".join(split_str[1:]) + ext = f".{ext}" # prepend period return suffix, ext @@ -1591,8 +1699,7 @@ def get_datatypes(root, verbose=None): # Take all possible data types from "entity" table # (Appendix in BIDS spec) # https://bids-specification.readthedocs.io/en/latest/appendices/entity-table.html # noqa - datatype_list = ('anat', 'func', 'dwi', 'fmap', 'beh', - 'meg', 'eeg', 'ieeg', 'nirs') + datatype_list = ("anat", "func", "dwi", "fmap", "beh", "meg", "eeg", "ieeg", "nirs") datatypes = list() for root, dirs, files in os.walk(root): for dir in dirs: @@ -1603,14 +1710,25 @@ def get_datatypes(root, verbose=None): @verbose -def get_entity_vals(root, entity_key, *, ignore_subjects='emptyroom', - ignore_sessions=None, ignore_tasks=None, ignore_runs=None, - ignore_processings=None, ignore_spaces=None, - ignore_acquisitions=None, ignore_splits=None, - ignore_descriptions=None, ignore_modalities=None, - ignore_datatypes=None, - ignore_dirs=('derivatives', 'sourcedata'), with_key=False, - verbose=None): +def get_entity_vals( + root, + entity_key, + *, + ignore_subjects="emptyroom", + ignore_sessions=None, + ignore_tasks=None, + ignore_runs=None, + ignore_processings=None, + ignore_spaces=None, + ignore_acquisitions=None, + ignore_splits=None, + ignore_descriptions=None, + ignore_modalities=None, + ignore_datatypes=None, + ignore_dirs=("derivatives", "sourcedata"), + with_key=False, + verbose=None, +): """Get list of values associated with an `entity_key` in a BIDS dataset. BIDS file names are organized by key-value pairs called "entities" [1]_. @@ -1701,22 +1819,43 @@ def get_entity_vals(root, entity_key, *, ignore_subjects='emptyroom', """ root = _check_fname( fname=root, - overwrite='read', + overwrite="read", must_exist=True, need_dir=True, - name='Root directory' + name="Root directory", ) root = Path(root).expanduser() - entities = ('subject', 'task', 'session', 'run', 'processing', 'space', - 'acquisition', 'split', 'description', 'suffix') - entities_abbr = ('sub', 'task', 'ses', 'run', 'proc', 'space', 'acq', - 'split', 'desc', 'suffix') + entities = ( + "subject", + "task", + "session", + "run", + "processing", + "space", + "acquisition", + "split", + "description", + "suffix", + ) + entities_abbr = ( + "sub", + "task", + "ses", + "run", + "proc", + "space", + "acq", + "split", + "desc", + "suffix", + ) entity_long_abbr_map = dict(zip(entities, entities_abbr)) if entity_key not in entities: - raise ValueError(f'`key` must be one of: {", ".join(entities)}. ' - f'Got: {entity_key}') + raise ValueError( + f'`key` must be one of: {", ".join(entities)}. 
' f"Got: {entity_key}" + ) ignore_subjects = _ensure_tuple(ignore_subjects) ignore_sessions = _ensure_tuple(ignore_sessions) @@ -1731,61 +1870,65 @@ def get_entity_vals(root, entity_key, *, ignore_subjects='emptyroom', ignore_dirs = _ensure_tuple(ignore_dirs) existing_ignore_dirs = [ - root / d for d in ignore_dirs - if (root / d).exists() and (root / d).is_dir() + root / d for d in ignore_dirs if (root / d).exists() and (root / d).is_dir() ] ignore_dirs = _ensure_tuple(existing_ignore_dirs) - p = re.compile(r'{}-(.*?)_'.format(entity_long_abbr_map[entity_key])) + p = re.compile(r"{}-(.*?)_".format(entity_long_abbr_map[entity_key])) values = list() - filenames = root.glob(f'**/*{entity_long_abbr_map[entity_key]}-*_*') + filenames = root.glob(f"**/*{entity_long_abbr_map[entity_key]}-*_*") for filename in filenames: # Skip ignored directories # XXX In Python 3.9, we can use Path.is_relative_to() here - if any([ - str(filename).startswith(str(ignore_dir)) - for ignore_dir in ignore_dirs - ]): + if any( + [str(filename).startswith(str(ignore_dir)) for ignore_dir in ignore_dirs] + ): continue if ignore_datatypes and filename.parent.name in ignore_datatypes: continue - if ignore_subjects and any([filename.stem.startswith(f'sub-{s}_') - for s in ignore_subjects]): + if ignore_subjects and any( + [filename.stem.startswith(f"sub-{s}_") for s in ignore_subjects] + ): continue - if ignore_sessions and any([f'_ses-{s}_' in filename.stem - for s in ignore_sessions]): + if ignore_sessions and any( + [f"_ses-{s}_" in filename.stem for s in ignore_sessions] + ): continue - if ignore_tasks and any([f'_task-{t}_' in filename.stem - for t in ignore_tasks]): + if ignore_tasks and any([f"_task-{t}_" in filename.stem for t in ignore_tasks]): continue - if ignore_runs and any([f'_run-{r}_' in filename.stem - for r in ignore_runs]): + if ignore_runs and any([f"_run-{r}_" in filename.stem for r in ignore_runs]): continue - if ignore_processings and any([f'_proc-{p}_' in filename.stem - for p in ignore_processings]): + if ignore_processings and any( + [f"_proc-{p}_" in filename.stem for p in ignore_processings] + ): continue - if ignore_spaces and any([f'_space-{s}_' in filename.stem - for s in ignore_spaces]): + if ignore_spaces and any( + [f"_space-{s}_" in filename.stem for s in ignore_spaces] + ): continue - if ignore_acquisitions and any([f'_acq-{a}_' in filename.stem - for a in ignore_acquisitions]): + if ignore_acquisitions and any( + [f"_acq-{a}_" in filename.stem for a in ignore_acquisitions] + ): continue - if ignore_splits and any([f'_split-{s}_' in filename.stem - for s in ignore_splits]): + if ignore_splits and any( + [f"_split-{s}_" in filename.stem for s in ignore_splits] + ): continue - if ignore_descriptions and any([f'_desc-{d}_' in filename.stem - for d in ignore_descriptions]): + if ignore_descriptions and any( + [f"_desc-{d}_" in filename.stem for d in ignore_descriptions] + ): continue - if ignore_modalities and any([f'_{k}' in filename.stem - for k in ignore_modalities]): + if ignore_modalities and any( + [f"_{k}" in filename.stem for k in ignore_modalities] + ): continue match = p.search(filename.stem) value = match.group(1) if with_key: - value = f'{entity_long_abbr_map[entity_key]}-{value}' + value = f"{entity_long_abbr_map[entity_key]}-{value}" if value not in values: values.append(value) return sorted(values) @@ -1801,11 +1944,11 @@ def _mkdir_p(path, overwrite=False): """ if overwrite and op.isdir(path): sh.rmtree(path) - logger.info(f'Clearing path: {path}') + logger.info(f"Clearing 
path: {path}") os.makedirs(path, exist_ok=True) if not op.isdir(path): - logger.info(f'Creating folder: {path}') + logger.info(f"Creating folder: {path}") def _find_best_candidates(params, candidate_list): @@ -1858,9 +2001,9 @@ def _find_best_candidates(params, candidate_list): def _get_datatypes_for_sub(*, root, sub, ses=None): """Retrieve data modalities for a specific subject and session.""" - subject_dir = op.join(root, f'sub-{sub}') + subject_dir = op.join(root, f"sub-{sub}") if ses is not None: - subject_dir = op.join(subject_dir, f'ses-{ses}') + subject_dir = op.join(subject_dir, f"ses-{ses}") # TODO We do this to ensure we don't accidentally pick up any "spurious" # TODO sub-directories. But is that really necessary with valid BIDS data? @@ -1874,18 +2017,19 @@ def _infer_datatype(*, root, sub, ses): # Check which suffix is available for this particular # subject & session. If we get no or multiple hits, throw an error. - modalities = _get_datatypes_for_sub(root=root, sub=sub, - ses=ses) + modalities = _get_datatypes_for_sub(root=root, sub=sub, ses=ses) # We only want to handle electrophysiological data here. - allowed_recording_modalities = ['meg', 'eeg', 'ieeg'] + allowed_recording_modalities = ["meg", "eeg", "ieeg"] modalities = list(set(modalities) & set(allowed_recording_modalities)) if not modalities: - raise ValueError('No electrophysiological data found.') + raise ValueError("No electrophysiological data found.") elif len(modalities) >= 2: - msg = (f'Found data of more than one recording datatype. Please ' - f'pass the `suffix` parameter to specify which data to load. ' - f'Found the following modalitiess: {modalities}') + msg = ( + f"Found data of more than one recording datatype. Please " + f"pass the `suffix` parameter to specify which data to load. " + f"Found the following modalitiess: {modalities}" + ) raise RuntimeError(msg) assert len(modalities) == 1 @@ -1895,16 +2039,30 @@ def _infer_datatype(*, root, sub, ses): def _path_to_str(var): """Make sure var is a string or Path, return string representation.""" if not isinstance(var, (Path, str)): - raise ValueError(f"All path parameters must be either strings or " - f"pathlib.Path objects. Found type {type(var)}.") + raise ValueError( + f"All path parameters must be either strings or " + f"pathlib.Path objects. Found type {type(var)}." + ) else: return str(var) -def _filter_fnames(fnames, *, subject=None, session=None, task=None, - acquisition=None, run=None, processing=None, recording=None, - space=None, split=None, description=None, suffix=None, - extension=None): +def _filter_fnames( + fnames, + *, + subject=None, + session=None, + task=None, + acquisition=None, + run=None, + processing=None, + recording=None, + space=None, + split=None, + description=None, + suffix=None, + extension=None, +): """Filter a list of BIDS filenames / paths based on BIDS entity values. Input can be str or list of str. @@ -1931,35 +2089,40 @@ def _filter_fnames(fnames, *, subject=None, session=None, task=None, suffix = _ensure_tuple(suffix) extension = _ensure_tuple(extension) - leading_path_str = r'.*\/?' 
# nothing or something ending with a `/` - sub_str = (r'sub-(' + '|'.join(subject) + ')' - if subject else r'sub-([^_]+)') - ses_str = (r'_ses-(' + '|'.join(session) + ')' - if session else r'(|_ses-([^_]+))') - task_str = (r'_task-(' + '|'.join(task) + ')' - if task else r'(|_task-([^_]+))') - acq_str = (r'_acq-(' + '|'.join(acquisition) + ')' - if acquisition else r'(|_acq-([^_]+))') - run_str = (r'_run-(' + '|'.join(run) + ')' - if run else r'(|_run-([^_]+))') - proc_str = (r'_proc-(' + '|'.join(processing) + ')' - if processing else r'(|_proc-([^_]+))') - space_str = (r'_space-(' + '|'.join(space) + ')' - if space else r'(|_space-([^_]+))') - rec_str = (r'_rec-(' + '|'.join(recording) + ')' - if recording else r'(|_rec-([^_]+))') - split_str = (r'_split-(' + '|'.join(split) + ')' - if split else r'(|_split-([^_]+))') - desc_str = (r'_desc-(' + '|'.join(description) + ')' - if description else r'(|_desc-([^_]+))') - suffix_str = (r'_(' + '|'.join(suffix) + ')' if suffix - else r'_([^_]+)') - ext_str = r'(' + '|'.join(extension) + ')' if extension else r'.([^_]+)' + leading_path_str = r".*\/?" # nothing or something ending with a `/` + sub_str = r"sub-(" + "|".join(subject) + ")" if subject else r"sub-([^_]+)" + ses_str = r"_ses-(" + "|".join(session) + ")" if session else r"(|_ses-([^_]+))" + task_str = r"_task-(" + "|".join(task) + ")" if task else r"(|_task-([^_]+))" + acq_str = ( + r"_acq-(" + "|".join(acquisition) + ")" if acquisition else r"(|_acq-([^_]+))" + ) + run_str = r"_run-(" + "|".join(run) + ")" if run else r"(|_run-([^_]+))" + proc_str = ( + r"_proc-(" + "|".join(processing) + ")" if processing else r"(|_proc-([^_]+))" + ) + space_str = r"_space-(" + "|".join(space) + ")" if space else r"(|_space-([^_]+))" + rec_str = r"_rec-(" + "|".join(recording) + ")" if recording else r"(|_rec-([^_]+))" + split_str = r"_split-(" + "|".join(split) + ")" if split else r"(|_split-([^_]+))" + desc_str = ( + r"_desc-(" + "|".join(description) + ")" if description else r"(|_desc-([^_]+))" + ) + suffix_str = r"_(" + "|".join(suffix) + ")" if suffix else r"_([^_]+)" + ext_str = r"(" + "|".join(extension) + ")" if extension else r".([^_]+)" regexp = ( - leading_path_str + - sub_str + ses_str + task_str + acq_str + run_str + proc_str + - space_str + rec_str + split_str + desc_str + suffix_str + ext_str + leading_path_str + + sub_str + + ses_str + + task_str + + acq_str + + run_str + + proc_str + + space_str + + rec_str + + split_str + + desc_str + + suffix_str + + ext_str ) # Convert to str so we can apply the regexp ... @@ -1973,11 +2136,23 @@ def _filter_fnames(fnames, *, subject=None, session=None, task=None, return fnames_filtered -def find_matching_paths(root, subjects=None, sessions=None, tasks=None, - acquisitions=None, runs=None, processings=None, - recordings=None, spaces=None, splits=None, - descriptions=None, suffixes=None, extensions=None, - datatypes=None, check=False): +def find_matching_paths( + root, + subjects=None, + sessions=None, + tasks=None, + acquisitions=None, + runs=None, + processings=None, + recordings=None, + spaces=None, + splits=None, + descriptions=None, + suffixes=None, + extensions=None, + datatypes=None, + check=False, +): """Get list of all matching paths for all matching entity values. Input can be str or list of str. None matches all found values. 
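The entity regexes assembled above power both `BIDSPath.match()` and `find_matching_paths()`. A hypothetical query might look like this (the root and entity values are placeholders, not taken from this diff):

```python
# Sketch of filtering a BIDS tree by entity values; all values are
# illustrative. Each filter accepts a str or list of str, and None
# matches every value found on disk.
from mne_bids import find_matching_paths

matches = find_matching_paths(
    root="/data/my_bids_dataset",
    tasks="rest",
    extensions=[".fif", ".vhdr"],
)
for bids_path in matches:
    print(bids_path.basename)
```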
@@ -2045,19 +2220,21 @@ def find_matching_paths(root, subjects=None, sessions=None, tasks=None, """ fpaths = _return_root_paths(root, datatype=datatypes, ignore_json=False) - fpaths_filtered = _filter_fnames(fpaths, - subject=subjects, - session=sessions, - task=tasks, - acquisition=acquisitions, - run=runs, - processing=processings, - recording=recordings, - space=spaces, - split=splits, - description=descriptions, - suffix=suffixes, - extension=extensions) + fpaths_filtered = _filter_fnames( + fpaths, + subject=subjects, + session=sessions, + task=tasks, + acquisition=acquisitions, + run=runs, + processing=processings, + recording=recordings, + space=spaces, + split=splits, + description=descriptions, + suffix=suffixes, + extension=extensions, + ) bids_paths = _fnames_to_bidspaths(fpaths_filtered, root, check=check) return bids_paths @@ -2080,14 +2257,13 @@ def _return_root_paths(root, datatype=None, ignore_json=True): datatype = _ensure_tuple(datatype) search_str = f'*/{"|".join(datatype)}/*' else: - search_str = '*.*' + search_str = "*.*" paths = root.rglob(search_str) # Only keep files (not directories), and omit the JSON sidecars # if ignore_json is True. if ignore_json: - paths = [p for p in paths - if p.is_file() and p.suffix != '.json'] + paths = [p for p in paths if p.is_file() and p.suffix != ".json"] else: paths = [p for p in paths if p.is_file()] diff --git a/mne_bids/pick.py b/mne_bids/pick.py index 695437b80..c9b4baad6 100644 --- a/mne_bids/pick.py +++ b/mne_bids/pick.py @@ -15,40 +15,52 @@ def get_coil_types(): corresponding values in the info['chs'][idx]['kind'] """ - return dict(meggradaxial=(FIFF.FIFFV_COIL_KIT_GRAD, - FIFF.FIFFV_COIL_CTF_GRAD, - # Support for gradient-compensated data: - int(FIFF.FIFFV_COIL_CTF_GRAD | (3 << 16)), - int(FIFF.FIFFV_COIL_CTF_GRAD | (2 << 16)), - FIFF.FIFFV_COIL_AXIAL_GRAD_5CM, - FIFF.FIFFV_COIL_BABY_GRAD), - megrefgradaxial=(FIFF.FIFFV_COIL_CTF_REF_GRAD, - FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, - FIFF.FIFFV_COIL_MAGNES_REF_GRAD, - FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD), - meggradplanar=(FIFF.FIFFV_COIL_VV_PLANAR_T1, - FIFF.FIFFV_COIL_VV_PLANAR_T2, - FIFF.FIFFV_COIL_VV_PLANAR_T3), - megmag=(FIFF.FIFFV_COIL_POINT_MAGNETOMETER, - FIFF.FIFFV_COIL_VV_MAG_W, - FIFF.FIFFV_COIL_VV_MAG_T1, - FIFF.FIFFV_COIL_VV_MAG_T2, - FIFF.FIFFV_COIL_VV_MAG_T3, - FIFF.FIFFV_COIL_NM_122, - FIFF.FIFFV_COIL_MAGNES_MAG, - FIFF.FIFFV_COIL_BABY_MAG), - megrefmag=(FIFF.FIFFV_COIL_KIT_REF_MAG, - FIFF.FIFFV_COIL_CTF_REF_MAG, - FIFF.FIFFV_COIL_MAGNES_REF_MAG, - FIFF.FIFFV_COIL_BABY_REF_MAG, - FIFF.FIFFV_COIL_BABY_REF_MAG2, - FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, - FIFF.FIFFV_COIL_MAGNES_REF_MAG), - eeg=(FIFF.FIFFV_COIL_EEG,), - misc=(FIFF.FIFFV_COIL_NONE,)) + return dict( + meggradaxial=( + FIFF.FIFFV_COIL_KIT_GRAD, + FIFF.FIFFV_COIL_CTF_GRAD, + # Support for gradient-compensated data: + int(FIFF.FIFFV_COIL_CTF_GRAD | (3 << 16)), + int(FIFF.FIFFV_COIL_CTF_GRAD | (2 << 16)), + FIFF.FIFFV_COIL_AXIAL_GRAD_5CM, + FIFF.FIFFV_COIL_BABY_GRAD, + ), + megrefgradaxial=( + FIFF.FIFFV_COIL_CTF_REF_GRAD, + FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, + FIFF.FIFFV_COIL_MAGNES_REF_GRAD, + FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD, + ), + meggradplanar=( + FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_VV_PLANAR_T2, + FIFF.FIFFV_COIL_VV_PLANAR_T3, + ), + megmag=( + FIFF.FIFFV_COIL_POINT_MAGNETOMETER, + FIFF.FIFFV_COIL_VV_MAG_W, + FIFF.FIFFV_COIL_VV_MAG_T1, + FIFF.FIFFV_COIL_VV_MAG_T2, + FIFF.FIFFV_COIL_VV_MAG_T3, + FIFF.FIFFV_COIL_NM_122, + FIFF.FIFFV_COIL_MAGNES_MAG, + FIFF.FIFFV_COIL_BABY_MAG, + 
), + megrefmag=( + FIFF.FIFFV_COIL_KIT_REF_MAG, + FIFF.FIFFV_COIL_CTF_REF_MAG, + FIFF.FIFFV_COIL_MAGNES_REF_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG2, + FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, + FIFF.FIFFV_COIL_MAGNES_REF_MAG, + ), + eeg=(FIFF.FIFFV_COIL_EEG,), + misc=(FIFF.FIFFV_COIL_NONE,), + ) -def coil_type(info, idx, ch_type='n/a'): +def coil_type(info, idx, ch_type="n/a"): """Get coil type. Parameters @@ -68,8 +80,8 @@ def coil_type(info, idx, ch_type='n/a'): Type of coil """ - ch = info['chs'][idx] + ch = info["chs"][idx] for key, values in get_coil_types().items(): - if ch['coil_type'] in values: + if ch["coil_type"] in values: return key return ch_type diff --git a/mne_bids/read.py b/mne_bids/read.py index 56f2922bc..d20563c51 100644 --- a/mne_bids/read.py +++ b/mne_bids/read.py @@ -24,54 +24,73 @@ from mne_bids.dig import _read_dig_bids from mne_bids.tsv_handler import _from_tsv, _drop -from mne_bids.config import (ALLOWED_DATATYPE_EXTENSIONS, - ANNOTATIONS_TO_KEEP, - reader, _map_options) +from mne_bids.config import ( + ALLOWED_DATATYPE_EXTENSIONS, + ANNOTATIONS_TO_KEEP, + reader, + _map_options, +) from mne_bids.utils import _get_ch_type_mapping, verbose, warn, _import_nibabel -from mne_bids.path import (BIDSPath, _parse_ext, _find_matching_sidecar, - _infer_datatype, get_bids_path_from_fname) - - -def _read_raw(raw_path, electrode=None, hsp=None, hpi=None, - allow_maxshield=False, config_path=None, **kwargs): +from mne_bids.path import ( + BIDSPath, + _parse_ext, + _find_matching_sidecar, + _infer_datatype, + get_bids_path_from_fname, +) + + +def _read_raw( + raw_path, + electrode=None, + hsp=None, + hpi=None, + allow_maxshield=False, + config_path=None, + **kwargs, +): """Read a raw file into MNE, making inferences based on extension.""" _, ext = _parse_ext(raw_path) # KIT systems - if ext in ['.con', '.sqd']: - raw = io.read_raw_kit(raw_path, elp=electrode, hsp=hsp, - mrk=hpi, preload=False, **kwargs) + if ext in [".con", ".sqd"]: + raw = io.read_raw_kit( + raw_path, elp=electrode, hsp=hsp, mrk=hpi, preload=False, **kwargs + ) # BTi systems - elif ext == '.pdf': + elif ext == ".pdf": raw = io.read_raw_bti( pdf_fname=str(raw_path), # FIXME MNE should accept Path! config_fname=str(config_path), # FIXME MNE should accept Path! head_shape_fname=hsp, preload=False, - **kwargs + **kwargs, ) - elif ext == '.fif': + elif ext == ".fif": raw = reader[ext](raw_path, allow_maxshield, **kwargs) - elif ext in ['.ds', '.vhdr', '.set', '.edf', '.bdf', '.EDF', '.snirf', - '.cdt']: + elif ext in [".ds", ".vhdr", ".set", ".edf", ".bdf", ".EDF", ".snirf", ".cdt"]: raw_path = Path(raw_path) raw = reader[ext](raw_path, **kwargs) # MEF and NWB are allowed, but not yet implemented - elif ext in ['.mef', '.nwb']: - raise ValueError(f'Got "{ext}" as extension. This is an allowed ' - f'extension but there is no IO support for this ' - f'file format yet.') + elif ext in [".mef", ".nwb"]: + raise ValueError( + f'Got "{ext}" as extension. This is an allowed ' + f"extension but there is no IO support for this " + f"file format yet." + ) # No supported data found ... 
# --------------------------- else: - raise ValueError(f'Raw file name extension must be one ' - f'of {ALLOWED_DATATYPE_EXTENSIONS}\n' - f'Got {ext}') + raise ValueError( + f"Raw file name extension must be one " + f"of {ALLOWED_DATATYPE_EXTENSIONS}\n" + f"Got {ext}" + ) return raw @@ -111,11 +130,12 @@ def _read_events(events, event_id, raw, bids_path=None): # retrieve events if isinstance(events, np.ndarray): if events.ndim != 2: - raise ValueError('Events must have two dimensions, ' - f'found {events.ndim}') + raise ValueError("Events must have two dimensions, " f"found {events.ndim}") if events.shape[1] != 3: - raise ValueError('Events must have second dimension of length 3, ' - f'found {events.shape[1]}') + raise ValueError( + "Events must have second dimension of length 3, " + f"found {events.shape[1]}" + ) events = events elif events is None: events = np.empty(shape=(0, 3), dtype=int) @@ -125,9 +145,9 @@ def _read_events(events, event_id, raw, bids_path=None): if raw.annotations: if event_id is None: logger.info( - 'The provided raw data contains annotations, but you did not ' + "The provided raw data contains annotations, but you did not " 'pass an "event_id" mapping from annotation descriptions to ' - 'event codes. We will generate arbitrary event codes. ' + "event codes. We will generate arbitrary event codes. " 'To specify custom event codes, please pass "event_id".' ) else: @@ -136,9 +156,9 @@ def _read_events(events, event_id, raw, bids_path=None): ) if desc_without_id: raise ValueError( - f'The provided raw data contains annotations, but ' + f"The provided raw data contains annotations, but " f'"event_id" does not contain entries for all annotation ' - f'descriptions. The following entries are missing: ' + f"descriptions. The following entries are missing: " f'{", ".join(desc_without_id)}' ) @@ -148,10 +168,10 @@ def _read_events(events, event_id, raw, bids_path=None): ids_without_desc = set(events[:, 2]) - set(event_id.values()) if ids_without_desc: raise ValueError( - f'No description was specified for the following event(s): ' + f"No description was specified for the following event(s): " f'{", ".join([str(x) for x in sorted(ids_without_desc)])}. ' - f'Please add them to the event_id dictionary, or drop them ' - f'from the events array.' + f"Please add them to the event_id dictionary, or drop them " + f"from the events array." ) # Append events to raw.annotations. All event onsets are relative to @@ -160,8 +180,11 @@ def _read_events(events, event_id, raw, bids_path=None): # We don't pass `first_samp`, as set_annotations() below will take # care of this shift automatically. new_annotations = mne.annotations_from_events( - events=events, sfreq=raw.info['sfreq'], event_desc=id_to_desc_map, - orig_time=raw.annotations.orig_time) + events=events, + sfreq=raw.info["sfreq"], + event_desc=id_to_desc_map, + orig_time=raw.annotations.orig_time, + ) raw = raw.copy() # Don't alter the original. annotations = raw.annotations.copy() @@ -176,27 +199,21 @@ def _read_events(events, event_id, raw, bids_path=None): all_events, all_desc = events_from_annotations( raw, event_id=event_id, - regexp=None # Include `BAD_` and `EDGE_` Annotations, too. + regexp=None, # Include `BAD_` and `EDGE_` Annotations, too. 
) all_dur = raw.annotations.duration # Warn about missing events if not rest or empty-room data - if ( - ( - all_events.size == 0 and - bids_path.task is not None - ) and ( - not bids_path.task.startswith('rest') or - not ( - bids_path.subject == 'emptyroom' and - bids_path.task == 'noise' - ) - ) + if (all_events.size == 0 and bids_path.task is not None) and ( + not bids_path.task.startswith("rest") + or not (bids_path.subject == "emptyroom" and bids_path.task == "noise") ): - warn('No events found or provided. Please add annotations to the raw ' - 'data, or provide the events and event_id parameters. For ' - 'resting state data, BIDS recommends naming the task using ' - 'labels beginning with "rest".') + warn( + "No events found or provided. Please add annotations to the raw " + "data, or provide the events and event_id parameters. For " + "resting state data, BIDS recommends naming the task using " + 'labels beginning with "rest".' + ) return all_events, all_dur, all_desc @@ -211,45 +228,48 @@ def _verbose_list_index(lst, val, *, allow_all=False): extra = get_close_matches(str(val), [str(ll) for ll in lst]) if allow_all and not extra: extra = lst - extra = f'. Did you mean one of {extra}?' if extra else '' - raise ValueError(f'{exc}{extra}') from None + extra = f". Did you mean one of {extra}?" if extra else "" + raise ValueError(f"{exc}{extra}") from None def _handle_participants_reading(participants_fname, raw, subject): participants_tsv = _from_tsv(participants_fname) - subjects = participants_tsv['participant_id'] + subjects = participants_tsv["participant_id"] row_ind = _verbose_list_index(subjects, subject, allow_all=True) - raw.info['subject_info'] = dict() # start from scratch + raw.info["subject_info"] = dict() # start from scratch # set data from participants tsv into subject_info for col_name, value in participants_tsv.items(): - if col_name in ('sex', 'hand'): - value = _map_options(what=col_name, key=value[row_ind], - fro='bids', to='mne') + if col_name in ("sex", "hand"): + value = _map_options( + what=col_name, key=value[row_ind], fro="bids", to="mne" + ) # We don't know how to translate to MNE, so skip. if value is None: - if col_name == 'sex': - info_str = 'subject sex' + if col_name == "sex": + info_str = "subject sex" else: - info_str = 'subject handedness' - warn(f'Unable to map "{col_name}" value "{value}" to MNE. ' - f'Not setting {info_str}.') - elif col_name in ('height', 'weight'): + info_str = "subject handedness" + warn( + f'Unable to map "{col_name}" value "{value}" to MNE. ' + f"Not setting {info_str}." 
+ ) + elif col_name in ("height", "weight"): try: value = float(value[row_ind]) except ValueError: value = None else: - if value[row_ind] == 'n/a': + if value[row_ind] == "n/a": value = None else: value = value[row_ind] # add data into raw.Info - key = 'his_id' if col_name == 'participant_id' else col_name + key = "his_id" if col_name == "participant_id" else col_name if value is not None: - assert key not in raw.info['subject_info'] - raw.info['subject_info'][key] = value + assert key not in raw.info["subject_info"] + raw.info["subject_info"][key] = value return raw @@ -259,20 +279,20 @@ def _handle_scans_reading(scans_fname, raw, bids_path): scans_tsv = _from_tsv(scans_fname) fname = bids_path.fpath.name - if fname.endswith('.pdf'): + if fname.endswith(".pdf"): # for BTI files, the scan is an entire directory - fname = fname.split('.')[0] + fname = fname.split(".")[0] # get the row corresponding to the file # use string concatenation instead of os.path # to work nicely with windows data_fname = Path(bids_path.datatype) / fname - fnames = scans_tsv['filename'] + fnames = scans_tsv["filename"] fnames = [Path(fname) for fname in fnames] - if 'acq_time' in scans_tsv: - acq_times = scans_tsv['acq_time'] + if "acq_time" in scans_tsv: + acq_times = scans_tsv["acq_time"] else: - acq_times = ['n/a'] * len(fnames) + acq_times = ["n/a"] * len(fnames) # There are three possible extensions for BrainVision # First gather all the possible extensions @@ -280,41 +300,43 @@ def _handle_scans_reading(scans_fname, raw, bids_path): # Add the filename extension for the bids folder acq_suffixes.add(Path(data_fname).suffix) - if all(suffix in ('.vhdr', '.eeg', '.vmrk') for suffix in acq_suffixes): + if all(suffix in (".vhdr", ".eeg", ".vmrk") for suffix in acq_suffixes): ext = fnames[0].suffix data_fname = Path(data_fname).with_suffix(ext) row_ind = _verbose_list_index(fnames, data_fname) # check whether all split files have the same acq_time # and throw an error if they don't - if '_split-' in fname: - split_idx = fname.find('split-') - pattern = re.compile(bids_path.datatype + '/' + - bids_path.basename[:split_idx] + - r'split-\d+_' + bids_path.datatype + - bids_path.fpath.suffix) - split_fnames = list(filter( - lambda x: pattern.match(x.as_posix()), fnames - )) + if "_split-" in fname: + split_idx = fname.find("split-") + pattern = re.compile( + bids_path.datatype + + "/" + + bids_path.basename[:split_idx] + + r"split-\d+_" + + bids_path.datatype + + bids_path.fpath.suffix + ) + split_fnames = list(filter(lambda x: pattern.match(x.as_posix()), fnames)) split_acq_times = [] for split_f in split_fnames: - split_acq_times.append( - acq_times[_verbose_list_index(fnames, split_f)]) + split_acq_times.append(acq_times[_verbose_list_index(fnames, split_f)]) if len(set(split_acq_times)) != 1: raise ValueError("Split files must have the same acq_time.") # extract the acquisition time from scans file acq_time = acq_times[row_ind] - if acq_time != 'n/a': + if acq_time != "n/a": # microseconds in the acquisition time is optional - if '.' not in acq_time: + if "." 
not in acq_time:
             # acquisition time ends with '.%fZ' microseconds string
-            acq_time += '.0Z'
-        acq_time = datetime.strptime(acq_time, '%Y-%m-%dT%H:%M:%S.%fZ')
+            acq_time += ".0Z"
+        acq_time = datetime.strptime(acq_time, "%Y-%m-%dT%H:%M:%S.%fZ")
         acq_time = acq_time.replace(tzinfo=timezone.utc)
-        logger.debug(f'Loaded {scans_fname} scans file to set '
-                     f'acq_time as {acq_time}.')
+        logger.debug(
+            f"Loaded {scans_fname} scans file to set " f"acq_time as {acq_time}."
+        )

     # First set measurement date to None and then call anonymize() to
     # remove any traces of the measurement date we wish
     # to replace – it might lurk out in more places than just
@@ -335,7 +357,7 @@ def _handle_info_reading(sidecar_fname, raw):

     Handle PowerLineFrequency of recording.

     """
-    with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin:
+    with open(sidecar_fname, "r", encoding="utf-8-sig") as fin:
         sidecar_json = json.load(fin)

     # read in the sidecar JSON's and raw object's line frequency
@@ -345,12 +367,12 @@ def _handle_info_reading(sidecar_fname, raw):
     # If both are defined, warn if there is a conflict, else all is fine
     if (json_linefreq is not None) and (raw_linefreq is not None):
         if json_linefreq != raw_linefreq:
-            msg = (f"Line frequency in sidecar JSON does not match the info "
-                   f"data structure of the mne.Raw object:\n"
-                   f"Sidecar JSON is -> {json_linefreq}\n"
-                   f"Raw is -> {raw_linefreq}\n\n")
+            msg = (
+                f"Line frequency in sidecar JSON does not match the info "
+                f"data structure of the mne.Raw object:\n"
+                f"Sidecar JSON is -> {json_linefreq}\n"
+                f"Raw is -> {raw_linefreq}\n\n"
+            )

             if json_linefreq == "n/a":
                 msg += "Defaulting to the info from mne.Raw object."
@@ -368,7 +390,7 @@ def _handle_info_reading(sidecar_fname, raw):
         pass  # line freq is either defined or None in mne.Raw

     # get cHPI info
-    chpi = sidecar_json.get('ContinuousHeadLocalization')
+    chpi = sidecar_json.get("ContinuousHeadLocalization")
     if chpi is None:
         # no cHPI info in the sidecar – leave raw.info unchanged
         pass
@@ -376,25 +398,26 @@ def _handle_info_reading(sidecar_fname, raw):
         from mne.io.ctf import RawCTF
         from mne.io.kit.kit import RawKIT

-        msg = ('Cannot verify that the cHPI frequencies from '
-               'the MEG JSON sidecar file correspond to the raw data{}')
+        msg = (
+            "Cannot verify that the cHPI frequencies from "
+            "the MEG JSON sidecar file correspond to the raw data{}"
+        )

         if isinstance(raw, RawCTF):
             # Pick channels corresponding to the cHPI positions
-            hpi_picks = pick_channels_regexp(raw.info['ch_names'],
-                                             'HLC00[123][123].*')
+            hpi_picks = pick_channels_regexp(raw.info["ch_names"], "HLC00[123][123].*")
             if len(hpi_picks) != 9:
                 raise ValueError(
-                    f'Could not find all cHPI channels that we expected for '
-                    f'CTF data. Expected: 9, found: {len(hpi_picks)}'
+                    f"Could not find all cHPI channels that we expected for "
+                    f"CTF data. Expected: 9, found: {len(hpi_picks)}"
                 )
             logger.info(msg.format(" for CTF files."))
         elif isinstance(raw, RawKIT):
             logger.info(msg.format(" for KIT files."))

-        elif 'HeadCoilFrequency' in sidecar_json:
-            hpi_freqs_json = sidecar_json['HeadCoilFrequency']
+        elif "HeadCoilFrequency" in sidecar_json:
+            hpi_freqs_json = sidecar_json["HeadCoilFrequency"]
             try:
                 hpi_freqs_raw, _, _ = mne.chpi.get_chpi_info(raw.info)
             except ValueError:
@@ -403,24 +426,27 @@ def _handle_info_reading(sidecar_fname, raw):
             # XXX: Set chpi info in mne.Raw to what is in the sidecar
             if not np.allclose(hpi_freqs_json, hpi_freqs_raw):
                 warn(
-                    f'The cHPI coil frequencies in the sidecar file '
-                    f'{sidecar_fname}:\n {hpi_freqs_json}\n '
-                    f'differ from what is stored in the raw data:\n'
-                    f' {hpi_freqs_raw}.\n'
-                    f'Defaulting to the info from mne.Raw object.'
+ f"The cHPI coil frequencies in the sidecar file " + f"{sidecar_fname}:\n {hpi_freqs_json}\n " + f"differ from what is stored in the raw data:\n" + f" {hpi_freqs_raw}.\n" + f"Defaulting to the info from mne.Raw object." ) else: - addmsg = (".\n(Because no 'HeadCoilFrequency' data " - "was found in the sidecar.)") + addmsg = ( + ".\n(Because no 'HeadCoilFrequency' data " "was found in the sidecar.)" + ) logger.info(msg.format(addmsg)) else: - if raw.info['hpi_subsystem']: - logger.info('Dropping cHPI information stored in raw data, ' - 'following specification in sidecar file') + if raw.info["hpi_subsystem"]: + logger.info( + "Dropping cHPI information stored in raw data, " + "following specification in sidecar file" + ) with raw.info._unlock(): - raw.info['hpi_subsystem'] = None - raw.info['hpi_meas'] = [] + raw.info["hpi_subsystem"] = None + raw.info["hpi_meas"] = [] return raw @@ -430,28 +456,30 @@ def _handle_events_reading(events_fname, raw): Handle onset, duration, and description of each event. """ - logger.info('Reading events from {}.'.format(events_fname)) + logger.info("Reading events from {}.".format(events_fname)) events_dict = _from_tsv(events_fname) # Get the descriptions of the events - if 'trial_type' in events_dict: - trial_type_col_name = 'trial_type' - elif 'stim_type' in events_dict: # Backward-compat with old datasets. - trial_type_col_name = 'stim_type' - warn(f'The events file, {events_fname}, contains a "stim_type" ' - f'column. This column should be renamed to "trial_type" for ' - f'BIDS compatibility.') + if "trial_type" in events_dict: + trial_type_col_name = "trial_type" + elif "stim_type" in events_dict: # Backward-compat with old datasets. + trial_type_col_name = "stim_type" + warn( + f'The events file, {events_fname}, contains a "stim_type" ' + f'column. This column should be renamed to "trial_type" for ' + f"BIDS compatibility." + ) else: trial_type_col_name = None if trial_type_col_name is not None: # Drop events unrelated to a trial type - events_dict = _drop(events_dict, 'n/a', trial_type_col_name) + events_dict = _drop(events_dict, "n/a", trial_type_col_name) - if 'value' in events_dict: + if "value" in events_dict: # Check whether the `trial_type` <> `value` mapping is unique. trial_types = events_dict[trial_type_col_name] - values = np.asarray(events_dict['value'], dtype=str) + values = np.asarray(events_dict["value"], dtype=str) for trial_type in np.unique(trial_types): idx = np.where(trial_type == np.atleast_1d(trial_types))[0] matching_values = values[idx] @@ -461,36 +489,37 @@ def _handle_events_reading(events_fname, raw): # event descriptors. logger.info( f'The event "{trial_type}" refers to multiple event ' - f'values. Creating hierarchical event names.') + f"values. Creating hierarchical event names." + ) for ii in idx: value = values[ii] - value = 'na' if value == 'n/a' else value - new_name = f'{trial_type}/{value}' - logger.info(f' Renaming event: {trial_type} -> ' - f'{new_name}') + value = "na" if value == "n/a" else value + new_name = f"{trial_type}/{value}" + logger.info( + f" Renaming event: {trial_type} -> " f"{new_name}" + ) trial_types[ii] = new_name descriptions = np.asarray(trial_types, dtype=str) else: - descriptions = np.asarray(events_dict[trial_type_col_name], - dtype=str) - elif 'value' in events_dict: + descriptions = np.asarray(events_dict[trial_type_col_name], dtype=str) + elif "value" in events_dict: # If we don't have a proper description of the events, perhaps we have # at least an event value? 
# Drop events unrelated to value - events_dict = _drop(events_dict, 'n/a', 'value') - descriptions = np.asarray(events_dict['value'], dtype=str) + events_dict = _drop(events_dict, "n/a", "value") + descriptions = np.asarray(events_dict["value"], dtype=str) # Worst case, we go with 'n/a' for all events else: - descriptions = np.array(['n/a'] * len(events_dict['onset']), dtype=str) + descriptions = np.array(["n/a"] * len(events_dict["onset"]), dtype=str) # Deal with "n/a" strings before converting to float onsets = np.array( - [np.nan if on == 'n/a' else on for on in events_dict['onset']], - dtype=float) + [np.nan if on == "n/a" else on for on in events_dict["onset"]], dtype=float + ) durations = np.array( - [0 if du == 'n/a' else du for du in events_dict['duration']], - dtype=float) + [0 if du == "n/a" else du for du in events_dict["duration"]], dtype=float + ) # Keep only events where onset is known good_events_idx = ~np.isnan(onsets) @@ -503,14 +532,16 @@ def _handle_events_reading(events_fname, raw): # raw file annot_from_raw = raw.annotations.copy() - annot_from_events = mne.Annotations(onset=onsets, - duration=durations, - description=descriptions) + annot_from_events = mne.Annotations( + onset=onsets, duration=durations, description=descriptions + ) raw.set_annotations(annot_from_events) - annot_idx_to_keep = [idx for idx, descr - in enumerate(annot_from_raw.description) - if descr in ANNOTATIONS_TO_KEEP] + annot_idx_to_keep = [ + idx + for idx, descr in enumerate(annot_from_raw.description) + if descr in ANNOTATIONS_TO_KEEP + ] annot_to_keep = annot_from_raw[annot_idx_to_keep] if len(annot_to_keep): @@ -522,11 +553,11 @@ def _handle_events_reading(events_fname, raw): def _get_bads_from_tsv_data(tsv_data): """Extract names of bads from data read from channels.tsv.""" idx = [] - for ch_idx, status in enumerate(tsv_data['status']): - if status.lower() == 'bad': + for ch_idx, status in enumerate(tsv_data["status"]): + if status.lower() == "bad": idx.append(ch_idx) - bads = [tsv_data['name'][i] for i in idx] + bads = [tsv_data["name"][i] for i in idx] return bads @@ -535,9 +566,9 @@ def _handle_channels_reading(channels_fname, raw): Updates status (bad) and types of channels. """ - logger.info('Reading channel info from {}.'.format(channels_fname)) + logger.info("Reading channel info from {}.".format(channels_fname)) channels_dict = _from_tsv(channels_fname) - ch_names_tsv = channels_dict['name'] + ch_names_tsv = channels_dict["name"] # Now we can do some work. # The "type" column is mandatory in BIDS. We can use it to set channel @@ -545,14 +576,18 @@ def _handle_channels_reading(channels_fname, raw): channel_type_bids_mne_map = dict() # Get the best mapping we currently have from BIDS to MNE nomenclature - bids_to_mne_ch_types = _get_ch_type_mapping(fro='bids', to='mne') - ch_types_json = channels_dict['type'] + bids_to_mne_ch_types = _get_ch_type_mapping(fro="bids", to="mne") + ch_types_json = channels_dict["type"] for ch_name, ch_type in zip(ch_names_tsv, ch_types_json): # We don't map MEG channels for now, as there's no clear 1:1 mapping # from BIDS to MNE coil types. 
if ch_type.upper() in ( - 'MEGGRADAXIAL', 'MEGMAG', 'MEGREFGRADAXIAL', 'MEGGRADPLANAR', - 'MEGREFMAG', 'MEGOTHER' + "MEGGRADAXIAL", + "MEGMAG", + "MEGREFGRADAXIAL", + "MEGGRADPLANAR", + "MEGREFMAG", + "MEGOTHER", ): continue @@ -567,14 +602,16 @@ def _handle_channels_reading(channels_fname, raw): # XXX x-ref https://github.com/mne-tools/mne-bids/issues/481 updated_ch_type = bids_to_mne_ch_types.get(ch_type.upper(), None) if updated_ch_type is not None: - msg = ('The BIDS dataset contains channel types in lowercase ' - 'spelling. This violates the BIDS specification and ' - 'will raise an error in the future.') + msg = ( + "The BIDS dataset contains channel types in lowercase " + "spelling. This violates the BIDS specification and " + "will raise an error in the future." + ) warn(msg) if updated_ch_type is None: # We don't have an appropriate mapping, so make it a "misc" channel - channel_type_bids_mne_map[ch_name] = 'misc' + channel_type_bids_mne_map[ch_name] = "misc" warn( f'No BIDS -> MNE mapping found for channel type "{ch_type}". ' f'Type of channel "{ch_name}" will be set to "misc".' @@ -584,24 +621,28 @@ def _handle_channels_reading(channels_fname, raw): channel_type_bids_mne_map[ch_name] = updated_ch_type # Special handling for (synthesized) stimulus channel - synthesized_stim_ch_name = 'STI 014' - if (synthesized_stim_ch_name in raw.ch_names and - synthesized_stim_ch_name not in ch_names_tsv): + synthesized_stim_ch_name = "STI 014" + if ( + synthesized_stim_ch_name in raw.ch_names + and synthesized_stim_ch_name not in ch_names_tsv + ): logger.info( f'The stimulus channel "{synthesized_stim_ch_name}" is present in ' - f'the raw data, but not included in channels.tsv. Removing the ' - f'channel.') + f"the raw data, but not included in channels.tsv. Removing the " + f"channel." + ) raw.drop_channels([synthesized_stim_ch_name]) # Rename channels in loaded Raw to match those read from the BIDS sidecar if len(ch_names_tsv) != len(raw.ch_names): - warn(f'The number of channels in the channels.tsv sidecar file ' - f'({len(ch_names_tsv)}) does not match the number of channels ' - f'in the raw data file ({len(raw.ch_names)}). Will not try to ' - f'set channel names.') + warn( + f"The number of channels in the channels.tsv sidecar file " + f"({len(ch_names_tsv)}) does not match the number of channels " + f"in the raw data file ({len(raw.ch_names)}). Will not try to " + f"set channel names." 
+ ) else: - for bids_ch_name, raw_ch_name in zip(ch_names_tsv, - raw.ch_names.copy()): + for bids_ch_name, raw_ch_name in zip(ch_names_tsv, raw.ch_names.copy()): if bids_ch_name != raw_ch_name: raw.rename_channels({raw_ch_name: bids_ch_name}) @@ -611,28 +652,30 @@ def _handle_channels_reading(channels_fname, raw): for ch_name, ch_type in channel_type_bids_mne_map.items() if ch_name in raw.ch_names } - ch_diff = ( - set(channel_type_bids_mne_map.keys()) - - set(channel_type_bids_mne_map_available_channels.keys()) + ch_diff = set(channel_type_bids_mne_map.keys()) - set( + channel_type_bids_mne_map_available_channels.keys() ) if ch_diff: - warn(f'Cannot set channel type for the following channels, as they ' - f'are missing in the raw data: {", ".join(sorted(ch_diff))}') + warn( + f"Cannot set channel type for the following channels, as they " + f'are missing in the raw data: {", ".join(sorted(ch_diff))}' + ) raw.set_channel_types(channel_type_bids_mne_map_available_channels) # Set bad channels based on _channels.tsv sidecar - if 'status' in channels_dict: + if "status" in channels_dict: bads_tsv = _get_bads_from_tsv_data(channels_dict) - bads_avail = [ch_name for ch_name in bads_tsv - if ch_name in raw.ch_names] + bads_avail = [ch_name for ch_name in bads_tsv if ch_name in raw.ch_names] ch_diff = set(bads_tsv) - set(bads_avail) if ch_diff: - warn(f'Cannot set "bad" status for the following channels, as ' - f'they are missing in the raw data: ' - f'{", ".join(sorted(ch_diff))}') + warn( + f'Cannot set "bad" status for the following channels, as ' + f"they are missing in the raw data: " + f'{", ".join(sorted(ch_diff))}' + ) - raw.info['bads'] = bads_avail + raw.info["bads"] = bads_avail return raw @@ -687,8 +730,10 @@ def read_raw_bids(bids_path, extra_params=None, verbose=None): """ if not isinstance(bids_path, BIDSPath): - raise RuntimeError('"bids_path" must be a BIDSPath object. Please ' - 'instantiate using mne_bids.BIDSPath().') + raise RuntimeError( + '"bids_path" must be a BIDSPath object. Please ' + "instantiate using mne_bids.BIDSPath()." + ) bids_path = bids_path.copy() sub = bids_path.subject @@ -699,9 +744,11 @@ def read_raw_bids(bids_path, extra_params=None, verbose=None): # check root available if bids_root is None: - raise ValueError('The root of the "bids_path" must be set. ' - 'Please use `bids_path.update(root="")` ' - 'to set the root of the BIDS folder to read.') + raise ValueError( + 'The root of the "bids_path" must be set. ' + 'Please use `bids_path.update(root="")` ' + "to set the root of the BIDS folder to read." 
+ ) # infer the datatype and suffix if they are not present in the BIDSPath if datatype is None: @@ -710,27 +757,26 @@ def read_raw_bids(bids_path, extra_params=None, verbose=None): if suffix is None: bids_path.update(suffix=datatype) - if bids_path.fpath.suffix == '.pdf': - bids_raw_folder = bids_path.directory / f'{bids_path.basename}' - raw_path = list(bids_raw_folder.glob('c,rf*'))[0] - config_path = bids_raw_folder / 'config' + if bids_path.fpath.suffix == ".pdf": + bids_raw_folder = bids_path.directory / f"{bids_path.basename}" + raw_path = list(bids_raw_folder.glob("c,rf*"))[0] + config_path = bids_raw_folder / "config" else: raw_path = bids_path.fpath # Resolve for FIFF files if ( - raw_path.suffix == '.fif' and - bids_path.split is None and - raw_path.is_symlink() + raw_path.suffix == ".fif" + and bids_path.split is None + and raw_path.is_symlink() ): target_path = raw_path.resolve() - logger.info(f'Resolving symbolic link: ' - f'{raw_path} -> {target_path}') + logger.info(f"Resolving symbolic link: " f"{raw_path} -> {target_path}") raw_path = target_path config_path = None # Special-handle EDF filenames: we accept upper- and lower-case extensions - if raw_path.suffix.lower() == '.edf': - for extension in ('.edf', '.EDF'): + if raw_path.suffix.lower() == ".edf": + for extension in (".edf", ".EDF"): candidate_path = raw_path.with_suffix(extension) if candidate_path.exists(): raw_path = candidate_path @@ -739,41 +785,46 @@ def read_raw_bids(bids_path, extra_params=None, verbose=None): if not raw_path.exists(): options = os.listdir(bids_path.directory) matches = get_close_matches(bids_path.basename, options) - msg = f'File does not exist:\n{raw_path}' + msg = f"File does not exist:\n{raw_path}" if matches: msg += ( - '\nDid you mean one of:\n' + - '\n'.join(matches) + - '\ninstead of:\n' + - bids_path.basename + "\nDid you mean one of:\n" + + "\n".join(matches) + + "\ninstead of:\n" + + bids_path.basename ) raise FileNotFoundError(msg) if config_path is not None and not config_path.exists(): - raise FileNotFoundError(f'config directory not found: {config_path}') + raise FileNotFoundError(f"config directory not found: {config_path}") if extra_params is None: extra_params = dict() - elif 'exclude' in extra_params: - del extra_params['exclude'] + elif "exclude" in extra_params: + del extra_params["exclude"] logger.info('"exclude" parameter is not supported by read_raw_bids') - if raw_path.suffix == '.fif' and 'allow_maxshield' not in extra_params: - extra_params['allow_maxshield'] = True - raw = _read_raw(raw_path, electrode=None, hsp=None, hpi=None, - config_path=config_path, **extra_params) + if raw_path.suffix == ".fif" and "allow_maxshield" not in extra_params: + extra_params["allow_maxshield"] = True + raw = _read_raw( + raw_path, + electrode=None, + hsp=None, + hpi=None, + config_path=config_path, + **extra_params, + ) # Try to find an associated events.tsv to get information about the # events in the recorded data if ( - (bids_path.subject == 'emptyroom' and bids_path.task == 'noise') or - bids_path.task.startswith('rest') - ): - on_error = 'ignore' + bids_path.subject == "emptyroom" and bids_path.task == "noise" + ) or bids_path.task.startswith("rest"): + on_error = "ignore" else: - on_error = 'warn' + on_error = "warn" events_fname = _find_matching_sidecar( - bids_path, suffix='events', extension='.tsv', on_error=on_error + bids_path, suffix="events", extension=".tsv", on_error=on_error ) if events_fname is not None: @@ -781,74 +832,80 @@ def read_raw_bids(bids_path, 
extra_params=None, verbose=None):

     # Try to find an associated channels.tsv to get information about the
     # status and type of present channels
-    channels_fname = _find_matching_sidecar(bids_path,
-                                            suffix='channels',
-                                            extension='.tsv',
-                                            on_error='warn')
+    channels_fname = _find_matching_sidecar(
+        bids_path, suffix="channels", extension=".tsv", on_error="warn"
+    )

     if channels_fname is not None:
         raw = _handle_channels_reading(channels_fname, raw)

     # Try to find an associated electrodes.tsv and coordsystem.json
     # to get information about the status and type of present channels
-    on_error = 'warn' if suffix == 'ieeg' else 'ignore'
-    electrodes_fname = _find_matching_sidecar(bids_path,
-                                              suffix='electrodes',
-                                              extension='.tsv',
-                                              on_error=on_error)
-    coordsystem_fname = _find_matching_sidecar(bids_path,
-                                               suffix='coordsystem',
-                                               extension='.json',
-                                               on_error=on_error)
+    on_error = "warn" if suffix == "ieeg" else "ignore"
+    electrodes_fname = _find_matching_sidecar(
+        bids_path, suffix="electrodes", extension=".tsv", on_error=on_error
+    )
+    coordsystem_fname = _find_matching_sidecar(
+        bids_path, suffix="coordsystem", extension=".json", on_error=on_error
+    )
     if electrodes_fname is not None:
         if coordsystem_fname is None:
-            raise RuntimeError(f"BIDS mandates that the coordsystem.json "
-                               f"should exist if electrodes.tsv does. "
-                               f"Please create coordsystem.json for"
-                               f"{bids_path.basename}")
-        if datatype in ['meg', 'eeg', 'ieeg']:
-            _read_dig_bids(electrodes_fname, coordsystem_fname,
-                           raw=raw, datatype=datatype)
+            raise RuntimeError(
+                f"BIDS mandates that the coordsystem.json "
+                f"should exist if electrodes.tsv does. "
+                f"Please create coordsystem.json for "
+                f"{bids_path.basename}"
+            )
+        if datatype in ["meg", "eeg", "ieeg"]:
+            _read_dig_bids(
+                electrodes_fname, coordsystem_fname, raw=raw, datatype=datatype
+            )

     # Try to find an associated sidecar .json to get information about the
     # recording snapshot
-    sidecar_fname = _find_matching_sidecar(bids_path,
-                                           suffix=datatype,
-                                           extension='.json',
-                                           on_error='warn')
+    sidecar_fname = _find_matching_sidecar(
+        bids_path, suffix=datatype, extension=".json", on_error="warn"
+    )
     if sidecar_fname is not None:
         raw = _handle_info_reading(sidecar_fname, raw)

     # read in associated scans filename
     scans_fname = BIDSPath(
-        subject=bids_path.subject, session=bids_path.session,
-        suffix='scans', extension='.tsv',
-        root=bids_path.root
+        subject=bids_path.subject,
+        session=bids_path.session,
+        suffix="scans",
+        extension=".tsv",
+        root=bids_path.root,
     ).fpath

     if scans_fname.exists():
         raw = _handle_scans_reading(scans_fname, raw, bids_path)

     # read in associated subject info from participants.tsv
-    participants_tsv_path = bids_root / 'participants.tsv'
+    participants_tsv_path = bids_root / "participants.tsv"
     subject = f"sub-{bids_path.subject}"
     if op.exists(participants_tsv_path):
         raw = _handle_participants_reading(
-            participants_fname=participants_tsv_path,
-            raw=raw,
-            subject=subject
+            participants_fname=participants_tsv_path, raw=raw, subject=subject
         )
     else:
         warn(f"participants.tsv file not found for {raw_path}")
-        raw.info['subject_info'] = dict()
+        raw.info["subject_info"] = dict()

-    assert raw.annotations.orig_time == raw.info['meas_date']
+    assert raw.annotations.orig_time == raw.info["meas_date"]
     return raw


 @verbose
-def get_head_mri_trans(bids_path, extra_params=None, t1_bids_path=None,
-                       fs_subject=None, fs_subjects_dir=None, *, kind=None,
-                       verbose=None):
+def get_head_mri_trans(
+    bids_path,
+    extra_params=None,
+    t1_bids_path=None,
+    
fs_subject=None, + fs_subjects_dir=None, + *, + kind=None, + verbose=None, +): """Produce transformation matrix from MEG and MRI landmark points. Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar @@ -909,44 +966,50 @@ def get_head_mri_trans(bids_path, extra_params=None, t1_bids_path=None, trans : mne.transforms.Transform The data transformation matrix from head to MRI coordinates. """ - nib = _import_nibabel('get a head to MRI transform') + nib = _import_nibabel("get a head to MRI transform") if not isinstance(bids_path, BIDSPath): - raise RuntimeError('"bids_path" must be a BIDSPath object. Please ' - 'instantiate using mne_bids.BIDSPath().') + raise RuntimeError( + '"bids_path" must be a BIDSPath object. Please ' + "instantiate using mne_bids.BIDSPath()." + ) # check root available meg_bids_path = bids_path.copy() del bids_path if meg_bids_path.root is None: - raise ValueError('The root of the "bids_path" must be set. ' - 'Please use `bids_path.update(root="")` ' - 'to set the root of the BIDS folder to read.') + raise ValueError( + 'The root of the "bids_path" must be set. ' + 'Please use `bids_path.update(root="")` ' + "to set the root of the BIDS folder to read." + ) # if the bids_path is underspecified, only get info for MEG data if meg_bids_path.datatype is None: - meg_bids_path.datatype = 'meg' - warn('bids_path did not have a datatype set. Assuming "meg". This ' - 'will raise an exception in the future.', module='mne_bids', - category=DeprecationWarning) + meg_bids_path.datatype = "meg" + warn( + 'bids_path did not have a datatype set. Assuming "meg". This ' + "will raise an exception in the future.", + module="mne_bids", + category=DeprecationWarning, + ) if meg_bids_path.suffix is None: - meg_bids_path.suffix = 'meg' - warn('bids_path did not have a suffix set. Assuming "meg". This ' - 'will raise an exception in the future.', module='mne_bids', - category=DeprecationWarning) + meg_bids_path.suffix = "meg" + warn( + 'bids_path did not have a suffix set. Assuming "meg". 
This ' + "will raise an exception in the future.", + module="mne_bids", + category=DeprecationWarning, + ) # Get the sidecar file for MRI landmarks t1w_bids_path = ( (meg_bids_path if t1_bids_path is None else t1_bids_path) .copy() - .update( - datatype='anat', - suffix='T1w', - task=None - ) + .update(datatype="anat", suffix="T1w", task=None) ) t1w_json_path = _find_matching_sidecar( - bids_path=t1w_bids_path, extension='.json', on_error='ignore' + bids_path=t1w_bids_path, extension=".json", on_error="ignore" ) del t1_bids_path @@ -955,10 +1018,9 @@ def get_head_mri_trans(bids_path, extra_params=None, t1_bids_path=None, if t1w_json_path is None or not t1w_json_path.exists(): raise FileNotFoundError( - f'Did not find T1w JSON sidecar file, tried location: ' - f'{t1w_json_path}' + f"Did not find T1w JSON sidecar file, tried location: " f"{t1w_json_path}" ) - for extension in ('.nii', '.nii.gz'): + for extension in (".nii", ".nii.gz"): t1w_path_candidate = t1w_json_path.with_suffix(extension) if t1w_path_candidate.exists(): t1w_bids_path = get_bids_path_from_fname(fname=t1w_path_candidate) @@ -966,54 +1028,60 @@ def get_head_mri_trans(bids_path, extra_params=None, t1_bids_path=None, if not t1w_bids_path.fpath.exists(): raise FileNotFoundError( - f'Did not find T1w recording file, tried location: ' + f"Did not find T1w recording file, tried location: " f'{t1w_path_candidate.name.replace(".nii.gz", "")}[.nii, .nii.gz]' ) # Get MRI landmarks from the JSON sidecar - t1w_json = json.loads(t1w_json_path.read_text(encoding='utf-8')) - mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict()) + t1w_json = json.loads(t1w_json_path.read_text(encoding="utf-8")) + mri_coords_dict = t1w_json.get("AnatomicalLandmarkCoordinates", dict()) # landmarks array: rows: [LPA, NAS, RPA]; columns: [x, y, z] suffix = f"_{kind}" if kind is not None else "" mri_landmarks = np.full((3, 3), np.nan) for landmark_name, coords in mri_coords_dict.items(): - if landmark_name.upper() == ('LPA' + suffix).upper(): + if landmark_name.upper() == ("LPA" + suffix).upper(): mri_landmarks[0, :] = coords - elif landmark_name.upper() == ('RPA' + suffix).upper(): + elif landmark_name.upper() == ("RPA" + suffix).upper(): mri_landmarks[2, :] = coords - elif (landmark_name.upper() == ('NAS' + suffix).upper() or - landmark_name.lower() == ('nasion' + suffix).lower()): + elif ( + landmark_name.upper() == ("NAS" + suffix).upper() + or landmark_name.lower() == ("nasion" + suffix).lower() + ): mri_landmarks[1, :] = coords else: continue if np.isnan(mri_landmarks).any(): raise RuntimeError( - f'Could not extract fiducial points from T1w sidecar file: ' - f'{t1w_json_path}\n\n' - f'The sidecar file SHOULD contain a key ' + f"Could not extract fiducial points from T1w sidecar file: " + f"{t1w_json_path}\n\n" + f"The sidecar file SHOULD contain a key " f'"AnatomicalLandmarkCoordinates" pointing to an ' f'object with the keys "LPA", "NAS", and "RPA". ' - f'Yet, the following structure was found:\n\n' - f'{mri_coords_dict}' + f"Yet, the following structure was found:\n\n" + f"{mri_coords_dict}" ) # The MRI landmarks are in "voxels". We need to convert them to the # Neuromag RAS coordinate system in order to compare them with MEG # landmarks. See also: `mne_bids.write.write_anat` if fs_subject is None: - warn('Passing "fs_subject=None" has been deprecated and will raise ' - 'an error in future versions. 
Please explicitly specify the '
-             'FreeSurfer subject name.', DeprecationWarning)
-        fs_subject = f'sub-{meg_bids_path.subject}'
+        warn(
+            'Passing "fs_subject=None" has been deprecated and will raise '
+            "an error in future versions. Please explicitly specify the "
+            "FreeSurfer subject name.",
+            DeprecationWarning,
+        )
+        fs_subject = f"sub-{meg_bids_path.subject}"

     fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=False)
-    fs_t1_path = Path(fs_subjects_dir) / fs_subject / 'mri' / 'T1.mgz'
+    fs_t1_path = Path(fs_subjects_dir) / fs_subject / "mri" / "T1.mgz"
     if not fs_t1_path.exists():
         raise ValueError(
             f"Could not find {fs_t1_path}. Consider running FreeSurfer's "
-            f"'recon-all` for subject {fs_subject}.")
+            f"`recon-all` for subject {fs_subject}."
+        )
     fs_t1_mgh = nib.load(str(fs_t1_path))

     t1_nifti = nib.load(str(t1w_bids_path.fpath))
@@ -1035,27 +1103,32 @@ def get_head_mri_trans(bids_path, extra_params=None, t1_bids_path=None,
     _, ext = _parse_ext(meg_bids_path)
     if extra_params is None:
         extra_params = dict()
-    if ext == '.fif':
-        extra_params['allow_maxshield'] = 'yes'
+    if ext == ".fif":
+        extra_params["allow_maxshield"] = "yes"

     raw = read_raw_bids(bids_path=meg_bids_path, extra_params=extra_params)
-    if (raw.get_montage() is None or
-            raw.get_montage().get_positions() is None or
-            any([raw.get_montage().get_positions()[fid_key] is None
-                 for fid_key in ('nasion', 'lpa', 'rpa')])):
+    if (
+        raw.get_montage() is None
+        or raw.get_montage().get_positions() is None
+        or any(
+            [
+                raw.get_montage().get_positions()[fid_key] is None
+                for fid_key in ("nasion", "lpa", "rpa")
+            ]
+        )
+    ):
         raise RuntimeError(
-            f'Could not extract fiducial points from ``raw`` file: '
-            f'{meg_bids_path}\n\n'
-            f'The ``raw`` file SHOULD contain digitization points '
-            'for the nasion and left and right pre-auricular points '
-            'but none were found'
+            f"Could not extract fiducial points from ``raw`` file: "
+            f"{meg_bids_path}\n\n"
+            f"The ``raw`` file SHOULD contain digitization points "
+            "for the nasion and left and right pre-auricular points "
+            "but none were found"
         )
     pos = raw.get_montage().get_positions()
-    meg_landmarks = np.asarray((pos['lpa'], pos['nasion'], pos['rpa']))
+    meg_landmarks = np.asarray((pos["lpa"], pos["nasion"], pos["rpa"]))

     # Given the two sets of points, fit the transform
-    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
-                                      tgt_pts=mri_landmarks)
-    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
+    trans_fitted = fit_matched_points(src_pts=meg_landmarks, tgt_pts=mri_landmarks)
+    trans = mne.transforms.Transform(fro="head", to="mri", trans=trans_fitted)
     return trans
diff --git a/mne_bids/report/_report.py b/mne_bids/report/_report.py
index 2e532943e..dd776a893 100644
--- a/mne_bids/report/_report.py
+++ b/mne_bids/report/_report.py
@@ -13,16 +13,20 @@
 from mne_bids.config import DOI, ALLOWED_DATATYPES
 from mne_bids.tsv_handler import _from_tsv
-from mne_bids.path import (get_bids_path_from_fname, get_datatypes,
-                           get_entity_vals, BIDSPath,
-                           _parse_ext, _find_matching_sidecar)
+from mne_bids.path import (
+    get_bids_path_from_fname,
+    get_datatypes,
+    get_entity_vals,
+    BIDSPath,
+    _parse_ext,
+    _find_matching_sidecar,
+)
 from mne_bids.utils import warn

 jinja_env = jinja2.Environment(
     loader=jinja2.PackageLoader(
-        package_name='mne_bids.report',
-        package_path='templates'
+        package_name="mne_bids.report", package_path="templates"
     )
 )


@@ -33,54 +37,57 @@ def _pretty_str(listed):
     listed = list(listed)

     if len(listed) <= 1:
-        return 
','.join(listed) - return '{}, and {}'.format(', '.join(listed[:-1]), listed[-1]) + return ",".join(listed) + return "{}, and {}".format(", ".join(listed[:-1]), listed[-1]) def _range_str(minval, maxval, meanval, stdval, n_unknown, type): - if minval == 'n/a': - return 'ages all unknown' + if minval == "n/a": + return "ages all unknown" if n_unknown > 0: - unknown_str = f'; {n_unknown} with unknown {type}' + unknown_str = f"; {n_unknown} with unknown {type}" else: - unknown_str = '' + unknown_str = "" return ( - f'ages ranged from {round(minval, 2)} to {round(maxval, 2)} ' - f'(mean = {round(meanval, 2)}, std = {round(stdval, 2)}{unknown_str})' + f"ages ranged from {round(minval, 2)} to {round(maxval, 2)} " + f"(mean = {round(meanval, 2)}, std = {round(stdval, 2)}{unknown_str})" ) def _summarize_participant_hand(hands): - n_unknown = len([hand for hand in hands if hand == 'n/a']) + n_unknown = len([hand for hand in hands if hand == "n/a"]) if n_unknown == len(hands): - return 'handedness were all unknown' + return "handedness were all unknown" - n_rhand = len([hand for hand in hands if hand.upper() == 'R']) - n_lhand = len([hand for hand in hands if hand.upper() == 'L']) - n_ambidex = len([hand for hand in hands if hand.upper() == 'A']) + n_rhand = len([hand for hand in hands if hand.upper() == "R"]) + n_lhand = len([hand for hand in hands if hand.upper() == "L"]) + n_ambidex = len([hand for hand in hands if hand.upper() == "A"]) - return f'comprised of {n_rhand} right hand, {n_lhand} left hand ' \ - f'and {n_ambidex} ambidextrous' + return ( + f"comprised of {n_rhand} right hand, {n_lhand} left hand " + f"and {n_ambidex} ambidextrous" + ) def _summarize_participant_sex(sexs): - n_unknown = len([sex for sex in sexs if sex == 'n/a']) + n_unknown = len([sex for sex in sexs if sex == "n/a"]) if n_unknown == len(sexs): - return 'sex were all unknown' + return "sex were all unknown" - n_males = len([sex for sex in sexs if sex.upper() == 'M']) - n_females = len([sex for sex in sexs if sex.upper() == 'F']) + n_males = len([sex for sex in sexs if sex.upper() == "M"]) + n_females = len([sex for sex in sexs if sex.upper() == "F"]) - return f'comprised of {n_males} male and {n_females} female participants' + return f"comprised of {n_males} male and {n_females} female participants" def _length_recording_str(length_recordings): import numpy as np + if length_recordings is None: - return '' + return "" min_record_length = round(np.min(length_recordings), 2) max_record_length = round(np.max(length_recordings), 2) @@ -89,38 +96,38 @@ def _length_recording_str(length_recordings): total_record_length = round(sum(length_recordings), 2) return ( - f'Recording durations ranged from {min_record_length} to ' - f'{max_record_length} seconds ' - f'(mean = {mean_record_length}, std = {std_record_length}), ' - f'for a total of {total_record_length} seconds of data recorded ' - f'over all scans.' + f"Recording durations ranged from {min_record_length} to " + f"{max_record_length} seconds " + f"(mean = {mean_record_length}, std = {std_record_length}), " + f"for a total of {total_record_length} seconds of data recorded " + f"over all scans." 
) def _summarize_software_filters(software_filters): - if software_filters in [{}, 'n/a']: - return '' + if software_filters in [{}, "n/a"]: + return "" - msg = '' + msg = "" for key, value in software_filters.items(): - msg += f'{key}' + msg += f"{key}" if isinstance(value, dict) and value: parameters = [] for param_name, param_value in value.items(): if param_name and param_value: - parameters.append(f'{param_value} {param_name}') + parameters.append(f"{param_value} {param_name}") if parameters: - msg += ' with parameters ' - msg += ', '.join(parameters) + msg += " with parameters " + msg += ", ".join(parameters) return msg def _pretty_dict(template_dict): """Remove problematic blank spaces.""" for key, val in template_dict.items(): - if val == ' ': - template_dict[key] = 'n/a' + if val == " ": + template_dict[key] = "n/a" def _summarize_dataset(root): @@ -144,24 +151,23 @@ def _summarize_dataset(root): template_dict : dict A dictionary of values for various template strings. """ - dataset_descrip_fpath = op.join(root, - 'dataset_description.json') + dataset_descrip_fpath = op.join(root, "dataset_description.json") if not op.exists(dataset_descrip_fpath): return dict() # read file and 'REQUIRED' components of it - with open(dataset_descrip_fpath, 'r', encoding='utf-8-sig') as fin: + with open(dataset_descrip_fpath, "r", encoding="utf-8-sig") as fin: dataset_description = json.load(fin) # create dictionary to pass into template string - name = dataset_description['Name'] - bids_version = dataset_description['BIDSVersion'] - authors = dataset_description['Authors'] + name = dataset_description["Name"] + bids_version = dataset_description["BIDSVersion"] + authors = dataset_description["Authors"] template_dict = { - 'name': name, - 'bids_version': bids_version, - 'mne_bids_doi': DOI, - 'authors': _pretty_str(authors), + "name": name, + "bids_version": bids_version, + "mne_bids_doi": DOI, + "authors": _pretty_str(authors), } _pretty_dict(template_dict) return template_dict @@ -180,59 +186,54 @@ def _summarize_participants_tsv(root): template_dict : dict A dictionary of values for various template strings. 
""" - participants_tsv_fpath = op.join(root, 'participants.tsv') + participants_tsv_fpath = op.join(root, "participants.tsv") if not op.exists(participants_tsv_fpath): return dict() participants_tsv = _from_tsv(str(participants_tsv_fpath)) - p_ids = participants_tsv['participant_id'] - logger.info(f'Summarizing participants.tsv {participants_tsv_fpath}...') + p_ids = participants_tsv["participant_id"] + logger.info(f"Summarizing participants.tsv {participants_tsv_fpath}...") # summarize sex count statistics - keys = ['M', 'F', 'n/a'] - p_sex = participants_tsv.get('sex') + keys = ["M", "F", "n/a"] + p_sex = participants_tsv.get("sex") # phrasing works for both sex and gender - p_gender = participants_tsv.get('gender') - sexs = ['n/a'] + p_gender = participants_tsv.get("gender") + sexs = ["n/a"] if p_sex or p_gender: # only summarize sex if it conforms to `keys` referenced above p_sex = p_gender if p_sex is None else p_sex - if all([sex.upper() in keys - for sex in p_sex if sex != 'n/a']): + if all([sex.upper() in keys for sex in p_sex if sex != "n/a"]): sexs = p_sex # summarize hand count statistics - keys = ['R', 'L', 'A', 'n/a'] - p_hands = participants_tsv.get('hand') - hands = ['n/a'] + keys = ["R", "L", "A", "n/a"] + p_hands = participants_tsv.get("hand") + hands = ["n/a"] if p_hands: # only summarize handedness if it conforms to # mne-bids handedness - if all([hand.upper() in keys - for hand in p_hands if hand != 'n/a']): + if all([hand.upper() in keys for hand in p_hands if hand != "n/a"]): hands = p_hands # summarize age statistics: mean, std, min, max - p_ages = participants_tsv.get('age') - min_age, max_age = 'n/a', 'n/a' - mean_age, std_age = 'n/a', 'n/a' + p_ages = participants_tsv.get("age") + min_age, max_age = "n/a", "n/a" + mean_age, std_age = "n/a", "n/a" n_age_unknown = len(p_ages) if p_ages else len(p_ids) if p_ages: # only summarize age if they are numerics - if all([age.isnumeric() for age in p_ages if age != 'n/a']): - age_list = [float(age) for age in p_ages if age != 'n/a'] + if all([age.isnumeric() for age in p_ages if age != "n/a"]): + age_list = [float(age) for age in p_ages if age != "n/a"] n_age_unknown = len(p_ids) - len(age_list) if age_list: min_age, max_age = np.min(age_list), np.max(age_list) mean_age, std_age = np.mean(age_list), np.std(age_list) template_dict = { - 'sexs': _summarize_participant_sex(sexs), - 'hands': _summarize_participant_hand(hands), - 'ages': _range_str( - min_age, max_age, mean_age, std_age, n_age_unknown, - 'age' - ) + "sexs": _summarize_participant_sex(sexs), + "hands": _summarize_participant_hand(hands), + "ages": _range_str(min_age, max_age, mean_age, std_age, n_age_unknown, "age"), } return template_dict @@ -257,17 +258,18 @@ def _summarize_scans(root, session=None): """ root = Path(root) if session is None: - search_str = '*_scans.tsv' + search_str = "*_scans.tsv" else: - search_str = f'*ses-{session}' \ - f'*_scans.tsv' + search_str = f"*ses-{session}" f"*_scans.tsv" scans_fpaths = list(root.rglob(search_str)) if len(scans_fpaths) == 0: - warn('No *scans.tsv files found. Currently, ' - 'we do not generate a report without the scans.tsv files.') + warn( + "No *scans.tsv files found. Currently, " + "we do not generate a report without the scans.tsv files." 
+ ) return dict() - logger.info(f'Summarizing scans.tsv files {scans_fpaths}...') + logger.info(f"Summarizing scans.tsv files {scans_fpaths}...") # summarize sidecar.json, channels.tsv template sidecar_dict = _summarize_sidecar_json(root, scans_fpaths) @@ -306,7 +308,7 @@ def _summarize_sidecar_json(root, scans_fpaths): # load in the scans.tsv file # and read metadata for each scan scans_tsv = _from_tsv(scan_fpath) - scans = scans_tsv['filename'] + scans = scans_tsv["filename"] for scan in scans: # summarize metadata of recordings bids_path, ext = _parse_ext(scan) @@ -322,44 +324,44 @@ def _summarize_sidecar_json(root, scans_fpaths): bids_path.root = root # XXX: improve to allow emptyroom - if bids_path.subject == 'emptyroom': + if bids_path.subject == "emptyroom": continue - sidecar_fname = _find_matching_sidecar(bids_path=bids_path, - suffix=datatype, - extension='.json') - with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin: + sidecar_fname = _find_matching_sidecar( + bids_path=bids_path, suffix=datatype, extension=".json" + ) + with open(sidecar_fname, "r", encoding="utf-8-sig") as fin: sidecar_json = json.load(fin) # aggregate metadata from each scan # REQUIRED kwargs - sfreq = sidecar_json['SamplingFrequency'] - powerlinefreq = str(sidecar_json['PowerLineFrequency']) - software_filters = sidecar_json.get('SoftwareFilters') + sfreq = sidecar_json["SamplingFrequency"] + powerlinefreq = str(sidecar_json["PowerLineFrequency"]) + software_filters = sidecar_json.get("SoftwareFilters") if not software_filters: - software_filters = 'n/a' + software_filters = "n/a" # RECOMMENDED kwargs - manufacturer = sidecar_json.get('Manufacturer', 'n/a') - record_duration = sidecar_json.get('RecordingDuration', 'n/a') + manufacturer = sidecar_json.get("Manufacturer", "n/a") + record_duration = sidecar_json.get("RecordingDuration", "n/a") sfreqs.add(str(np.round(sfreq, 2))) powerlinefreqs.add(str(powerlinefreq)) - if manufacturer != 'n/a': + if manufacturer != "n/a": manufacturers.add(manufacturer) length_recordings.append(record_duration) # XXX: length summary is only allowed, if no 'n/a' was found - if any([dur == 'n/a' for dur in length_recordings]): + if any([dur == "n/a" for dur in length_recordings]): length_recordings = None template_dict = { - 'n_scans': n_scans, - 'manufacturer': _pretty_str(manufacturers), - 'sfreq': _pretty_str(sfreqs), - 'powerlinefreq': _pretty_str(powerlinefreqs), - 'software_filters': _summarize_software_filters(software_filters), - 'length_recordings': _length_recording_str(length_recordings), + "n_scans": n_scans, + "manufacturer": _pretty_str(manufacturers), + "sfreq": _pretty_str(sfreqs), + "powerlinefreq": _pretty_str(powerlinefreqs), + "software_filters": _summarize_software_filters(software_filters), + "length_recordings": _length_recording_str(length_recordings), } return template_dict @@ -386,7 +388,7 @@ def _summarize_channels_tsv(root, scans_fpaths): root = Path(root) # keep track of channel type, status - ch_status_count = {'bad': [], 'good': []} + ch_status_count = {"bad": [], "good": []} ch_count = [] # loop through each scan @@ -394,12 +396,12 @@ def _summarize_channels_tsv(root, scans_fpaths): # load in the scans.tsv file # and read metadata for each scan scans_tsv = _from_tsv(scan_fpath) - scans = scans_tsv['filename'] + scans = scans_tsv["filename"] for scan in scans: # summarize metadata of recordings bids_path, _ = _parse_ext(scan) datatype = op.dirname(scan) - if datatype not in ['meg', 'eeg', 'ieeg']: + if datatype not in ["meg", "eeg", "ieeg"]: 
continue

             # convert to BIDSPath
@@ -408,29 +410,28 @@
             bids_path.update(datatype=datatype)
             bids_path.root = root

             # XXX: improve to allow emptyroom
-            if bids_path.subject == 'emptyroom':
+            if bids_path.subject == "emptyroom":
                 continue

-            channels_fname = _find_matching_sidecar(bids_path=bids_path,
-                                                    suffix='channels',
-                                                    extension='.tsv')
+            channels_fname = _find_matching_sidecar(
+                bids_path=bids_path, suffix="channels", extension=".tsv"
+            )

             # summarize channels.tsv
             channels_tsv = _from_tsv(channels_fname)
             for status in ch_status_count.keys():
-                ch_status = [ch for ch in channels_tsv['status']
-                             if ch == status]
+                ch_status = [ch for ch in channels_tsv["status"] if ch == status]
                 ch_status_count[status].append(len(ch_status))
-            ch_count.append(len(channels_tsv['name']))
+            ch_count.append(len(channels_tsv["name"]))

     # create summary template strings for status
     template_dict = {
-        'mean_chs': np.mean(ch_count),
-        'std_chs': np.std(ch_count),
-        'mean_good_chs': np.mean(ch_status_count['good']),
-        'std_good_chs': np.std(ch_status_count['good']),
-        'mean_bad_chs': np.mean(ch_status_count['bad']),
-        'std_bad_chs': np.std(ch_status_count['bad']),
+        "mean_chs": np.mean(ch_count),
+        "std_chs": np.std(ch_count),
+        "mean_good_chs": np.mean(ch_status_count["good"]),
+        "std_good_chs": np.std(ch_status_count["good"]),
+        "mean_bad_chs": np.mean(ch_status_count["bad"]),
+        "std_bad_chs": np.std(ch_status_count["bad"]),
     }
     for key, val in template_dict.items():
         template_dict[key] = round(val, 2)
@@ -465,19 +466,22 @@ def make_report(root, session=None, verbose=None):
         describing the summary of the subjects.
     """
     # high level summary
-    subjects = get_entity_vals(root, entity_key='subject')
-    sessions = get_entity_vals(root, entity_key='session')
+    subjects = get_entity_vals(root, entity_key="subject")
+    sessions = get_entity_vals(root, entity_key="session")
     modalities = get_datatypes(root)

     # only summarize allowed modalities (MEG/EEG/iEEG) data
     # map them to a pretty looking string
     datatype_map = {
-        'meg': 'MEG',
-        'eeg': 'EEG',
-        'ieeg': 'iEEG',
+        "meg": "MEG",
+        "eeg": "EEG",
+        "ieeg": "iEEG",
     }
-    modalities = [datatype_map[datatype] for datatype in modalities
-                  if datatype in datatype_map.keys()]
+    modalities = [
+        datatype_map[datatype]
+        for datatype in modalities
+        if datatype in datatype_map.keys()
+    ]

     # REQUIRED: dataset_description.json summary
     dataset_summary = _summarize_dataset(root)
@@ -489,50 +493,46 @@ def make_report(root, session=None, verbose=None):
     scans_summary = _summarize_scans(root, session=session)

     dataset_agnostic_summary = scans_summary.copy()
-    dataset_agnostic_summary['system'] = _pretty_str(modalities)
+    dataset_agnostic_summary["system"] = _pretty_str(modalities)

     # turn off 'recommended' report summary
     # if files are not available to summarize
     if not participant_summary:
-        participants_info = ''
+        participants_info = ""
     else:
-        particpants_info_template = jinja_env.get_template(
-            'participants.jinja'
-        )
-        participants_info = particpants_info_template.render(
-            **participant_summary
-        )
-        logger.info(f'The participant template found: {participants_info}')
+        participants_info_template = jinja_env.get_template("participants.jinja")
+        participants_info = participants_info_template.render(**participant_summary)
+        logger.info(f"The participant template found: {participants_info}")

     if not scans_summary:
-        datatype_agnostic_info = ''
+        datatype_agnostic_info = ""
     else:
-        datatype_agnostic_template = jinja_env.get_template(
-            'datatype_agnostic.jinja'
-        )
+        
datatype_agnostic_template = jinja_env.get_template("datatype_agnostic.jinja") datatype_agnostic_info = datatype_agnostic_template.render( **dataset_agnostic_summary ) - dataset_summary.update({ - 'n_subjects': len(subjects), - 'participants_info': participants_info, - 'n_sessions': len(sessions), - 'sessions': _pretty_str(sessions), - }) + dataset_summary.update( + { + "n_subjects": len(subjects), + "participants_info": participants_info, + "n_sessions": len(sessions), + "sessions": _pretty_str(sessions), + } + ) # XXX: add channel summary for modalities (ieeg, meg, eeg) # create the content and mne Template # lower-case templates are "Recommended", # while upper-case templates are "Required". - dataset_summary_template = jinja_env.get_template('dataset_summary.jinja') + dataset_summary_template = jinja_env.get_template("dataset_summary.jinja") dataset_summary_info = dataset_summary_template.render(**dataset_summary) # Concatenate info and clean the paragraph - paragraph = f'{dataset_summary_info}\n{datatype_agnostic_info}' - paragraph = paragraph.replace('\n', ' ') - while ' ' in paragraph: - paragraph = paragraph.replace(' ', ' ') + paragraph = f"{dataset_summary_info}\n{datatype_agnostic_info}" + paragraph = paragraph.replace("\n", " ") + while " " in paragraph: + paragraph = paragraph.replace(" ", " ") - return '\n'.join(textwrap.wrap(paragraph, width=80)) + return "\n".join(textwrap.wrap(paragraph, width=80)) diff --git a/mne_bids/sidecar_updates.py b/mne_bids/sidecar_updates.py index 45019a59e..b66c2796a 100644 --- a/mne_bids/sidecar_updates.py +++ b/mne_bids/sidecar_updates.py @@ -12,9 +12,7 @@ import numpy as np from mne.channels import DigMontage, make_dig_montage -from mne.utils import ( - logger, _validate_type, verbose, _check_on_missing, _on_missing -) +from mne.utils import logger, _validate_type, verbose, _check_on_missing, _on_missing from mne.io import read_fiducials from mne.io.constants import FIFF @@ -93,32 +91,31 @@ def update_sidecar_json(bids_path, entries, verbose=None): """ # get all matching json files bids_path = bids_path.copy() - if bids_path.extension != '.json': - raise RuntimeError('Only works for ".json" files. The ' - 'BIDSPath object passed in has ' - f'{bids_path.extension} extension.') + if bids_path.extension != ".json": + raise RuntimeError( + 'Only works for ".json" files. The ' + "BIDSPath object passed in has " + f"{bids_path.extension} extension." + ) # get the file path fpath = bids_path.fpath if not fpath.exists(): - raise RuntimeError(f'Sidecar file does not ' - f'exist for {fpath}.') + raise RuntimeError(f"Sidecar file does not " f"exist for {fpath}.") # sidecar update either from file, or as dictionary if isinstance(entries, dict): sidecar_tmp = entries else: - with open(entries, 'r') as tmp_f: - sidecar_tmp = json.load( - tmp_f, object_pairs_hook=OrderedDict) + with open(entries, "r") as tmp_f: + sidecar_tmp = json.load(tmp_f, object_pairs_hook=OrderedDict) logger.debug(sidecar_tmp) - logger.debug(f'Updating {fpath}...') + logger.debug(f"Updating {fpath}...") # load in sidecar filepath - with open(fpath, 'r') as tmp_f: - sidecar_json = json.load( - tmp_f, object_pairs_hook=OrderedDict) + with open(fpath, "r") as tmp_f: + sidecar_json = json.load(tmp_f, object_pairs_hook=OrderedDict) # update sidecar JSON file with the fields passed in sidecar_json.update(**sidecar_tmp) @@ -139,7 +136,7 @@ def _update_sidecar(sidecar_fname, key, val): val : str The corresponding value to change to in the sidecar JSON file. 
""" - with open(sidecar_fname, 'r', encoding='utf-8-sig') as fin: + with open(sidecar_fname, "r", encoding="utf-8-sig") as fin: sidecar_json = json.load(fin) sidecar_json[key] = val _write_json(sidecar_fname, sidecar_json, overwrite=True) @@ -147,8 +144,14 @@ def _update_sidecar(sidecar_fname, key, val): @verbose def update_anat_landmarks( - bids_path, landmarks, *, fs_subject=None, fs_subjects_dir=None, - kind=None, on_missing='raise', verbose=None + bids_path, + landmarks, + *, + fs_subject=None, + fs_subjects_dir=None, + kind=None, + on_missing="raise", + verbose=None, ): """Update the anatomical landmark coordinates of an MRI scan. @@ -204,9 +207,9 @@ def update_anat_landmarks( ----- .. versionadded:: 0.8 """ - _validate_type(item=bids_path, types=BIDSPath, item_name='bids_path') + _validate_type(item=bids_path, types=BIDSPath, item_name="bids_path") _validate_type( - item=landmarks, types=(DigMontage, 'path-like'), item_name='landmarks' + item=landmarks, types=(DigMontage, "path-like"), item_name="landmarks" ) _check_on_missing(on_missing) @@ -216,22 +219,25 @@ def update_anat_landmarks( # XXX full specification of all parts of the BIDSPath, thoughts? bids_path_mri = bids_path.copy() if bids_path_mri.datatype is None: - bids_path_mri.datatype = 'anat' + bids_path_mri.datatype = "anat" - if bids_path_mri.datatype != 'anat': + if bids_path_mri.datatype != "anat": raise ValueError( f'Can only operate on "anat" MRI data, but the provided bids_path ' - f'points to: {bids_path_mri.datatype}') + f"points to: {bids_path_mri.datatype}" + ) if bids_path_mri.suffix is None: - raise ValueError('Please specify the "suffix" entity of the provided ' - 'bids_path.') - elif bids_path_mri.suffix not in ('T1w', 'FLASH'): + raise ValueError( + 'Please specify the "suffix" entity of the provided ' "bids_path." + ) + elif bids_path_mri.suffix not in ("T1w", "FLASH"): raise ValueError( f'Can only operate on "T1w" and "FLASH" images, but the bids_path ' - f'suffix indicates: {bids_path_mri.suffix}') + f"suffix indicates: {bids_path_mri.suffix}" + ) - valid_extensions = ('.nii', '.nii.gz') + valid_extensions = (".nii", ".nii.gz") tried_paths = [] file_exists = False if bids_path_mri.extension is None: @@ -251,35 +257,37 @@ def update_anat_landmarks( if not file_exists: raise ValueError( - f'Could not find an MRI scan. Please check the provided ' - f'bids_path. Tried the following filenames: ' - f'{", ".join([p.name for p in tried_paths])}') + f"Could not find an MRI scan. Please check the provided " + f"bids_path. 
Tried the following filenames: " + f'{", ".join([p.name for p in tried_paths])}' + ) if not isinstance(landmarks, DigMontage): # it's pathlike if fs_subject is None: raise ValueError( 'You must provide the "fs_subject" parameter when passing the ' - 'path to fiducials' + "path to fiducials" ) landmarks = _get_landmarks_from_fiducials_file( bids_path=bids_path, fname=landmarks, fs_subject=fs_subject, - fs_subjects_dir=fs_subjects_dir + fs_subjects_dir=fs_subjects_dir, ) positions = landmarks.get_positions() - coord_frame = positions['coord_frame'] - if coord_frame != 'mri_voxel': + coord_frame = positions["coord_frame"] + if coord_frame != "mri_voxel": raise ValueError( - f'The landmarks must be specified in MRI voxel coordinates, but ' - f'provided DigMontage is in "{coord_frame}"') + f"The landmarks must be specified in MRI voxel coordinates, but " + f'provided DigMontage is in "{coord_frame}"' + ) # Extract the cardinal points name_to_coords_map = { - 'LPA': positions['lpa'], - 'NAS': positions['nasion'], - 'RPA': positions['rpa'] + "LPA": positions["lpa"], + "NAS": positions["nasion"], + "RPA": positions["rpa"], } # Check if coordinates for any cardinal point are missing, and convert to @@ -292,85 +300,90 @@ def update_anat_landmarks( # Funnily, np.float64 is JSON-serializabe, while np.float32 is not! # Thus, cast to float64 to avoid issues (which e.g. may arise when # fiducials were read from disk!) - name_to_coords_map[name] = list(coords.astype('float64')) + name_to_coords_map[name] = list(coords.astype("float64")) if missing_points: raise ValueError( - f'The provided DigMontage did not contain all required cardinal ' - f'points (nasion and left and right pre-auricular points). The ' - f'following points are missing: ' - f'{", ".join(missing_points)}') + f"The provided DigMontage did not contain all required cardinal " + f"points (nasion and left and right pre-auricular points). 
The " + f"following points are missing: " + f'{", ".join(missing_points)}' + ) - bids_path_json = bids_path.copy().update(extension='.json') + bids_path_json = bids_path.copy().update(extension=".json") if not bids_path_json.fpath.exists(): # Must exist before we can update it _write_json(bids_path_json.fpath, dict()) - mri_json = json.loads(bids_path_json.fpath.read_text(encoding='utf-8')) - if 'AnatomicalLandmarkCoordinates' not in mri_json: + mri_json = json.loads(bids_path_json.fpath.read_text(encoding="utf-8")) + if "AnatomicalLandmarkCoordinates" not in mri_json: _on_missing( on_missing=on_missing, - msg=f'No AnatomicalLandmarkCoordinates section found in ' - f'{bids_path_json.fpath.name}', - error_klass=KeyError + msg=f"No AnatomicalLandmarkCoordinates section found in " + f"{bids_path_json.fpath.name}", + error_klass=KeyError, ) - mri_json['AnatomicalLandmarkCoordinates'] = dict() + mri_json["AnatomicalLandmarkCoordinates"] = dict() for name, coords in name_to_coords_map.items(): if kind is not None: - name = f'{name}_{kind}' + name = f"{name}_{kind}" - if name not in mri_json['AnatomicalLandmarkCoordinates']: + if name not in mri_json["AnatomicalLandmarkCoordinates"]: _on_missing( on_missing=on_missing, - msg=f'Anatomical landmark not found in ' - f'{bids_path_json.fpath.name}: {name}', - error_klass=KeyError + msg=f"Anatomical landmark not found in " + f"{bids_path_json.fpath.name}: {name}", + error_klass=KeyError, ) - mri_json['AnatomicalLandmarkCoordinates'][name] = coords + mri_json["AnatomicalLandmarkCoordinates"][name] = coords update_sidecar_json(bids_path=bids_path_json, entries=mri_json) -def _get_landmarks_from_fiducials_file(*, bids_path, fname, fs_subject, - fs_subjects_dir): +def _get_landmarks_from_fiducials_file( + *, bids_path, fname, fs_subject, fs_subjects_dir +): """Get anatomical landmarks from fiducials file, in MRI voxel space.""" # avoid dicrular imports from mne_bids.write import ( - _get_t1w_mgh, _mri_landmarks_to_mri_voxels, _get_fid_coords + _get_t1w_mgh, + _mri_landmarks_to_mri_voxels, + _get_fid_coords, ) digpoints, coord_frame = read_fiducials(fname) # All of this should be guaranteed, but better be safe than sorry! 
assert coord_frame == FIFF.FIFFV_COORD_MRI
-    assert digpoints[0]['ident'] == FIFF.FIFFV_POINT_LPA
-    assert digpoints[1]['ident'] == FIFF.FIFFV_POINT_NASION
-    assert digpoints[2]['ident'] == FIFF.FIFFV_POINT_RPA
+    assert digpoints[0]["ident"] == FIFF.FIFFV_POINT_LPA
+    assert digpoints[1]["ident"] == FIFF.FIFFV_POINT_NASION
+    assert digpoints[2]["ident"] == FIFF.FIFFV_POINT_RPA
 
     montage_loaded = make_dig_montage(
-        lpa=digpoints[0]['r'],
-        nasion=digpoints[1]['r'],
-        rpa=digpoints[2]['r'],
-        coord_frame='mri'
+        lpa=digpoints[0]["r"],
+        nasion=digpoints[1]["r"],
+        rpa=digpoints[2]["r"],
+        coord_frame="mri",
     )
     landmark_coords_mri, _ = _get_fid_coords(dig_points=montage_loaded.dig)
     landmark_coords_mri = np.asarray(
-        (landmark_coords_mri['lpa'],
-         landmark_coords_mri['nasion'],
-         landmark_coords_mri['rpa'])
+        (
+            landmark_coords_mri["lpa"],
+            landmark_coords_mri["nasion"],
+            landmark_coords_mri["rpa"],
+        )
     )
 
     t1w_mgh = _get_t1w_mgh(fs_subject, fs_subjects_dir)
     landmark_coords_voxels = _mri_landmarks_to_mri_voxels(
-        mri_landmarks=landmark_coords_mri * 1000,  # in mm
-        t1_mgh=t1w_mgh
+        mri_landmarks=landmark_coords_mri * 1000, t1_mgh=t1w_mgh  # in mm
     )
 
     montage_voxels = make_dig_montage(
         lpa=landmark_coords_voxels[0],
         nasion=landmark_coords_voxels[1],
         rpa=landmark_coords_voxels[2],
-        coord_frame='mri_voxel'
+        coord_frame="mri_voxel",
     )
 
     return montage_voxels
diff --git a/mne_bids/stats.py b/mne_bids/stats.py
index bd4745595..01906c578 100644
--- a/mne_bids/stats.py
+++ b/mne_bids/stats.py
@@ -9,7 +9,7 @@
 from mne_bids.config import EPHY_ALLOWED_DATATYPES
 
 
-def count_events(root_or_path, datatype='auto'):
+def count_events(root_or_path, datatype="auto"):
     """Count events present in dataset.
 
     Parameters
@@ -40,26 +40,30 @@ def count_events(root_or_path, datatype='auto'):
     else:
         bids_path = root_or_path.copy()
 
-    bids_path.update(suffix='events', extension='.tsv')
+    bids_path.update(suffix="events", extension=".tsv")
 
     datatypes = get_datatypes(bids_path.root)
     this_datatypes = list(set(datatypes).intersection(EPHY_ALLOWED_DATATYPES))
 
-    if (datatype == 'auto') and (bids_path.datatype is not None):
+    if (datatype == "auto") and (bids_path.datatype is not None):
         datatype = bids_path.datatype
 
-    if datatype == 'auto':
+    if datatype == "auto":
         if len(this_datatypes) > 1:
-            raise ValueError(f'Multiple datatypes present ({this_datatypes}).'
-                             f' You need to specity datatype got: {datatype})')
+            raise ValueError(
+                f"Multiple datatypes present ({this_datatypes})."
+                f" You need to specify the datatype (got: {datatype})"
+            )
 
         elif len(this_datatypes) == 0:
-            raise ValueError('No valid datatype present.')
+            raise ValueError("No valid datatype present.")
 
         datatype = this_datatypes[0]
 
     if datatype not in EPHY_ALLOWED_DATATYPES:
-        raise ValueError(f'datatype ({datatype}) is not supported. '
-                         f'It must be one of: {EPHY_ALLOWED_DATATYPES})')
+        raise ValueError(
+            f"datatype ({datatype}) is not supported. 
" + f"It must be one of: {EPHY_ALLOWED_DATATYPES})" + ) bids_path.update(datatype=datatype) @@ -72,39 +76,39 @@ def count_events(root_or_path, datatype='auto'): all_df = [] for bp in bids_path.match(): - df = pd.read_csv(str(bp), delimiter='\t') - df['subject'] = bp.subject + df = pd.read_csv(str(bp), delimiter="\t") + df["subject"] = bp.subject if bp.session is not None: - df['session'] = bp.session + df["session"] = bp.session if bp.run is not None: - df['run'] = bp.run + df["run"] = bp.run all_df.append(df) if not all_df: continue df = pd.concat(all_df) - groups = ['subject'] + groups = ["subject"] if bp.session is not None: - groups.append('session') + groups.append("session") if bp.run is not None: - groups.append('run') + groups.append("run") - if 'stim_type' in df.columns: + if "stim_type" in df.columns: # Deal with some old files that use stim_type rather than # trial_type df = df.rename(columns={"stim_type": "trial_type"}) # There are datasets out there without a `trial_type` or `stim_type` # column. - if 'trial_type' in df.columns: - groups.append('trial_type') + if "trial_type" in df.columns: + groups.append("trial_type") counts = df.groupby(groups).size() counts = counts.unstack() - if 'BAD_ACQ_SKIP' in counts.columns: - counts = counts.drop('BAD_ACQ_SKIP', axis=1) + if "BAD_ACQ_SKIP" in counts.columns: + counts = counts.drop("BAD_ACQ_SKIP", axis=1) counts.columns = pd.MultiIndex.from_arrays( [[task] * counts.shape[1], counts.columns] @@ -113,7 +117,7 @@ def count_events(root_or_path, datatype='auto'): all_counts.append(counts) if not all_counts: - raise ValueError('No events files found.') + raise ValueError("No events files found.") counts = pd.concat(all_counts, axis=1) diff --git a/mne_bids/tests/conftest.py b/mne_bids/tests/conftest.py index e8dd09466..0a05c1699 100644 --- a/mne_bids/tests/conftest.py +++ b/mne_bids/tests/conftest.py @@ -17,10 +17,10 @@ @pytest.fixture(scope="session") def _bids_validate(): """Fixture to run BIDS validator.""" - vadlidator_args = ['--config.error=41'] - exe = os.getenv('VALIDATOR_EXECUTABLE', 'bids-validator') + vadlidator_args = ["--config.error=41"] + exe = os.getenv("VALIDATOR_EXECUTABLE", "bids-validator") - if platform.system() == 'Windows': + if platform.system() == "Windows": shell = True else: shell = False diff --git a/mne_bids/tests/data/tiny_bids/code/make_tiny_bids_dataset.py b/mne_bids/tests/data/tiny_bids/code/make_tiny_bids_dataset.py index 11627f251..084f8c3ff 100644 --- a/mne_bids/tests/data/tiny_bids/code/make_tiny_bids_dataset.py +++ b/mne_bids/tests/data/tiny_bids/code/make_tiny_bids_dataset.py @@ -11,7 +11,7 @@ from mne_bids import BIDSPath, write_raw_bids data_path = mne.datasets.testing.data_path(download=False) -assert mne.datasets.has_dataset('testing'), 'Download testing data' +assert mne.datasets.has_dataset("testing"), "Download testing data" vhdr_path = data_path / "montage" / "bv_dig_test.vhdr" captrak_path = data_path / "montage" / "captrak_coords.bvct" @@ -20,8 +20,13 @@ tiny_bids_root.mkdir(exist_ok=True) bids_path = BIDSPath( - subject="01", task="rest", session="eeg", suffix="eeg", extension=".vhdr", - datatype="eeg", root=tiny_bids_root + subject="01", + task="rest", + session="eeg", + suffix="eeg", + extension=".vhdr", + datatype="eeg", + root=tiny_bids_root, ) # %% @@ -44,14 +49,16 @@ # %% # Add GSR and temperature channels -if 'GSR' not in raw.ch_names and 'Temperature' not in raw.ch_names: +if "GSR" not in raw.ch_names and "Temperature" not in raw.ch_names: gsr_data = np.array([2.1e-6] * 
len(raw.times)) temperature_data = np.array([36.5] * len(raw.times)) - gsr_and_temp_data = np.concatenate([ - np.atleast_2d(gsr_data), - np.atleast_2d(temperature_data), - ]) + gsr_and_temp_data = np.concatenate( + [ + np.atleast_2d(gsr_data), + np.atleast_2d(temperature_data), + ] + ) gsr_and_temp_info = mne.create_info( ch_names=["GSR", "Temperature"], sfreq=raw.info["sfreq"], @@ -72,33 +79,35 @@ # %% raw.set_annotations(None) -events = np.array([ - [0, 0, 1], - [1000, 0, 2] -]) -event_id = { - "start_experiment": 1, - "show_stimulus": 2 -} +events = np.array([[0, 0, 1], [1000, 0, 2]]) +event_id = {"start_experiment": 1, "show_stimulus": 2} # %% write_raw_bids( - raw, bids_path, events=events, event_id=event_id, overwrite=True, - allow_preload=True, format="BrainVision", + raw, + bids_path, + events=events, + event_id=event_id, + overwrite=True, + allow_preload=True, + format="BrainVision", ) mne_bids.mark_channels( bids_path=bids_path, - ch_names=['C3', 'C4', 'PO10', 'GSR', 'Temperature'], - status=['good', 'good', 'bad', 'good', 'good'], - descriptions=['resected', 'resected', 'continuously flat', - 'left index finger', 'left ear'] + ch_names=["C3", "C4", "PO10", "GSR", "Temperature"], + status=["good", "good", "bad", "good", "good"], + descriptions=[ + "resected", + "resected", + "continuously flat", + "left index finger", + "left ear", + ], ) # %% dataset_description_json_path = tiny_bids_root / "dataset_description.json" -ds_json = json.loads( - dataset_description_json_path.read_text(encoding="utf-8") -) +ds_json = json.loads(dataset_description_json_path.read_text(encoding="utf-8")) ds_json["Name"] = "tiny_bids" ds_json["Authors"] = ["MNE-BIDS Developers", "And Friends"] diff --git a/mne_bids/tests/test_copyfiles.py b/mne_bids/tests/test_copyfiles.py index 5a8192d7c..5b37c08c0 100644 --- a/mne_bids/tests/test_copyfiles.py +++ b/mne_bids/tests/test_copyfiles.py @@ -14,94 +14,97 @@ from mne.datasets import testing from mne_bids import BIDSPath from mne_bids.path import _parse_ext -from mne_bids.copyfiles import (_get_brainvision_encoding, - _get_brainvision_paths, - copyfile_brainvision, - copyfile_edf, - copyfile_eeglab, - copyfile_kit) +from mne_bids.copyfiles import ( + _get_brainvision_encoding, + _get_brainvision_paths, + copyfile_brainvision, + copyfile_edf, + copyfile_eeglab, + copyfile_kit, +) testing_path = testing.data_path(download=False) -base_path = op.join(op.dirname(mne.__file__), 'io') +base_path = op.join(op.dirname(mne.__file__), "io") @testing.requires_testing_data def test_get_brainvision_encoding(): """Test getting the file-encoding from a BrainVision header.""" - data_path = op.join(base_path, 'brainvision', 'tests', 'data') - raw_fname = op.join(data_path, 'test.vhdr') + data_path = op.join(base_path, "brainvision", "tests", "data") + raw_fname = op.join(data_path, "test.vhdr") with pytest.raises(UnicodeDecodeError): - with open(raw_fname, 'r', encoding='ascii') as f: + with open(raw_fname, "r", encoding="ascii") as f: f.readlines() enc = _get_brainvision_encoding(raw_fname) - with open(raw_fname, 'r', encoding=enc) as f: + with open(raw_fname, "r", encoding=enc) as f: f.readlines() def test_get_brainvision_paths(tmp_path): """Test getting the file links from a BrainVision header.""" - data_path = op.join(base_path, 'brainvision', 'tests', 'data') - raw_fname = op.join(data_path, 'test.vhdr') + data_path = op.join(base_path, "brainvision", "tests", "data") + raw_fname = op.join(data_path, "test.vhdr") with pytest.raises(ValueError): - 
_get_brainvision_paths(op.join(data_path, 'test.eeg')) + _get_brainvision_paths(op.join(data_path, "test.eeg")) # Write some temporary test files - with open(tmp_path / 'test1.vhdr', 'w') as f: - f.write('DataFile=testing.eeg') + with open(tmp_path / "test1.vhdr", "w") as f: + f.write("DataFile=testing.eeg") - with open(tmp_path / 'test2.vhdr', 'w') as f: - f.write('MarkerFile=testing.vmrk') + with open(tmp_path / "test2.vhdr", "w") as f: + f.write("MarkerFile=testing.vmrk") with pytest.raises(ValueError): - _get_brainvision_paths(tmp_path / 'test1.vhdr') + _get_brainvision_paths(tmp_path / "test1.vhdr") with pytest.raises(ValueError): - _get_brainvision_paths(tmp_path / 'test2.vhdr') + _get_brainvision_paths(tmp_path / "test2.vhdr") # This should work eeg_file_path, vmrk_file_path = _get_brainvision_paths(raw_fname) head, tail = op.split(eeg_file_path) - assert tail == 'test.eeg' + assert tail == "test.eeg" head, tail = op.split(vmrk_file_path) - assert tail == 'test.vmrk' + assert tail == "test.vmrk" -@pytest.mark.filterwarnings('ignore:.*Exception ignored.*:' - 'pytest.PytestUnraisableExceptionWarning') +@pytest.mark.filterwarnings( + "ignore:.*Exception ignored.*:" "pytest.PytestUnraisableExceptionWarning" +) def test_copyfile_brainvision(tmp_path): """Test the copying of BrainVision vhdr, vmrk and eeg files.""" bids_root = str(tmp_path) - data_path = op.join(base_path, 'brainvision', 'tests', 'data') - raw_fname = op.join(data_path, 'test.vhdr') - new_name = op.join(bids_root, 'tested_conversion.vhdr') + data_path = op.join(base_path, "brainvision", "tests", "data") + raw_fname = op.join(data_path, "test.vhdr") + new_name = op.join(bids_root, "tested_conversion.vhdr") # IO error testing - with pytest.raises(ValueError, match='Need to move data with same'): - copyfile_brainvision(raw_fname, new_name + '.eeg') + with pytest.raises(ValueError, match="Need to move data with same"): + copyfile_brainvision(raw_fname, new_name + ".eeg") # Try to copy the file copyfile_brainvision(raw_fname, new_name) # Have all been copied? 
head, tail = op.split(new_name) - assert op.exists(op.join(head, 'tested_conversion.vhdr')) - assert op.exists(op.join(head, 'tested_conversion.vmrk')) - assert op.exists(op.join(head, 'tested_conversion.eeg')) + assert op.exists(op.join(head, "tested_conversion.vhdr")) + assert op.exists(op.join(head, "tested_conversion.vmrk")) + assert op.exists(op.join(head, "tested_conversion.eeg")) # Try to read with MNE - if this works, the links are correct raw = mne.io.read_raw_brainvision(new_name) - assert raw.filenames[0] == (op.join(head, 'tested_conversion.eeg')) + assert raw.filenames[0] == (op.join(head, "tested_conversion.eeg")) # Test with anonymization raw = mne.io.read_raw_brainvision(raw_fname) - prev_date = raw.info['meas_date'] - anonymize = {'daysback': 32459} + prev_date = raw.info["meas_date"] + anonymize = {"daysback": 32459} copyfile_brainvision(raw_fname, new_name, anonymize) raw = mne.io.read_raw_brainvision(new_name) - new_date = raw.info['meas_date'] + new_date = raw.info["meas_date"] assert new_date == (prev_date - datetime.timedelta(days=32459)) @@ -109,44 +112,44 @@ def test_copyfile_edf(tmp_path): """Test the anonymization of EDF/BDF files.""" bids_root = tmp_path / "bids1" bids_root.mkdir() - data_path = op.join(base_path, 'edf', 'tests', 'data') + data_path = op.join(base_path, "edf", "tests", "data") # Test regular copying - for ext in ['.edf', '.bdf']: - raw_fname = op.join(data_path, 'test' + ext) - new_name = op.join(bids_root, 'test_copy' + ext) + for ext in [".edf", ".bdf"]: + raw_fname = op.join(data_path, "test" + ext) + new_name = op.join(bids_root, "test_copy" + ext) copyfile_edf(raw_fname, new_name) # IO error testing - with pytest.raises(ValueError, match='Need to move data with same'): - raw_fname = op.join(data_path, 'test.edf') - new_name = op.join(bids_root, 'test_copy.bdf') + with pytest.raises(ValueError, match="Need to move data with same"): + raw_fname = op.join(data_path, "test.edf") + new_name = op.join(bids_root, "test_copy.bdf") copyfile_edf(raw_fname, new_name) # Add some subject info to an EDF to test anonymization - testfile = op.join(bids_root, 'test_copy.edf') - raw_date = mne.io.read_raw_edf(testfile).info['meas_date'] + testfile = op.join(bids_root, "test_copy.edf") + raw_date = mne.io.read_raw_edf(testfile).info["meas_date"] date = datetime.datetime.strftime(raw_date, "%d-%b-%Y").upper() - test_id_info = '023 F 02-AUG-1951 Jane' - test_rec_info = 'Startdate {0} ID-123 John BioSemi_ActiveTwo'.format(date) - with open(testfile, 'r+b') as f: + test_id_info = "023 F 02-AUG-1951 Jane" + test_rec_info = "Startdate {0} ID-123 John BioSemi_ActiveTwo".format(date) + with open(testfile, "r+b") as f: f.seek(8) - f.write(bytes(test_id_info.ljust(80), 'ascii')) - f.write(bytes(test_rec_info.ljust(80), 'ascii')) + f.write(bytes(test_id_info.ljust(80), "ascii")) + f.write(bytes(test_rec_info.ljust(80), "ascii")) # Test date anonymization def _edf_get_real_date(fpath): - with open(fpath, 'rb') as f: + with open(fpath, "rb") as f: f.seek(88) - rec_info = f.read(80).decode('ascii').rstrip() - startdate = rec_info.split(' ')[1] + rec_info = f.read(80).decode("ascii").rstrip() + startdate = rec_info.split(" ")[1] return datetime.datetime.strptime(startdate, "%d-%b-%Y") - bids_root2 = tmp_path / 'bids2' + bids_root2 = tmp_path / "bids2" bids_root2.mkdir() - infile = op.join(bids_root, 'test_copy.edf') - outfile = op.join(bids_root2, 'test_copy_anon.edf') - anonymize = {'daysback': 33459, 'keep_his': False} + infile = op.join(bids_root, "test_copy.edf") + 
outfile = op.join(bids_root2, "test_copy_anon.edf") + anonymize = {"daysback": 33459, "keep_his": False} copyfile_edf(infile, outfile, anonymize) new_date = _edf_get_real_date(outfile) @@ -156,23 +159,23 @@ def _edf_get_real_date(fpath): # Test full ID info anonymization anon_startdate = datetime.datetime.strftime(new_date, "%d-%b-%Y").upper() - with open(outfile, 'rb') as f: + with open(outfile, "rb") as f: f.seek(8) - id_info = f.read(80).decode('ascii').rstrip() - rec_info = f.read(80).decode('ascii').rstrip() + id_info = f.read(80).decode("ascii").rstrip() + rec_info = f.read(80).decode("ascii").rstrip() rec_info_tmp = "Startdate {0} X mne-bids_anonymize X" assert id_info == "0 X X X" assert rec_info == rec_info_tmp.format(anon_startdate) # Test partial ID info anonymization - outfile2 = op.join(bids_root2, 'test_copy_anon_partial.edf') - anonymize = {'daysback': 33459, 'keep_his': True} + outfile2 = op.join(bids_root2, "test_copy_anon_partial.edf") + anonymize = {"daysback": 33459, "keep_his": True} copyfile_edf(infile, outfile2, anonymize) - with open(outfile2, 'rb') as f: + with open(outfile2, "rb") as f: f.seek(8) - id_info = f.read(80).decode('ascii').rstrip() - rec_info = f.read(80).decode('ascii').rstrip() - rec = 'Startdate {0} ID-123 John BioSemi_ActiveTwo'.format(anon_startdate) + id_info = f.read(80).decode("ascii").rstrip() + rec_info = f.read(80).decode("ascii").rstrip() + rec = "Startdate {0} ID-123 John BioSemi_ActiveTwo".format(anon_startdate) assert id_info == "023 F X X" assert rec_info == rec @@ -181,103 +184,118 @@ def test_copyfile_edfbdf_uppercase(tmp_path): """Test the copying of EDF/BDF files with upper-case extension.""" bids_root = tmp_path / "bids1" bids_root.mkdir() - data_path = op.join(base_path, 'edf', 'tests', 'data') + data_path = op.join(base_path, "edf", "tests", "data") # Test regular copying - for ext in ['.edf', '.bdf']: - raw_fname = op.join(data_path, 'test' + ext) - new_name = op.join(bids_root, 'test_copy' + ext.upper()) + for ext in [".edf", ".bdf"]: + raw_fname = op.join(data_path, "test" + ext) + new_name = op.join(bids_root, "test_copy" + ext.upper()) - with pytest.warns(RuntimeWarning, match='Upper-case extension'): + with pytest.warns(RuntimeWarning, match="Upper-case extension"): copyfile_edf(raw_fname, new_name) assert Path(new_name).with_suffix(ext).exists() -@pytest.mark.parametrize('fname', - ('test_raw.set', 'test_raw_chanloc.set', - 'test_raw_2021.set')) +@pytest.mark.parametrize( + "fname", ("test_raw.set", "test_raw_chanloc.set", "test_raw_2021.set") +) @testing.requires_testing_data def test_copyfile_eeglab(tmp_path, fname): """Test the copying of EEGlab set and fdt files.""" bids_root = str(tmp_path) - data_path = op.join(testing_path, 'EEGLAB') + data_path = op.join(testing_path, "EEGLAB") raw_fname = op.join(data_path, fname) - new_name = op.join(bids_root, f'CONVERTED_{fname}.set') + new_name = op.join(bids_root, f"CONVERTED_{fname}.set") # IO error testing with pytest.raises(ValueError, match="Need to move data with same ext"): - copyfile_eeglab(raw_fname, new_name + '.wrong') + copyfile_eeglab(raw_fname, new_name + ".wrong") # Test copying and reading copyfile_eeglab(raw_fname, new_name) - if fname == 'test_raw_chanloc.set': # combined set+fdt - with pytest.warns(RuntimeWarning, - match="The data contains 'boundary' events"): + if fname == "test_raw_chanloc.set": # combined set+fdt + with pytest.warns(RuntimeWarning, match="The data contains 'boundary' events"): raw = mne.io.read_raw_eeglab(new_name) - assert 'Fp1' in 
raw.ch_names + assert "Fp1" in raw.ch_names else: # combined set+fdt and single set (new EEGLAB format) raw = mne.io.read_raw_eeglab(new_name, preload=True) - assert 'EEG 001' in raw.ch_names + assert "EEG 001" in raw.ch_names assert isinstance(raw, mne.io.BaseRaw) def test_copyfile_kit(tmp_path): """Test copying and renaming KIT files to a new location.""" output_path = str(tmp_path) - data_path = op.join(base_path, 'kit', 'tests', 'data') - raw_fname = op.join(data_path, 'test.sqd') - hpi_fname = op.join(data_path, 'test_mrk.sqd') - electrode_fname = op.join(data_path, 'test.elp') - headshape_fname = op.join(data_path, 'test.hsp') - subject_id = '01' - session_id = '01' - run = '01' - acq = '01' - task = 'testing' + data_path = op.join(base_path, "kit", "tests", "data") + raw_fname = op.join(data_path, "test.sqd") + hpi_fname = op.join(data_path, "test_mrk.sqd") + electrode_fname = op.join(data_path, "test.elp") + headshape_fname = op.join(data_path, "test.hsp") + subject_id = "01" + session_id = "01" + run = "01" + acq = "01" + task = "testing" raw = mne.io.read_raw_kit( - raw_fname, mrk=hpi_fname, elp=electrode_fname, - hsp=headshape_fname) + raw_fname, mrk=hpi_fname, elp=electrode_fname, hsp=headshape_fname + ) _, ext = _parse_ext(raw_fname) - datatype = 'meg' # copyfile_kit makes the same assumption + datatype = "meg" # copyfile_kit makes the same assumption bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task) - kit_bids_path = bids_path.copy().update(acquisition=None, - datatype=datatype, - root=output_path) - bids_fname = str(bids_path.copy().update(datatype=datatype, - suffix=datatype, - extension=ext, - root=output_path)) - - copyfile_kit(raw_fname, bids_fname, subject_id, session_id, - task, run, raw._init_kwargs) + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task + ) + kit_bids_path = bids_path.copy().update( + acquisition=None, datatype=datatype, root=output_path + ) + bids_fname = str( + bids_path.copy().update( + datatype=datatype, suffix=datatype, extension=ext, root=output_path + ) + ) + + copyfile_kit( + raw_fname, bids_fname, subject_id, session_id, task, run, raw._init_kwargs + ) assert op.exists(bids_fname) _, ext = _parse_ext(hpi_fname) - if ext == '.sqd': - kit_bids_path.update(suffix='markers', extension='.sqd') + if ext == ".sqd": + kit_bids_path.update(suffix="markers", extension=".sqd") assert op.exists(kit_bids_path) - elif ext == '.mrk': - kit_bids_path.update(suffix='markers', extension='.mrk') + elif ext == ".mrk": + kit_bids_path.update(suffix="markers", extension=".mrk") assert op.exists(kit_bids_path) if op.exists(electrode_fname): - task, run, key = None, None, 'ELP' - elp_ext = '.pos' + task, run, key = None, None, "ELP" + elp_ext = ".pos" elp_fname = BIDSPath( - subject=subject_id, session=session_id, task=task, run=run, - acquisition=key, suffix='headshape', extension=elp_ext, - datatype='meg', root=output_path) + subject=subject_id, + session=session_id, + task=task, + run=run, + acquisition=key, + suffix="headshape", + extension=elp_ext, + datatype="meg", + root=output_path, + ) assert op.exists(elp_fname) if op.exists(headshape_fname): - task, run, key = None, None, 'HSP' - hsp_ext = '.pos' + task, run, key = None, None, "HSP" + hsp_ext = ".pos" hsp_fname = BIDSPath( - subject=subject_id, session=session_id, task=task, run=run, - acquisition=key, suffix='headshape', extension=hsp_ext, - datatype='meg', root=output_path) + subject=subject_id, + session=session_id, + 
task=task, + run=run, + acquisition=key, + suffix="headshape", + extension=hsp_ext, + datatype="meg", + root=output_path, + ) assert op.exists(hsp_fname) diff --git a/mne_bids/tests/test_dig.py b/mne_bids/tests/test_dig.py index 6eaf8549c..93dd106fa 100644 --- a/mne_bids/tests/test_dig.py +++ b/mne_bids/tests/test_dig.py @@ -17,117 +17,131 @@ import mne_bids from mne.datasets import testing from mne_bids import BIDSPath, write_raw_bids, read_raw_bids -from mne_bids.dig import (_write_dig_bids, _read_dig_bids, template_to_head, - convert_montage_to_mri, convert_montage_to_ras) -from mne_bids.config import (BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS, - BIDS_TO_MNE_FRAMES, MNE_STR_TO_FRAME) - -base_path = op.join(op.dirname(mne.__file__), 'io') -subject_id = '01' -session_id = '01' -run = '01' -acq = '01' -run2 = '02' -task = 'testing' +from mne_bids.dig import ( + _write_dig_bids, + _read_dig_bids, + template_to_head, + convert_montage_to_mri, + convert_montage_to_ras, +) +from mne_bids.config import ( + BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS, + BIDS_TO_MNE_FRAMES, + MNE_STR_TO_FRAME, +) + +base_path = op.join(op.dirname(mne.__file__), "io") +subject_id = "01" +session_id = "01" +run = "01" +acq = "01" +run2 = "02" +task = "testing" _bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task) + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task +) data_path = testing.data_path(download=False) def _load_raw(): """Load the sample raw data.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = mne.io.read_raw(raw_fname) - raw.drop_channels(raw.info['bads']) - raw.info['line_freq'] = 60 + raw.drop_channels(raw.info["bads"]) + raw.info["line_freq"] = 60 return raw @testing.requires_testing_data def test_dig_io(tmp_path): """Test passing different coordinate frames give proper warnings.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" raw = _load_raw() - for datatype in ('eeg', 'ieeg'): - os.makedirs(op.join(bids_root, 'sub-01', 'ses-01', datatype)) + for datatype in ("eeg", "ieeg"): + os.makedirs(op.join(bids_root, "sub-01", "ses-01", datatype)) # test no coordinate frame in dig or in bids_path.space montage = raw.get_montage() - montage.apply_trans(mne.transforms.Transform('head', 'unknown')) - for datatype in ('eeg', 'ieeg'): - bids_path = _bids_path.copy().update(root=bids_root, datatype=datatype, - space=None) - with pytest.warns(RuntimeWarning, - match='Coordinate frame could not be inferred'): + montage.apply_trans(mne.transforms.Transform("head", "unknown")) + for datatype in ("eeg", "ieeg"): + bids_path = _bids_path.copy().update( + root=bids_root, datatype=datatype, space=None + ) + with pytest.warns( + RuntimeWarning, match="Coordinate frame could not be inferred" + ): _write_dig_bids(bids_path, raw, montage, acpc_aligned=True) # test coordinate frame-BIDSPath.space mismatch raw = _load_raw() montage = raw.get_montage() - print(montage.get_positions()['coord_frame']) + print(montage.get_positions()["coord_frame"]) bids_path = _bids_path.copy().update( - root=bids_root, datatype='eeg', space='fsaverage') - with pytest.raises(ValueError, match='Coordinates in the raw object ' - 'or montage are in the CapTrak ' - 'coordinate frame but ' - 'BIDSPath.space is fsaverage'): + root=bids_root, datatype="eeg", space="fsaverage" + ) + with pytest.raises( + ValueError, + match="Coordinates in 
the raw object " + "or montage are in the CapTrak " + "coordinate frame but " + "BIDSPath.space is fsaverage", + ): _write_dig_bids(bids_path, raw, montage) # test MEG space conflict fif (ElektaNeuromag) != CTF - bids_path = _bids_path.copy().update( - root=bids_root, datatype='meg', space='CTF') - with pytest.raises(ValueError, match='conflicts'): + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg", space="CTF") + with pytest.raises(ValueError, match="conflicts"): write_raw_bids(raw, bids_path) @testing.requires_testing_data def test_dig_pixels(tmp_path): """Test dig stored correctly for the Pixels coordinate frame.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" # test coordinates in pixels bids_path = _bids_path.copy().update( - root=bids_root, datatype='ieeg', space='Pixels') - os.makedirs(op.join(bids_root, 'sub-01', 'ses-01', bids_path.datatype), - exist_ok=True) + root=bids_root, datatype="ieeg", space="Pixels" + ) + os.makedirs( + op.join(bids_root, "sub-01", "ses-01", bids_path.datatype), exist_ok=True + ) raw = _load_raw() raw.pick_types(eeg=True) raw.del_proj() - raw.set_channel_types({ch: 'ecog' for ch in raw.ch_names}) + raw.set_channel_types({ch: "ecog" for ch in raw.ch_names}) montage = raw.get_montage() # fake transform to pixel coordinates - montage.apply_trans(mne.transforms.Transform('head', 'unknown')) + montage.apply_trans(mne.transforms.Transform("head", "unknown")) _write_dig_bids(bids_path, raw, montage) electrodes_path = bids_path.copy().update( - task=None, run=None, suffix='electrodes', extension='.tsv') + task=None, run=None, suffix="electrodes", extension=".tsv" + ) coordsystem_path = bids_path.copy().update( - task=None, run=None, suffix='coordsystem', extension='.json') - with pytest.warns(RuntimeWarning, - match='not an MNE-Python coordinate frame'): - _read_dig_bids(electrodes_path, coordsystem_path, - bids_path.datatype, raw) + task=None, run=None, suffix="coordsystem", extension=".json" + ) + with pytest.warns(RuntimeWarning, match="not an MNE-Python coordinate frame"): + _read_dig_bids(electrodes_path, coordsystem_path, bids_path.datatype, raw) montage2 = raw.get_montage() - assert montage2.get_positions()['coord_frame'] == 'unknown' + assert montage2.get_positions()["coord_frame"] == "unknown" assert_almost_equal( - np.array(list(montage.get_positions()['ch_pos'].values())), - np.array(list(montage2.get_positions()['ch_pos'].values())) + np.array(list(montage.get_positions()["ch_pos"].values())), + np.array(list(montage2.get_positions()["ch_pos"].values())), ) -@pytest.mark.filterwarnings('ignore:The unit for chann*.:RuntimeWarning:mne') +@pytest.mark.filterwarnings("ignore:The unit for chann*.:RuntimeWarning:mne") @testing.requires_testing_data def test_dig_template(tmp_path): """Test that eeg and ieeg dig are stored properly.""" - bids_root = tmp_path / 'bids1' - for datatype in ('eeg', 'ieeg'): - (bids_root / 'sub-01' / 'ses-01' / datatype).mkdir(parents=True) + bids_root = tmp_path / "bids1" + for datatype in ("eeg", "ieeg"): + (bids_root / "sub-01" / "ses-01" / datatype).mkdir(parents=True) - for datatype in ('eeg', 'ieeg'): + for datatype in ("eeg", "ieeg"): bids_path = _bids_path.copy().update(root=bids_root, datatype=datatype) for coord_frame in BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS: raw = _load_raw() @@ -137,73 +151,78 @@ def test_dig_template(tmp_path): pos = montage.get_positions() mne_coord_frame = BIDS_TO_MNE_FRAMES.get(coord_frame, None) if mne_coord_frame is None: - montage.apply_trans( - 
mne.transforms.Transform('head', 'unknown')) + montage.apply_trans(mne.transforms.Transform("head", "unknown")) else: - montage.apply_trans(mne.transforms.Transform( - 'head', mne_coord_frame)) + montage.apply_trans(mne.transforms.Transform("head", mne_coord_frame)) _write_dig_bids(bids_path, raw, montage, acpc_aligned=True) electrodes_path = bids_path.copy().update( - task=None, run=None, suffix='electrodes', extension='.tsv') + task=None, run=None, suffix="electrodes", extension=".tsv" + ) coordsystem_path = bids_path.copy().update( - task=None, run=None, suffix='coordsystem', extension='.json') + task=None, run=None, suffix="coordsystem", extension=".json" + ) if mne_coord_frame is None: - with pytest.warns(RuntimeWarning, - match='not an MNE-Python coordinate frame'): - _read_dig_bids(electrodes_path, coordsystem_path, - datatype, raw) + with pytest.warns( + RuntimeWarning, match="not an MNE-Python coordinate frame" + ): + _read_dig_bids(electrodes_path, coordsystem_path, datatype, raw) else: - if coord_frame == 'MNI305': # saved to fsaverage, same - electrodes_path.update(space='fsaverage') - coordsystem_path.update(space='fsaverage') - _read_dig_bids(electrodes_path, coordsystem_path, - datatype, raw) + if coord_frame == "MNI305": # saved to fsaverage, same + electrodes_path.update(space="fsaverage") + coordsystem_path.update(space="fsaverage") + _read_dig_bids(electrodes_path, coordsystem_path, datatype, raw) montage2 = raw.get_montage() pos2 = montage2.get_positions() np.testing.assert_array_almost_equal( - np.array(list(pos['ch_pos'].values())), - np.array(list(pos2['ch_pos'].values()))) + np.array(list(pos["ch_pos"].values())), + np.array(list(pos2["ch_pos"].values())), + ) if mne_coord_frame is None: - assert pos2['coord_frame'] == 'unknown' + assert pos2["coord_frame"] == "unknown" else: - assert pos2['coord_frame'] == mne_coord_frame + assert pos2["coord_frame"] == mne_coord_frame # test MEG raw = _load_raw() for coord_frame in BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS: - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg', - space=coord_frame) + bids_path = _bids_path.copy().update( + root=bids_root, datatype="meg", space=coord_frame + ) write_raw_bids(raw, bids_path) raw2 = read_raw_bids(bids_path) - for ch, ch2 in zip(raw.info['chs'], raw2.info['chs']): - np.testing.assert_array_equal(ch['loc'], ch2['loc']) - assert ch['coord_frame'] == ch2['coord_frame'] + for ch, ch2 in zip(raw.info["chs"], raw2.info["chs"]): + np.testing.assert_array_equal(ch["loc"], ch2["loc"]) + assert ch["coord_frame"] == ch2["coord_frame"] def _set_montage_no_trans(raw, montage): """Set the montage without transforming to 'head'.""" - coord_frame = montage.get_positions()['coord_frame'] + coord_frame = montage.get_positions()["coord_frame"] with warnings.catch_warnings(): - warnings.filterwarnings(action='ignore', category=RuntimeWarning, - message='.*nasion not found', module='mne') - raw.set_montage(montage, on_missing='ignore') - for ch in raw.info['chs']: - ch['coord_frame'] = MNE_STR_TO_FRAME[coord_frame] - for d in raw.info['dig']: - d['coord_frame'] = MNE_STR_TO_FRAME[coord_frame] + warnings.filterwarnings( + action="ignore", + category=RuntimeWarning, + message=".*nasion not found", + module="mne", + ) + raw.set_montage(montage, on_missing="ignore") + for ch in raw.info["chs"]: + ch["coord_frame"] = MNE_STR_TO_FRAME[coord_frame] + for d in raw.info["dig"]: + d["coord_frame"] = MNE_STR_TO_FRAME[coord_frame] -def _test_montage_trans(raw, montage, pos_test, space='fsaverage', - 
coord_frame='auto', unit='auto'): +def _test_montage_trans( + raw, montage, pos_test, space="fsaverage", coord_frame="auto", unit="auto" +): """Test if a montage is transformed correctly.""" _set_montage_no_trans(raw, montage) - trans = template_to_head( - raw.info, space, coord_frame=coord_frame, unit=unit)[1] + trans = template_to_head(raw.info, space, coord_frame=coord_frame, unit=unit)[1] montage_test = raw.get_montage() montage_test.apply_trans(trans) assert_almost_equal( - pos_test, - np.array(list(montage_test.get_positions()['ch_pos'].values()))) + pos_test, np.array(list(montage_test.get_positions()["ch_pos"].values())) + ) @testing.requires_testing_data @@ -212,21 +231,22 @@ def test_template_to_head(): # test no montage raw = _load_raw() raw.set_montage(None) - with pytest.raises(RuntimeError, match='No montage found'): - template_to_head(raw.info, 'fsaverage', coord_frame='auto') + with pytest.raises(RuntimeError, match="No montage found"): + template_to_head(raw.info, "fsaverage", coord_frame="auto") # test no channels raw = _load_raw() montage_empty = mne.channels.make_dig_montage(hsp=[[0, 0, 0]]) _set_montage_no_trans(raw, montage_empty) - with pytest.raises(RuntimeError, match='No channel locations ' - 'found in the montage'): - template_to_head(raw.info, 'fsaverage', coord_frame='auto') + with pytest.raises( + RuntimeError, match="No channel locations " "found in the montage" + ): + template_to_head(raw.info, "fsaverage", coord_frame="auto") # test unexpected coordinate frame raw = _load_raw() - with pytest.raises(RuntimeError, match='not expected for a template'): - template_to_head(raw.info, 'fsaverage', coord_frame='auto') + with pytest.raises(RuntimeError, match="not expected for a template"): + template_to_head(raw.info, "fsaverage", coord_frame="auto") # test all coordinate frames raw = _load_raw() @@ -234,69 +254,79 @@ def test_template_to_head(): raw.pick_types(eeg=True) raw.drop_channels(raw.ch_names[3:]) montage = mne.channels.make_dig_montage( - ch_pos={raw.ch_names[0]: [0, 0, 0], - raw.ch_names[1]: [0, 0, 0.1], - raw.ch_names[2]: [0, 0, 0.2]}, - coord_frame='unknown') + ch_pos={ + raw.ch_names[0]: [0, 0, 0], + raw.ch_names[1]: [0, 0, 0.1], + raw.ch_names[2]: [0, 0, 0.2], + }, + coord_frame="unknown", + ) for space in BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS: - for cf in ('mri', 'mri_voxel', 'ras'): + for cf in ("mri", "mri_voxel", "ras"): _set_montage_no_trans(raw, montage) trans = template_to_head(raw.info, space, cf)[1] - assert trans['from'] == MNE_STR_TO_FRAME['head'] - assert trans['to'] == MNE_STR_TO_FRAME['mri'] + assert trans["from"] == MNE_STR_TO_FRAME["head"] + assert trans["to"] == MNE_STR_TO_FRAME["mri"] montage_test = raw.get_montage() pos = montage_test.get_positions() - assert pos['coord_frame'] == 'head' - assert pos['nasion'] is not None - assert pos['lpa'] is not None - assert pos['rpa'] is not None + assert pos["coord_frame"] == "head" + assert pos["nasion"] is not None + assert pos["lpa"] is not None + assert pos["rpa"] is not None # test that we get the right transform _set_montage_no_trans(raw, montage) - trans = template_to_head(raw.info, 'fsaverage', 'mri')[1] - trans2 = mne.read_trans(op.join( - op.dirname(op.dirname(mne_bids.__file__)), 'mne_bids', 'data', - 'space-fsaverage_trans.fif')) - assert_almost_equal(trans['trans'], trans2['trans']) + trans = template_to_head(raw.info, "fsaverage", "mri")[1] + trans2 = mne.read_trans( + op.join( + op.dirname(op.dirname(mne_bids.__file__)), + "mne_bids", + "data", + 
"space-fsaverage_trans.fif", + ) + ) + assert_almost_equal(trans["trans"], trans2["trans"]) # test auto coordinate frame # test auto voxels montage_vox = mne.channels.make_dig_montage( - ch_pos={raw.ch_names[0]: [2, 0, 10], - raw.ch_names[1]: [0, 0, 5.5], - raw.ch_names[2]: [0, 1, 3]}, - coord_frame='unknown') - pos_test = np.array([[0.126, -0.118, 0.128], - [0.128, -0.1225, 0.128], - [0.128, -0.125, 0.127]]) - _test_montage_trans(raw, montage_vox, pos_test, - coord_frame='auto', unit='mm') + ch_pos={ + raw.ch_names[0]: [2, 0, 10], + raw.ch_names[1]: [0, 0, 5.5], + raw.ch_names[2]: [0, 1, 3], + }, + coord_frame="unknown", + ) + pos_test = np.array( + [[0.126, -0.118, 0.128], [0.128, -0.1225, 0.128], [0.128, -0.125, 0.127]] + ) + _test_montage_trans(raw, montage_vox, pos_test, coord_frame="auto", unit="mm") # now negative values => scanner RAS montage_ras = mne.channels.make_dig_montage( - ch_pos={raw.ch_names[0]: [-30.2, 20, -40], - raw.ch_names[1]: [10, 30, 53.5], - raw.ch_names[2]: [30, -21, 33]}, - coord_frame='unknown') - pos_test = np.array([[-0.0302, 0.02, -0.04], - [0.01, 0.03, 0.0535], - [0.03, -0.021, 0.033]]) + ch_pos={ + raw.ch_names[0]: [-30.2, 20, -40], + raw.ch_names[1]: [10, 30, 53.5], + raw.ch_names[2]: [30, -21, 33], + }, + coord_frame="unknown", + ) + pos_test = np.array( + [[-0.0302, 0.02, -0.04], [0.01, 0.03, 0.0535], [0.03, -0.021, 0.033]] + ) _set_montage_no_trans(raw, montage_ras) - _test_montage_trans(raw, montage_ras, pos_test, - coord_frame='auto', unit='mm') + _test_montage_trans(raw, montage_ras, pos_test, coord_frame="auto", unit="mm") # test auto unit montage_mm = montage_ras.copy() _set_montage_no_trans(raw, montage_mm) - _test_montage_trans(raw, montage_mm, pos_test, - coord_frame='ras', unit='auto') + _test_montage_trans(raw, montage_mm, pos_test, coord_frame="ras", unit="auto") montage_m = montage_ras.copy() for d in montage_m.dig: - d['r'] = np.array(d['r']) / 1000 - _test_montage_trans(raw, montage_m, pos_test, - coord_frame='ras', unit='auto') + d["r"] = np.array(d["r"]) / 1000 + _test_montage_trans(raw, montage_m, pos_test, coord_frame="ras", unit="auto") @testing.requires_testing_data @@ -304,32 +334,31 @@ def test_convert_montage(): """Test the montage RAS conversion.""" raw = _load_raw() montage = raw.get_montage() - trans = mne.read_trans(op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc-trans.fif')) + trans = mne.read_trans( + op.join(data_path, "MEG", "sample", "sample_audvis_trunc-trans.fif") + ) montage.apply_trans(trans) - subjects_dir = op.join(data_path, 'subjects') + subjects_dir = op.join(data_path, "subjects") # test read - with pytest.raises(RuntimeError, match='incorrectly formatted'): - convert_montage_to_mri(montage, 'foo', subjects_dir) + with pytest.raises(RuntimeError, match="incorrectly formatted"): + convert_montage_to_mri(montage, "foo", subjects_dir) # test write - with pytest.raises(RuntimeError, match='incorrectly formatted'): - convert_montage_to_ras(montage, 'foo', subjects_dir) + with pytest.raises(RuntimeError, match="incorrectly formatted"): + convert_montage_to_ras(montage, "foo", subjects_dir) # test mri to ras - convert_montage_to_ras(montage, 'sample', subjects_dir) + convert_montage_to_ras(montage, "sample", subjects_dir) pos = montage.get_positions() - assert pos['coord_frame'] == 'ras' - assert_almost_equal(pos['ch_pos']['EEG 001'], - [-0.0366405, 0.063066, 0.0676311]) + assert pos["coord_frame"] == "ras" + assert_almost_equal(pos["ch_pos"]["EEG 001"], [-0.0366405, 0.063066, 0.0676311]) # test ras to mri 
- convert_montage_to_mri(montage, 'sample', subjects_dir) + convert_montage_to_mri(montage, "sample", subjects_dir) pos = montage.get_positions() - assert pos['coord_frame'] == 'mri' - assert_almost_equal(pos['ch_pos']['EEG 001'], - [-0.0313669, 0.0540269, 0.0949191]) + assert pos["coord_frame"] == "mri" + assert_almost_equal(pos["ch_pos"]["EEG 001"], [-0.0313669, 0.0540269, 0.0949191]) @testing.requires_testing_data @@ -337,22 +366,16 @@ def test_electrodes_io(tmp_path): """Ensure only electrodes end up in *_electrodes.json.""" raw = _load_raw() raw.pick_types(eeg=True, stim=True) # we don't need meg channels - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg") write_raw_bids(raw=raw, bids_path=bids_path) - electrodes_path = ( - bids_path.copy() - .update( - task=None, - run=None, - space='CapTrak', - suffix='electrodes', - extension='.tsv' - ) + electrodes_path = bids_path.copy().update( + task=None, run=None, space="CapTrak", suffix="electrodes", extension=".tsv" ) - with open(electrodes_path, encoding='utf-8') as sidecar: - n_entries = len([line for line in sidecar - if 'name' not in line]) # don't need the header + with open(electrodes_path, encoding="utf-8") as sidecar: + n_entries = len( + [line for line in sidecar if "name" not in line] + ) # don't need the header # only eeg chs w/ electrode pos should be written to electrodes.tsv - assert n_entries == len(raw.get_channel_types('eeg')) + assert n_entries == len(raw.get_channel_types("eeg")) diff --git a/mne_bids/tests/test_inspect.py b/mne_bids/tests/test_inspect.py index b0537cda5..e97005b0a 100644 --- a/mne_bids/tests/test_inspect.py +++ b/mne_bids/tests/test_inspect.py @@ -12,43 +12,58 @@ from mne.utils._testing import requires_module from mne.viz.utils import _fake_click -from mne_bids import (BIDSPath, read_raw_bids, write_raw_bids, inspect_dataset, - write_meg_calibration, write_meg_crosstalk) +from mne_bids import ( + BIDSPath, + read_raw_bids, + write_raw_bids, + inspect_dataset, + write_meg_calibration, + write_meg_crosstalk, +) import mne_bids.inspect from mne_bids.read import _from_tsv from test_read import warning_str -requires_matplotlib = partial(requires_module, name='matplotlib', - call='import matplotlib') +requires_matplotlib = partial( + requires_module, name="matplotlib", call="import matplotlib" +) -_bids_path = BIDSPath(subject='01', session='01', run='01', task='testing', - datatype='meg') +_bids_path = BIDSPath( + subject="01", session="01", run="01", task="testing", datatype="meg" +) data_path = testing.data_path(download=False) def setup_bids_test_dir(bids_root): """Return path to a written test BIDS dir.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') - cal_fname = op.join(data_path, 'SSS', 'sss_cal_mgh.dat') - crosstalk_fname = op.join(data_path, 'SSS', 'ct_sparse.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) + cal_fname = 
op.join(data_path, "SSS", "sss_cal_mgh.dat") + crosstalk_fname = op.join(data_path, "SSS", "ct_sparse.fif") raw = mne.io.read_raw_fif(raw_fname) - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 # Drop unknown events. events = mne.read_events(events_fname) events = events[events[:, 2] != 0] bids_path = _bids_path.copy().update(root=bids_root) - write_raw_bids(raw, bids_path=bids_path, events=events, - event_id=event_id, overwrite=True) + write_raw_bids( + raw, bids_path=bids_path, events=events, event_id=event_id, overwrite=True + ) write_meg_calibration(cal_fname, bids_path=bids_path) write_meg_crosstalk(crosstalk_fname, bids_path=bids_path) @@ -57,23 +72,24 @@ def setup_bids_test_dir(bids_root): @requires_matplotlib @testing.requires_testing_data -@pytest.mark.parametrize('save_changes', (True, False)) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.parametrize("save_changes", (True, False)) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_single_file(tmp_path, save_changes): """Test inspecting a dataset consisting of only a single file.""" from mne.utils._testing import _click_ch_name import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) - raw = read_raw_bids(bids_path=bids_path, verbose='error') - old_bads = raw.info['bads'].copy() + raw = read_raw_bids(bids_path=bids_path, verbose="error") + old_bads = raw.info["bads"].copy() inspect_dataset(bids_path) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] # Mark some channels as bad by clicking on their name. _click_ch_name(raw_fig, ch_index=0, button=1) @@ -81,21 +97,19 @@ def test_inspect_single_file(tmp_path, save_changes): _click_ch_name(raw_fig, ch_index=4, button=1) # Closing the window should open a dialog box. 
- key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] if save_changes: - key = 'return' + key = "return" else: - key = 'escape' - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key=key) - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + key = "escape" + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key=key) + fig_dialog.canvas.callbacks.process("key_press_event", key_event) - raw = read_raw_bids(bids_path=bids_path, verbose='error') - new_bads = raw.info['bads'].copy() + raw = read_raw_bids(bids_path=bids_path, verbose="error") + new_bads = raw.info["bads"].copy() if save_changes: assert len(new_bads) > len(old_bads) @@ -105,88 +119,84 @@ def test_inspect_single_file(tmp_path, save_changes): @requires_matplotlib @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_multiple_files(tmp_path): """Test inspecting a dataset consisting of more than one file.""" import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) # Create a second subject - raw = read_raw_bids(bids_path=bids_path, verbose='error') - write_raw_bids(raw, bids_path.copy().update(subject='02')) + raw = read_raw_bids(bids_path=bids_path, verbose="error") + write_raw_bids(raw, bids_path.copy().update(subject="02")) del raw # Inspection should end with the second subject. inspect_dataset(bids_path.copy().update(subject=None)) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] - assert raw_fig.mne.info['subject_info']['his_id'] == 'sub-02' + raw_fig = mne_bids.inspect._global_vars["raw_fig"] + assert raw_fig.mne.info["subject_info"]["his_id"] == "sub-02" - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) @requires_matplotlib @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_set_and_unset_bads(tmp_path): """Test marking channels as bad and later marking them as good again.""" from mne.utils._testing import _click_ch_name import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) - raw = read_raw_bids(bids_path=bids_path, verbose='error') - orig_bads = raw.info['bads'].copy() + raw = read_raw_bids(bids_path=bids_path, verbose="error") + orig_bads = raw.info["bads"].copy() # Mark some channels as bad by clicking on their name. 
inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] _click_ch_name(raw_fig, ch_index=0, button=1) _click_ch_name(raw_fig, ch_index=1, button=1) _click_ch_name(raw_fig, ch_index=4, button=1) # Close window and save changes. - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # Inspect the data again, click on two of the bad channels to mark them as # good. inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] _click_ch_name(raw_fig, ch_index=1, button=1) _click_ch_name(raw_fig, ch_index=4, button=1) # Close window and save changes. - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # Check marking the channels as good has actually worked. 
- expected_bads = orig_bads + ['MEG 0113'] - raw = read_raw_bids(bids_path=bids_path, verbose='error') - new_bads = raw.info['bads'] + expected_bads = orig_bads + ["MEG 0113"] + raw = read_raw_bids(bids_path=bids_path, verbose="error") + new_bads = raw.info["bads"] assert set(new_bads) == set(expected_bads) @@ -194,151 +204,135 @@ def _add_annotation(raw_fig): """Add an Annotation to a Raw plot.""" data_ax = raw_fig.mne.ax_main - key_event = KeyEvent(name='Annotation', canvas=raw_fig.canvas, key='a') - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Annotation", canvas=raw_fig.canvas, key="a") + raw_fig.canvas.callbacks.process("key_press_event", key_event) ann_fig = raw_fig.mne.fig_annotation - for key in 'test': # Annotation will be named: BAD_test - key_event = KeyEvent(name='Bad', canvas=ann_fig.canvas, key=key) - ann_fig.canvas.callbacks.process('key_press_event', key_event) + for key in "test": # Annotation will be named: BAD_test + key_event = KeyEvent(name="Bad", canvas=ann_fig.canvas, key=key) + ann_fig.canvas.callbacks.process("key_press_event", key_event) - key_event = KeyEvent(name='Enter', canvas=ann_fig.canvas, key='enter') - ann_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Enter", canvas=ann_fig.canvas, key="enter") + ann_fig.canvas.callbacks.process("key_press_event", key_event) # Draw a 4 second long Annotation. - _fake_click(raw_fig, data_ax, [1., 1.], xform='data', button=1, - kind='press') - _fake_click(raw_fig, data_ax, [5., 1.], xform='data', button=1, - kind='motion') - _fake_click(raw_fig, data_ax, [5., 1.], xform='data', button=1, - kind='release') + _fake_click(raw_fig, data_ax, [1.0, 1.0], xform="data", button=1, kind="press") + _fake_click(raw_fig, data_ax, [5.0, 1.0], xform="data", button=1, kind="motion") + _fake_click(raw_fig, data_ax, [5.0, 1.0], xform="data", button=1, kind="release") @requires_matplotlib @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_annotations(tmp_path): """Test inspection of Annotations.""" import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) - raw = read_raw_bids(bids_path=bids_path, verbose='error') + raw = read_raw_bids(bids_path=bids_path, verbose="error") orig_annotations = raw.annotations.copy() inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] _add_annotation(raw_fig) # Close window and save changes. - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # Ensure changes were saved. 
- raw = read_raw_bids(bids_path=bids_path, verbose='error') - assert 'BAD_test' in raw.annotations.description - annot_idx = raw.annotations.description == 'BAD_test' + raw = read_raw_bids(bids_path=bids_path, verbose="error") + assert "BAD_test" in raw.annotations.description + annot_idx = raw.annotations.description == "BAD_test" assert raw.annotations.duration[annot_idx].squeeze() == 4 # Remove the Annotation. inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] data_ax = raw_fig.mne.ax_main - key_event = KeyEvent(name='Annotations', canvas=raw_fig.canvas, key='a') - raw_fig.canvas.callbacks.process('key_press_event', key_event) - _fake_click(raw_fig, data_ax, [1., 1.], xform='data', button=3, - kind='press') + key_event = KeyEvent(name="Annotations", canvas=raw_fig.canvas, key="a") + raw_fig.canvas.callbacks.process("key_press_event", key_event) + _fake_click(raw_fig, data_ax, [1.0, 1.0], xform="data", button=3, kind="press") # Close window and save changes. - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # Ensure changes were saved. - raw = read_raw_bids(bids_path=bids_path, verbose='error') - assert 'BAD_test' not in raw.annotations.description + raw = read_raw_bids(bids_path=bids_path, verbose="error") + assert "BAD_test" not in raw.annotations.description assert raw.annotations == orig_annotations @requires_matplotlib @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_annotations_remove_all(tmp_path): """Test behavior if all Annotations are removed by the user.""" import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) - events_tsv_fpath = (bids_path.copy() - .update(suffix='events', extension='.tsv') - .fpath) + events_tsv_fpath = bids_path.copy().update(suffix="events", extension=".tsv").fpath # Remove all Annotations. - raw = read_raw_bids(bids_path=bids_path, verbose='error') + raw = read_raw_bids(bids_path=bids_path, verbose="error") raw.set_annotations(None) raw.load_data() raw.save(raw.filenames[0], overwrite=True) # Delete events.tsv sidecar. - (bids_path.copy() - .update(suffix='events', extension='.tsv') - .fpath - .unlink()) + (bids_path.copy().update(suffix="events", extension=".tsv").fpath.unlink()) # Add custom Annotation. inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] _add_annotation(raw_fig) # Close window and save changes. 
- key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # events.tsv sidecar should have been created. assert events_tsv_fpath.exists() # Remove the Annotation. inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] data_ax = raw_fig.mne.ax_main - key_event = KeyEvent(name='Annotations', canvas=raw_fig.canvas, key='a') - raw_fig.canvas.callbacks.process('key_press_event', key_event) - _fake_click(raw_fig, data_ax, [1., 1.], xform='data', button=3, - kind='press') + key_event = KeyEvent(name="Annotations", canvas=raw_fig.canvas, key="a") + raw_fig.canvas.callbacks.process("key_press_event", key_event) + _fake_click(raw_fig, data_ax, [1.0, 1.0], xform="data", button=3, kind="press") # Close window and save changes. - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # events.tsv sidecar should not exist anymore. 
assert not events_tsv_fpath.exists() @@ -346,39 +340,41 @@ def test_inspect_annotations_remove_all(tmp_path): @requires_matplotlib @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_dont_show_annotations(tmp_path): """Test if show_annotations=False works.""" import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) inspect_dataset(bids_path, find_flat=False, show_annotations=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] assert not raw_fig.mne.annotations @requires_matplotlib @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_bads_and_annotations(tmp_path): """Test adding bads and Annotations in one go.""" from mne.utils._testing import _click_ch_name import matplotlib import matplotlib.pyplot as plt - matplotlib.use('Agg') - plt.close('all') + + matplotlib.use("Agg") + plt.close("all") bids_root = setup_bids_test_dir(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) - raw = read_raw_bids(bids_path=bids_path, verbose='error') - orig_bads = raw.info['bads'].copy() + raw = read_raw_bids(bids_path=bids_path, verbose="error") + orig_bads = raw.info["bads"].copy() inspect_dataset(bids_path, find_flat=False) - raw_fig = mne_bids.inspect._global_vars['raw_fig'] + raw_fig = mne_bids.inspect._global_vars["raw_fig"] # Mark some channels as bad by clicking on their name. _click_ch_name(raw_fig, ch_index=0, button=1) @@ -387,91 +383,87 @@ def test_inspect_bads_and_annotations(tmp_path): _add_annotation(raw_fig) # Close window and save changes. - key_event = KeyEvent( - name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key - ) - raw_fig.canvas.callbacks.process('key_press_event', key_event) + key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key) + raw_fig.canvas.callbacks.process("key_press_event", key_event) - fig_dialog = mne_bids.inspect._global_vars['dialog_fig'] - key_event = KeyEvent(name='Save', canvas=fig_dialog.canvas, key='return') - fig_dialog.canvas.callbacks.process('key_press_event', key_event) + fig_dialog = mne_bids.inspect._global_vars["dialog_fig"] + key_event = KeyEvent(name="Save", canvas=fig_dialog.canvas, key="return") + fig_dialog.canvas.callbacks.process("key_press_event", key_event) # Check that the changes were saved. 
-    raw = read_raw_bids(bids_path=bids_path, verbose='error')
-    new_bads = raw.info['bads']
-    expected_bads = orig_bads + ['MEG 0113']
+    raw = read_raw_bids(bids_path=bids_path, verbose="error")
+    new_bads = raw.info["bads"]
+    expected_bads = orig_bads + ["MEG 0113"]
     assert set(new_bads) == set(expected_bads)
-    assert 'BAD_test' in raw.annotations.description
+    assert "BAD_test" in raw.annotations.description
 
 
 @requires_matplotlib
 @testing.requires_testing_data
-@pytest.mark.parametrize('save_changes', (True, False))
-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'])
+@pytest.mark.parametrize("save_changes", (True, False))
+@pytest.mark.filterwarnings(warning_str["channel_unit_changed"])
 def test_inspect_auto_flats(tmp_path, save_changes):
     """Test flat channel & segment detection."""
     import matplotlib
     import matplotlib.pyplot as plt
-    matplotlib.use('Agg')
-    plt.close('all')
+
+    matplotlib.use("Agg")
+    plt.close("all")
 
     bids_root = setup_bids_test_dir(tmp_path)
     bids_path = _bids_path.copy().update(root=bids_root)
-    channels_tsv_fname = bids_path.copy().update(suffix='channels',
-                                                 extension='.tsv')
+    channels_tsv_fname = bids_path.copy().update(suffix="channels", extension=".tsv")
 
-    raw = read_raw_bids(bids_path=bids_path, verbose='error')
+    raw = read_raw_bids(bids_path=bids_path, verbose="error")
 
     # Inject an entirely flat channel.
     raw.load_data()
     raw._data[10] = np.zeros_like(raw._data[10], dtype=raw._data.dtype)
     # Add a flat time segment (approx. 100 ms) to another channel.
-    raw._data[20, 500:500 + int(np.ceil(0.1 * raw.info['sfreq']))] = 0
+    raw._data[20, 500 : 500 + int(np.ceil(0.1 * raw.info["sfreq"]))] = 0
     raw.save(raw.filenames[0], overwrite=True)
-    old_bads = raw.info['bads'].copy()
+    old_bads = raw.info["bads"].copy()
 
     inspect_dataset(bids_path)
-    raw_fig = mne_bids.inspect._global_vars['raw_fig']
+    raw_fig = mne_bids.inspect._global_vars["raw_fig"]
 
     # Closing the window should open a dialog box.
-    key_event = KeyEvent(
-        name='Close', canvas=raw_fig.canvas, key=raw_fig.mne.close_key
-    )
-    raw_fig.canvas.callbacks.process('key_press_event', key_event)
-    fig_dialog = mne_bids.inspect._global_vars['dialog_fig']
+    key_event = KeyEvent(name="Close", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)
+    raw_fig.canvas.callbacks.process("key_press_event", key_event)
+    fig_dialog = mne_bids.inspect._global_vars["dialog_fig"]
 
     if save_changes:
-        key = 'return'
+        key = "return"
     else:
-        key = 'escape'
-    key_event = KeyEvent(name='Close', canvas=fig_dialog.canvas, key=key)
-    fig_dialog.canvas.callbacks.process('key_press_event', key_event)
+        key = "escape"
+    key_event = KeyEvent(name="Close", canvas=fig_dialog.canvas, key=key)
+    fig_dialog.canvas.callbacks.process("key_press_event", key_event)
 
-    raw = read_raw_bids(bids_path=bids_path, verbose='error')
+    raw = read_raw_bids(bids_path=bids_path, verbose="error")
 
     if save_changes:
-        assert old_bads != raw.info['bads']
-        assert raw.ch_names[10] in raw.info['bads']
+        assert old_bads != raw.info["bads"]
+        assert raw.ch_names[10] in raw.info["bads"]
 
         channels_tsv_data = _from_tsv(channels_tsv_fname)
-        assert (channels_tsv_data['status_description'][10] ==
-                'Flat channel, auto-detected via MNE-BIDS')
+        assert (
+            channels_tsv_data["status_description"][10]
+            == "Flat channel, auto-detected via MNE-BIDS"
+        )
 
         # This channel should not have been added to `bads`, but produced a
         # flat annotation.
- assert raw.ch_names[20] not in raw.info['bads'] - assert 'BAD_flat' in raw.annotations.description + assert raw.ch_names[20] not in raw.info["bads"] + assert "BAD_flat" in raw.annotations.description else: - assert old_bads == raw.info['bads'] - assert 'BAD_flat' not in raw.annotations.description + assert old_bads == raw.info["bads"] + assert "BAD_flat" not in raw.annotations.description @requires_matplotlib @testing.requires_testing_data -@pytest.mark.parametrize(('l_freq', 'h_freq'), - [(None, None), - (1, None), - (None, 30), - (1, 30)]) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.parametrize( + ("l_freq", "h_freq"), [(None, None), (1, None), (None, 30), (1, 30)] +) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_inspect_freq_filter(tmp_path, l_freq, h_freq): """Test frequency filter for Raw display.""" bids_root = setup_bids_test_dir(tmp_path) diff --git a/mne_bids/tests/test_path.py b/mne_bids/tests/test_path.py index d97294ce2..deea3fb73 100644 --- a/mne_bids/tests/test_path.py +++ b/mne_bids/tests/test_path.py @@ -16,46 +16,64 @@ from mne.datasets import testing from mne.io import anonymize_info -from mne_bids import (get_datatypes, get_entity_vals, print_dir_tree, - BIDSPath, write_raw_bids, read_raw_bids, - write_meg_calibration, write_meg_crosstalk) -from mne_bids.path import (_parse_ext, get_entities_from_fname, - _find_best_candidates, - _filter_fnames, search_folder_for_text, - get_bids_path_from_fname, find_matching_paths) +from mne_bids import ( + get_datatypes, + get_entity_vals, + print_dir_tree, + BIDSPath, + write_raw_bids, + read_raw_bids, + write_meg_calibration, + write_meg_crosstalk, +) +from mne_bids.path import ( + _parse_ext, + get_entities_from_fname, + _find_best_candidates, + _filter_fnames, + search_folder_for_text, + get_bids_path_from_fname, + find_matching_paths, +) from mne_bids.config import ALLOWED_PATH_ENTITIES_SHORT from test_read import _read_raw_fif, warning_str -subject_id = '01' -session_id = '01' -run = '01' +subject_id = "01" +session_id = "01" +run = "01" acq = None -task = 'testing' +task = "testing" data_path = testing.data_path(download=False) _bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task) + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task +) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def return_bids_test_dir(tmp_path_factory): """Return path to a written test BIDS dir.""" - bids_root = str(tmp_path_factory.mktemp('mnebids_utils_test_bids_ds')) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') - cal_fname = op.join(data_path, 'SSS', 'sss_cal_mgh.dat') - crosstalk_fname = op.join(data_path, 'SSS', 'ct_sparse.fif') + bids_root = str(tmp_path_factory.mktemp("mnebids_utils_test_bids_ds")) + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) + cal_fname = op.join(data_path, "SSS", "sss_cal_mgh.dat") + crosstalk_fname = op.join(data_path, "SSS", "ct_sparse.fif") raw = 
mne.io.read_raw_fif(raw_fname)
-    raw.info['line_freq'] = 60
+    raw.info["line_freq"] = 60
 
     # Drop unknown events.
     events = mne.read_events(events_fname)
@@ -63,10 +81,9 @@ def return_bids_test_dir(tmp_path_factory):
 
     bids_path = _bids_path.copy().update(root=bids_root)
     # Write multiple runs for test purposes.
-    for run_idx in [run, '02']:
+    for run_idx in [run, "02"]:
         name = bids_path.copy().update(run=run_idx)
-        write_raw_bids(raw, name, events=events,
-                       event_id=event_id, overwrite=True)
+        write_raw_bids(raw, name, events=events, event_id=event_id, overwrite=True)
 
     write_meg_calibration(cal_fname, bids_path=bids_path)
     write_meg_crosstalk(crosstalk_fname, bids_path=bids_path)
@@ -77,63 +94,68 @@ def return_bids_test_dir(tmp_path_factory):
 def test_get_keys(return_bids_test_dir):
     """Test getting the datatypes (=modalities) of a dir."""
     modalities = get_datatypes(return_bids_test_dir)
-    assert modalities == ['meg']
-
-
-@pytest.mark.parametrize('entity, expected_vals, kwargs',
-                         [('bogus', None, None),
-                          ('subject', [subject_id], None),
-                          ('session', [session_id], None),
-                          ('run', [run, '02'], None),
-                          ('acquisition', ['calibration', 'crosstalk'], None),
-                          ('task', [task], None),
-                          ('subject', [], dict(ignore_subjects=[subject_id])),
-                          ('subject', [], dict(ignore_subjects=subject_id)),
-                          ('session', [], dict(ignore_sessions=[session_id])),
-                          ('session', [], dict(ignore_sessions=session_id)),
-                          ('run', [run], dict(ignore_runs=['02'])),
-                          ('run', [run], dict(ignore_runs='02')),
-                          ('task', [], dict(ignore_tasks=[task])),
-                          ('task', [], dict(ignore_tasks=task)),
-                          ('run', [run, '02'], dict(ignore_runs=['bogus'])),
-                          ('run', [], dict(ignore_datatypes=['meg']))])
+    assert modalities == ["meg"]
+
+
+@pytest.mark.parametrize(
+    "entity, expected_vals, kwargs",
+    [
+        ("bogus", None, None),
+        ("subject", [subject_id], None),
+        ("session", [session_id], None),
+        ("run", [run, "02"], None),
+        ("acquisition", ["calibration", "crosstalk"], None),
+        ("task", [task], None),
+        ("subject", [], dict(ignore_subjects=[subject_id])),
+        ("subject", [], dict(ignore_subjects=subject_id)),
+        ("session", [], dict(ignore_sessions=[session_id])),
+        ("session", [], dict(ignore_sessions=session_id)),
+        ("run", [run], dict(ignore_runs=["02"])),
+        ("run", [run], dict(ignore_runs="02")),
+        ("task", [], dict(ignore_tasks=[task])),
+        ("task", [], dict(ignore_tasks=task)),
+        ("run", [run, "02"], dict(ignore_runs=["bogus"])),
+        ("run", [], dict(ignore_datatypes=["meg"])),
+    ],
+)
 @testing.requires_testing_data
 def test_get_entity_vals(entity, expected_vals, kwargs, return_bids_test_dir):
     """Test getting a list of entities."""
     bids_root = return_bids_test_dir
 
     # Add some derivative data that should be ignored by get_entity_vals()
-    deriv_path = Path(bids_root) / 'derivatives'
-    deriv_meg_dir = deriv_path / 'pipeline' / 'sub-deriv' / 'ses-deriv' / 'meg'
+    deriv_path = Path(bids_root) / "derivatives"
+    deriv_meg_dir = deriv_path / "pipeline" / "sub-deriv" / "ses-deriv" / "meg"
     deriv_meg_dir.mkdir(parents=True)
-    (deriv_meg_dir / 'sub-deriv_ses-deriv_task-deriv_meg.fif').touch()
-    (deriv_meg_dir / 'sub-deriv_ses-deriv_task-deriv_meg.json').touch()
+    (deriv_meg_dir / "sub-deriv_ses-deriv_task-deriv_meg.fif").touch()
+    (deriv_meg_dir / "sub-deriv_ses-deriv_task-deriv_meg.json").touch()
 
     if kwargs is None:
         kwargs = dict()
 
-    if entity == 'bogus':
-        with pytest.raises(ValueError, match='`key` must be one of'):
+    if entity == "bogus":
+        with pytest.raises(ValueError, match="`key` must be one of"):
             get_entity_vals(root=bids_root, 
entity_key=entity, **kwargs) else: - vals = get_entity_vals(root=bids_root, entity_key=entity, - **kwargs) + vals = get_entity_vals(root=bids_root, entity_key=entity, **kwargs) assert vals == expected_vals # test using ``with_key`` kwarg - entities = get_entity_vals(root=bids_root, entity_key=entity, - with_key=True, **kwargs) + entities = get_entity_vals( + root=bids_root, entity_key=entity, with_key=True, **kwargs + ) entity_long_to_short = { val: key for key, val in ALLOWED_PATH_ENTITIES_SHORT.items() } - assert entities == [f'{entity_long_to_short[entity]}-{val}' - for val in expected_vals] + assert entities == [ + f"{entity_long_to_short[entity]}-{val}" for val in expected_vals + ] # Test without ignoring the derivatives dir entities = get_entity_vals( root=bids_root, entity_key=entity, **kwargs, ignore_dirs=None ) - if entity not in ('acquisition', 'run'): - assert 'deriv' in entities + if entity not in ("acquisition", "run"): + assert "deriv" in entities # Clean up shutil.rmtree(deriv_path) @@ -141,62 +163,60 @@ def test_get_entity_vals(entity, expected_vals, kwargs, return_bids_test_dir): def test_search_folder_for_text(capsys): """Test finding entries.""" - with pytest.raises(ValueError, match='is not a directory'): - search_folder_for_text('foo', 'i_dont_exist') + with pytest.raises(ValueError, match="is not a directory"): + search_folder_for_text("foo", "i_dont_exist") # We check the testing directory test_dir = op.dirname(__file__) - search_folder_for_text('n/a', test_dir) + search_folder_for_text("n/a", test_dir) captured = capsys.readouterr() - assert 'sub-01_ses-eeg_task-rest_eeg.json' in captured.out + assert "sub-01_ses-eeg_task-rest_eeg.json" in captured.out assert ( - ' 1 name type units low_cutof high_cuto descripti sampling_ status status_de\n' # noqa: E501 - ' 2 Fp1 EEG µV 0.0159154 1000.0 ElectroEn 5000.0 good n/a' # noqa: E501 + " 1 name type units low_cutof high_cuto descripti sampling_ status status_de\n" # noqa: E501 + " 2 Fp1 EEG µV 0.0159154 1000.0 ElectroEn 5000.0 good n/a" # noqa: E501 ) in captured.out # test if pathlib.Path object - search_folder_for_text('n/a', Path(test_dir)) + search_folder_for_text("n/a", Path(test_dir)) # test returning a string and without line numbers - out = search_folder_for_text( - 'n/a', test_dir, line_numbers=False, return_str=True - ) - assert 'sub-01_ses-eeg_task-rest_eeg.json' in out + out = search_folder_for_text("n/a", test_dir, line_numbers=False, return_str=True) + assert "sub-01_ses-eeg_task-rest_eeg.json" in out assert ( - ' name type units low_cutof high_cuto descripti sampling_ status status_de\n' # noqa: E501 - ' Fp1 EEG µV 0.0159154 1000.0 ElectroEn 5000.0 good n/a' # noqa: E501 + " name type units low_cutof high_cuto descripti sampling_ status status_de\n" # noqa: E501 + " Fp1 EEG µV 0.0159154 1000.0 ElectroEn 5000.0 good n/a" # noqa: E501 ) in out def test_print_dir_tree(capsys): """Test printing a dir tree.""" - with pytest.raises(FileNotFoundError, match='Folder does not exist'): - print_dir_tree('i_dont_exist') + with pytest.raises(FileNotFoundError, match="Folder does not exist"): + print_dir_tree("i_dont_exist") # We check the testing directory test_dir = op.dirname(__file__) - with pytest.raises(ValueError, match='must be a positive integer'): + with pytest.raises(ValueError, match="must be a positive integer"): print_dir_tree(test_dir, max_depth=-1) - with pytest.raises(ValueError, match='must be a positive integer'): - print_dir_tree(test_dir, max_depth='bad') + with pytest.raises(ValueError, match="must 
be a positive integer"): + print_dir_tree(test_dir, max_depth="bad") # Do not limit depth print_dir_tree(test_dir) captured = capsys.readouterr() - assert '|--- test_utils.py' in captured.out.split('\n') - assert '|--- __pycache__{}'.format(os.sep) in captured.out.split('\n') - assert '.pyc' in captured.out + assert "|--- test_utils.py" in captured.out.split("\n") + assert "|--- __pycache__{}".format(os.sep) in captured.out.split("\n") + assert ".pyc" in captured.out # Now limit depth ... we should not descend into pycache print_dir_tree(test_dir, max_depth=1) captured = capsys.readouterr() - assert '|--- test_utils.py' in captured.out.split('\n') - assert '|--- __pycache__{}'.format(os.sep) in captured.out.split('\n') - assert '.pyc' not in captured.out + assert "|--- test_utils.py" in captured.out.split("\n") + assert "|--- __pycache__{}".format(os.sep) in captured.out.split("\n") + assert ".pyc" not in captured.out # Limit depth even more print_dir_tree(test_dir, max_depth=0) captured = capsys.readouterr() - assert captured.out == '|tests{}\n'.format(os.sep) + assert captured.out == "|tests{}\n".format(os.sep) # test if pathlib.Path object print_dir_tree(Path(test_dir)) @@ -204,162 +224,188 @@ def test_print_dir_tree(capsys): # test returning a string out = print_dir_tree(test_dir, return_str=True, max_depth=1) assert isinstance(out, str) - assert '|--- test_utils.py' in out.split('\n') - assert '|--- __pycache__{}'.format(os.sep) in out.split('\n') - assert '.pyc' not in out + assert "|--- test_utils.py" in out.split("\n") + assert "|--- __pycache__{}".format(os.sep) in out.split("\n") + assert ".pyc" not in out def test_make_folders(tmp_path): """Test that folders are created and named properly.""" # Make sure folders are created properly - bids_path = BIDSPath(subject='01', session='foo', - datatype='eeg', root=str(tmp_path)) + bids_path = BIDSPath( + subject="01", session="foo", datatype="eeg", root=str(tmp_path) + ) bids_path.mkdir().directory - assert op.isdir(tmp_path / 'sub-01' / 'ses-foo' / 'eeg') + assert op.isdir(tmp_path / "sub-01" / "ses-foo" / "eeg") # If we remove a kwarg the folder shouldn't be created - bids_path = BIDSPath(subject='02', datatype='eeg', - root=tmp_path) + bids_path = BIDSPath(subject="02", datatype="eeg", root=tmp_path) bids_path.mkdir().directory - assert op.isdir(tmp_path / 'sub-02' / 'eeg') + assert op.isdir(tmp_path / "sub-02" / "eeg") # Check if a pathlib.Path bids_root works. 
- bids_path = BIDSPath(subject='03', session='foo', - datatype='eeg', root=tmp_path) + bids_path = BIDSPath(subject="03", session="foo", datatype="eeg", root=tmp_path) bids_path.mkdir().directory - assert op.isdir(tmp_path / 'sub-03' / 'ses-foo' / 'eeg') + assert op.isdir(tmp_path / "sub-03" / "ses-foo" / "eeg") # Check if bids_root=None creates folders in the current working directory bids_root = tmp_path / "tmp" bids_root.mkdir() curr_dir = os.getcwd() os.chdir(bids_root) - bids_path = BIDSPath(subject='04', session='foo', - datatype='eeg') + bids_path = BIDSPath(subject="04", session="foo", datatype="eeg") bids_path.mkdir().directory - assert op.isdir(op.join(os.getcwd(), 'sub-04', 'ses-foo', 'eeg')) + assert op.isdir(op.join(os.getcwd(), "sub-04", "ses-foo", "eeg")) os.chdir(curr_dir) def test_parse_ext(): """Test the file extension extraction.""" - f = 'sub-05_task-matchingpennies.vhdr' + f = "sub-05_task-matchingpennies.vhdr" fname, ext = _parse_ext(f) - assert fname == 'sub-05_task-matchingpennies' - assert ext == '.vhdr' + assert fname == "sub-05_task-matchingpennies" + assert ext == ".vhdr" # Test for case where no extension: assume BTI format - f = 'sub-01_task-rest' + f = "sub-01_task-rest" fname, ext = _parse_ext(f) assert fname == f - assert ext == '.pdf' + assert ext == ".pdf" # Get a .nii.gz file - f = 'sub-01_task-rest.nii.gz' + f = "sub-01_task-rest.nii.gz" fname, ext = _parse_ext(f) - assert fname == 'sub-01_task-rest' - assert ext == '.nii.gz' + assert fname == "sub-01_task-rest" + assert ext == ".nii.gz" -@pytest.mark.parametrize('fname', [ - 'sub-01_ses-02_task-test_run-3_split-01_meg.fif', - 'sub-01_ses-02_task-test_run-3_split-01', - '/bids_root/sub-01/ses-02/meg/sub-01_ses-02_task-test_run-3_split-01_meg.fif', # noqa: E501 - 'sub-01/ses-02/meg/sub-01_ses-02_task-test_run-3_split-01_meg.fif' -]) +@pytest.mark.parametrize( + "fname", + [ + "sub-01_ses-02_task-test_run-3_split-01_meg.fif", + "sub-01_ses-02_task-test_run-3_split-01", + "/bids_root/sub-01/ses-02/meg/sub-01_ses-02_task-test_run-3_split-01_meg.fif", # noqa: E501 + "sub-01/ses-02/meg/sub-01_ses-02_task-test_run-3_split-01_meg.fif", + ], +) def test_get_bids_path_from_fname(fname): bids_path = get_bids_path_from_fname(fname) assert bids_path.basename == Path(fname).name - if '/bids_root/' in fname: - assert Path(bids_path.root) == Path('/bids_root') + if "/bids_root/" in fname: + assert Path(bids_path.root) == Path("/bids_root") else: - if 'meg' in fname: + if "meg" in fname: # directory should match - assert Path(bids_path.directory) == Path('sub-01/ses-02/meg') + assert Path(bids_path.directory) == Path("sub-01/ses-02/meg") # root should be default '.' - assert str(bids_path.root) == '.' + assert str(bids_path.root) == "." 
-@pytest.mark.parametrize('fname', [
-    'sub-01_ses-02_task-test_run-3_split-01_desc-filtered_meg.fif',
-    'sub-01_ses-02_task-test_run-3_split-01_desc-filtered.fif',
-    'sub-01_ses-02_task-test_run-3_split-01_desc-filtered',
-    ('/bids_root/sub-01/ses-02/meg/' +
-     'sub-01_ses-02_task-test_run-3_split-01_desc-filtered_meg.fif'),
-])
+@pytest.mark.parametrize(
+    "fname",
+    [
+        "sub-01_ses-02_task-test_run-3_split-01_desc-filtered_meg.fif",
+        "sub-01_ses-02_task-test_run-3_split-01_desc-filtered.fif",
+        "sub-01_ses-02_task-test_run-3_split-01_desc-filtered",
+        (
+            "/bids_root/sub-01/ses-02/meg/"
+            + "sub-01_ses-02_task-test_run-3_split-01_desc-filtered_meg.fif"
+        ),
+    ],
+)
 def test_get_entities_from_fname(fname):
     """Test parsing entities from a BIDS filename."""
     params = get_entities_from_fname(fname)
-    assert params['subject'] == '01'
-    assert params['session'] == '02'
-    assert params['run'] == '3'
-    assert params['task'] == 'test'
-    assert params['description'] == 'filtered'
-    assert params['split'] == '01'
+    assert params["subject"] == "01"
+    assert params["session"] == "02"
+    assert params["run"] == "3"
+    assert params["task"] == "test"
+    assert params["description"] == "filtered"
+    assert params["split"] == "01"
     assert list(params.keys()) == [
-        'subject', 'session', 'task',
-        'acquisition', 'run', 'processing',
-        'space', 'recording', 'split', 'description',
+        "subject",
+        "session",
+        "task",
+        "acquisition",
+        "run",
+        "processing",
+        "space",
+        "recording",
+        "split",
+        "description",
     ]
 
 
-@pytest.mark.parametrize('fname', [
-    'sub-01_ses-02_task-test_run-3_split-01_meg.fif',
-    ('/bids_root/sub-01/ses-02/meg/'
-     'sub-01_ses-02_task-test_run-3_split-01_meg.fif'),
-    'sub-01_ses-02_task-test_run-3_split-01_foo-tfr_meg.fif',
-])
+@pytest.mark.parametrize(
+    "fname",
+    [
+        "sub-01_ses-02_task-test_run-3_split-01_meg.fif",
+        (
+            "/bids_root/sub-01/ses-02/meg/"
+            "sub-01_ses-02_task-test_run-3_split-01_meg.fif"
+        ),
+        "sub-01_ses-02_task-test_run-3_split-01_foo-tfr_meg.fif",
+    ],
+)
 def test_get_entities_from_fname_errors(fname):
     """Test parsing entities from a BIDS filename.
 
     Extends the utility to BIDS entities that are not supported, such as 'foo'.
""" - if 'foo' in fname: - with pytest.raises(KeyError, match='Unexpected entity'): - params = get_entities_from_fname(fname, on_error='raise') - with pytest.warns(RuntimeWarning, match='Unexpected entity'): - params = get_entities_from_fname(fname, on_error='warn') - params = get_entities_from_fname(fname, on_error='ignore') + if "foo" in fname: + with pytest.raises(KeyError, match="Unexpected entity"): + params = get_entities_from_fname(fname, on_error="raise") + with pytest.warns(RuntimeWarning, match="Unexpected entity"): + params = get_entities_from_fname(fname, on_error="warn") + params = get_entities_from_fname(fname, on_error="ignore") else: - params = get_entities_from_fname(fname, on_error='raise') - - expected_keys = ['subject', 'session', 'task', - 'acquisition', 'run', 'processing', - 'space', 'recording', 'split', 'description'] - - assert params['subject'] == '01' - assert params['session'] == '02' - assert params['run'] == '3' - assert params['task'] == 'test' - assert params['split'] == '01' - if 'foo' in fname: - assert params['foo'] == 'tfr' - expected_keys.append('foo') - assert list(params.keys()) == expected_keys - - -@pytest.mark.parametrize('candidate_list, best_candidates', [ - # Only one candidate - (['sub-01_ses-02'], ['sub-01_ses-02']), - - # Two candidates, but the second matches on more entities - (['sub-01', 'sub-01_ses-02'], ['sub-01_ses-02']), + params = get_entities_from_fname(fname, on_error="raise") + + expected_keys = [ + "subject", + "session", + "task", + "acquisition", + "run", + "processing", + "space", + "recording", + "split", + "description", + ] - # No candidates match - (['sub-02_ses-02', 'sub-01_ses-01'], []), + assert params["subject"] == "01" + assert params["session"] == "02" + assert params["run"] == "3" + assert params["task"] == "test" + assert params["split"] == "01" + if "foo" in fname: + assert params["foo"] == "tfr" + expected_keys.append("foo") + assert list(params.keys()) == expected_keys - # First candidate is disqualified (session doesn't match) - (['sub-01_ses-01', 'sub-01_ses-02'], ['sub-01_ses-02']), - # Multiple equally good candidates - (['sub-01_run-01', 'sub-01_run-02'], ['sub-01_run-01', 'sub-01_run-02']), -]) +@pytest.mark.parametrize( + "candidate_list, best_candidates", + [ + # Only one candidate + (["sub-01_ses-02"], ["sub-01_ses-02"]), + # Two candidates, but the second matches on more entities + (["sub-01", "sub-01_ses-02"], ["sub-01_ses-02"]), + # No candidates match + (["sub-02_ses-02", "sub-01_ses-01"], []), + # First candidate is disqualified (session doesn't match) + (["sub-01_ses-01", "sub-01_ses-02"], ["sub-01_ses-02"]), + # Multiple equally good candidates + (["sub-01_run-01", "sub-01_run-02"], ["sub-01_run-01", "sub-01_run-02"]), + ], +) def test_find_best_candidates(candidate_list, best_candidates): """Test matching of candidate sidecar files.""" - params = dict(subject='01', session='02', acquisition=None) + params = dict(subject="01", session="02", acquisition=None) assert _find_best_candidates(params, candidate_list) == best_candidates @@ -372,78 +418,78 @@ def test_find_matching_sidecar(return_bids_test_dir, tmp_path): # Now find a sidecar sidecar_fname = bids_path.find_matching_sidecar( - suffix='coordsystem', extension='.json') - expected_file = op.join('sub-01', 'ses-01', 'meg', - 'sub-01_ses-01_coordsystem.json') + suffix="coordsystem", extension=".json" + ) + expected_file = op.join("sub-01", "ses-01", "meg", "sub-01_ses-01_coordsystem.json") assert str(sidecar_fname).endswith(expected_file) # 
Find multiple sidecars, tied in score, triggering an error - with pytest.raises(RuntimeError, match='Expected to find a single'): - open(str(sidecar_fname).replace('coordsystem.json', - '2coordsystem.json'), 'w').close() + with pytest.raises(RuntimeError, match="Expected to find a single"): + open( + str(sidecar_fname).replace("coordsystem.json", "2coordsystem.json"), "w" + ).close() print_dir_tree(bids_root) - bids_path.find_matching_sidecar( - suffix='coordsystem', extension='.json') + bids_path.find_matching_sidecar(suffix="coordsystem", extension=".json") # Find nothing and raise. - with pytest.raises(RuntimeError, match='Did not find any'): - bids_path.find_matching_sidecar(suffix='foo', extension='.bogus') + with pytest.raises(RuntimeError, match="Did not find any"): + bids_path.find_matching_sidecar(suffix="foo", extension=".bogus") # Find nothing and receive None and a warning. - on_error = 'warn' - with pytest.warns(RuntimeWarning, match='Did not find any'): + on_error = "warn" + with pytest.warns(RuntimeWarning, match="Did not find any"): fname = bids_path.find_matching_sidecar( - suffix='foo', extension='.bogus', on_error=on_error) + suffix="foo", extension=".bogus", on_error=on_error + ) assert fname is None # Find nothing and receive None. - on_error = 'ignore' + on_error = "ignore" fname = bids_path.find_matching_sidecar( - suffix='foo', extension='.bogus', on_error=on_error) + suffix="foo", extension=".bogus", on_error=on_error + ) assert fname is None # Invalid on_error. - on_error = 'hello' - with pytest.raises(ValueError, match='Acceptable values for on_error are'): + on_error = "hello" + with pytest.raises(ValueError, match="Acceptable values for on_error are"): bids_path.find_matching_sidecar( - suffix='coordsystem', extension='.json', on_error=on_error) + suffix="coordsystem", extension=".json", on_error=on_error + ) # Test behavior of suffix and extension params when suffix and extension # are also (not) present in the passed BIDSPath - bids_path = BIDSPath( - subject='test', task='task', datatype='eeg', root=tmp_path - ) + bids_path = BIDSPath(subject="test", task="task", datatype="eeg", root=tmp_path) bids_path.mkdir() for suffix, extension in zip( - ['eeg', 'eeg', 'events', 'events'], - ['.fif', '.json', '.tsv', '.json'] + ["eeg", "eeg", "events", "events"], [".fif", ".json", ".tsv", ".json"] ): bids_path.suffix = suffix bids_path.extension = extension bids_path.fpath.touch() # suffix parameter should always override BIDSPath.suffix - bids_path.extension = '.json' + bids_path.extension = ".json" - for bp_suffix in (None, 'eeg'): + for bp_suffix in (None, "eeg"): bids_path.suffix = bp_suffix - s = bids_path.find_matching_sidecar(suffix='events') - assert Path(s).name == 'sub-test_task-task_events.json' + s = bids_path.find_matching_sidecar(suffix="events") + assert Path(s).name == "sub-test_task-task_events.json" # extension parameter should always override BIDSPath.extension - bids_path.suffix = 'events' + bids_path.suffix = "events" - for bp_extension in (None, '.json'): + for bp_extension in (None, ".json"): bids_path.extension = bp_extension - s = bids_path.find_matching_sidecar(extension='.tsv') - assert Path(s).name == 'sub-test_task-task_events.tsv' + s = bids_path.find_matching_sidecar(extension=".tsv") + assert Path(s).name == "sub-test_task-task_events.tsv" # If suffix and extension parameters are not passed, use BIDSPath # attributes - bids_path.update(suffix='events', extension='.tsv') + bids_path.update(suffix="events", extension=".tsv") s = 
bids_path.find_matching_sidecar() - assert s.name == 'sub-test_task-task_events.tsv' + assert s.name == "sub-test_task-task_events.tsv" @testing.requires_testing_data @@ -454,31 +500,45 @@ def test_bids_path_inference(return_bids_test_dir): # without providing all the entities, ambiguous when trying # to use fpath bids_path = BIDSPath( - subject=subject_id, session=session_id, acquisition=acq, - task=task, root=bids_root) - with pytest.raises(RuntimeError, match='Found more than one'): + subject=subject_id, + session=session_id, + acquisition=acq, + task=task, + root=bids_root, + ) + with pytest.raises(RuntimeError, match="Found more than one"): bids_path.fpath # shouldn't error out when there is no uncertainty - channels_fname = BIDSPath(subject=subject_id, session=session_id, - run=run, acquisition=acq, task=task, - root=bids_root, suffix='channels') + channels_fname = BIDSPath( + subject=subject_id, + session=session_id, + run=run, + acquisition=acq, + task=task, + root=bids_root, + suffix="channels", + ) channels_fname.fpath # create an extra file under 'eeg' - extra_file = op.join(bids_root, f'sub-{subject_id}', - f'ses-{session_id}', 'eeg', - channels_fname.basename + '.tsv') + extra_file = op.join( + bids_root, + f"sub-{subject_id}", + f"ses-{session_id}", + "eeg", + channels_fname.basename + ".tsv", + ) Path(extra_file).parent.mkdir(exist_ok=True, parents=True) # Creates a new file and because of this new file, there is now # ambiguity - with open(extra_file, 'w', encoding='utf-8'): + with open(extra_file, "w", encoding="utf-8"): pass - with pytest.raises(RuntimeError, match='Found data of more than one'): + with pytest.raises(RuntimeError, match="Found data of more than one"): channels_fname.fpath # if you set datatype, now there is no ambiguity - channels_fname.update(datatype='eeg') + channels_fname.update(datatype="eeg") assert str(channels_fname.fpath) == extra_file # set state back to original shutil.rmtree(Path(extra_file).parent) @@ -490,34 +550,42 @@ def test_bids_path(return_bids_test_dir): bids_root = return_bids_test_dir bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task, root=bids_root, suffix='meg') + subject=subject_id, + session=session_id, + run=run, + acquisition=acq, + task=task, + root=bids_root, + suffix="meg", + ) - expected_parent_dir = op.join(bids_root, f'sub-{subject_id}', - f'ses-{session_id}', 'meg') + expected_parent_dir = op.join( + bids_root, f"sub-{subject_id}", f"ses-{session_id}", "meg" + ) assert str(bids_path.fpath.parent) == expected_parent_dir # test BIDSPath without bids_root, suffix, extension # basename and fpath should be the same - expected_basename = f'sub-{subject_id}_ses-{session_id}_task-{task}_run-{run}' # noqa - assert (op.basename(bids_path.fpath) == - expected_basename + '_meg.fif') + expected_basename = ( + f"sub-{subject_id}_ses-{session_id}_task-{task}_run-{run}" # noqa + ) + assert op.basename(bids_path.fpath) == expected_basename + "_meg.fif" assert op.dirname(bids_path.fpath).startswith(bids_root) # when bids root is not passed in, passes relative path - bids_path2 = bids_path.copy().update(datatype='meg', root=None) + bids_path2 = bids_path.copy().update(datatype="meg", root=None) expected_relpath = op.join( - f'sub-{subject_id}', f'ses-{session_id}', 'meg', - expected_basename + '_meg') + f"sub-{subject_id}", f"ses-{session_id}", "meg", expected_basename + "_meg" + ) assert str(bids_path2.fpath) == expected_relpath # without bids_root and with suffix/extension # basename 
and fpath should be the same - bids_path.update(suffix='ieeg', extension='.vhdr') - expected_basename2 = expected_basename + '_ieeg.vhdr' - assert (bids_path.basename == expected_basename2) - bids_path.update(extension='.vhdr') - assert (bids_path.basename == expected_basename2) + bids_path.update(suffix="ieeg", extension=".vhdr") + expected_basename2 = expected_basename + "_ieeg.vhdr" + assert bids_path.basename == expected_basename2 + bids_path.update(extension=".vhdr") + assert bids_path.basename == expected_basename2 # with bids_root, but without suffix/extension # basename should work, but fpath should not. @@ -525,157 +593,181 @@ def test_bids_path(return_bids_test_dir): assert bids_path.basename == expected_basename # should find the correct filename if suffix was passed - bids_path.update(suffix='meg', extension='.fif') + bids_path.update(suffix="meg", extension=".fif") bids_fpath = bids_path.fpath assert op.basename(bids_fpath) == bids_path.basename # Same test, but exploiting the fact that bids_fpath is a pathlib.Path assert bids_fpath.name == bids_path.basename # confirm BIDSPath assigns properties correctly - bids_path = BIDSPath(subject=subject_id, - session=session_id) + bids_path = BIDSPath(subject=subject_id, session=session_id) assert bids_path.subject == subject_id assert bids_path.session == session_id - assert 'subject' in bids_path.entities - assert 'session' in bids_path.entities + assert "subject" in bids_path.entities + assert "session" in bids_path.entities print(bids_path.entities) - assert all(bids_path.entities.get(entity) is None - for entity in ['task', 'run', 'recording', 'acquisition', - 'space', 'processing', 'split', - 'root', 'datatype', - 'suffix', 'extension']) + assert all( + bids_path.entities.get(entity) is None + for entity in [ + "task", + "run", + "recording", + "acquisition", + "space", + "processing", + "split", + "root", + "datatype", + "suffix", + "extension", + ] + ) # test updating functionality - bids_path.update(acquisition='03', run='2', session='02', - task=None) + bids_path.update(acquisition="03", run="2", session="02", task=None) assert bids_path.subject == subject_id - assert bids_path.session == '02' - assert bids_path.acquisition == '03' - assert bids_path.run == '2' + assert bids_path.session == "02" + assert bids_path.acquisition == "03" + assert bids_path.run == "2" assert bids_path.task is None - new_bids_path = bids_path.copy().update(task='02', - acquisition=None) - assert new_bids_path.task == '02' + new_bids_path = bids_path.copy().update(task="02", acquisition=None) + assert new_bids_path.task == "02" assert new_bids_path.acquisition is None # equality of bids basename assert new_bids_path != bids_path - assert new_bids_path == bids_path.copy().update(task='02', - acquisition=None) + assert new_bids_path == bids_path.copy().update(task="02", acquisition=None) # error check on kwargs of update - with pytest.raises(ValueError, match='Key must be one of*'): + with pytest.raises(ValueError, match="Key must be one of*"): bids_path.update(sub=subject_id, session=session_id) # error check on the passed in entity containing a magic char - with pytest.raises(ValueError, match='Unallowed*'): - bids_path.update(subject=subject_id + '-') + with pytest.raises(ValueError, match="Unallowed*"): + bids_path.update(subject=subject_id + "-") # error check on suffix in BIDSPath (deep check) - suffix = 'meeg' - with pytest.raises(ValueError, match=f'Suffix {suffix} is not'): - BIDSPath(subject=subject_id, session=session_id, - suffix=suffix) + 
suffix = "meeg"
+    with pytest.raises(ValueError, match=f"Suffix {suffix} is not"):
+        BIDSPath(subject=subject_id, session=session_id, suffix=suffix)
 
     # do error check suffix in update
-    error_kind = 'foobar'
-    with pytest.raises(ValueError, match=f'Suffix {error_kind} is not'):
+    error_kind = "foobar"
+    with pytest.raises(ValueError, match=f"Suffix {error_kind} is not"):
         bids_path.update(suffix=error_kind)
 
     # does not error check on suffix in BIDSPath (deep check)
-    suffix = 'meeg'
-    bids_path = BIDSPath(subject=subject_id, session=session_id,
-                         suffix=suffix, check=False)
+    suffix = "meeg"
+    bids_path = BIDSPath(
+        subject=subject_id, session=session_id, suffix=suffix, check=False
    )
 
     # also inherits error check from instantiation
     # always error check entities though
-    with pytest.raises(ValueError, match='Key must be one of'):
-        bids_path.copy().update(blah='blah-entity')
+    with pytest.raises(ValueError, match="Key must be one of"):
+        bids_path.copy().update(blah="blah-entity")
 
     # error check datatype if check is turned back on
-    with pytest.raises(ValueError, match='datatype .* is not valid'):
+    with pytest.raises(ValueError, match="datatype .* is not valid"):
         bids_path.copy().update(check=True, datatype=error_kind)
 
     # does not error check on space if check=False ...
-    BIDSPath(subject=subject_id, space='foo', suffix='eeg', check=False)
+    BIDSPath(subject=subject_id, space="foo", suffix="eeg", check=False)
 
     # ... but raises an error with check=True
-    match = r'space \(foo\) is not valid for datatype \(eeg\)'
+    match = r"space \(foo\) is not valid for datatype \(eeg\)"
     with pytest.raises(ValueError, match=match):
-        BIDSPath(subject=subject_id, space='foo', suffix='eeg', datatype='eeg')
+        BIDSPath(subject=subject_id, space="foo", suffix="eeg", datatype="eeg")
 
     # error check on space for datatypes that do not support space
-    match = 'space entity is not valid for datatype anat'
+    match = "space entity is not valid for datatype anat"
     with pytest.raises(ValueError, match=match):
-        BIDSPath(subject=subject_id, space='foo', datatype='anat')
+        BIDSPath(subject=subject_id, space="foo", datatype="anat")
 
     # error check on space if datatype is None
-    bids_path_tmpcopy = bids_path.copy().update(suffix='meeg')
-    match = 'You must define datatype if you want to use space'
+    bids_path_tmpcopy = bids_path.copy().update(suffix="meeg")
+    match = "You must define datatype if you want to use space"
     with pytest.raises(ValueError, match=match):
-        bids_path_tmpcopy.update(space='CapTrak', check=True)
+        bids_path_tmpcopy.update(space="CapTrak", check=True)
 
     # making a valid space update works
-    bids_path_tmpcopy.update(suffix='eeg', datatype='eeg',
-                             space="CapTrak", check=True)
+    bids_path_tmpcopy.update(suffix="eeg", datatype="eeg", space="CapTrak", check=True)
 
     # suffix won't be error-checked if the initial check was False
     bids_path.update(suffix=suffix)
 
     # error check on extension in BIDSPath (deep check)
-    extension = '.mat'
-    with pytest.raises(ValueError, match=f'Extension {extension} is not'):
-        BIDSPath(subject=subject_id, session=session_id,
-                 extension=extension)
+    extension = ".mat"
+    with pytest.raises(ValueError, match=f"Extension {extension} is not"):
+        BIDSPath(subject=subject_id, session=session_id, extension=extension)
 
     # do not error check extension in update (not deep check)
-    bids_path.update(extension='.foo')
+    bids_path.update(extension=".foo")
 
     # test repr
-    bids_path = BIDSPath(subject='01', session='02',
-                         task='03', suffix='ieeg', datatype='ieeg',
-                         extension='.edf')
-    assert repr(bids_path) == 
('BIDSPath(\n' - 'root: None\n' - 'datatype: ieeg\n' - 'basename: sub-01_ses-02_task-03_ieeg.edf)') + bids_path = BIDSPath( + subject="01", + session="02", + task="03", + suffix="ieeg", + datatype="ieeg", + extension=".edf", + ) + assert repr(bids_path) == ( + "BIDSPath(\n" + "root: None\n" + "datatype: ieeg\n" + "basename: sub-01_ses-02_task-03_ieeg.edf)" + ) # test update can change check bids_path.update(check=False) - bids_path.update(extension='.mat') + bids_path.update(extension=".mat") # test that split gets properly set bids_path.update(split=1) - assert bids_path.basename == 'sub-01_ses-02_task-03_split-01_ieeg.mat' + assert bids_path.basename == "sub-01_ses-02_task-03_split-01_ieeg.mat" # test home dir expansion - bids_path = BIDSPath(root='~/foo') - assert '~/foo' not in str(bids_path.root) + bids_path = BIDSPath(root="~/foo") + assert "~/foo" not in str(bids_path.root) # explicitly test update() method too - bids_path.update(root='~/foo') - assert '~/foo' not in str(bids_path.root) + bids_path.update(root="~/foo") + assert "~/foo" not in str(bids_path.root) # Test property setters - bids_path = BIDSPath(subject='01', task='noise', datatype='eeg') - - for entity in ('subject', 'session', 'task', 'run', 'acquisition', - 'processing', 'recording', 'space', 'suffix', 'extension', - 'datatype', 'root', 'split'): - if entity == 'run': - new_val = '01' - elif entity == 'space': - new_val = 'CapTrak' - elif entity in ['suffix', 'datatype']: - new_val = 'eeg' - elif entity == 'extension': - new_val = '.fif' - elif entity == 'root': - new_val = Path('foo') - elif entity == 'split': - new_val = '01' + bids_path = BIDSPath(subject="01", task="noise", datatype="eeg") + + for entity in ( + "subject", + "session", + "task", + "run", + "acquisition", + "processing", + "recording", + "space", + "suffix", + "extension", + "datatype", + "root", + "split", + ): + if entity == "run": + new_val = "01" + elif entity == "space": + new_val = "CapTrak" + elif entity in ["suffix", "datatype"]: + new_val = "eeg" + elif entity == "extension": + new_val = ".fif" + elif entity == "root": + new_val = Path("foo") + elif entity == "split": + new_val = "01" else: - new_val = 'foo' + new_val = "foo" setattr(bids_path, entity, new_val) assert getattr(bids_path, entity) == new_val @@ -684,82 +776,102 @@ def test_bids_path(return_bids_test_dir): def test_make_filenames(): """Test that we create filenames according to the BIDS spec.""" # All keys work - prefix_data = dict(subject='one', session='two', task='three', - acquisition='four', run=1, processing='six', - recording='seven', suffix='ieeg', extension='.json', - datatype='ieeg') - expected_str = ('sub-one_ses-two_task-three_acq-four_run-01_proc-six_' - 'rec-seven_ieeg.json') + prefix_data = dict( + subject="one", + session="two", + task="three", + acquisition="four", + run=1, + processing="six", + recording="seven", + suffix="ieeg", + extension=".json", + datatype="ieeg", + ) + expected_str = ( + "sub-one_ses-two_task-three_acq-four_run-01_proc-six_" "rec-seven_ieeg.json" + ) assert BIDSPath(**prefix_data).basename == expected_str - assert BIDSPath(**prefix_data) == ( - Path('sub-one') / 'ses-two' / 'ieeg' / expected_str).as_posix() + assert ( + BIDSPath(**prefix_data) + == (Path("sub-one") / "ses-two" / "ieeg" / expected_str).as_posix() + ) # subsets of keys works - assert (BIDSPath(subject='one', task='three', run=4).basename == - 'sub-one_task-three_run-04') - assert (BIDSPath(subject='one', task='three', - suffix='meg', extension='.json').basename == - 
'sub-one_task-three_meg.json') + assert ( + BIDSPath(subject="one", task="three", run=4).basename + == "sub-one_task-three_run-04" + ) + assert ( + BIDSPath(subject="one", task="three", suffix="meg", extension=".json").basename + == "sub-one_task-three_meg.json" + ) with pytest.raises(ValueError): - BIDSPath(subject='one-two', suffix='ieeg', extension='.edf') + BIDSPath(subject="one-two", suffix="ieeg", extension=".edf") - with pytest.raises(ValueError, match='At least one'): + with pytest.raises(ValueError, match="At least one"): BIDSPath() # emptyroom check: invalid task - with pytest.raises(ValueError, match='task must be'): - BIDSPath(subject='emptyroom', session='20131201', - task='blah', suffix='meg') + with pytest.raises(ValueError, match="task must be"): + BIDSPath(subject="emptyroom", session="20131201", task="blah", suffix="meg") # when the suffix is not 'meg', then it does not result in # an error - BIDSPath(subject='emptyroom', session='20131201', - task='blah') + BIDSPath(subject="emptyroom", session="20131201", task="blah") # test what would happen if you don't want to check - prefix_data['extension'] = '.h5' - with pytest.raises(ValueError, match='Extension .h5 is not allowed'): + prefix_data["extension"] = ".h5" + with pytest.raises(ValueError, match="Extension .h5 is not allowed"): BIDSPath(**prefix_data) basename = BIDSPath(**prefix_data, check=False) - assert basename.basename == 'sub-one_ses-two_task-three_acq-four_run-01_proc-six_rec-seven_ieeg.h5' # noqa + assert ( + basename.basename + == "sub-one_ses-two_task-three_acq-four_run-01_proc-six_rec-seven_ieeg.h5" + ) # noqa # what happens with scans.tsv file - with pytest.raises(ValueError, match='scans.tsv file name ' - 'can only contain'): + with pytest.raises(ValueError, match="scans.tsv file name " "can only contain"): BIDSPath( - subject=subject_id, session=session_id, task=task, - suffix='scans', extension='.tsv' + subject=subject_id, + session=session_id, + task=task, + suffix="scans", + extension=".tsv", ) # We should be able to create a BIDSPath for a *_sessions.tsv file - BIDSPath(subject=subject_id, suffix='sessions', extension='.tsv') + BIDSPath(subject=subject_id, suffix="sessions", extension=".tsv") @pytest.mark.parametrize( - 'entities, expected_n_matches', + "entities, expected_n_matches", [ (dict(), 9), - (dict(subject='01'), 2), - (dict(task='audio'), 2), - (dict(processing='sss'), 1), - (dict(suffix='meg'), 4), - (dict(acquisition='lowres'), 1), - (dict(task='test', processing='ica', suffix='eeg'), 2), - (dict(subject='5', task='test', processing='ica', suffix='eeg'), 1), - (dict(subject=['01', '02']), 3), # test multiple input - ]) + (dict(subject="01"), 2), + (dict(task="audio"), 2), + (dict(processing="sss"), 1), + (dict(suffix="meg"), 4), + (dict(acquisition="lowres"), 1), + (dict(task="test", processing="ica", suffix="eeg"), 2), + (dict(subject="5", task="test", processing="ica", suffix="eeg"), 1), + (dict(subject=["01", "02"]), 3), # test multiple input + ], +) def test_filter_fnames(entities, expected_n_matches): """Test filtering filenames based on BIDS entities works.""" - fnames = ('sub-01_task-audio_meg.fif', - 'sub-01_ses-05_task-audio_meg.fif', - 'sub-02_task-visual_eeg.vhdr', - 'sub-Foo_ses-bar_meg.fif', - 'sub-Bar_task-invasive_run-1_ieeg.fif', - 'sub-3_task-fun_proc-sss_meg.fif', - 'sub-4_task-pain_acq-lowres_T1w.nii.gz', - 'sub-5_task-test_proc-ica_eeg.vhdr', - 'sub-6_task-test_proc-ica_eeg.vhdr') + fnames = ( + "sub-01_task-audio_meg.fif", + "sub-01_ses-05_task-audio_meg.fif", + 
"sub-02_task-visual_eeg.vhdr", + "sub-Foo_ses-bar_meg.fif", + "sub-Bar_task-invasive_run-1_ieeg.fif", + "sub-3_task-fun_proc-sss_meg.fif", + "sub-4_task-pain_acq-lowres_T1w.nii.gz", + "sub-5_task-test_proc-ica_eeg.vhdr", + "sub-6_task-test_proc-ica_eeg.vhdr", + ) output = _filter_fnames(fnames, **entities) assert len(output) == expected_n_matches @@ -773,87 +885,90 @@ def test_match(return_bids_test_dir): bids_path_01 = BIDSPath(root=bids_root) paths = bids_path_01.match() assert len(paths) == 9 - assert all('sub-01_ses-01' in p.basename for p in paths) + assert all("sub-01_ses-01" in p.basename for p in paths) assert all([p.root == bids_root for p in paths]) - bids_path_01 = BIDSPath(root=bids_root, run='01') + bids_path_01 = BIDSPath(root=bids_root, run="01") paths = bids_path_01.match() assert len(paths) == 3 - assert paths[0].basename == ('sub-01_ses-01_task-testing_run-01_' - 'channels.tsv') + assert paths[0].basename == ("sub-01_ses-01_task-testing_run-01_" "channels.tsv") - bids_path_01 = BIDSPath(root=bids_root, subject='unknown') + bids_path_01 = BIDSPath(root=bids_root, subject="unknown") paths = bids_path_01.match() assert len(paths) == 0 bids_path_01 = _bids_path.copy().update(root=None) - with pytest.raises(RuntimeError, match='Cannot match'): + with pytest.raises(RuntimeError, match="Cannot match"): bids_path_01.match() - bids_path_01.update(datatype='meg', root=bids_root) + bids_path_01.update(datatype="meg", root=bids_root) same_paths = bids_path_01.match() assert len(same_paths) == 3 # Check handling of `extension`, part 1: no extension specified. - bids_path_01 = BIDSPath(root=bids_root, run='01') + bids_path_01 = BIDSPath(root=bids_root, run="01") paths = bids_path_01.match() - assert [p.extension for p in paths] == ['.tsv', '.tsv', '.fif'] + assert [p.extension for p in paths] == [".tsv", ".tsv", ".fif"] # Check handling of `extension`, part 2: extension specified. 
-    bids_path_01 = BIDSPath(root=bids_root, run='01', extension='.fif',
-                            datatype='meg')
+    bids_path_01 = BIDSPath(root=bids_root, run="01", extension=".fif", datatype="meg")
     paths = bids_path_01.match()
     assert len(paths) == 1
-    assert paths[0].extension == '.fif'
+    assert paths[0].extension == ".fif"
 
     # Check handling of `extension` and `suffix`, part 1: no suffix
-    bids_path_01 = BIDSPath(root=bids_root, run='01', extension='.tsv',
-                            datatype='meg')
+    bids_path_01 = BIDSPath(root=bids_root, run="01", extension=".tsv", datatype="meg")
     paths = bids_path_01.match()
     assert len(paths) == 2
-    assert paths[0].extension == '.tsv'
+    assert paths[0].extension == ".tsv"
 
     # Check handling of `extension` and `suffix`, part 2: suffix passed
-    bids_path_01 = BIDSPath(root=bids_root, run='01',
-                            suffix='channels', extension='.tsv',
-                            datatype='meg')
+    bids_path_01 = BIDSPath(
+        root=bids_root, run="01", suffix="channels", extension=".tsv", datatype="meg"
+    )
     paths = bids_path_01.match()
     assert len(paths) == 1
-    assert paths[0].extension == '.tsv'
-    assert paths[0].suffix == 'channels'
+    assert paths[0].extension == ".tsv"
+    assert paths[0].suffix == "channels"
 
     # Check handling of `datatype` when explicitly passed in
     print_dir_tree(bids_root)
-    bids_path_01 = BIDSPath(root=bids_root, run='01',
-                            suffix='channels', extension='.tsv',
-                            datatype='meg')
+    bids_path_01 = BIDSPath(
+        root=bids_root, run="01", suffix="channels", extension=".tsv", datatype="meg"
+    )
     paths = bids_path_01.match()
     assert len(paths) == 1
-    assert paths[0].extension == '.tsv'
-    assert paths[0].suffix == 'channels'
-    assert Path(paths[0]).parent.name == 'meg'
+    assert paths[0].extension == ".tsv"
+    assert paths[0].suffix == "channels"
+    assert Path(paths[0]).parent.name == "meg"
 
     # Check handling of `datatype`, no datatype passed in
     # should be exactly the same if there is only one datatype
     # present in the dataset
-    bids_path_01 = BIDSPath(root=bids_root, run='01',
-                            suffix='channels', extension='.tsv')
+    bids_path_01 = BIDSPath(
+        root=bids_root, run="01", suffix="channels", extension=".tsv"
+    )
     paths = bids_path_01.match()
     assert len(paths) == 1
-    assert paths[0].extension == '.tsv'
-    assert paths[0].suffix == 'channels'
-    assert Path(paths[0]).parent.name == 'meg'
+    assert paths[0].extension == ".tsv"
+    assert paths[0].suffix == "channels"
+    assert Path(paths[0]).parent.name == "meg"
 
     # Test `check` parameter
     bids_path_01 = _bids_path.copy()
     bids_path_01.update(
-        root=bids_root, session=None, task=None, run=None,
-        suffix='foo', extension='.eeg', check=False
+        root=bids_root,
+        session=None,
+        task=None,
+        run=None,
+        suffix="foo",
+        extension=".eeg",
+        check=False,
     )
     bids_path_01.fpath.touch()
 
     assert bids_path_01.match(check=True) == []
-    assert bids_path_01.match(check=False)[0].fpath.name == 'sub-01_foo.eeg'
+    assert bids_path_01.match(check=False)[0].fpath.name == "sub-01_foo.eeg"
 
 
 @testing.requires_testing_data
@@ -893,44 +1008,66 @@ def test_find_matching_paths(return_bids_test_dir):
     # Test ignore_json parameter
     bids_path_01 = BIDSPath(root=bids_root)
     paths_match = bids_path_01.match(ignore_json=True)
-    paths_find = find_matching_paths(bids_root, extensions=[".tsv", ".fif",
-                                                            ".dat", ".eeg"])
+    paths_find = find_matching_paths(
+        bids_root, extensions=[".tsv", ".fif", ".dat", ".eeg"]
+    )
 
     assert paths_match == paths_find
 
     # Test `check` parameter
     bids_path_01 = _bids_path.copy()
     bids_path_01.update(
-        root=bids_root, session=None, task=None, run=None,
-        suffix='foo', extension='.eeg', check=False
+        root=bids_root,
+        session=None,
+        
task=None, + run=None, + suffix="foo", + extension=".eeg", + check=False, ) bids_path_01.fpath.touch() paths_match = bids_path_01.match(check=True) - paths_find = find_matching_paths(bids_root, sessions=None, tasks=None, - runs=None, suffixes='foo', - extensions='.eeg', check=True) + paths_find = find_matching_paths( + bids_root, + sessions=None, + tasks=None, + runs=None, + suffixes="foo", + extensions=".eeg", + check=True, + ) assert paths_match == paths_find paths_match = bids_path_01.match(check=False) - paths_find = find_matching_paths(bids_root, sessions=None, tasks=None, - runs=None, suffixes='foo', - extensions='.eeg', check=False) + paths_find = find_matching_paths( + bids_root, + sessions=None, + tasks=None, + runs=None, + suffixes="foo", + extensions=".eeg", + check=False, + ) assert paths_match == paths_find -@pytest.mark.filterwarnings(warning_str['meas_date_set_to_none']) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["meas_date_set_to_none"]) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_find_empty_room(return_bids_test_dir, tmp_path): """Test reading of empty room data.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") bids_root = tmp_path / "bids" bids_root.mkdir() raw = _read_raw_fif(raw_fname) - bids_path = BIDSPath(subject='01', session='01', - task='audiovisual', run='01', - root=bids_root, suffix='meg') + bids_path = BIDSPath( + subject="01", + session="01", + task="audiovisual", + run="01", + root=bids_root, + suffix="meg", + ) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) # No empty-room data present. @@ -943,40 +1080,39 @@ def test_find_empty_room(return_bids_test_dir, tmp_path): # before reading it back in. 
tmp_dir = tmp_path / "tmp" tmp_dir.mkdir() - er_raw_fname = op.join(tmp_dir, 'ernoise_raw.fif') + er_raw_fname = op.join(tmp_dir, "ernoise_raw.fif") raw.copy().crop(0, 10).save(er_raw_fname, overwrite=True) er_raw = _read_raw_fif(er_raw_fname) - if not isinstance(er_raw.info['meas_date'], datetime): # pragma: no cover + if not isinstance(er_raw.info["meas_date"], datetime): # pragma: no cover # mne < v0.20 - er_date = datetime.fromtimestamp(er_raw.info['meas_date'][0]) + er_date = datetime.fromtimestamp(er_raw.info["meas_date"][0]) else: - er_date = er_raw.info['meas_date'] + er_date = er_raw.info["meas_date"] - er_date = er_date.strftime('%Y%m%d') - er_bids_path = BIDSPath(subject='emptyroom', task='noise', - session=er_date, suffix='meg', - root=bids_root) + er_date = er_date.strftime("%Y%m%d") + er_bids_path = BIDSPath( + subject="emptyroom", task="noise", session=er_date, suffix="meg", root=bids_root + ) write_raw_bids(er_raw, er_bids_path, overwrite=True, verbose=False) recovered_er_bids_path = bids_path.find_empty_room() assert er_bids_path == recovered_er_bids_path # assert that we get best emptyroom if there are multiple available - sh.rmtree(op.join(bids_root, 'sub-emptyroom')) - dates = ['20021204', '20021201', '20021001'] + sh.rmtree(op.join(bids_root, "sub-emptyroom")) + dates = ["20021204", "20021201", "20021001"] for date in dates: er_bids_path.update(session=date) - er_meas_date = datetime.strptime(date, '%Y%m%d') + er_meas_date = datetime.strptime(date, "%Y%m%d") er_meas_date = er_meas_date.replace(tzinfo=timezone.utc) er_raw.set_meas_date(er_meas_date) write_raw_bids(er_raw, er_bids_path, verbose=False) best_er_basename = bids_path.find_empty_room() - assert best_er_basename.session == '20021204' + assert best_er_basename.session == "20021204" - with pytest.raises(ValueError, - match='The root of the "bids_path" must be set'): + with pytest.raises(ValueError, match='The root of the "bids_path" must be set'): bids_path.copy().update(root=None).find_empty_room() # assert that we get an error if meas_date is not available. 
@@ -984,12 +1120,14 @@ def test_find_empty_room(return_bids_test_dir, tmp_path): raw.set_meas_date(None) anonymize_info(raw.info) write_raw_bids(raw, bids_path, overwrite=True, format="FIF") - with pytest.raises(ValueError, match='The provided recording does not ' - 'have a measurement date set'): + with pytest.raises( + ValueError, + match="The provided recording does not " "have a measurement date set", + ): bids_path.find_empty_room() # test that the `AssociatedEmptyRoom` key in MEG sidecar is respected - bids_root = tmp_path / 'associated-empty-room' + bids_root = tmp_path / "associated-empty-room" bids_root.mkdir() raw = _read_raw_fif(raw_fname) meas_date = datetime(year=2020, month=1, day=10, tzinfo=timezone.utc) @@ -1006,22 +1144,29 @@ def test_find_empty_room(return_bids_test_dir, tmp_path): # `AssociatedEmptyRoom` (without AssociatedEmptyRoom, find_empty_room() # would return the recording with the matching date instead) er_matching_date_bids_path = BIDSPath( - subject='emptyroom', session='20200110', task='noise', root=bids_root, - datatype='meg', suffix='meg', extension='.fif') + subject="emptyroom", + session="20200110", + task="noise", + root=bids_root, + datatype="meg", + suffix="meg", + extension=".fif", + ) write_raw_bids(er_raw_matching_date, bids_path=er_matching_date_bids_path) - er_associated_bids_path = (er_matching_date_bids_path.copy() - .update(session='20100101')) + er_associated_bids_path = er_matching_date_bids_path.copy().update( + session="20100101" + ) write_raw_bids(er_raw_associated, bids_path=er_associated_bids_path) # Now we write experimental data and associate it with the earlier # empty-room recording - with pytest.raises(RuntimeError, match='Did not find any'): + with pytest.raises(RuntimeError, match="Did not find any"): bids_path.find_matching_sidecar() - bids_path = (er_matching_date_bids_path.copy() - .update(subject='01', session=None, task='task')) - write_raw_bids(raw, bids_path=bids_path, - empty_room=er_associated_bids_path) + bids_path = er_matching_date_bids_path.copy().update( + subject="01", session=None, task="task" + ) + write_raw_bids(raw, bids_path=bids_path, empty_room=er_associated_bids_path) assert bids_path.find_matching_sidecar().is_file() # Retrieve empty-room BIDSPath @@ -1033,12 +1178,12 @@ def test_find_empty_room(return_bids_test_dir, tmp_path): assert len(candidates) == 2 # Should only work for MEG - with pytest.raises(ValueError, match='only supported for MEG'): - bids_path.copy().update(datatype='eeg').find_empty_room() + with pytest.raises(ValueError, match="only supported for MEG"): + bids_path.copy().update(datatype="eeg").find_empty_room() # Raises an error if the file is missing os.remove(er_associated_bids_path.fpath) - with pytest.raises(FileNotFoundError, match='Empty-room BIDS .* not foun'): + with pytest.raises(FileNotFoundError, match="Empty-room BIDS .* not foun"): bids_path.find_empty_room(use_sidecar_only=True) # Don't create `AssociatedEmptyRoom` entry in sidecar – we should now @@ -1054,79 +1199,79 @@ def test_find_empty_room(return_bids_test_dir, tmp_path): assert bids_path.find_empty_room(use_sidecar_only=True) is None -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_find_emptyroom_ties(tmp_path): """Test that we receive a warning on a date tie.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", 
"sample_audvis_trunc_raw.fif") bids_root = str(tmp_path) - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - session = '20010101' - er_dir_path = BIDSPath(subject='emptyroom', session=session, - datatype='meg', root=bids_root) + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + session = "20010101" + er_dir_path = BIDSPath( + subject="emptyroom", session=session, datatype="meg", root=bids_root + ) er_dir = er_dir_path.mkdir().directory - meas_date = (datetime - .strptime(session, '%Y%m%d') - .replace(tzinfo=timezone.utc)) + meas_date = datetime.strptime(session, "%Y%m%d").replace(tzinfo=timezone.utc) raw = _read_raw_fif(raw_fname) - er_raw_fname = op.join(data_path, 'MEG', 'sample', 'ernoise_raw.fif') + er_raw_fname = op.join(data_path, "MEG", "sample", "ernoise_raw.fif") raw.copy().crop(0, 10).save(er_raw_fname, overwrite=True) er_raw = _read_raw_fif(er_raw_fname) raw.set_meas_date(meas_date) er_raw.set_meas_date(meas_date) write_raw_bids(raw, bids_path, overwrite=True) - er_bids_path = BIDSPath(subject='emptyroom', session=session) + er_bids_path = BIDSPath(subject="emptyroom", session=session) er_basename_1 = er_bids_path.basename - er_basename_2 = BIDSPath(subject='emptyroom', session=session, - task='noise').basename - er_raw.save(op.join(er_dir, f'{er_basename_1}_meg.fif')) - er_raw.save(op.join(er_dir, f'{er_basename_2}_meg.fif')) + er_basename_2 = BIDSPath( + subject="emptyroom", session=session, task="noise" + ).basename + er_raw.save(op.join(er_dir, f"{er_basename_1}_meg.fif")) + er_raw.save(op.join(er_dir, f"{er_basename_2}_meg.fif")) - with pytest.warns(RuntimeWarning, match='Found more than one'): + with pytest.warns(RuntimeWarning, match="Found more than one"): bids_path.find_empty_room() -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_find_emptyroom_no_meas_date(tmp_path): """Test that we warn if measurement date can be read or inferred.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") bids_root = str(tmp_path) bids_path = _bids_path.copy().update(root=bids_root) - er_session = 'mysession' + er_session = "mysession" er_meas_date = None - er_dir_path = BIDSPath(subject='emptyroom', session=er_session, - datatype='meg', root=bids_root) + er_dir_path = BIDSPath( + subject="emptyroom", session=er_session, datatype="meg", root=bids_root + ) er_dir = er_dir_path.mkdir().directory - er_bids_path = BIDSPath(subject='emptyroom', session=er_session, - task='noise', check=False) + er_bids_path = BIDSPath( + subject="emptyroom", session=er_session, task="noise", check=False + ) er_basename = er_bids_path.basename raw = _read_raw_fif(raw_fname) - er_raw_fname = op.join(data_path, 'MEG', 'sample', 'ernoise_raw.fif') + er_raw_fname = op.join(data_path, "MEG", "sample", "ernoise_raw.fif") raw.copy().crop(0, 10).save(er_raw_fname, overwrite=True) er_raw = _read_raw_fif(er_raw_fname) er_raw.set_meas_date(er_meas_date) - er_raw.save(op.join(er_dir, f'{er_basename}_meg.fif'), overwrite=True) + er_raw.save(op.join(er_dir, f"{er_basename}_meg.fif"), overwrite=True) # Write raw file data using mne-bids, and remove participants.tsv # as it's incomplete (doesn't contain the emptyroom subject we wrote # manually using MNE's Raw.save() above) raw = _read_raw_fif(raw_fname) write_raw_bids(raw, bids_path, overwrite=True) - 
os.remove(op.join(bids_root, 'participants.tsv'))
+    os.remove(op.join(bids_root, "participants.tsv"))

-    with pytest.warns(RuntimeWarning, match='Could not retrieve .* date'):
+    with pytest.warns(RuntimeWarning, match="Could not retrieve .* date"):
         bids_path.find_empty_room()

@@ -1136,9 +1281,9 @@ def test_bids_path_label_vs_index_entity():
         BIDSPath(subject=1)
     match = "root must be an instance of path-like or None"
     with pytest.raises(TypeError, match=match):
-        BIDSPath(root=1, subject='01')
-    BIDSPath(subject='01', run=1)  # ok as entity
-    BIDSPath(subject='01', split=1)  # ok as entity
+        BIDSPath(root=1, subject="01")
+    BIDSPath(subject="01", run=1)  # ok as entity
+    BIDSPath(subject="01", split=1)  # ok as entity


 @testing.requires_testing_data
@@ -1147,28 +1292,27 @@ def test_meg_calibration_fpath(return_bids_test_dir):

     # File exists, so BIDSPath.meg_calibration_fpath should return a non-None
     # value.
-    bids_path_ = _bids_path.copy().update(subject='01', root=bids_root)
+    bids_path_ = _bids_path.copy().update(subject="01", root=bids_root)
     assert bids_path_.meg_calibration_fpath is not None

     # subject not set.
     bids_path_ = _bids_path.copy().update(root=bids_root, subject=None)
-    with pytest.raises(ValueError, match='root and subject must be set'):
+    with pytest.raises(ValueError, match="root and subject must be set"):
         bids_path_.meg_calibration_fpath

     # root not set.
-    bids_path_ = _bids_path.copy().update(subject='01', root=None)
-    with pytest.raises(ValueError, match='root and subject must be set'):
+    bids_path_ = _bids_path.copy().update(subject="01", root=None)
+    with pytest.raises(ValueError, match="root and subject must be set"):
         bids_path_.meg_calibration_fpath

     # datatype is not 'meg'.
-    bids_path_ = _bids_path.copy().update(subject='01', root=bids_root,
-                                          datatype='eeg')
-    with pytest.raises(ValueError, match='Can only find .* for MEG'):
+    bids_path_ = _bids_path.copy().update(subject="01", root=bids_root, datatype="eeg")
+    with pytest.raises(ValueError, match="Can only find .* for MEG"):
         bids_path_.meg_calibration_fpath

     # Delete the fine-calibration file. BIDSPath.meg_calibration_fpath
     # should then return None.
-    bids_path_ = _bids_path.copy().update(subject='01', root=bids_root)
+    bids_path_ = _bids_path.copy().update(subject="01", root=bids_root)
     Path(bids_path_.meg_calibration_fpath).unlink()
     assert bids_path_.meg_calibration_fpath is None

@@ -1179,58 +1323,63 @@ def test_meg_crosstalk_fpath(return_bids_test_dir):

     # File exists, so BIDSPath.crosstalk_fpath should return a non-None
     # value.
-    bids_path = _bids_path.copy().update(subject='01', root=bids_root)
+    bids_path = _bids_path.copy().update(subject="01", root=bids_root)
     assert bids_path.meg_crosstalk_fpath is not None

     # subject not set.
     bids_path = _bids_path.copy().update(root=bids_root, subject=None)
-    with pytest.raises(ValueError, match='root and subject must be set'):
+    with pytest.raises(ValueError, match="root and subject must be set"):
         bids_path.meg_crosstalk_fpath

     # root not set.
-    bids_path = _bids_path.copy().update(subject='01', root=None)
-    with pytest.raises(ValueError, match='root and subject must be set'):
+    bids_path = _bids_path.copy().update(subject="01", root=None)
+    with pytest.raises(ValueError, match="root and subject must be set"):
         bids_path.meg_crosstalk_fpath

     # datatype is not 'meg'.
-    bids_path = _bids_path.copy().update(subject='01', root=bids_root,
-                                         datatype='eeg')
-    with pytest.raises(ValueError, match='Can only find .* for MEG'):
+    bids_path = _bids_path.copy().update(subject="01", root=bids_root, datatype="eeg")
+    with pytest.raises(ValueError, match="Can only find .* for MEG"):
         bids_path.meg_crosstalk_fpath

     # Delete the crosstalk file. BIDSPath.meg_crosstalk_fpath should then
     # return None.
-    bids_path = _bids_path.copy().update(subject='01', root=bids_root)
+    bids_path = _bids_path.copy().update(subject="01", root=bids_root)
     Path(bids_path.meg_crosstalk_fpath).unlink()
     assert bids_path.meg_crosstalk_fpath is None


 @testing.requires_testing_data
 def test_datasetdescription_with_bidspath(return_bids_test_dir):
-    with pytest.raises(ValueError, match='Unallowed'):
+    with pytest.raises(ValueError, match="Unallowed"):
         bids_path = BIDSPath(
-            root=return_bids_test_dir, suffix='dataset_description',
-            extension='.json')
+            root=return_bids_test_dir, suffix="dataset_description", extension=".json"
+        )

     # initialization should work
     bids_path = BIDSPath(
-        root=return_bids_test_dir, suffix='dataset_description',
-        extension='.json', check=False)
-    assert bids_path.fpath.as_posix() == \
-        Path(f'{return_bids_test_dir}/dataset_description.json').as_posix()
+        root=return_bids_test_dir,
+        suffix="dataset_description",
+        extension=".json",
+        check=False,
+    )
+    assert (
+        bids_path.fpath.as_posix()
+        == Path(f"{return_bids_test_dir}/dataset_description.json").as_posix()
+    )

     # setting it via update should work
-    bids_path = BIDSPath(root=return_bids_test_dir,
-                         extension='.json', check=True)
-    bids_path.update(suffix='dataset_description', check=False)
-    assert bids_path.fpath.as_posix() == \
-        Path(f'{return_bids_test_dir}/dataset_description.json').as_posix()
+    bids_path = BIDSPath(root=return_bids_test_dir, extension=".json", check=True)
+    bids_path.update(suffix="dataset_description", check=False)
+    assert (
+        bids_path.fpath.as_posix()
+        == Path(f"{return_bids_test_dir}/dataset_description.json").as_posix()
+    )


 def test_update_fail_check_no_change():
-    bids_path = BIDSPath(subject='test')
+    bids_path = BIDSPath(subject="test")
     try:
-        bids_path.update(suffix='ave')
+        bids_path.update(suffix="ave")
     except Exception:
         pass
     assert bids_path.suffix is None
@@ -1238,15 +1387,15 @@

 def test_setting_entities():
     """Test setting entities via assignment."""
-    bids_path = BIDSPath(subject='test', check=False)
+    bids_path = BIDSPath(subject="test", check=False)
     for entity_name in bids_path.entities:
-        if entity_name in ['dataype', 'suffix']:
+        if entity_name in ["datatype", "suffix"]:
             continue

-        if entity_name in ['run', 'split']:
-            value = '1'
+        if entity_name in ["run", "split"]:
+            value = "1"
         else:
-            value = 'foo'
+            value = "foo"

         setattr(bids_path, entity_name, value)
         assert getattr(bids_path, entity_name) == value
@@ -1257,12 +1406,12 @@

 def test_deprecation():
     """Test deprecated behavior."""
-    with pytest.warns(FutureWarning, match='This will raise an exception'):
-        BIDSPath(extension='vhdr')  # no leading period
+    with pytest.warns(FutureWarning, match="This will raise an exception"):
+        BIDSPath(extension="vhdr")  # no leading period


 def test_dont_create_dirs_on_fpath_access(tmp_path):
     """Regression test: don't create directories when accessing .fpath."""
-    bp = BIDSPath(subject='01', datatype='eeg', root=tmp_path)
+    bp = BIDSPath(subject="01", datatype="eeg", root=tmp_path)
     bp.fpath  # accessing .fpath is required for
this regression test - assert not (tmp_path / 'sub-01').exists() + assert not (tmp_path / "sub-01").exists() diff --git a/mne_bids/tests/test_pick.py b/mne_bids/tests/test_pick.py index ad11d88db..61320ac49 100644 --- a/mne_bids/tests/test_pick.py +++ b/mne_bids/tests/test_pick.py @@ -16,12 +16,11 @@ @testing.requires_testing_data def test_coil_type(): """Test the correct coil type is retrieved.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = read_raw_fif(raw_fname) - assert coil_type(raw.info, 0) == 'meggradplanar' - assert coil_type(raw.info, 2) == 'megmag' - assert coil_type(raw.info, 306) == 'misc' - assert coil_type(raw.info, 315) == 'eeg' - raw.info['chs'][0]['coil_type'] = 1234 - assert coil_type(raw.info, 0) == 'n/a' + assert coil_type(raw.info, 0) == "meggradplanar" + assert coil_type(raw.info, 2) == "megmag" + assert coil_type(raw.info, 306) == "misc" + assert coil_type(raw.info, 315) == "eeg" + raw.info["chs"][0]["coil_type"] = 1234 + assert coil_type(raw.info, 0) == "n/a" diff --git a/mne_bids/tests/test_read.py b/mne_bids/tests/test_read.py index c1fdf929a..39fb1ece6 100644 --- a/mne_bids/tests/test_read.py +++ b/mne_bids/tests/test_read.py @@ -22,54 +22,60 @@ from mne.utils import assert_dig_allclose from mne_bids import BIDSPath -from mne_bids.config import (MNE_STR_TO_FRAME, BIDS_SHARED_COORDINATE_FRAMES, - BIDS_TO_MNE_FRAMES) -from mne_bids.read import (read_raw_bids, _read_raw, get_head_mri_trans, - _handle_events_reading, _handle_scans_reading) +from mne_bids.config import ( + MNE_STR_TO_FRAME, + BIDS_SHARED_COORDINATE_FRAMES, + BIDS_TO_MNE_FRAMES, +) +from mne_bids.read import ( + read_raw_bids, + _read_raw, + get_head_mri_trans, + _handle_events_reading, + _handle_scans_reading, +) from mne_bids.tsv_handler import _to_tsv, _from_tsv -from mne_bids.utils import (_write_json) +from mne_bids.utils import _write_json from mne_bids.sidecar_updates import _update_sidecar from mne_bids.path import _find_matching_sidecar import mne_bids.write from mne_bids.write import write_anat, write_raw_bids, get_anat_landmarks -subject_id = '01' -session_id = '01' -run = '01' -acq = '01' -task = 'testing' +subject_id = "01" +session_id = "01" +run = "01" +acq = "01" +task = "testing" _bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task) + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task +) _bids_path_minimal = BIDSPath(subject=subject_id, task=task) # Get the MNE testing sample data - USA data_path = testing.data_path(download=False) -raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') +raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") # Data with cHPI info -raw_fname_chpi = op.join(data_path, 'SSS', 'test_move_anon_raw.fif') +raw_fname_chpi = op.join(data_path, "SSS", "test_move_anon_raw.fif") # Tiny BIDS testing dataset mne_bids_root = Path(mne_bids.__file__).parents[1] tiny_bids_root = mne_bids_root / "mne_bids" / "tests" / "data" / "tiny_bids" warning_str = dict( - channel_unit_changed='ignore:The unit for chann*.:RuntimeWarning:mne', - meas_date_set_to_none="ignore:.*'meas_date' set to None:RuntimeWarning:" - "mne", - nasion_not_found='ignore:.*nasion not found:RuntimeWarning:mne', - maxshield='ignore:.*Internal Active Shielding:RuntimeWarning:mne' + channel_unit_changed="ignore:The unit for chann*.:RuntimeWarning:mne", + 
meas_date_set_to_none="ignore:.*'meas_date' set to None:RuntimeWarning:" "mne", + nasion_not_found="ignore:.*nasion not found:RuntimeWarning:mne", + maxshield="ignore:.*Internal Active Shielding:RuntimeWarning:mne", ) def _wrap_read_raw(read_raw): def fn(fname, *args, **kwargs): raw = read_raw(fname, *args, **kwargs) - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 return raw return fn @@ -83,141 +89,144 @@ def fn(fname, *args, **kwargs): def test_read_raw(): """Test the raw reading.""" # Use a file ending that does not exist - f = 'file.bogus' - with pytest.raises(ValueError, match='file name extension must be one of'): + f = "file.bogus" + with pytest.raises(ValueError, match="file name extension must be one of"): _read_raw(f) def test_not_implemented(tmp_path): """Test the not yet implemented data formats raise an adequate error.""" - for not_implemented_ext in ['.mef', '.nwb']: - raw_fname = tmp_path / f'test{not_implemented_ext}' - with open(raw_fname, 'w', encoding='utf-8'): + for not_implemented_ext in [".mef", ".nwb"]: + raw_fname = tmp_path / f"test{not_implemented_ext}" + with open(raw_fname, "w", encoding="utf-8"): pass - with pytest.raises(ValueError, match=('there is no IO support for ' - 'this file format yet')): + with pytest.raises( + ValueError, match=("there is no IO support for " "this file format yet") + ): _read_raw(raw_fname) def test_read_correct_inputs(): """Test that inputs of read functions are correct.""" - bids_path = 'sub-01_ses-01_meg.fif' - with pytest.raises(RuntimeError, match='"bids_path" must be a ' - 'BIDSPath object'): + bids_path = "sub-01_ses-01_meg.fif" + with pytest.raises(RuntimeError, match='"bids_path" must be a ' "BIDSPath object"): read_raw_bids(bids_path) - with pytest.raises(RuntimeError, match='"bids_path" must be a ' - 'BIDSPath object'): + with pytest.raises(RuntimeError, match='"bids_path" must be a ' "BIDSPath object"): get_head_mri_trans(bids_path) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_read_participants_data(tmp_path): """Test reading information from a BIDS sidecar.json file.""" - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg') + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg") raw = _read_raw_fif(raw_fname, verbose=False) # if subject info was set, we don't roundtrip birthday # due to possible anonymization in mne-bids - subject_info = { - 'hand': 1, - 'sex': 2, - 'weight': 70.5, - 'height': 180.5 - } - raw.info['subject_info'] = subject_info + subject_info = {"hand": 1, "sex": 2, "weight": 70.5, "height": 180.5} + raw.info["subject_info"] = subject_info write_raw_bids(raw, bids_path, overwrite=True, verbose=False) raw = read_raw_bids(bids_path=bids_path) - assert raw.info['subject_info']['hand'] == 1 - assert raw.info['subject_info']['weight'] == 70.5 - assert raw.info['subject_info']['height'] == 180.5 - assert raw.info['subject_info'].get('birthday', None) is None - assert raw.info['subject_info']['his_id'] == f'sub-{bids_path.subject}' - assert 'participant_id' not in raw.info['subject_info'] + assert raw.info["subject_info"]["hand"] == 1 + assert raw.info["subject_info"]["weight"] == 70.5 + assert raw.info["subject_info"]["height"] == 180.5 + assert raw.info["subject_info"].get("birthday", None) is None + assert raw.info["subject_info"]["his_id"] == f"sub-{bids_path.subject}" + assert "participant_id" not in raw.info["subject_info"] # if modifying 
participants tsv, then read_raw_bids reflects that - participants_tsv_fpath = tmp_path / 'participants.tsv' + participants_tsv_fpath = tmp_path / "participants.tsv" participants_tsv = _from_tsv(participants_tsv_fpath) - participants_tsv['hand'][0] = 'n/a' + participants_tsv["hand"][0] = "n/a" _to_tsv(participants_tsv, participants_tsv_fpath) raw = read_raw_bids(bids_path=bids_path) - assert raw.info['subject_info']['hand'] == 0 - assert raw.info['subject_info']['sex'] == 2 - assert raw.info['subject_info']['weight'] == 70.5 - assert raw.info['subject_info']['height'] == 180.5 - assert raw.info['subject_info'].get('birthday', None) is None + assert raw.info["subject_info"]["hand"] == 0 + assert raw.info["subject_info"]["sex"] == 2 + assert raw.info["subject_info"]["weight"] == 70.5 + assert raw.info["subject_info"]["height"] == 180.5 + assert raw.info["subject_info"].get("birthday", None) is None # make sure things are read even if the entries don't make sense participants_tsv = _from_tsv(participants_tsv_fpath) - participants_tsv['hand'][0] = 'righty' - participants_tsv['sex'][0] = 'malesy' + participants_tsv["hand"][0] = "righty" + participants_tsv["sex"][0] = "malesy" # 'n/a' values should get omitted - participants_tsv['weight'] = ['n/a'] - participants_tsv['height'] = ['tall'] + participants_tsv["weight"] = ["n/a"] + participants_tsv["height"] = ["tall"] _to_tsv(participants_tsv, participants_tsv_fpath) - with pytest.warns(RuntimeWarning, match='Unable to map'): + with pytest.warns(RuntimeWarning, match="Unable to map"): raw = read_raw_bids(bids_path=bids_path) - assert 'hand' not in raw.info['subject_info'] - assert 'sex' not in raw.info['subject_info'] - assert 'weight' not in raw.info['subject_info'] - assert 'height' not in raw.info['subject_info'] + assert "hand" not in raw.info["subject_info"] + assert "sex" not in raw.info["subject_info"] + assert "weight" not in raw.info["subject_info"] + assert "height" not in raw.info["subject_info"] # test reading if participants.tsv is missing raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) participants_tsv_fpath.unlink() - with pytest.warns(RuntimeWarning, match='participants.tsv file not found'): + with pytest.warns(RuntimeWarning, match="participants.tsv file not found"): raw = read_raw_bids(bids_path=bids_path) - assert raw.info['subject_info'] == dict() + assert raw.info["subject_info"] == dict() @pytest.mark.parametrize( - ('hand_bids', 'hand_mne', 'sex_bids', 'sex_mne'), - [('Right', 1, 'Female', 2), - ('RIGHT', 1, 'FEMALE', 2), - ('R', 1, 'F', 2), - ('left', 2, 'male', 1), - ('l', 2, 'm', 1)] + ("hand_bids", "hand_mne", "sex_bids", "sex_mne"), + [ + ("Right", 1, "Female", 2), + ("RIGHT", 1, "FEMALE", 2), + ("R", 1, "F", 2), + ("left", 2, "male", 1), + ("l", 2, "m", 1), + ], ) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data -def test_read_participants_handedness_and_sex_mapping(hand_bids, hand_mne, - sex_bids, sex_mne, - tmp_path): +def test_read_participants_handedness_and_sex_mapping( + hand_bids, hand_mne, sex_bids, sex_mne, tmp_path +): """Test we're correctly mapping handedness and sex between BIDS and MNE.""" - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg') - participants_tsv_fpath = tmp_path / 'participants.tsv' + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg") + participants_tsv_fpath = tmp_path / 
"participants.tsv" raw = _read_raw_fif(raw_fname, verbose=False) # Avoid that we end up with subject information stored in the raw data. - raw.info['subject_info'] = {} + raw.info["subject_info"] = {} write_raw_bids(raw, bids_path, overwrite=True, verbose=False) participants_tsv = _from_tsv(participants_tsv_fpath) - participants_tsv['hand'][0] = hand_bids - participants_tsv['sex'][0] = sex_bids + participants_tsv["hand"][0] = hand_bids + participants_tsv["sex"][0] = sex_bids _to_tsv(participants_tsv, participants_tsv_fpath) raw = read_raw_bids(bids_path=bids_path) - assert raw.info['subject_info']['hand'] is hand_mne - assert raw.info['subject_info']['sex'] is sex_mne + assert raw.info["subject_info"]["hand"] is hand_mne + assert raw.info["subject_info"]["sex"] is sex_mne -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_get_head_mri_trans(tmp_path): """Test getting a trans object from BIDS data.""" - nib = pytest.importorskip('nibabel') - - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') - subjects_dir = op.join(data_path, 'subjects') + nib = pytest.importorskip("nibabel") + + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) + subjects_dir = op.join(data_path, "subjects") # Drop unknown events. events = mne.read_events(events_fname) @@ -225,32 +234,27 @@ def test_get_head_mri_trans(tmp_path): # Write it to BIDS raw = _read_raw_fif(raw_fname) - bids_path = _bids_path.copy().update( - root=tmp_path, datatype='meg', suffix='meg' - ) - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - overwrite=False) + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg", suffix="meg") + write_raw_bids(raw, bids_path, events=events, event_id=event_id, overwrite=False) # We cannot recover trans if no MRI has yet been written - with pytest.raises(FileNotFoundError, match='Did not find'): + with pytest.raises(FileNotFoundError, match="Did not find"): estimated_trans = get_head_mri_trans( - bids_path=bids_path, fs_subject='sample', - fs_subjects_dir=subjects_dir) + bids_path=bids_path, fs_subject="sample", fs_subjects_dir=subjects_dir + ) # Write some MRI data and supply a `trans` so that a sidecar gets written - trans = mne.read_trans(raw_fname.replace('_raw.fif', '-trans.fif')) + trans = mne.read_trans(raw_fname.replace("_raw.fif", "-trans.fif")) # Get the T1 weighted MRI data file ... 
test write_anat with a nibabel # image instead of a file path - t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz') + t1w_mgh = op.join(data_path, "subjects", "sample", "mri", "T1.mgz") t1w_mgh = nib.load(t1w_mgh) landmarks = get_anat_landmarks( - t1w_mgh, raw.info, trans, fs_subject='sample', - fs_subjects_dir=subjects_dir) - t1w_bids_path = bids_path.copy().update( - datatype='anat', suffix='T1w' + t1w_mgh, raw.info, trans, fs_subject="sample", fs_subjects_dir=subjects_dir ) + t1w_bids_path = bids_path.copy().update(datatype="anat", suffix="T1w") t1w_bids_path = write_anat( t1w_mgh, bids_path=t1w_bids_path, landmarks=landmarks, verbose=True ) @@ -258,22 +262,24 @@ def test_get_head_mri_trans(tmp_path): # Try to get trans back through fitting points estimated_trans = get_head_mri_trans( - bids_path=bids_path, fs_subject='sample', fs_subjects_dir=subjects_dir) + bids_path=bids_path, fs_subject="sample", fs_subjects_dir=subjects_dir + ) - assert trans['from'] == estimated_trans['from'] - assert trans['to'] == estimated_trans['to'] - assert_almost_equal(trans['trans'], estimated_trans['trans']) + assert trans["from"] == estimated_trans["from"] + assert trans["to"] == estimated_trans["to"] + assert_almost_equal(trans["trans"], estimated_trans["trans"]) # provoke an error by introducing NaNs into MEG coords - raw.info['dig'][0]['r'] = np.full(3, np.nan) + raw.info["dig"][0]["r"] = np.full(3, np.nan) sh.rmtree(anat_dir) - bad_landmarks = get_anat_landmarks(t1w_mgh, raw.info, trans, 'sample', - op.join(data_path, 'subjects')) + bad_landmarks = get_anat_landmarks( + t1w_mgh, raw.info, trans, "sample", op.join(data_path, "subjects") + ) write_anat(t1w_mgh, bids_path=t1w_bids_path, landmarks=bad_landmarks) - with pytest.raises(RuntimeError, match='AnatomicalLandmarkCoordinates'): - estimated_trans = get_head_mri_trans(bids_path=t1w_bids_path, - fs_subject='sample', - fs_subjects_dir=subjects_dir) + with pytest.raises(RuntimeError, match="AnatomicalLandmarkCoordinates"): + estimated_trans = get_head_mri_trans( + bids_path=t1w_bids_path, fs_subject="sample", fs_subjects_dir=subjects_dir + ) # test raw with no fiducials to provoke error t1w_bids_path = write_anat( # put back @@ -287,97 +293,114 @@ def test_get_head_mri_trans(tmp_path): ctx = nullcontext() with ctx: - get_head_mri_trans(bids_path=bids_path, fs_subject='sample', - fs_subjects_dir=subjects_dir) + get_head_mri_trans( + bids_path=bids_path, fs_subject="sample", fs_subjects_dir=subjects_dir + ) # test we are permissive for different casings of landmark names in the # sidecar, and also accept "nasion" instead of just "NAS" raw = _read_raw_fif(raw_fname) - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - overwrite=True) # overwrite with new acq + write_raw_bids( + raw, bids_path, events=events, event_id=event_id, overwrite=True + ) # overwrite with new acq t1w_bids_path = write_anat( t1w_mgh, bids_path=t1w_bids_path, landmarks=landmarks, overwrite=True ) - t1w_json_fpath = t1w_bids_path.copy().update(extension='.json').fpath - with t1w_json_fpath.open('r', encoding='utf-8') as f: + t1w_json_fpath = t1w_bids_path.copy().update(extension=".json").fpath + with t1w_json_fpath.open("r", encoding="utf-8") as f: t1w_json = json.load(f) - coords = t1w_json['AnatomicalLandmarkCoordinates'] - coords['lpa'] = coords['LPA'] - coords['Rpa'] = coords['RPA'] - coords['Nasion'] = coords['NAS'] - del coords['LPA'], coords['RPA'], coords['NAS'] + coords = t1w_json["AnatomicalLandmarkCoordinates"] + coords["lpa"] = 
coords["LPA"] + coords["Rpa"] = coords["RPA"] + coords["Nasion"] = coords["NAS"] + del coords["LPA"], coords["RPA"], coords["NAS"] _write_json(t1w_json_fpath, t1w_json, overwrite=True) estimated_trans = get_head_mri_trans( - bids_path=bids_path, - fs_subject='sample', fs_subjects_dir=subjects_dir) - assert_almost_equal(trans['trans'], estimated_trans['trans']) + bids_path=bids_path, fs_subject="sample", fs_subjects_dir=subjects_dir + ) + assert_almost_equal(trans["trans"], estimated_trans["trans"]) # Test t1_bids_path parameter # # Case 1: different BIDS roots meg_bids_path = _bids_path.copy().update( - root=tmp_path / 'meg_root', datatype='meg', suffix='meg' + root=tmp_path / "meg_root", datatype="meg", suffix="meg" ) t1_bids_path = _bids_path.copy().update( - root=tmp_path / 'mri_root', task=None, run=None + root=tmp_path / "mri_root", task=None, run=None ) raw = _read_raw_fif(raw_fname) write_raw_bids(raw, bids_path=meg_bids_path) landmarks = get_anat_landmarks( - t1w_mgh, raw.info, trans, fs_subject='sample', - fs_subjects_dir=subjects_dir) + t1w_mgh, raw.info, trans, fs_subject="sample", fs_subjects_dir=subjects_dir + ) write_anat(t1w_mgh, bids_path=t1_bids_path, landmarks=landmarks) read_trans = get_head_mri_trans( - bids_path=meg_bids_path, t1_bids_path=t1_bids_path, - fs_subject='sample', fs_subjects_dir=subjects_dir) - assert np.allclose(trans['trans'], read_trans['trans']) + bids_path=meg_bids_path, + t1_bids_path=t1_bids_path, + fs_subject="sample", + fs_subjects_dir=subjects_dir, + ) + assert np.allclose(trans["trans"], read_trans["trans"]) # Case 2: different sessions raw = _read_raw_fif(raw_fname) meg_bids_path = _bids_path.copy().update( - root=tmp_path / 'session_test', session='01', datatype='meg', - suffix='meg' + root=tmp_path / "session_test", session="01", datatype="meg", suffix="meg" ) t1_bids_path = meg_bids_path.copy().update( - session='02', task=None, run=None, datatype='anat', suffix='T1w' + session="02", task=None, run=None, datatype="anat", suffix="T1w" ) write_raw_bids(raw, bids_path=meg_bids_path) write_anat(t1w_mgh, bids_path=t1_bids_path, landmarks=landmarks) read_trans = get_head_mri_trans( - bids_path=meg_bids_path, t1_bids_path=t1_bids_path, - fs_subject='sample', fs_subjects_dir=subjects_dir) - assert np.allclose(trans['trans'], read_trans['trans']) + bids_path=meg_bids_path, + t1_bids_path=t1_bids_path, + fs_subject="sample", + fs_subjects_dir=subjects_dir, + ) + assert np.allclose(trans["trans"], read_trans["trans"]) # Test that incorrect subject directory throws error - with pytest.raises(ValueError, match='Could not find'): + with pytest.raises(ValueError, match="Could not find"): estimated_trans = get_head_mri_trans( - bids_path=bids_path, fs_subject='bad', - fs_subjects_dir=subjects_dir) + bids_path=bids_path, fs_subject="bad", fs_subjects_dir=subjects_dir + ) # Case 3: write with suffix for kind landmarks2 = landmarks.copy() - landmarks2.dig[0]['r'] *= -1 - landmarks2.save(tmp_path / 'landmarks2.fif') - landmarks2 = tmp_path / 'landmarks2.fif' - write_anat(t1w_mgh, bids_path=t1_bids_path, overwrite=True, - deface=True, - landmarks={"coreg": landmarks, "deface": landmarks2}) + landmarks2.dig[0]["r"] *= -1 + landmarks2.save(tmp_path / "landmarks2.fif") + landmarks2 = tmp_path / "landmarks2.fif" + write_anat( + t1w_mgh, + bids_path=t1_bids_path, + overwrite=True, + deface=True, + landmarks={"coreg": landmarks, "deface": landmarks2}, + ) read_trans1 = get_head_mri_trans( - bids_path=meg_bids_path, t1_bids_path=t1_bids_path, - fs_subject='sample', 
fs_subjects_dir=subjects_dir,
-        kind="coreg")
-    assert np.allclose(trans['trans'], read_trans1['trans'])
+        bids_path=meg_bids_path,
+        t1_bids_path=t1_bids_path,
+        fs_subject="sample",
+        fs_subjects_dir=subjects_dir,
+        kind="coreg",
+    )
+    assert np.allclose(trans["trans"], read_trans1["trans"])

     read_trans2 = get_head_mri_trans(
-        bids_path=meg_bids_path, t1_bids_path=t1_bids_path,
-        fs_subject='sample', fs_subjects_dir=subjects_dir,
-        kind="deface")
-    assert not np.allclose(trans['trans'], read_trans2['trans'])
+        bids_path=meg_bids_path,
+        t1_bids_path=t1_bids_path,
+        fs_subject="sample",
+        fs_subjects_dir=subjects_dir,
+        kind="deface",
+    )
+    assert not np.allclose(trans["trans"], read_trans2["trans"])

     # Test we're respecting existing suffix & data type
     # The following path is supposed to mimic a derivative generated by the
@@ -387,60 +410,69 @@ def test_get_head_mri_trans(tmp_path):
     # BIDS-compatible output, e.g. including `channels.tsv` files for written
     # Raw data etc.
     raw = _read_raw_fif(raw_fname)
-    deriv_root = tmp_path / 'derivatives' / 'mne-bids-pipeline'
+    deriv_root = tmp_path / "derivatives" / "mne-bids-pipeline"
     electrophys_path = (
-        deriv_root / 'sub-01' / 'eeg' / 'sub-01_task-av_proc-filt_raw.fif'
+        deriv_root / "sub-01" / "eeg" / "sub-01_task-av_proc-filt_raw.fif"
     )
     electrophys_path.parent.mkdir(parents=True)
     raw.save(electrophys_path)

     electrophys_bids_path = BIDSPath(
-        subject='01', task='av', datatype='eeg', processing='filt',
-        suffix='raw', extension='.fif', root=deriv_root,
-        check=False
+        subject="01",
+        task="av",
+        datatype="eeg",
+        processing="filt",
+        suffix="raw",
+        extension=".fif",
+        root=deriv_root,
+        check=False,
     )
     t1_bids_path = _bids_path.copy().update(
-        root=tmp_path / 'mri_root', task=None, run=None
+        root=tmp_path / "mri_root", task=None, run=None
     )
-    with pytest.warns(RuntimeWarning, match='Did not find any channels.tsv'):
+    with pytest.warns(RuntimeWarning, match="Did not find any channels.tsv"):
         get_head_mri_trans(
             bids_path=electrophys_bids_path,
             t1_bids_path=t1_bids_path,
-            fs_subject='sample',
-            fs_subjects_dir=subjects_dir
+            fs_subject="sample",
+            fs_subjects_dir=subjects_dir,
         )

     # bids_path without datatype is deprecated
     bids_path = electrophys_bids_path.copy().update(datatype=None)
     with pytest.raises(FileNotFoundError):  # default location is all wrong!
-        with pytest.warns(DeprecationWarning, match='no datatype'):
+        with pytest.warns(DeprecationWarning, match="no datatype"):
             get_head_mri_trans(
                 bids_path=bids_path,
                 t1_bids_path=t1_bids_path,
-                fs_subject='sample',
-                fs_subjects_dir=subjects_dir
+                fs_subject="sample",
+                fs_subjects_dir=subjects_dir,
             )

     # bids_path without suffix is deprecated
     bids_path = electrophys_bids_path.copy().update(suffix=None)
     with pytest.raises(FileNotFoundError):  # default location is all wrong!
- with pytest.warns(DeprecationWarning, match='no datatype'): + with pytest.warns(DeprecationWarning, match="no datatype"): get_head_mri_trans( bids_path=bids_path, t1_bids_path=t1_bids_path, - fs_subject='sample', - fs_subjects_dir=subjects_dir + fs_subject="sample", + fs_subjects_dir=subjects_dir, ) # Should fail for an unsupported coordinate frame raw = _read_raw_fif(raw_fname) - bids_root = tmp_path / 'unsupported_coord_frame' + bids_root = tmp_path / "unsupported_coord_frame" bids_path = BIDSPath( - subject='01', task='av', datatype='meg', suffix='meg', - extension='.fif', root=bids_root + subject="01", + task="av", + datatype="meg", + suffix="meg", + extension=".fif", + root=bids_root, ) t1_bids_path = _bids_path.copy().update( - root=tmp_path / 'mri_root', task=None, run=None + root=tmp_path / "mri_root", task=None, run=None ) write_raw_bids(raw=raw, bids_path=bids_path, verbose=False) @@ -453,10 +485,12 @@ def test_handle_events_reading(tmp_path): # Create an arbitrary events.tsv file, to test we can deal with 'n/a' # make sure we can deal w/ "#" characters - events = {'onset': [11, 12, 'n/a'], - 'duration': ['n/a', 'n/a', 'n/a'], - 'trial_type': ["rec start", "trial #1", "trial #2!"]} - events_fname = tmp_path / 'bids1' / 'sub-01_task-test_events.json' + events = { + "onset": [11, 12, "n/a"], + "duration": ["n/a", "n/a", "n/a"], + "trial_type": ["rec start", "trial #1", "trial #2!"], + } + events_fname = tmp_path / "bids1" / "sub-01_task-test_events.json" events_fname.parent.mkdir() _to_tsv(events, events_fname) @@ -464,24 +498,28 @@ def test_handle_events_reading(tmp_path): events, event_id = mne.events_from_annotations(raw) # Test with a `stim_type` column instead of `trial_type`. - events = {'onset': [11, 12, 'n/a'], - 'duration': ['n/a', 'n/a', 'n/a'], - 'stim_type': ["rec start", "trial #1", "trial #2!"]} - events_fname = tmp_path / 'bids2' / 'sub-01_task-test_events.json' + events = { + "onset": [11, 12, "n/a"], + "duration": ["n/a", "n/a", "n/a"], + "stim_type": ["rec start", "trial #1", "trial #2!"], + } + events_fname = tmp_path / "bids2" / "sub-01_task-test_events.json" events_fname.parent.mkdir() _to_tsv(events, events_fname) - with pytest.warns(RuntimeWarning, match='This column should be renamed'): + with pytest.warns(RuntimeWarning, match="This column should be renamed"): raw = _handle_events_reading(events_fname, raw) events, event_id = mne.events_from_annotations(raw) # Test with same `trial_type` referring to different `value`: # The events should be renamed automatically - events = {'onset': [11, 12, 13, 14, 15], - 'duration': ['n/a', 'n/a', 'n/a', 'n/a', 'n/a'], - 'trial_type': ["event1", "event1", "event2", "event3", "event3"], - 'value': [1, 2, 3, 4, 'n/a']} - events_fname = tmp_path / 'bids3' / 'sub-01_task-test_events.json' + events = { + "onset": [11, 12, 13, 14, 15], + "duration": ["n/a", "n/a", "n/a", "n/a", "n/a"], + "trial_type": ["event1", "event1", "event2", "event3", "event3"], + "value": [1, 2, 3, 4, "n/a"], + } + events_fname = tmp_path / "bids3" / "sub-01_task-test_events.json" events_fname.parent.mkdir() _to_tsv(events, events_fname) @@ -489,17 +527,16 @@ def test_handle_events_reading(tmp_path): events, event_id = mne.events_from_annotations(raw) assert len(events) == 5 - assert 'event1/1' in event_id - assert 'event1/2' in event_id - assert 'event3/4' in event_id - assert 'event3/na' in event_id # 'n/a' value should become 'na' + assert "event1/1" in event_id + assert "event1/2" in event_id + assert "event3/4" in event_id + assert "event3/na" 
in event_id # 'n/a' value should become 'na' # The event with unique value mapping should not be renamed - assert 'event2' in event_id + assert "event2" in event_id # Test without any kind of event description. - events = {'onset': [11, 12, 'n/a'], - 'duration': ['n/a', 'n/a', 'n/a']} - events_fname = tmp_path / 'bids4' / 'sub-01_task-test_events.json' + events = {"onset": [11, 12, "n/a"], "duration": ["n/a", "n/a", "n/a"]} + events_fname = tmp_path / "bids4" / "sub-01_task-test_events.json" events_fname.parent.mkdir() _to_tsv(events, events_fname) @@ -507,33 +544,32 @@ def test_handle_events_reading(tmp_path): events, event_id = mne.events_from_annotations(raw) ids = list(event_id.keys()) assert len(ids) == 1 - assert ids == ['n/a'] + assert ids == ["n/a"] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_keep_essential_annotations(tmp_path): """Test that essential Annotations are not omitted during I/O roundtrip.""" raw = _read_raw_fif(raw_fname) - annotations = mne.Annotations(onset=[raw.times[0]], duration=[1], - description=['BAD_ACQ_SKIP']) + annotations = mne.Annotations( + onset=[raw.times[0]], duration=[1], description=["BAD_ACQ_SKIP"] + ) raw.set_annotations(annotations) # Write data, remove events.tsv, then try to read again - bids_path = BIDSPath(subject='01', task='task', datatype='meg', - root=tmp_path) - with pytest.warns(RuntimeWarning, match='Acquisition skips detected'): + bids_path = BIDSPath(subject="01", task="task", datatype="meg", root=tmp_path) + with pytest.warns(RuntimeWarning, match="Acquisition skips detected"): write_raw_bids(raw, bids_path, overwrite=True) - bids_path.copy().update(suffix='events', extension='.tsv').fpath.unlink() + bids_path.copy().update(suffix="events", extension=".tsv").fpath.unlink() raw_read = read_raw_bids(bids_path) assert len(raw_read.annotations) == len(raw.annotations) == 1 - assert (raw_read.annotations[0]['description'] == - raw.annotations[0]['description']) + assert raw_read.annotations[0]["description"] == raw.annotations[0]["description"] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_handle_scans_reading(tmp_path): """Test reading data from a BIDS scans.tsv file.""" @@ -542,73 +578,84 @@ def test_handle_scans_reading(tmp_path): # write copy of raw with line freq of 60 # bids basename and fname - bids_path = BIDSPath(subject='01', session='01', - task='audiovisual', run='01', - datatype=suffix, - root=tmp_path) + bids_path = BIDSPath( + subject="01", + session="01", + task="audiovisual", + run="01", + datatype=suffix, + root=tmp_path, + ) bids_path = write_raw_bids(raw, bids_path, overwrite=True) raw_01 = read_raw_bids(bids_path) # find sidecar scans.tsv file and alter the # acquisition time to not have the optional microseconds - scans_path = BIDSPath(subject=bids_path.subject, - session=bids_path.session, - root=tmp_path, - suffix='scans', extension='.tsv') + scans_path = BIDSPath( + subject=bids_path.subject, + session=bids_path.session, + root=tmp_path, + suffix="scans", + extension=".tsv", + ) scans_tsv = _from_tsv(scans_path) - acq_time_str = scans_tsv['acq_time'][0] - acq_time = datetime.strptime(acq_time_str, '%Y-%m-%dT%H:%M:%S.%fZ') + acq_time_str = scans_tsv["acq_time"][0] + acq_time = datetime.strptime(acq_time_str, "%Y-%m-%dT%H:%M:%S.%fZ") acq_time = 
acq_time.replace(tzinfo=timezone.utc) - new_acq_time = acq_time_str.split('.')[0] - assert acq_time == raw_01.info['meas_date'] - scans_tsv['acq_time'][0] = new_acq_time + new_acq_time = acq_time_str.split(".")[0] + assert acq_time == raw_01.info["meas_date"] + scans_tsv["acq_time"][0] = new_acq_time _to_tsv(scans_tsv, scans_path) # now re-load the data and it should be different # from the original date and the same as the newly altered date raw_02 = read_raw_bids(bids_path) - new_acq_time += '.0Z' - new_acq_time = datetime.strptime(new_acq_time, - '%Y-%m-%dT%H:%M:%S.%fZ') + new_acq_time += ".0Z" + new_acq_time = datetime.strptime(new_acq_time, "%Y-%m-%dT%H:%M:%S.%fZ") new_acq_time = new_acq_time.replace(tzinfo=timezone.utc) - assert raw_02.info['meas_date'] == new_acq_time - assert new_acq_time != raw_01.info['meas_date'] + assert raw_02.info["meas_date"] == new_acq_time + assert new_acq_time != raw_01.info["meas_date"] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_handle_scans_reading_brainvision(tmp_path): """Test stability of BrainVision's different file extensions""" test_scan_eeg = OrderedDict( - [('filename', [Path('eeg/sub-01_ses-eeg_task-rest_eeg.eeg')]), - ('acq_time', ['2000-01-01T12:00:00.000000Z'])] + [ + ("filename", [Path("eeg/sub-01_ses-eeg_task-rest_eeg.eeg")]), + ("acq_time", ["2000-01-01T12:00:00.000000Z"]), + ] ) test_scan_vmrk = OrderedDict( - [('filename', [Path('eeg/sub-01_ses-eeg_task-rest_eeg.vmrk')]), - ('acq_time', ['2000-01-01T12:00:00.000000Z'])] + [ + ("filename", [Path("eeg/sub-01_ses-eeg_task-rest_eeg.vmrk")]), + ("acq_time", ["2000-01-01T12:00:00.000000Z"]), + ] ) test_scan_edf = OrderedDict( - [('filename', [Path('eeg/sub-01_ses-eeg_task-rest_eeg.edf')]), - ('acq_time', ['2000-01-01T12:00:00.000000Z'])] + [ + ("filename", [Path("eeg/sub-01_ses-eeg_task-rest_eeg.edf")]), + ("acq_time", ["2000-01-01T12:00:00.000000Z"]), + ] ) - os.mkdir(tmp_path / 'eeg') + os.mkdir(tmp_path / "eeg") for test_scan in [test_scan_eeg, test_scan_vmrk, test_scan_edf]: - _to_tsv(test_scan, tmp_path / test_scan['filename'][0]) + _to_tsv(test_scan, tmp_path / test_scan["filename"][0]) - bids_path = BIDSPath(subject='01', session='eeg', task='rest', - datatype='eeg', root=tiny_bids_root) + bids_path = BIDSPath( + subject="01", session="eeg", task="rest", datatype="eeg", root=tiny_bids_root + ) raw = read_raw_bids(bids_path) for test_scan in [test_scan_eeg, test_scan_vmrk]: - _handle_scans_reading(tmp_path / test_scan['filename'][0], - raw, bids_path) + _handle_scans_reading(tmp_path / test_scan["filename"][0], raw, bids_path) with pytest.raises(ValueError, match="is not in list"): - _handle_scans_reading(tmp_path / test_scan_edf['filename'][0], - raw, bids_path) + _handle_scans_reading(tmp_path / test_scan_edf["filename"][0], raw, bids_path) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_handle_info_reading(tmp_path): """Test reading information from a BIDS sidecar JSON file.""" @@ -617,45 +664,43 @@ def test_handle_info_reading(tmp_path): # write copy of raw with line freq of 60 # bids basename and fname - bids_path = BIDSPath(subject='01', session='01', - task='audiovisual', run='01', - root=tmp_path) + bids_path = BIDSPath( + subject="01", session="01", task="audiovisual", run="01", root=tmp_path + ) suffix = "meg" - bids_fname = 
bids_path.copy().update(suffix=suffix, - extension='.fif') + bids_fname = bids_path.copy().update(suffix=suffix, extension=".fif") write_raw_bids(raw, bids_path, overwrite=True) # find sidecar JSON fname bids_fname.update(datatype=suffix) - sidecar_fname = _find_matching_sidecar(bids_fname, suffix=suffix, - extension='.json') + sidecar_fname = _find_matching_sidecar(bids_fname, suffix=suffix, extension=".json") sidecar_fname = Path(sidecar_fname) # assert that we get the same line frequency set raw = read_raw_bids(bids_path=bids_path) - assert raw.info['line_freq'] == 60 + assert raw.info["line_freq"] == 60 # setting line_freq to None should produce 'n/a' in the JSON sidecar - raw.info['line_freq'] = None + raw.info["line_freq"] = None write_raw_bids(raw, bids_path, overwrite=True, format="FIF") raw = read_raw_bids(bids_path=bids_path) - assert raw.info['line_freq'] is None + assert raw.info["line_freq"] is None - sidecar_json = json.loads(sidecar_fname.read_text(encoding='utf-8')) - assert sidecar_json["PowerLineFrequency"] == 'n/a' + sidecar_json = json.loads(sidecar_fname.read_text(encoding="utf-8")) + assert sidecar_json["PowerLineFrequency"] == "n/a" # 2. if line frequency is not set in raw file, then ValueError - del raw.info['line_freq'] + del raw.info["line_freq"] with pytest.raises(ValueError, match="PowerLineFrequency .* required"): write_raw_bids(raw, bids_path, overwrite=True, format="FIF") # check whether there are "Extra points" in raw.info['dig'] if # DigitizedHeadPoints is set to True and not otherwise n_dig_points = 0 - for dig_point in raw.info['dig']: - if dig_point['kind'] == FIFF.FIFFV_POINT_EXTRA: + for dig_point in raw.info["dig"]: + if dig_point["kind"] == FIFF.FIFFV_POINT_EXTRA: n_dig_points += 1 - if sidecar_json['DigitizedHeadPoints']: + if sidecar_json["DigitizedHeadPoints"]: assert n_dig_points > 0 else: assert n_dig_points == 0 @@ -663,92 +708,104 @@ def test_handle_info_reading(tmp_path): # check whether any of NAS/LPA/RPA are present in raw.info['dig'] # DigitizedLandmark is set to True, and False otherwise landmark_present = False - for dig_point in raw.info['dig']: - if dig_point['kind'] in [FIFF.FIFFV_POINT_LPA, FIFF.FIFFV_POINT_RPA, - FIFF.FIFFV_POINT_NASION]: + for dig_point in raw.info["dig"]: + if dig_point["kind"] in [ + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_RPA, + FIFF.FIFFV_POINT_NASION, + ]: landmark_present = True break if landmark_present: - assert sidecar_json['DigitizedLandmarks'] is True + assert sidecar_json["DigitizedLandmarks"] is True else: - assert sidecar_json['DigitizedLandmarks'] is False + assert sidecar_json["DigitizedLandmarks"] is False # make a copy of the sidecar in "derivatives/" # to check that we make sure we always get the right sidecar # in addition, it should not break the sidecar reading # in `read_raw_bids` - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 write_raw_bids(raw, bids_path, overwrite=True, format="FIF") - deriv_dir = tmp_path / 'derivatives' + deriv_dir = tmp_path / "derivatives" deriv_dir.mkdir() sidecar_copy = deriv_dir / op.basename(sidecar_fname) - sidecar_json = json.loads(sidecar_fname.read_text(encoding='utf-8')) + sidecar_json = json.loads(sidecar_fname.read_text(encoding="utf-8")) sidecar_json["PowerLineFrequency"] = 45 _write_json(sidecar_copy, sidecar_json) raw = read_raw_bids(bids_path=bids_path) - assert raw.info['line_freq'] == 60 + assert raw.info["line_freq"] == 60 # 3. 
assert that we get a warning when sidecar json doesn't match
     _update_sidecar(sidecar_fname, "PowerLineFrequency", 55)
     with pytest.warns(RuntimeWarning, match="Defaulting to .* sidecar JSON"):
         raw = read_raw_bids(bids_path=bids_path)
-    assert raw.info['line_freq'] == 55
+    assert raw.info["line_freq"] == 55


-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'])
-@pytest.mark.filterwarnings(warning_str['maxshield'])
+@pytest.mark.filterwarnings(warning_str["channel_unit_changed"])
+@pytest.mark.filterwarnings(warning_str["maxshield"])
 @testing.requires_testing_data
 def test_handle_chpi_reading(tmp_path):
     """Test reading of cHPI information."""
-    raw = _read_raw_fif(raw_fname_chpi, allow_maxshield='yes')
-    root = tmp_path / 'chpi'
+    raw = _read_raw_fif(raw_fname_chpi, allow_maxshield="yes")
+    root = tmp_path / "chpi"
     root.mkdir()
-    bids_path = BIDSPath(subject='01', session='01',
-                         task='audiovisual', run='01',
-                         root=root, datatype='meg')
+    bids_path = BIDSPath(
+        subject="01",
+        session="01",
+        task="audiovisual",
+        run="01",
+        root=root,
+        datatype="meg",
+    )
     bids_path = write_raw_bids(raw, bids_path)
     raw_read = read_raw_bids(bids_path)
-    assert raw_read.info['hpi_subsystem'] is not None
+    assert raw_read.info["hpi_subsystem"] is not None

     # cause conflicts between cHPI info in sidecar and raw data
-    meg_json_path = bids_path.copy().update(suffix='meg', extension='.json')
-    with open(meg_json_path, 'r', encoding='utf-8') as f:
+    meg_json_path = bids_path.copy().update(suffix="meg", extension=".json")
+    with open(meg_json_path, "r", encoding="utf-8") as f:
         meg_json_data = json.load(f)

     # cHPI frequency mismatch
     meg_json_data_freq_mismatch = meg_json_data.copy()
-    meg_json_data_freq_mismatch['HeadCoilFrequency'][0] = 123
+    meg_json_data_freq_mismatch["HeadCoilFrequency"][0] = 123
     _write_json(meg_json_path, meg_json_data_freq_mismatch, overwrite=True)
-    with pytest.warns(RuntimeWarning, match='Defaulting to .* mne.Raw object'):
+    with pytest.warns(RuntimeWarning, match="Defaulting to .* mne.Raw object"):
         raw_read = read_raw_bids(bids_path)

     # cHPI "off" according to sidecar, but present in the data
     meg_json_data_chpi_mismatch = meg_json_data.copy()
-    meg_json_data_chpi_mismatch['ContinuousHeadLocalization'] = False
+    meg_json_data_chpi_mismatch["ContinuousHeadLocalization"] = False
     _write_json(meg_json_path, meg_json_data_chpi_mismatch, overwrite=True)
     raw_read = read_raw_bids(bids_path)
-    assert raw_read.info['hpi_subsystem'] is None
-    assert raw_read.info['hpi_meas'] == []
+    assert raw_read.info["hpi_subsystem"] is None
+    assert raw_read.info["hpi_meas"] == []


-@pytest.mark.filterwarnings(warning_str['nasion_not_found'])
-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'])
+@pytest.mark.filterwarnings(warning_str["nasion_not_found"])
+@pytest.mark.filterwarnings(warning_str["channel_unit_changed"])
 @testing.requires_testing_data
 def test_handle_eeg_coords_reading(tmp_path):
     """Test reading EEG coordinates from BIDS files."""
     bids_path = BIDSPath(
-        subject=subject_id, session=session_id, run=run, acquisition=acq,
-        task=task, root=tmp_path)
+        subject=subject_id,
+        session=session_id,
+        run=run,
+        acquisition=acq,
+        task=task,
+        root=tmp_path,
+    )

-    raw_fname = op.join(data_path, 'EDF', 'test_reduced.edf')
+    raw_fname = op.join(data_path, "EDF", "test_reduced.edf")
     raw = _read_raw_edf(raw_fname)

     # ensure we are writing 'eeg' data
-    raw.set_channel_types({ch: 'eeg'
-                           for ch in raw.ch_names})
+    raw.set_channel_types({ch: "eeg" for ch in raw.ch_names})

     # set a `random`
montage ch_names = raw.ch_names @@ -757,29 +814,25 @@ def test_handle_eeg_coords_reading(tmp_path): # # create montage in 'unknown' coordinate frame # # and assert coordsystem/electrodes sidecar tsv don't exist - montage = mne.channels.make_dig_montage(ch_pos=ch_pos, - coord_frame="unknown") + montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="unknown") raw.set_montage(montage) ctx = nullcontext() with ctx: write_raw_bids(raw, bids_path, overwrite=True) bids_path.update(root=tmp_path) - coordsystem_fname = _find_matching_sidecar(bids_path, - suffix='coordsystem', - extension='.json', - on_error='warn') - electrodes_fname = _find_matching_sidecar(bids_path, - suffix='electrodes', - extension='.tsv', - on_error='warn') + coordsystem_fname = _find_matching_sidecar( + bids_path, suffix="coordsystem", extension=".json", on_error="warn" + ) + electrodes_fname = _find_matching_sidecar( + bids_path, suffix="electrodes", extension=".tsv", on_error="warn" + ) assert coordsystem_fname is not None assert electrodes_fname is not None # create montage in head frame; setting it should result in # an error if landmarks are not set - montage = mne.channels.make_dig_montage(ch_pos=ch_pos, - coord_frame="head") + montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="head") raw.set_montage(montage) ctx = nullcontext() with ctx: @@ -789,57 +842,53 @@ def test_handle_eeg_coords_reading(tmp_path): # obtain the sensor positions and assert ch_coords are same raw_test = read_raw_bids(bids_path, verbose=True) - assert not object_diff(raw.info['chs'], raw_test.info['chs']) + assert not object_diff(raw.info["chs"], raw_test.info["chs"]) # modify coordinate frame to not-captrak - coordsystem_fname = _find_matching_sidecar(bids_path, - suffix='coordsystem', - extension='.json') - _update_sidecar(coordsystem_fname, 'EEGCoordinateSystem', 'besa') - with pytest.warns(RuntimeWarning, match='is not a BIDS-acceptable ' - 'coordinate frame for EEG'): + coordsystem_fname = _find_matching_sidecar( + bids_path, suffix="coordsystem", extension=".json" + ) + _update_sidecar(coordsystem_fname, "EEGCoordinateSystem", "besa") + with pytest.warns( + RuntimeWarning, match="is not a BIDS-acceptable " "coordinate frame for EEG" + ): raw_test = read_raw_bids(bids_path) - assert raw_test.info['dig'] is None -@pytest.mark.parametrize('bids_path', - [_bids_path, _bids_path_minimal]) -@pytest.mark.filterwarnings(warning_str['nasion_not_found']) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.parametrize("bids_path", [_bids_path, _bids_path_minimal]) +@pytest.mark.filterwarnings(warning_str["nasion_not_found"]) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_handle_ieeg_coords_reading(bids_path, tmp_path): """Test reading iEEG coordinates from BIDS files.""" - raw_fname = op.join(data_path, 'EDF', 'test_reduced.edf') - bids_fname = bids_path.copy().update(datatype='ieeg', - suffix='ieeg', - extension='.edf', - root=tmp_path) + raw_fname = op.join(data_path, "EDF", "test_reduced.edf") + bids_fname = bids_path.copy().update( + datatype="ieeg", suffix="ieeg", extension=".edf", root=tmp_path + ) raw = _read_raw_edf(raw_fname) # ensure we are writing 'ecog'/'ieeg' data - raw.set_channel_types({ch: 'ecog' - for ch in raw.ch_names}) + raw.set_channel_types({ch: "ecog" for ch in raw.ch_names}) # coordinate frames in mne-python should all map correctly # set a `random` montage ch_names = raw.ch_names
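# (added note: the electrode positions generated below are arbitrary; seeding # RandomState merely makes them reproducible for the round-trip checks that follow)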
elec_locs = np.random.RandomState(0).randn(len(ch_names), 3) ch_pos = dict(zip(ch_names, elec_locs)) - coordinate_frames = ['mni_tal'] + coordinate_frames = ["mni_tal"] for coord_frame in coordinate_frames: # XXX: mne-bids doesn't support multiple electrodes.tsv files sh.rmtree(tmp_path) - montage = mne.channels.make_dig_montage(ch_pos=ch_pos, - coord_frame=coord_frame) + montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame=coord_frame) raw.set_montage(montage) - write_raw_bids(raw, bids_fname, - overwrite=True, verbose=False) + write_raw_bids(raw, bids_fname, overwrite=True, verbose=False) # read in raw file w/ updated coordinate frame # and make sure all digpoints are correct coordinate frames raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) coord_frame_int = MNE_STR_TO_FRAME[coord_frame] - for digpoint in raw_test.info['dig']: - assert digpoint['coord_frame'] == coord_frame_int + for digpoint in raw_test.info["dig"]: + assert digpoint["coord_frame"] == coord_frame_int # start w/ new bids root sh.rmtree(tmp_path) @@ -847,47 +896,43 @@ def test_handle_ieeg_coords_reading(bids_path, tmp_path): # obtain the sensor positions and assert ch_coords are same raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) - orig_locs = raw.info['dig'][1] - test_locs = raw_test.info['dig'][1] + orig_locs = raw.info["dig"][1] + test_locs = raw_test.info["dig"][1] assert orig_locs == test_locs - assert not object_diff(raw.info['chs'], raw_test.info['chs']) + assert not object_diff(raw.info["chs"], raw_test.info["chs"]) # read in the data and assert montage is the same # regardless of 'm', 'cm', 'mm', or 'pixel' - scalings = {'m': 1, 'cm': 100, 'mm': 1000} + scalings = {"m": 1, "cm": 100, "mm": 1000} bids_fname.update(root=tmp_path) - coordsystem_fname = _find_matching_sidecar(bids_fname, - suffix='coordsystem', - extension='.json') - electrodes_fname = _find_matching_sidecar(bids_fname, - suffix='electrodes', - extension='.tsv') - orig_electrodes_dict = _from_tsv(electrodes_fname, - [str, float, float, float, str]) + coordsystem_fname = _find_matching_sidecar( + bids_fname, suffix="coordsystem", extension=".json" + ) + electrodes_fname = _find_matching_sidecar( + bids_fname, suffix="electrodes", extension=".tsv" + ) + orig_electrodes_dict = _from_tsv(electrodes_fname, [str, float, float, float, str]) # not BIDS specified should not be read - coord_unit = 'km' + coord_unit = "km" scaling = 0.001 - _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit) - electrodes_dict = _from_tsv(electrodes_fname, - [str, float, float, float, str]) - for axis in ['x', 'y', 'z']: - electrodes_dict[axis] = \ - np.multiply(orig_electrodes_dict[axis], scaling) + _update_sidecar(coordsystem_fname, "iEEGCoordinateUnits", coord_unit) + electrodes_dict = _from_tsv(electrodes_fname, [str, float, float, float, str]) + for axis in ["x", "y", "z"]: + electrodes_dict[axis] = np.multiply(orig_electrodes_dict[axis], scaling) _to_tsv(electrodes_dict, electrodes_fname) - with pytest.warns(RuntimeWarning, match='Coordinate unit is not ' - 'an accepted BIDS unit'): + with pytest.warns( + RuntimeWarning, match="Coordinate unit is not " "an accepted BIDS unit" + ): raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) # correct BIDS units should scale to meters properly for coord_unit, scaling in scalings.items(): # update coordinate SI units - _update_sidecar(coordsystem_fname, 'iEEGCoordinateUnits', coord_unit) - electrodes_dict = _from_tsv(electrodes_fname, - [str, float, float, float, 
str]) - for axis in ['x', 'y', 'z']: - electrodes_dict[axis] = \ - np.multiply(orig_electrodes_dict[axis], scaling) + _update_sidecar(coordsystem_fname, "iEEGCoordinateUnits", coord_unit) + electrodes_dict = _from_tsv(electrodes_fname, [str, float, float, float, str]) + for axis in ["x", "y", "z"]: + electrodes_dict[axis] = np.multiply(orig_electrodes_dict[axis], scaling) _to_tsv(electrodes_dict, electrodes_fname) # read in raw file w/ updated montage @@ -899,43 +944,46 @@ def test_handle_ieeg_coords_reading(bids_path, tmp_path): # XXX: Improve by changing names to 'unknown' coordframe (needs mne PR) # check that other coordinate systems should be named # in the file and not in CoordinateSystem, which is reserved for keywords - coordinate_frames = ['Other'] + coordinate_frames = ["Other"] for coord_frame in coordinate_frames: # update coordinate units - _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', coord_frame) + _update_sidecar(coordsystem_fname, "iEEGCoordinateSystem", coord_frame) # read in raw file w/ updated coordinate frame # and make sure all digpoints are MRI coordinate frame - with pytest.warns(RuntimeWarning, match="not an MNE-Python " - "coordinate frame"): + with pytest.warns( + RuntimeWarning, match="not an MNE-Python " "coordinate frame" + ): raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) - assert raw_test.info['dig'] is not None + assert raw_test.info["dig"] is not None # check that standard template identifiers that are unsupported as # mne-python coordinate frames still get read in, but produce a warning for coord_frame in BIDS_SHARED_COORDINATE_FRAMES: # update coordinate units - _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', coord_frame) + _update_sidecar(coordsystem_fname, "iEEGCoordinateSystem", coord_frame) # read in raw file w/ updated coordinate frame # and make sure all digpoints are MRI coordinate frame if coord_frame in BIDS_TO_MNE_FRAMES: raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) else: - with pytest.warns(RuntimeWarning, match="not an MNE-Python " - "coordinate frame"): + with pytest.warns( + RuntimeWarning, match="not an MNE-Python " "coordinate frame" + ): raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) - assert raw_test.info['dig'] is not None + assert raw_test.info["dig"] is not None # ACPC should be read in as RAS for iEEG - _update_sidecar(coordsystem_fname, 'iEEGCoordinateSystem', 'ACPC') + _update_sidecar(coordsystem_fname, "iEEGCoordinateSystem", "ACPC") raw_test = read_raw_bids(bids_path=bids_fname, verbose=False) - coord_frame_int = MNE_STR_TO_FRAME['ras'] - for digpoint in raw_test.info['dig']: - assert digpoint['coord_frame'] == coord_frame_int + coord_frame_int = MNE_STR_TO_FRAME["ras"] + for digpoint in raw_test.info["dig"]: + assert digpoint["coord_frame"] == coord_frame_int # if we delete the coordsystem.json file, an error will be raised os.remove(coordsystem_fname) - with pytest.raises(RuntimeError, match='BIDS mandates that ' - 'the coordsystem.json'): + with pytest.raises( + RuntimeError, match="BIDS mandates that " "the coordsystem.json" + ): raw = read_raw_bids(bids_path=bids_fname, verbose=False) # test error message if electrodes is not a subset of Raw @@ -949,88 +997,91 @@ def test_handle_ieeg_coords_reading(bids_path, tmp_path): _to_tsv(electrodes_dict, electrodes_fname) # popping off channels should not result in an error # however, a warning will be raised through mne-python - with pytest.warns(RuntimeWarning, match='DigMontage is ' - 'only
a subset of info'): + with pytest.warns(RuntimeWarning, match="DigMontage is " "only a subset of info"): read_raw_bids(bids_path=bids_fname, verbose=False) # make sure montage is set if there are coordinates w/ 'n/a' - raw.info['bads'] = [] - write_raw_bids(raw, bids_path, - overwrite=True, verbose=False) + raw.info["bads"] = [] + write_raw_bids(raw, bids_path, overwrite=True, verbose=False) electrodes_dict = _from_tsv(electrodes_fname) - for axis in ['x', 'y', 'z']: - electrodes_dict[axis][0] = 'n/a' - electrodes_dict[axis][3] = 'n/a' + for axis in ["x", "y", "z"]: + electrodes_dict[axis][0] = "n/a" + electrodes_dict[axis][3] = "n/a" _to_tsv(electrodes_dict, electrodes_fname) # test if montage is correctly set via mne-bids # electrode coordinates should be nan # when coordinate is 'n/a' - nan_chs = [electrodes_dict['name'][i] for i in [0, 3]] - with pytest.warns(RuntimeWarning, match='There are channels ' - 'without locations'): + nan_chs = [electrodes_dict["name"][i] for i in [0, 3]] + with pytest.warns(RuntimeWarning, match="There are channels " "without locations"): raw = read_raw_bids(bids_path=bids_fname, verbose=False) - for idx, ch in enumerate(raw.info['chs']): - if ch['ch_name'] in nan_chs: - assert all(np.isnan(ch['loc'][:3])) + for idx, ch in enumerate(raw.info["chs"]): + if ch["ch_name"] in nan_chs: + assert all(np.isnan(ch["loc"][:3])) else: - assert not any(np.isnan(ch['loc'][:3])) - assert ch['ch_name'] not in raw.info['bads'] + assert not any(np.isnan(ch["loc"][:3])) + assert ch["ch_name"] not in raw.info["bads"] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) -@pytest.mark.parametrize('fname', ['testdata_ctf.ds', 'catch-alp-good-f.ds']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) +@pytest.mark.parametrize("fname", ["testdata_ctf.ds", "catch-alp-good-f.ds"]) @testing.requires_testing_data def test_get_head_mri_trans_ctf(fname, tmp_path): """Test getting a trans object from BIDS data in CTF.""" - nib = pytest.importorskip('nibabel') + nib = pytest.importorskip("nibabel") - ctf_data_path = op.join(data_path, 'CTF') + ctf_data_path = op.join(data_path, "CTF") raw_ctf_fname = op.join(ctf_data_path, fname) raw_ctf = _read_raw_ctf(raw_ctf_fname, clean_names=True) - bids_path = _bids_path.copy().update( - root=tmp_path, datatype='meg', suffix='meg' - ) + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg", suffix="meg") write_raw_bids(raw_ctf, bids_path, overwrite=False) # Take a fake trans - trans = mne.read_trans(raw_fname.replace('_raw.fif', '-trans.fif')) + trans = mne.read_trans(raw_fname.replace("_raw.fif", "-trans.fif")) # Get the T1 weighted MRI data file ... 
test write_anat with a nibabel # image instead of a file path - t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz') + t1w_mgh = op.join(data_path, "subjects", "sample", "mri", "T1.mgz") t1w_mgh = nib.load(t1w_mgh) - t1w_bids_path = BIDSPath(subject=subject_id, session=session_id, - acquisition=acq, root=tmp_path) + t1w_bids_path = BIDSPath( + subject=subject_id, session=session_id, acquisition=acq, root=tmp_path + ) landmarks = get_anat_landmarks( - t1w_mgh, raw_ctf.info, trans, fs_subject='sample', - fs_subjects_dir=op.join(data_path, 'subjects')) + t1w_mgh, + raw_ctf.info, + trans, + fs_subject="sample", + fs_subjects_dir=op.join(data_path, "subjects"), + ) write_anat(t1w_mgh, bids_path=t1w_bids_path, landmarks=landmarks) # Try to get trans back through fitting points estimated_trans = get_head_mri_trans( - bids_path=bids_path, extra_params=dict(clean_names=True), - fs_subject='sample', fs_subjects_dir=op.join(data_path, 'subjects')) + bids_path=bids_path, + extra_params=dict(clean_names=True), + fs_subject="sample", + fs_subjects_dir=op.join(data_path, "subjects"), + ) - assert_almost_equal(trans['trans'], estimated_trans['trans']) + assert_almost_equal(trans["trans"], estimated_trans["trans"]) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_read_raw_bids_pathlike(tmp_path): """Test that read_raw_bids() can handle a Path-like bids_root.""" - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg') + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg") raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) raw = read_raw_bids(bids_path=bids_path) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_read_raw_datatype(tmp_path): """Test that read_raw_bids() can infer the str_suffix if need be.""" - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg') + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg") raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) @@ -1053,113 +1104,107 @@ def test_handle_channel_type_casing(tmp_path): bids_path = _bids_path.copy().update(root=tmp_path) raw = _read_raw_fif(raw_fname, verbose=False) - write_raw_bids(raw, bids_path, overwrite=True, - verbose=False) + write_raw_bids(raw, bids_path, overwrite=True, verbose=False) - ch_path = bids_path.copy().update(root=tmp_path, - datatype='meg', - suffix='channels', - extension='.tsv') + ch_path = bids_path.copy().update( + root=tmp_path, datatype="meg", suffix="channels", extension=".tsv" + ) bids_channels_fname = ch_path.fpath # Convert all channel type entries to lowercase. 
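# (added note: BIDS specifies upper-case channel types such as "MEG"; the check # below ensures MNE-BIDS warns about the lower-case spelling but still reads the data)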
channels_data = _from_tsv(bids_channels_fname) - channels_data['type'] = [t.lower() for t in channels_data['type']] + channels_data["type"] = [t.lower() for t in channels_data["type"]] _to_tsv(channels_data, bids_channels_fname) - with pytest.warns(RuntimeWarning, match='lowercase spelling'): + with pytest.warns(RuntimeWarning, match="lowercase spelling"): read_raw_bids(bids_path) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_handle_non_mne_channel_type(tmp_path): """Test that channel types not known to MNE will be read as 'misc'.""" bids_path = _bids_path.copy().update(root=tmp_path) raw = _read_raw_fif(raw_fname, verbose=False) - write_raw_bids(raw, bids_path, overwrite=True, - verbose=False) + write_raw_bids(raw, bids_path, overwrite=True, verbose=False) - channels_tsv_path = bids_path.copy().update( - root=tmp_path, - datatype='meg', - suffix='channels', - extension='.tsv' - ).fpath + channels_tsv_path = ( + bids_path.copy() + .update(root=tmp_path, datatype="meg", suffix="channels", extension=".tsv") + .fpath + ) channels_data = _from_tsv(channels_tsv_path) # Violates BIDS, but ensures we won't have an appropriate # BIDS -> MNE mapping. ch_idx = -1 - channels_data['type'][ch_idx] = 'FOOBAR' + channels_data["type"][ch_idx] = "FOOBAR" _to_tsv(data=channels_data, fname=channels_tsv_path) - with pytest.warns(RuntimeWarning, match='will be set to \"misc\"'): + with pytest.warns(RuntimeWarning, match='will be set to "misc"'): raw = read_raw_bids(bids_path) # Should be a 'misc' channel. - assert raw.get_channel_types([channels_data['name'][ch_idx]]) == ['misc'] + assert raw.get_channel_types([channels_data["name"][ch_idx]]) == ["misc"] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_bads_reading(tmp_path): - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg') - bads_raw = ['MEG 0112', 'MEG 0113'] - bads_sidecar = ['EEG 053', 'MEG 2443'] + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg") + bads_raw = ["MEG 0112", "MEG 0113"] + bads_sidecar = ["EEG 053", "MEG 2443"] # Produce conflicting information between raw and sidecar file. raw = _read_raw_fif(raw_fname, verbose=False) - raw.info['bads'] = bads_sidecar + raw.info["bads"] = bads_sidecar write_raw_bids(raw, bids_path, verbose=False) - raw = _read_raw(bids_path.copy().update(extension='.fif').fpath, - preload=True) - raw.info['bads'] = bads_raw + raw = _read_raw(bids_path.copy().update(extension=".fif").fpath, preload=True) + raw.info["bads"] = bads_raw raw.save(raw.filenames[0], overwrite=True) # Upon reading the data, only the sidecar info should be present. 
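# (added note: that is, the bad-channel list from channels.tsv should take # precedence over the bads stored in the FIF file itself)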
raw = read_raw_bids(bids_path=bids_path, verbose=False) - assert len(raw.info['bads']) == len(bads_sidecar) - assert set(raw.info['bads']) == set(bads_sidecar) + assert len(raw.info["bads"]) == len(bads_sidecar) + assert set(raw.info["bads"]) == set(bads_sidecar) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_write_read_fif_split_file(tmp_path, monkeypatch): """Test split files are read correctly.""" # load raw test file, extend it to exceed the (monkeypatched) split size, and save it - bids_root = tmp_path / 'bids' - tmp_dir = tmp_path / 'tmp' + bids_root = tmp_path / "bids" + tmp_dir = tmp_path / "tmp" tmp_dir.mkdir() - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") raw = _read_raw_fif(raw_fname, verbose=False) bids_path.update(acquisition=None) write_raw_bids(raw, bids_path, verbose=False) - bids_path.update(acquisition='01') + bids_path.update(acquisition="01") n_channels = len(raw.ch_names) n_times = int(2.5e6 / n_channels) # enough to produce a 10MB split - data = np.random.RandomState(0).randn( - n_channels, n_times).astype(np.float32) + data = np.random.RandomState(0).randn(n_channels, n_times).astype(np.float32) raw = mne.io.RawArray(data, raw.info) - big_fif_fname = Path(tmp_dir) / 'test_raw.fif' + big_fif_fname = Path(tmp_dir) / "test_raw.fif" - split_size = '10MB' + split_size = "10MB" raw.save(big_fif_fname, split_size=split_size) raw = _read_raw_fif(big_fif_fname, verbose=False) with monkeypatch.context() as m: # Force MNE-BIDS to split at 10MB - m.setattr(mne_bids.write, '_FIFF_SPLIT_SIZE', split_size) + m.setattr(mne_bids.write, "_FIFF_SPLIT_SIZE", split_size) write_raw_bids(raw, bids_path, verbose=False) # test whether split raw files were read correctly raw1 = read_raw_bids(bids_path=bids_path) - assert 'split-01' in str(bids_path.fpath) - bids_path.update(split='01') + assert "split-01" in str(bids_path.fpath) + bids_path.update(split="01") raw2 = read_raw_bids(bids_path=bids_path) - bids_path.update(split='02') + bids_path.update(split="02") raw3 = read_raw_bids(bids_path=bids_path) assert len(raw) == len(raw1) assert len(raw) == len(raw2) @@ -1167,58 +1212,60 @@ def test_write_read_fif_split_file(tmp_path, monkeypatch): # check that split files both appear in scans.tsv scans_tsv = BIDSPath( - subject=subject_id, session=session_id, - suffix='scans', extension='.tsv', - root=bids_root) + subject=subject_id, + session=session_id, + suffix="scans", + extension=".tsv", + root=bids_root, + ) scan_data = _from_tsv(scans_tsv) - scan_fnames = scan_data['filename'] - scan_acqtime = scan_data['acq_time'] + scan_fnames = scan_data["filename"] + scan_acqtime = scan_data["acq_time"] assert len(scan_fnames) == 3 - assert 'split-01' in scan_fnames[0] and 'split-02' in scan_fnames[1] + assert "split-01" in scan_fnames[0] and "split-02" in scan_fnames[1] # check that the acq_times in scans.tsv are the same assert scan_acqtime[0] == scan_acqtime[1] # check the recordings are in the correct order assert raw2.first_time < raw3.first_time # check whether non-matching acq_times are caught - scan_data['acq_time'][0] = scan_acqtime[0].split('.')[0] + scan_data["acq_time"][0] = scan_acqtime[0].split(".")[0] _to_tsv(scan_data, scans_tsv) - with pytest.raises(ValueError, - match='Split files must have the same
acq_time."): read_raw_bids(bids_path) # reset scans.tsv file for downstream tests - scan_data['acq_time'][0] = scan_data['acq_time'][1] + scan_data["acq_time"][0] = scan_data["acq_time"][1] _to_tsv(scan_data, scans_tsv) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_ignore_exclude_param(tmp_path): """Test that extra_params=dict(exclude=...) is being ignored.""" bids_path = _bids_path.copy().update(root=tmp_path) - ch_name = 'EEG 001' + ch_name = "EEG 001" raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path=bids_path, overwrite=True, verbose=False) - raw = read_raw_bids(bids_path=bids_path, verbose=False, - extra_params=dict(exclude=[ch_name])) + raw = read_raw_bids( + bids_path=bids_path, verbose=False, extra_params=dict(exclude=[ch_name]) + ) assert ch_name in raw.ch_names -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_channels_tsv_raw_mismatch(tmp_path): """Test behavior when channels.tsv contains channels not found in raw.""" - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg', - task='rest') + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg", task="rest") # Remove one channel from the raw data without updating channels.tsv raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path=bids_path, overwrite=True, verbose=False) - raw_path = bids_path.copy().update(extension='.fif').fpath + raw_path = bids_path.copy().update(extension=".fif").fpath raw = _read_raw(raw_path, preload=True) raw.drop_channels(ch_names=raw.ch_names[-1]) raw.load_data() @@ -1226,31 +1273,31 @@ def test_channels_tsv_raw_mismatch(tmp_path): with pytest.warns( RuntimeWarning, - match='number of channels in the channels.tsv sidecar .* ' - 'does not match the number of channels in the raw data' + match="number of channels in the channels.tsv sidecar .* " + "does not match the number of channels in the raw data", ): read_raw_bids(bids_path) # Remame a channel in the raw data without updating channels.tsv # (number of channels in channels.tsv and raw remains different) ch_name_orig = raw.ch_names[-1] - ch_name_new = 'MEGtest' + ch_name_new = "MEGtest" raw.rename_channels({ch_name_orig: ch_name_new}) raw.save(raw_path, overwrite=True) with pytest.warns( RuntimeWarning, - match=f'Cannot set channel type for the following channels, as they ' - f'are missing in the raw data: {ch_name_orig}' + match=f"Cannot set channel type for the following channels, as they " + f"are missing in the raw data: {ch_name_orig}", ): read_raw_bids(bids_path) # Mark channel as bad in channels.tsv and remove it from the raw data raw = _read_raw_fif(raw_fname, verbose=False) ch_name_orig = raw.ch_names[-1] - ch_name_new = 'MEGtest' + ch_name_new = "MEGtest" - raw.info['bads'] = [ch_name_orig] + raw.info["bads"] = [ch_name_orig] write_raw_bids(raw, bids_path=bids_path, overwrite=True, verbose=False) raw.drop_channels(raw.ch_names[-2]) @@ -1260,7 +1307,7 @@ def test_channels_tsv_raw_mismatch(tmp_path): with pytest.warns( RuntimeWarning, match=f'Cannot set "bad" status for the following channels, as ' - f'they are missing in the raw data: {ch_name_orig}' + f"they are missing in the raw data: {ch_name_orig}", ): read_raw_bids(bids_path) @@ -1270,35 +1317,38 @@ def test_file_not_found(tmp_path): """Check behavior if the requested file 
cannot be found.""" # First a path with a filename extension. bp = BIDSPath( - root=tmp_path, subject='foo', task='bar', datatype='eeg', suffix='eeg', - extension='.fif' + root=tmp_path, + subject="foo", + task="bar", + datatype="eeg", + suffix="eeg", + extension=".fif", ) bp.fpath.parent.mkdir(parents=True) - with pytest.raises(FileNotFoundError, match='File does not exist'): + with pytest.raises(FileNotFoundError, match="File does not exist"): read_raw_bids(bids_path=bp) # Now without an extension bp.extension = None - with pytest.raises(FileNotFoundError, match='File does not exist'): + with pytest.raises(FileNotFoundError, match="File does not exist"): read_raw_bids(bids_path=bp) - bp.update(extension='.fif') + bp.update(extension=".fif") _read_raw_fif(raw_fname, verbose=False).save(bp.fpath) - with pytest.warns(RuntimeWarning, match=r'channels\.tsv'): + with pytest.warns(RuntimeWarning, match=r"channels\.tsv"): read_raw_bids(bp) # smoke test bp.update(task=None) - with pytest.raises(FileNotFoundError, match='Did you mean'): + with pytest.raises(FileNotFoundError, match="Did you mean"): read_raw_bids(bp) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) def test_gsr_and_temp_reading(): """Test GSR and temperature channels are handled correctly.""" bids_path = BIDSPath( - subject='01', session='eeg', task='rest', datatype='eeg', - root=tiny_bids_root + subject="01", session="eeg", task="rest", datatype="eeg", root=tiny_bids_root ) raw = read_raw_bids(bids_path) - assert raw.get_channel_types(['GSR']) == ['gsr'] - assert raw.get_channel_types(['Temperature']) == ['temperature'] + assert raw.get_channel_types(["GSR"]) == ["gsr"] + assert raw.get_channel_types(["Temperature"]) == ["temperature"] diff --git a/mne_bids/tests/test_report.py b/mne_bids/tests/test_report.py index d919cc6b2..901f998d5 100644 --- a/mne_bids/tests/test_report.py +++ b/mne_bids/tests/test_report.py @@ -9,47 +9,43 @@ import pytest from mne.datasets import testing -from mne_bids import (BIDSPath, - make_report) +from mne_bids import BIDSPath, make_report from mne_bids.write import write_raw_bids from mne_bids.config import BIDS_VERSION -subject_id = '01' -session_id = '01' -run = '01' -acq = '01' -task = 'testing' +subject_id = "01" +session_id = "01" +run = "01" +acq = "01" +task = "testing" _bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task ) # Get the MNE testing sample data data_path = testing.data_path(download=False) -raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') +raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") warning_str = dict( - channel_unit_changed='ignore:The unit for chann*.:RuntimeWarning:mne', + channel_unit_changed="ignore:The unit for chann*.:RuntimeWarning:mne", ) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_report(tmp_path): """Test that report generated works as intended.""" bids_root = str(tmp_path) raw = mne.io.read_raw_fif(raw_fname, verbose=False) - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 bids_path = _bids_path.copy().update(root=bids_root) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) report = make_report(bids_root) - expected_report = \ - f"""This dataset was 
created by [Unspecified] and conforms to BIDS version {BIDS_VERSION}. + expected_report = f"""This dataset was created by [Unspecified] and conforms to BIDS version {BIDS_VERSION}. This report was generated with MNE-BIDS (https://doi.org/10.21105/joss.01896). The dataset consists of 1 participants (sex were all unknown; handedness were all unknown; ages all unknown) and 1 recording sessions: 01. Data was recorded @@ -61,30 +57,29 @@ def test_report(tmp_path): = 0.0) recording channels per scan, out of which 374.0 (std = 0.0) were used in analysis (2.0 +/- 0.0 were removed from analysis).""" # noqa - expected_report = '\n'.join(textwrap.wrap(expected_report, width=80)) + expected_report = "\n".join(textwrap.wrap(expected_report, width=80)) assert report == expected_report -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_report_no_participant_information(tmp_path): """Test report with participants.tsv with participant_id column only.""" bids_root = tmp_path raw = mne.io.read_raw_fif(raw_fname, verbose=False) - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 bids_path = _bids_path.copy().update(root=bids_root) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) # remove all information and check if report still runs - (bids_root / 'participants.json').unlink() + (bids_root / "participants.json").unlink() # overwrite participant information to see if report still runs - (bids_root / 'participants.tsv').write_text('participant_id\nsub-001') + (bids_root / "participants.tsv").write_text("participant_id\nsub-001") report = make_report(bids_root) - expected_report = \ - f"""This dataset was created by [Unspecified] and conforms to BIDS version {BIDS_VERSION}. + expected_report = f"""This dataset was created by [Unspecified] and conforms to BIDS version {BIDS_VERSION}. This report was generated with MNE-BIDS (https://doi.org/10.21105/joss.01896). The dataset consists of 1 participants (sex were all unknown; handedness were all unknown; ages all unknown) and 1 recording sessions: 01. Data was recorded @@ -96,5 +91,5 @@ def test_report_no_participant_information(tmp_path): = 0.0) recording channels per scan, out of which 374.0 (std = 0.0) were used in analysis (2.0 +/- 0.0 were removed from analysis).""" # noqa - expected_report = '\n'.join(textwrap.wrap(expected_report, width=80)) + expected_report = "\n".join(textwrap.wrap(expected_report, width=80)) assert report == expected_report diff --git a/mne_bids/tests/test_stats.py b/mne_bids/tests/test_stats.py index a89be6cbf..12b58a72a 100644 --- a/mne_bids/tests/test_stats.py +++ b/mne_bids/tests/test_stats.py @@ -20,28 +20,45 @@ data_path = testing.data_path(download=False) -def _make_dataset(root, subjects, tasks=(None,), runs=(None,), - sessions=(None,)): - raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' +def _make_dataset(root, subjects, tasks=(None,), runs=(None,), sessions=(None,)): + raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" raw = mne.io.read_raw(raw_fname) - raw.info['line_freq'] = 60. 
+ raw.info["line_freq"] = 60.0 events = mne.find_events(raw) - event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, - 'visual/right': 4, 'face': 5, 'button': 32} - - for subject, session, task, run in \ - itertools.product(subjects, sessions, tasks, runs): + event_id = { + "auditory/left": 1, + "auditory/right": 2, + "visual/left": 3, + "visual/right": 4, + "face": 5, + "button": 32, + } + + for subject, session, task, run in itertools.product( + subjects, sessions, tasks, runs + ): bids_path = BIDSPath( - subject=subject, session=session, run=run, task=task, root=root, + subject=subject, + session=session, + run=run, + task=task, + root=root, + ) + write_raw_bids( + raw, + bids_path, + events=events, + event_id=event_id, + overwrite=True, + verbose=False, ) - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - overwrite=True, verbose=False) return root, events, event_id -def _check_counts(counts, events, event_id, subjects, - tasks=(None,), runs=(None,), sessions=(None,)): +def _check_counts( + counts, events, event_id, subjects, tasks=(None,), runs=(None,), sessions=(None,) +): if (sessions[0] is None) and (runs[0] is None): assert np.all(counts.index == subjects) else: @@ -65,29 +82,25 @@ def _check_counts(counts, events, event_id, subjects, key += (runs[0],) key = key if len(key) > 1 else key[0] - assert ( - counts.at[key, (tasks[0], k)] == - (events[:, 2] == v).sum() - ) + assert counts.at[key, (tasks[0], k)] == (events[:, 2] == v).sum() @pytest.mark.parametrize( - ('subjects', 'tasks', 'runs', 'sessions'), + ("subjects", "tasks", "runs", "sessions"), [ - (['01'], ['task1'], ['01'], ['01']), - (['01', '02'], ['task1'], ['01'], ['01']), - (['01', '02'], ['task1', 'task2'], ['01'], ['01']), - (['01'], ['task1', 'task2'], [None], ['01']), - (['01'], ['task1', 'task2'], ['01'], [None]), - (['01'], ['task1', 'task2'], [None], [None]), - ] + (["01"], ["task1"], ["01"], ["01"]), + (["01", "02"], ["task1"], ["01"], ["01"]), + (["01", "02"], ["task1", "task2"], ["01"], ["01"]), + (["01"], ["task1", "task2"], [None], ["01"]), + (["01"], ["task1", "task2"], ["01"], [None]), + (["01"], ["task1", "task2"], [None], [None]), + ], ) @requires_pandas @testing.requires_testing_data def test_count_events(tmp_path, subjects, tasks, runs, sessions): """Test the event counts.""" - root, events, event_id = _make_dataset(tmp_path, subjects, tasks, runs, - sessions) + root, events, event_id = _make_dataset(tmp_path, subjects, tasks, runs, sessions) counts = count_events(root) @@ -98,34 +111,37 @@ def test_count_events(tmp_path, subjects, tasks, runs, sessions): @testing.requires_testing_data def test_count_events_bids_path(tmp_path): """Test the event counts passing a BIDSPath.""" - root, events, event_id = \ - _make_dataset(tmp_path, subjects=['01', '02'], tasks=['task1']) + root, events, event_id = _make_dataset( + tmp_path, subjects=["01", "02"], tasks=["task1"] + ) - with pytest.raises(ValueError, match='datatype .*anat.* is not supported'): - bids_path = BIDSPath(root=root, subject='01', datatype='anat') + with pytest.raises(ValueError, match="datatype .*anat.* is not supported"): + bids_path = BIDSPath(root=root, subject="01", datatype="anat") count_events(bids_path) - bids_path = BIDSPath(root=root, subject='01', datatype='meg') + bids_path = BIDSPath(root=root, subject="01", datatype="meg") counts = count_events(bids_path) - _check_counts(counts, events, event_id, subjects=['01'], tasks=['task1']) + _check_counts(counts, events, event_id, subjects=["01"], 
tasks=["task1"]) @requires_pandas @testing.requires_testing_data def test_count_no_events_file(tmp_path): """Test count_events with no event present.""" - raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' + raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" raw = mne.io.read_raw(raw_fname) - raw.info['line_freq'] = 60. + raw.info["line_freq"] = 60.0 root = str(tmp_path) bids_path = BIDSPath( - subject='01', task='task1', root=root, + subject="01", + task="task1", + root=root, ) write_raw_bids(raw, bids_path, overwrite=True, verbose=False) - with pytest.raises(ValueError, match='No events files found.'): + with pytest.raises(ValueError, match="No events files found."): count_events(root) @@ -133,19 +149,26 @@ def test_count_no_events_file(tmp_path): @testing.requires_testing_data def test_count_no_events_column(tmp_path): """Test case where events.tsv doesn't contain [stim,trial]_type column.""" - subject, task, run, session, datatype = '01', 'task1', '01', '01', 'meg' - root, events, event_id = _make_dataset(tmp_path, [subject], [task], [run], - [session]) + subject, task, run, session, datatype = "01", "task1", "01", "01", "meg" + root, events, event_id = _make_dataset( + tmp_path, [subject], [task], [run], [session] + ) # Delete the `stim_type` column. - events_tsv_fpath = BIDSPath(root=root, subject=subject, task=task, run=run, - session=session, datatype=datatype, - suffix='events', extension='.tsv').fpath + events_tsv_fpath = BIDSPath( + root=root, + subject=subject, + task=task, + run=run, + session=session, + datatype=datatype, + suffix="events", + extension=".tsv", + ).fpath events_tsv = _from_tsv(events_tsv_fpath) - events_tsv['stim_type'] = events_tsv['trial_type'] - del events_tsv['trial_type'] + events_tsv["stim_type"] = events_tsv["trial_type"] + del events_tsv["trial_type"] _write_tsv(fname=events_tsv_fpath, dictionary=events_tsv, overwrite=True) counts = count_events(root) - _check_counts(counts, events, event_id, [subject], [task], [run], - [session]) + _check_counts(counts, events, event_id, [subject], [task], [run], [session]) diff --git a/mne_bids/tests/test_tsv_handler.py b/mne_bids/tests/test_tsv_handler.py index ae5f7d8ce..97cb35b7c 100644 --- a/mne_bids/tests/test_tsv_handler.py +++ b/mne_bids/tests/test_tsv_handler.py @@ -8,28 +8,34 @@ import pytest -from mne_bids.tsv_handler import (_from_tsv, _to_tsv, _combine_rows, _drop, - _contains_row, _tsv_to_str) +from mne_bids.tsv_handler import ( + _from_tsv, + _to_tsv, + _combine_rows, + _drop, + _contains_row, + _tsv_to_str, +) def test_tsv_handler(tmp_path): """Test the TSV handling.""" # create some dummy data - d = odict(a=[1, 2, 3, 4], b=['five', 'six', 'seven', 'eight']) - assert _contains_row(d, {'a': 1, 'b': 'five'}) - d2 = odict(a=[5], b=['nine']) + d = odict(a=[1, 2, 3, 4], b=["five", "six", "seven", "eight"]) + assert _contains_row(d, {"a": 1, "b": "five"}) + d2 = odict(a=[5], b=["nine"]) d = _combine_rows(d, d2) - assert 5 in d['a'] + assert 5 in d["a"] d2 = odict(a=[5]) d = _combine_rows(d, d2) - assert 'n/a' in d['b'] - d2 = odict(a=[5], b=['ten']) - d = _combine_rows(d, d2, drop_column='a') + assert "n/a" in d["b"] + d2 = odict(a=[5], b=["ten"]) + d = _combine_rows(d, d2, drop_column="a") # make sure that the repeated data was dropped - assert 'nine' not in d['b'] + assert "nine" not in d["b"] print(_tsv_to_str(d)) - d_path = tmp_path / 'output.tsv' + d_path = tmp_path / "output.tsv" # write the data to an output tsv file _to_tsv(d, d_path) @@ -44,24 +50,24 @@ 
def test_tsv_handler(tmp_path): d = _from_tsv(d_path, str) # remove any rows with 2 or 5 in them - d = _drop(d, [2, 5], 'a') - assert 2 not in d['a'] + d = _drop(d, [2, 5], "a") + assert 2 not in d["a"] # test combining data with differing numbers of columns - d = odict(a=[1, 2], b=['three', 'four']) - d2 = odict(a=[4], b=['five'], c=[3.1415]) + d = odict(a=[1, 2], b=["three", "four"]) + d2 = odict(a=[4], b=["five"], c=[3.1415]) # raise an error if an attempt is made to add a new column with pytest.raises(KeyError): d = _combine_rows(d, d2) d2 = odict(a=[5]) d = _combine_rows(d, d2) - assert d['b'] == ['three', 'four', 'n/a'] - assert _contains_row(d, {'a': 5}) + assert d["b"] == ["three", "four", "n/a"] + assert _contains_row(d, {"a": 5}) # test reading a single column _to_tsv(odict(a=[1, 2, 3, 4]), d_path) d = _from_tsv(d_path) - assert d['a'] == ['1', '2', '3', '4'] + assert d["a"] == ["1", "2", "3", "4"] # test an empty tsv (just headers) _to_tsv(odict(onset=[], duration=[], trial_type=[]), d_path) @@ -77,7 +83,7 @@ def test_contains_row_different_types(): NumPy, see https://github.com/mne-tools/mne-bids/pull/372 (pytest must be configured to fail on warnings for this to work!) """ - data = odict(age=[20, 30, 40, 'n/a']) # string + data = odict(age=[20, 30, 40, "n/a"]) # string row = dict(age=60) # int _contains_row(data, row) @@ -89,8 +95,8 @@ def test_drop_different_types(): NumPy, see https://github.com/mne-tools/mne-bids/pull/372 (pytest must be configured to fail on warnings for this to work!) """ - column = 'age' - data = odict([(column, [20, 30, 40, 'n/a'])]) # string + column = "age" + data = odict([(column, [20, 30, 40, "n/a"])]) # string values_to_drop = (20,) # int result = _drop(data, values=values_to_drop, column=column) diff --git a/mne_bids/tests/test_update.py b/mne_bids/tests/test_update.py index 90561f1fd..e65655dba 100644 --- a/mne_bids/tests/test_update.py +++ b/mne_bids/tests/test_update.py @@ -13,41 +13,53 @@ from mne.io.constants import FIFF from mne.datasets import testing -from mne_bids import (BIDSPath, write_raw_bids, - write_meg_calibration, write_meg_crosstalk, - get_anat_landmarks, update_sidecar_json, write_anat, - update_anat_landmarks) +from mne_bids import ( + BIDSPath, + write_raw_bids, + write_meg_calibration, + write_meg_crosstalk, + get_anat_landmarks, + update_sidecar_json, + write_anat, + update_anat_landmarks, +) from mne_bids.path import _mkdir_p from mne_bids.utils import _write_json -subject_id = '01' -session_id = '01' -run = '01' +subject_id = "01" +session_id = "01" +run = "01" acq = None -task = 'testing' +task = "testing" data_path = testing.data_path(download=False) bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task) + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task +) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _get_bids_test_dir(tmp_path_factory): """Return path to a written test BIDS dir.""" - bids_root = str(tmp_path_factory.mktemp('mnebids_utils_test_bids_ds')) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') - cal_fname = op.join(data_path, 'SSS', 'sss_cal_mgh.dat') - crosstalk_fname = op.join(data_path, 'SSS', 'ct_sparse.fif') + bids_root =
str(tmp_path_factory.mktemp("mnebids_utils_test_bids_ds")) + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) + cal_fname = op.join(data_path, "SSS", "sss_cal_mgh.dat") + crosstalk_fname = op.join(data_path, "SSS", "ct_sparse.fif") raw = mne.io.read_raw_fif(raw_fname) - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 # Drop unknown events. events = mne.read_events(events_fname) @@ -55,30 +67,29 @@ def _get_bids_test_dir(tmp_path_factory): bids_path.update(root=bids_root) # Write multiple runs for testing purposes - for run_idx in [run, '02']: + for run_idx in [run, "02"]: name = bids_path.copy().update(run=run_idx) - write_raw_bids(raw, name, events=events, - event_id=event_id, overwrite=True) + write_raw_bids(raw, name, events=events, event_id=event_id, overwrite=True) write_meg_calibration(cal_fname, bids_path=bids_path) write_meg_crosstalk(crosstalk_fname, bids_path=bids_path) return bids_root -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def _get_sidecar_json_update_file(_get_bids_test_dir): """Return path to a sidecar JSON updating file.""" bids_root = _get_bids_test_dir - sample_scripts = op.join(bids_root, 'sourcedata') - sidecar_fpath = op.join(sample_scripts, 'sidecarjson_update.json') + sample_scripts = op.join(bids_root, "sourcedata") + sidecar_fpath = op.join(sample_scripts, "sidecarjson_update.json") _mkdir_p(sample_scripts) update_json = { - 'InstitutionName': 'mne-bids', - 'InstitutionAddress': 'Internet', - 'MEGChannelCount': 300, - 'MEGREFChannelCount': 6, - 'SEEGChannelCount': 0, + "InstitutionName": "mne-bids", + "InstitutionAddress": "Internet", + "MEGChannelCount": 300, + "MEGREFChannelCount": 6, + "SEEGChannelCount": 0, } _write_json(sidecar_fpath, update_json, overwrite=True) @@ -86,28 +97,37 @@ def _get_sidecar_json_update_file(_get_bids_test_dir): @testing.requires_testing_data -def test_update_sidecar_jsons(_get_bids_test_dir, _bids_validate, - _get_sidecar_json_update_file): +def test_update_sidecar_jsons( + _get_bids_test_dir, _bids_validate, _get_sidecar_json_update_file +): """Test updating sidecar JSON files.""" bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task, suffix='meg', root=_get_bids_test_dir) + subject=subject_id, + session=session_id, + run=run, + acquisition=acq, + task=task, + suffix="meg", + root=_get_bids_test_dir, + ) # expected key, original value, and expected value after update # Fields that are not `None` already are expected to exist # in this sidecar file. Fields that are `None` will get # written with the sidecar json value when update is called.
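# (added note: put differently, update_sidecar_json should add keys that are # missing from the sidecar and overwrite the values of keys that already exist)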
- expected_checks = [('InstitutionName', None, 'mne-bids'), - ('InstitutionAddress', None, 'Internet'), - ('MEGChannelCount', 306, 300), - ('MEGREFChannelCount', 0, 6), - ('ECGChannelCount', 0, 0), - ('SEEGChannelCount', None, 0)] + expected_checks = [ + ("InstitutionName", None, "mne-bids"), + ("InstitutionAddress", None, "Internet"), + ("MEGChannelCount", 306, 300), + ("MEGREFChannelCount", 0, 6), + ("ECGChannelCount", 0, 0), + ("SEEGChannelCount", None, 0), + ] # get the sidecar json - sidecar_path = bids_path.copy().update(extension='.json', datatype='meg') + sidecar_path = bids_path.copy().update(extension=".json", datatype="meg") sidecar_fpath = sidecar_path.fpath - with open(sidecar_fpath, 'r', encoding='utf-8') as fin: + with open(sidecar_fpath, "r", encoding="utf-8") as fin: sidecar_json = json.load(fin) for key, val, _ in expected_checks: assert sidecar_json.get(key) == val @@ -115,7 +135,7 @@ def test_update_sidecar_jsons(_get_bids_test_dir, _bids_validate, # update sidecars update_sidecar_json(sidecar_path, _get_sidecar_json_update_file) - with open(sidecar_fpath, 'r', encoding='utf-8') as fin: + with open(sidecar_fpath, "r", encoding="utf-8") as fin: sidecar_json = json.load(fin) for key, _, val in expected_checks: assert sidecar_json.get(key) == val @@ -124,94 +144,100 @@ def test_update_sidecar_jsons(_get_bids_test_dir, _bids_validate, # should result in an error if you don't explicitly say # it's a json file with pytest.raises(RuntimeError, match='Only works for ".json"'): - update_sidecar_json(sidecar_path.copy().update( - extension=None), _get_sidecar_json_update_file) + update_sidecar_json( + sidecar_path.copy().update(extension=None), _get_sidecar_json_update_file + ) # an error should be raised if the file path doesn't exist - error_bids_path = sidecar_path.copy().update(subject='02') - with pytest.raises(RuntimeError, match='Sidecar file ' - 'does not exist.'): - update_sidecar_json( - error_bids_path, _get_sidecar_json_update_file) + error_bids_path = sidecar_path.copy().update(subject="02") + with pytest.raises(RuntimeError, match="Sidecar file " "does not exist."): + update_sidecar_json(error_bids_path, _get_sidecar_json_update_file) @testing.requires_testing_data def test_update_anat_landmarks(tmp_path): """Test updating the anatomical landmarks of an MRI scan.""" - pytest.importorskip('nibabel') - raw_path = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' - trans_path = Path(str(raw_path).replace('_raw.fif', '-trans.fif')) - t1_path = data_path / 'subjects' / 'sample' / 'mri' / 'T1.mgz' - fs_subject = 'sample' - fs_subjects_dir = data_path / 'subjects' + pytest.importorskip("nibabel") + raw_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" + trans_path = Path(str(raw_path).replace("_raw.fif", "-trans.fif")) + t1_path = data_path / "subjects" / "sample" / "mri" / "T1.mgz" + fs_subject = "sample" + fs_subjects_dir = data_path / "subjects" bids_root = tmp_path - bids_path_mri = BIDSPath(subject=subject_id, session=session_id, - acquisition=acq, root=bids_root, datatype='anat', - suffix='T1w') + bids_path_mri = BIDSPath( + subject=subject_id, + session=session_id, + acquisition=acq, + root=bids_root, + datatype="anat", + suffix="T1w", + ) # First, write the MRI scan to BIDS, including the anatomical landmarks info = mne.io.read_info(raw_path) trans = mne.read_trans(trans_path) landmarks = get_anat_landmarks( - image=t1_path, info=info, trans=trans, fs_subject=fs_subject, - fs_subjects_dir=fs_subjects_dir + image=t1_path, + info=info, +
trans=trans, + fs_subject=fs_subject, + fs_subjects_dir=fs_subjects_dir, + ) + bids_path_mri = write_anat( + image=t1_path, bids_path=bids_path_mri, landmarks=landmarks, deface=False ) - bids_path_mri = write_anat(image=t1_path, bids_path=bids_path_mri, - landmarks=landmarks, deface=False) - bids_path_mri_json = bids_path_mri.copy().update(extension='.json') + bids_path_mri_json = bids_path_mri.copy().update(extension=".json") # Modify the landmarks # Move the nasion a bit landmarks_new = landmarks.copy() - landmarks_new.dig[1]['r'] *= 0.9 + landmarks_new.dig[1]["r"] *= 0.9 update_anat_landmarks(bids_path=bids_path_mri, landmarks=landmarks_new) - with bids_path_mri_json.fpath.open(encoding='utf-8') as f: + with bids_path_mri_json.fpath.open(encoding="utf-8") as f: mri_json = json.load(f) assert np.allclose( - landmarks_new.dig[1]['r'], - mri_json['AnatomicalLandmarkCoordinates']['NAS'] + landmarks_new.dig[1]["r"], mri_json["AnatomicalLandmarkCoordinates"]["NAS"] ) # Remove JSON sidecar; updating the anatomical landmarks should re-create # the file unless `on_missing` is `'raise'` bids_path_mri_json.fpath.unlink() with pytest.raises( - KeyError, - match='No AnatomicalLandmarkCoordinates section found' + KeyError, match="No AnatomicalLandmarkCoordinates section found" ): update_anat_landmarks(bids_path=bids_path_mri, landmarks=landmarks_new) update_anat_landmarks( - bids_path=bids_path_mri, landmarks=landmarks_new, on_missing='ignore' + bids_path=bids_path_mri, landmarks=landmarks_new, on_missing="ignore" ) - with pytest.raises(KeyError, match='landmark not found'): + with pytest.raises(KeyError, match="landmark not found"): update_anat_landmarks( - bids_path=bids_path_mri, landmarks=landmarks_new, kind='ses-1' + bids_path=bids_path_mri, landmarks=landmarks_new, kind="ses-1" ) update_anat_landmarks( - bids_path=bids_path_mri, landmarks=landmarks_new, kind='ses-1', - on_missing='ignore' + bids_path=bids_path_mri, + landmarks=landmarks_new, + kind="ses-1", + on_missing="ignore", ) - mri_json = json.loads(bids_path_mri_json.fpath.read_text(encoding='utf-8')) - assert 'NAS' in mri_json['AnatomicalLandmarkCoordinates'] - assert 'NAS_ses-1' in mri_json['AnatomicalLandmarkCoordinates'] + mri_json = json.loads(bids_path_mri_json.fpath.read_text(encoding="utf-8")) + assert "NAS" in mri_json["AnatomicalLandmarkCoordinates"] + assert "NAS_ses-1" in mri_json["AnatomicalLandmarkCoordinates"] assert np.allclose( - landmarks_new.dig[1]['r'], - mri_json['AnatomicalLandmarkCoordinates']['NAS'] + landmarks_new.dig[1]["r"], mri_json["AnatomicalLandmarkCoordinates"]["NAS"] ) # Check without extension provided bids_path_mri_no_ext = bids_path_mri.copy().update(extension=None) - update_anat_landmarks(bids_path=bids_path_mri_no_ext, - landmarks=landmarks_new) + update_anat_landmarks(bids_path=bids_path_mri_no_ext, landmarks=landmarks_new) # Check handling of invalid input - bids_path_invalid = bids_path_mri.copy().update(datatype='meg') + bids_path_invalid = bids_path_mri.copy().update(datatype="meg") with pytest.raises(ValueError, match='Can only operate on "anat"'): update_anat_landmarks(bids_path=bids_path_invalid, landmarks=landmarks) @@ -219,55 +245,50 @@ def test_update_anat_landmarks(tmp_path): with pytest.raises(ValueError, match='lease specify the "suffix"'): update_anat_landmarks(bids_path=bids_path_invalid, landmarks=landmarks) - bids_path_invalid = bids_path_mri.copy().update(suffix='meg') - with pytest.raises(ValueError, - match='Can only operate on "T1w" and "FLASH"'): + bids_path_invalid = 
bids_path_mri.copy().update(suffix="meg") + with pytest.raises(ValueError, match='Can only operate on "T1w" and "FLASH"'): update_anat_landmarks(bids_path=bids_path_invalid, landmarks=landmarks) - bids_path_invalid = bids_path_mri.copy().update(subject='invalid') - with pytest.raises(ValueError, match='Could not find an MRI scan'): + bids_path_invalid = bids_path_mri.copy().update(subject="invalid") + with pytest.raises(ValueError, match="Could not find an MRI scan"): update_anat_landmarks(bids_path=bids_path_invalid, landmarks=landmarks) # Unsupported coordinate frame landmarks_invalid = landmarks.copy() for digpoint in landmarks_invalid.dig: - digpoint['coord_frame'] = FIFF.FIFFV_MNE_COORD_RAS + digpoint["coord_frame"] = FIFF.FIFFV_MNE_COORD_RAS - with pytest.raises(ValueError, match='must be specified in MRI voxel'): - update_anat_landmarks(bids_path=bids_path_mri, - landmarks=landmarks_invalid) + with pytest.raises(ValueError, match="must be specified in MRI voxel"): + update_anat_landmarks(bids_path=bids_path_mri, landmarks=landmarks_invalid) # Missing cardinal point landmarks_invalid = landmarks.copy() del landmarks_invalid.dig[0] - with pytest.raises(ValueError, - match='did not contain all required cardinal points'): - update_anat_landmarks(bids_path=bids_path_mri, - landmarks=landmarks_invalid) + with pytest.raises( + ValueError, match="did not contain all required cardinal points" + ): + update_anat_landmarks(bids_path=bids_path_mri, landmarks=landmarks_invalid) # Test with path-like landmarks - fiducials_path = (data_path / 'subjects' / 'sample' / 'bem' / - 'sample-fiducials.fif') + fiducials_path = data_path / "subjects" / "sample" / "bem" / "sample-fiducials.fif" update_anat_landmarks( bids_path=bids_path_mri, landmarks=fiducials_path, - fs_subject='sample', - fs_subjects_dir=data_path / 'subjects' + fs_subject="sample", + fs_subjects_dir=data_path / "subjects", ) expected_coords_in_voxels = np.array( - [[68.38202, 45.24057, 43.439808], # noqa: E241 - [42.27006, 30.758774, 74.09837 ], # noqa: E202, E241 - [17.044853, 46.586075, 42.618504]] - ) - mri_json = json.loads( - bids_path_mri_json.fpath.read_text(encoding='utf-8') + [ + [68.38202, 45.24057, 43.439808], # noqa: E241 + [42.27006, 30.758774, 74.09837], # noqa: E202, E241 + [17.044853, 46.586075, 42.618504], + ] ) + mri_json = json.loads(bids_path_mri_json.fpath.read_text(encoding="utf-8")) for landmark, expected_coords in zip( - ('LPA', 'NAS', 'RPA'), - expected_coords_in_voxels + ("LPA", "NAS", "RPA"), expected_coords_in_voxels ): assert np.allclose( - mri_json['AnatomicalLandmarkCoordinates'][landmark], - expected_coords + mri_json["AnatomicalLandmarkCoordinates"][landmark], expected_coords ) diff --git a/mne_bids/tests/test_utils.py b/mne_bids/tests/test_utils.py index 97cab93d6..6a03370b2 100644 --- a/mne_bids/tests/test_utils.py +++ b/mne_bids/tests/test_utils.py @@ -14,27 +14,32 @@ import mne from mne_bids import BIDSPath -from mne_bids.utils import (_check_types, _age_on_date, _handle_datatype, - _infer_eeg_placement_scheme, _get_ch_type_mapping, - _check_datatype) +from mne_bids.utils import ( + _check_types, + _age_on_date, + _handle_datatype, + _infer_eeg_placement_scheme, + _get_ch_type_mapping, + _check_datatype, +) from mne_bids.path import _path_to_str -base_path = op.join(op.dirname(mne.__file__), 'io') -subject_id = '01' -session_id = '01' -run = '01' +base_path = op.join(op.dirname(mne.__file__), "io") +subject_id = "01" +session_id = "01" +run = "01" acq = None -task = 'testing' +task = "testing" 
 bids_path = BIDSPath(
-    subject=subject_id, session=session_id, run=run, acquisition=acq,
-    task=task)
+    subject=subject_id, session=session_id, run=run, acquisition=acq, task=task
+)


 def test_get_ch_type_mapping():
     """Test getting a correct channel mapping."""
     with pytest.raises(ValueError, match='specified from "bogus" to "mne"'):
-        _get_ch_type_mapping(fro='bogus', to='mne')
+        _get_ch_type_mapping(fro="bogus", to="mne")


 def test_handle_datatype():
@@ -44,57 +49,54 @@ def test_handle_datatype():
     """Test the automatic extraction of datatype from the data."""
     n_channels = 2
     sampling_rate = 100
     data = random((n_channels, sampling_rate))
     # datatype is given, check once for each datatype
-    channel_types = ['grad', 'eeg', 'ecog', 'seeg', 'dbs']
-    datatypes = ['meg', 'eeg', 'ieeg', 'ieeg', 'ieeg']
+    channel_types = ["grad", "eeg", "ecog", "seeg", "dbs"]
+    datatypes = ["meg", "eeg", "ieeg", "ieeg", "ieeg"]
     for ch_type, datatype in zip(channel_types, datatypes):
-        info = mne.create_info(n_channels, sampling_rate,
-                               ch_types=[ch_type] * 2)
+        info = mne.create_info(n_channels, sampling_rate, ch_types=[ch_type] * 2)
         raw = mne.io.RawArray(data, info)
         assert _handle_datatype(raw, datatype) == datatype
     # datatype is not given, will be inferred if possible
     datatype = None
     # check if datatype is correctly inferred (combined EEG and iEEG/MEG data)
-    channel_types = [['grad', 'eeg'], ['eeg', 'mag'], ['eeg', 'seeg'],
-                     ['ecog', 'eeg']]
-    expected_modalities = ['meg', 'meg', 'ieeg', 'ieeg']
+    channel_types = [["grad", "eeg"], ["eeg", "mag"], ["eeg", "seeg"], ["ecog", "eeg"]]
+    expected_modalities = ["meg", "meg", "ieeg", "ieeg"]
     for ch_type, expected_mod in zip(channel_types, expected_modalities):
         info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type)
         raw = mne.io.RawArray(random((2, sampling_rate)), info)
         assert _handle_datatype(raw, datatype) == expected_mod
     # set type to MEG if type is EEG/iEEG but there are MEG channels as well
-    channel_types = [['grad', 'eeg'], ['grad', 'seeg']]
-    datatypes = ['eeg', 'ieeg']
+    channel_types = [["grad", "eeg"], ["grad", "seeg"]]
+    datatypes = ["eeg", "ieeg"]
     for ch_type, datatype in zip(channel_types, datatypes):
         info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type)
         raw = mne.io.RawArray(random((2, sampling_rate)), info)
-        assert _handle_datatype(raw, datatype) == 'meg'
+        assert _handle_datatype(raw, datatype) == "meg"
     # if the situation is ambiguous (iEEG and MEG), raise ValueError
     datatype = None
-    channel_types = [['grad', 'ecog'], ['grad', 'seeg']]
+    channel_types = [["grad", "ecog"], ["grad", "seeg"]]
     for ch_type in channel_types:
-        with pytest.raises(ValueError, match='Multiple data types'):
+        with pytest.raises(ValueError, match="Multiple data types"):
             info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type)
             raw = mne.io.RawArray(random((2, sampling_rate)), info)
             _handle_datatype(raw, datatype)
     # if proper channel type (iEEG, EEG or MEG) is not found, raise ValueError
-    ch_type = ['misc']
-    with pytest.raises(ValueError, match='No MEG, EEG or iEEG channels found'):
-        info = mne.create_info(n_channels, sampling_rate,
-                               ch_types=ch_type * 2)
+    ch_type = ["misc"]
+    with pytest.raises(ValueError, match="No MEG, EEG or iEEG channels found"):
+        info = mne.create_info(n_channels, sampling_rate, ch_types=ch_type * 2)
         raw = mne.io.RawArray(data, info)
         _handle_datatype(raw, datatype)


 def test_check_types():
     """Test the check whether vars are str or None."""
-    assert _check_types(['foo', 'bar', None]) is None
+    assert _check_types(["foo", "bar", None]) is None
     with pytest.raises(ValueError):
-        _check_types([None,
1, 3.14, 'meg', [1, 2]]) + _check_types([None, 1, 3.14, "meg", [1, 2]]) def test_path_to_str(): """Test that _path_to_str returns a string.""" - path_str = 'foo' + path_str = "foo" assert _path_to_str(path_str) == path_str assert _path_to_str(Path(path_str)) == path_str @@ -119,50 +121,56 @@ def test_age_on_date(): def test_infer_eeg_placement_scheme(): """Test inferring a correct EEG placement scheme.""" # no eeg channels case (e.g., MEG data) - data_path = op.join(base_path, 'bti', 'tests', 'data') - raw_fname = op.join(data_path, 'test_pdf_linux') - config_fname = op.join(data_path, 'test_config_linux') - headshape_fname = op.join(data_path, 'test_hs_linux') + data_path = op.join(base_path, "bti", "tests", "data") + raw_fname = op.join(data_path, "test_pdf_linux") + config_fname = op.join(data_path, "test_config_linux") + headshape_fname = op.join(data_path, "test_hs_linux") raw = mne.io.read_raw_bti(raw_fname, config_fname, headshape_fname) placement_scheme = _infer_eeg_placement_scheme(raw) - assert placement_scheme == 'n/a' + assert placement_scheme == "n/a" # 1020 case - data_path = op.join(base_path, 'brainvision', 'tests', 'data') - raw_fname = op.join(data_path, 'test.vhdr') + data_path = op.join(base_path, "brainvision", "tests", "data") + raw_fname = op.join(data_path, "test.vhdr") raw = mne.io.read_raw_brainvision(raw_fname) placement_scheme = _infer_eeg_placement_scheme(raw) - assert placement_scheme == 'based on the extended 10/20 system' + assert placement_scheme == "based on the extended 10/20 system" # Unknown case, use raw from 1020 case but rename a channel - raw.rename_channels({'P3': 'foo'}) + raw.rename_channels({"P3": "foo"}) placement_scheme = _infer_eeg_placement_scheme(raw) - assert placement_scheme == 'n/a' + assert placement_scheme == "n/a" def test_check_datatype(): """Test checking if datatype exists in raw data.""" - sfreq, n_points = 1024., int(1e6) + sfreq, n_points = 1024.0, int(1e6) rng = RandomState(99) - info_eeg = mne.create_info(['ch1', 'ch2', 'ch3'], sfreq, ['eeg'] * 3) + info_eeg = mne.create_info(["ch1", "ch2", "ch3"], sfreq, ["eeg"] * 3) raw_eeg = mne.io.RawArray(rng.random((3, n_points)) * 1e-6, info_eeg) - info_meg = mne.create_info(['ch1', 'ch2', 'ch3'], sfreq, ['mag'] * 3) + info_meg = mne.create_info(["ch1", "ch2", "ch3"], sfreq, ["mag"] * 3) raw_meg = mne.io.RawArray(rng.random((3, n_points)) * 1e-6, info_meg) - info_ieeg = mne.create_info(['ch1', 'ch2', 'ch3'], sfreq, ['seeg'] * 3) + info_ieeg = mne.create_info(["ch1", "ch2", "ch3"], sfreq, ["seeg"] * 3) raw_ieeg = mne.io.RawArray(rng.random((3, n_points)) * 1e-6, info_ieeg) # check behavior for unsupported data types - for datatype in (None, 'anat'): - with pytest.raises(ValueError, match=f'The specified datatype ' - f'{datatype} is currently not'): + for datatype in (None, "anat"): + with pytest.raises( + ValueError, match=f"The specified datatype " f"{datatype} is currently not" + ): _check_datatype(raw_eeg, datatype) # check behavior for matching data type - for raw, datatype in [(raw_eeg, 'eeg'), (raw_meg, 'meg'), - (raw_ieeg, 'ieeg')]: + for raw, datatype in [(raw_eeg, "eeg"), (raw_meg, "meg"), (raw_ieeg, "ieeg")]: _check_datatype(raw, datatype) # check for missing data type - for raw, datatype in [(raw_ieeg, 'eeg'), (raw_meg, 'eeg'), - (raw_ieeg, 'meg'), (raw_eeg, 'meg'), - (raw_meg, 'ieeg'), (raw_eeg, 'ieeg')]: - with pytest.raises(ValueError, match=f'The specified datatype ' - f'{datatype} was not found'): + for raw, datatype in [ + (raw_ieeg, "eeg"), + (raw_meg, "eeg"), + 
(raw_ieeg, "meg"), + (raw_eeg, "meg"), + (raw_meg, "ieeg"), + (raw_eeg, "ieeg"), + ]: + with pytest.raises( + ValueError, match=f"The specified datatype " f"{datatype} was not found" + ): _check_datatype(raw, datatype) diff --git a/mne_bids/tests/test_write.py b/mne_bids/tests/test_write.py index c7456c06b..fa4cadc04 100644 --- a/mne_bids/tests/test_write.py +++ b/mne_bids/tests/test_write.py @@ -25,8 +25,7 @@ import pytest import numpy as np -from numpy.testing import (assert_allclose, assert_array_equal, - assert_array_almost_equal) +from numpy.testing import assert_allclose, assert_array_equal, assert_array_almost_equal import mne from mne.datasets import testing @@ -35,67 +34,79 @@ from mne.io.constants import FIFF from mne.io.kit.kit import get_kit_info -from mne_bids import (write_raw_bids, read_raw_bids, BIDSPath, - write_anat, make_dataset_description, - mark_channels, write_meg_calibration, - write_meg_crosstalk, get_entities_from_fname, - get_anat_landmarks, write, anonymize_dataset, - get_entity_vals) +from mne_bids import ( + write_raw_bids, + read_raw_bids, + BIDSPath, + write_anat, + make_dataset_description, + mark_channels, + write_meg_calibration, + write_meg_crosstalk, + get_entities_from_fname, + get_anat_landmarks, + write, + anonymize_dataset, + get_entity_vals, +) from mne_bids.write import _get_fid_coords -from mne_bids.utils import (_stamp_to_dt, _get_anonymization_daysback, - get_anonymization_daysback, _write_json) +from mne_bids.utils import ( + _stamp_to_dt, + _get_anonymization_daysback, + get_anonymization_daysback, + _write_json, +) from mne_bids.tsv_handler import _from_tsv, _to_tsv from mne_bids.sidecar_updates import _update_sidecar, update_sidecar_json from mne_bids.path import _find_matching_sidecar, _parse_ext from mne_bids.pick import coil_type -from mne_bids.config import (REFERENCES, BIDS_COORD_FRAME_DESCRIPTIONS, - PYBV_VERSION) - -base_path = op.join(op.dirname(mne.__file__), 'io') -subject_id = '01' -subject_id2 = '02' -session_id = '01' -run = '01' -acq = '01' -run2 = '02' -task = 'testing' +from mne_bids.config import REFERENCES, BIDS_COORD_FRAME_DESCRIPTIONS, PYBV_VERSION + +base_path = op.join(op.dirname(mne.__file__), "io") +subject_id = "01" +subject_id2 = "02" +session_id = "01" +run = "01" +acq = "01" +run2 = "02" +task = "testing" _bids_path = BIDSPath( - subject=subject_id, session=session_id, run=run, acquisition=acq, - task=task) + subject=subject_id, session=session_id, run=run, acquisition=acq, task=task +) _bids_path_minimal = BIDSPath(subject=subject_id, task=task) warning_str = dict( - channel_unit_changed='ignore:The unit for chann*.:RuntimeWarning:mne', - meas_date_set_to_none="ignore:.*'meas_date' set to None:RuntimeWarning:" - "mne", - nasion_not_found='ignore:.*nasion not found:RuntimeWarning:mne', - unraisable_exception='ignore:.*Exception ignored.*:' - 'pytest.PytestUnraisableExceptionWarning', - encountered_data_in='ignore:Encountered data in*.:RuntimeWarning:mne', - edf_warning=r'ignore:^EDF\/EDF\+\/BDF files contain two fields .*' - r':RuntimeWarning:mne', - maxshield='ignore:.*Internal Active Shielding:RuntimeWarning:mne', - edfblocks='ignore:.*EDF format requires equal-length data ' - 'blocks:RuntimeWarning:mne', - brainvision_unit='ignore:Encountered unsupported ' - 'non-voltage units*.:UserWarning', - cnt_warning1='ignore:.*Could not parse meas date from the header. ' - 'Setting to None.', - cnt_warning2='ignore:.*Could not define the number of bytes automatically.' 
- ' Defaulting to 2.', - cnt_warning3='ignore:.*Coordinate frame could not be inferred.*', - no_hand='ignore:.*Not setting subject handedness.:RuntimeWarning:mne', - no_montage=r'ignore:Not setting position of.*channel found in ' - r'montage.*:RuntimeWarning:mne', + channel_unit_changed="ignore:The unit for chann*.:RuntimeWarning:mne", + meas_date_set_to_none="ignore:.*'meas_date' set to None:RuntimeWarning:" "mne", + nasion_not_found="ignore:.*nasion not found:RuntimeWarning:mne", + unraisable_exception="ignore:.*Exception ignored.*:" + "pytest.PytestUnraisableExceptionWarning", + encountered_data_in="ignore:Encountered data in*.:RuntimeWarning:mne", + edf_warning=r"ignore:^EDF\/EDF\+\/BDF files contain two fields .*" + r":RuntimeWarning:mne", + maxshield="ignore:.*Internal Active Shielding:RuntimeWarning:mne", + edfblocks="ignore:.*EDF format requires equal-length data " + "blocks:RuntimeWarning:mne", + brainvision_unit="ignore:Encountered unsupported " + "non-voltage units*.:UserWarning", + cnt_warning1="ignore:.*Could not parse meas date from the header. " + "Setting to None.", + cnt_warning2="ignore:.*Could not define the number of bytes automatically." + " Defaulting to 2.", + cnt_warning3="ignore:.*Coordinate frame could not be inferred.*", + no_hand="ignore:.*Not setting subject handedness.:RuntimeWarning:mne", + no_montage=r"ignore:Not setting position of.*channel found in " + r"montage.*:RuntimeWarning:mne", ) def _wrap_read_raw(read_raw): def fn(fname, *args, **kwargs): raw = read_raw(fname, *args, **kwargs) - raw.info['line_freq'] = 60 + raw.info["line_freq"] = 60 return raw + return fn @@ -116,32 +127,51 @@ def fn(fname, *args, **kwargs): # parametrized directory, filename and reader for EEG/iEEG data formats test_eegieeg_data = [ - ('EDF', 'test_reduced.edf', _read_raw_edf), - ('Persyst', 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay', _read_raw_persyst), # noqa - ('NihonKohden', 'MB0400FU.EEG', _read_raw_nihon), - ('CNT', 'scan41_short.cnt', _read_raw_cnt), - ('EGI', 'test_egi.mff', _read_raw_egi), - ('curry', 'test_bdf_stim_channel Curry 8.cdt', _read_raw_curry), + ("EDF", "test_reduced.edf", _read_raw_edf), + ( + "Persyst", + "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay", + _read_raw_persyst, + ), # noqa + ("NihonKohden", "MB0400FU.EEG", _read_raw_nihon), + ("CNT", "scan41_short.cnt", _read_raw_cnt), + ("EGI", "test_egi.mff", _read_raw_egi), + ("curry", "test_bdf_stim_channel Curry 8.cdt", _read_raw_curry), ] test_convert_data = test_eegieeg_data.copy() -test_convert_data.append(('CTF', 'testdata_ctf.ds', _read_raw_ctf)) +test_convert_data.append(("CTF", "testdata_ctf.ds", _read_raw_ctf)) # parametrization for testing conversion of file formats for MEG test_convertmeg_data = [ - ('CTF', 'FIF', 'testdata_ctf.ds', _read_raw_ctf), - ('CTF', 'auto', 'testdata_ctf.ds', _read_raw_ctf), + ("CTF", "FIF", "testdata_ctf.ds", _read_raw_ctf), + ("CTF", "auto", "testdata_ctf.ds", _read_raw_ctf), ] # parametrization for testing converting file formats for EEG/iEEG test_converteeg_data = [ - ('Persyst', 'BrainVision', 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay', _read_raw_persyst), # noqa - ('NihonKohden', 'BrainVision', 'MB0400FU.EEG', _read_raw_nihon), - ('CNT', 'BrainVision', 'scan41_short.cnt', _read_raw_cnt), - ('curry', 'BrainVision', 'test_bdf_stim_channel Curry 8.cdt', _read_raw_curry), # noqa - ('Persyst', 'EDF', 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay', _read_raw_persyst), # noqa - ('NihonKohden', 'EDF', 'MB0400FU.EEG', 
_read_raw_nihon), - ('CNT', 'EDF', 'scan41_short.cnt', _read_raw_cnt), - ('curry', 'EDF', 'test_bdf_stim_channel Curry 8.cdt', _read_raw_curry) + ( + "Persyst", + "BrainVision", + "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay", + _read_raw_persyst, + ), # noqa + ("NihonKohden", "BrainVision", "MB0400FU.EEG", _read_raw_nihon), + ("CNT", "BrainVision", "scan41_short.cnt", _read_raw_cnt), + ( + "curry", + "BrainVision", + "test_bdf_stim_channel Curry 8.cdt", + _read_raw_curry, + ), # noqa + ( + "Persyst", + "EDF", + "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay", + _read_raw_persyst, + ), # noqa + ("NihonKohden", "EDF", "MB0400FU.EEG", _read_raw_nihon), + ("CNT", "EDF", "scan41_short.cnt", _read_raw_cnt), + ("curry", "EDF", "test_bdf_stim_channel Curry 8.cdt", _read_raw_curry), ] data_path = testing.data_path(download=False) @@ -150,21 +180,31 @@ def fn(fname, *args, **kwargs): def _test_anonymize(root, raw, bids_path, events_fname=None, event_id=None): """Write data to `root` for testing anonymization.""" bids_path = _bids_path.copy().update(root=root) - if raw.info['meas_date'] is not None: + if raw.info["meas_date"] is not None: daysback, _ = get_anonymization_daysback(raw) else: # just pass back any arbitrary number if no measurement date daysback = 3300 - write_raw_bids(raw, bids_path, events=events_fname, - event_id=event_id, anonymize=dict(daysback=daysback), - overwrite=False) + write_raw_bids( + raw, + bids_path, + events=events_fname, + event_id=event_id, + anonymize=dict(daysback=daysback), + overwrite=False, + ) scans_tsv = BIDSPath( - subject=subject_id, session=session_id, - suffix='scans', extension='.tsv', root=root) + subject=subject_id, + session=session_id, + suffix="scans", + extension=".tsv", + root=root, + ) data = _from_tsv(scans_tsv) - if data['acq_time'] is not None and data['acq_time'][0] != 'n/a': - assert datetime.strptime(data['acq_time'][0], - '%Y-%m-%dT%H:%M:%S.%fZ').year < 1925 + if data["acq_time"] is not None and data["acq_time"][0] != "n/a": + assert ( + datetime.strptime(data["acq_time"][0], "%Y-%m-%dT%H:%M:%S.%fZ").year < 1925 + ) return root @@ -177,622 +217,675 @@ def test_write_participants(_bids_validate, tmp_path): files are kept, and mne-bids correctly writes all the subject info it can using ``raw.info['subject_info']``. """ - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname) # add fake participants data - raw.set_meas_date(datetime(year=1994, month=1, day=26, - tzinfo=timezone.utc)) - raw.info['subject_info'] = { - 'his_id': subject_id2, - 'birthday': (1993, 1, 26), - 'sex': 1, - 'hand': 2 + raw.set_meas_date(datetime(year=1994, month=1, day=26, tzinfo=timezone.utc)) + raw.info["subject_info"] = { + "his_id": subject_id2, + "birthday": (1993, 1, 26), + "sex": 1, + "hand": 2, } bids_path = _bids_path.copy().update(root=tmp_path) write_raw_bids(raw, bids_path) # assert age of participant is correct - participants_tsv = tmp_path / 'participants.tsv' + participants_tsv = tmp_path / "participants.tsv" data = _from_tsv(participants_tsv) - assert data['age'][data['participant_id'].index('sub-01')] == '1' + assert data["age"][data["participant_id"].index("sub-01")] == "1" # Removing some columns from participants.tsv should not prevent us from # writing additional participants later on. 
Before running this test,
     # ensure we have at least 2 participants in the dataset already – this is
     # a regression test for GH-1104.
-    bids_path.update(subject='02')
+    bids_path.update(subject="02")
     write_raw_bids(raw, bids_path, verbose=False)
     data = _from_tsv(participants_tsv)
-    data.pop('hand')
+    data.pop("hand")
     _to_tsv(data, participants_tsv)
     # now write another subject
-    bids_path.update(subject='03')
+    bids_path.update(subject="03")
     write_raw_bids(raw, bids_path, verbose=False)
     data = _from_tsv(participants_tsv)
     # hand should have been written properly with now 'n/a' for sub-01 and
     # sub-02, but 'L' for sub-03
-    assert data['hand'][data['participant_id'].index('sub-01')] == 'n/a'
-    assert data['hand'][data['participant_id'].index('sub-02')] == 'n/a'
-    assert data['hand'][data['participant_id'].index('sub-03')] == 'L'
+    assert data["hand"][data["participant_id"].index("sub-01")] == "n/a"
+    assert data["hand"][data["participant_id"].index("sub-02")] == "n/a"
+    assert data["hand"][data["participant_id"].index("sub-03")] == "L"
     # check to make sure participant data is overwritten, but keeps the fields
     # if there are extra fields that were user defined
     data = _from_tsv(participants_tsv)
-    participant_idx = data['participant_id'].index(f'sub-{subject_id}')
+    participant_idx = data["participant_id"].index(f"sub-{subject_id}")
     # create a new test column in participants file tsv
-    data['subject_test_col1'] = ['n/a'] * len(data['participant_id'])
-    data['subject_test_col1'][participant_idx] = 'S'
-    data['test_col2'] = ['n/a'] * len(data['participant_id'])
+    data["subject_test_col1"] = ["n/a"] * len(data["participant_id"])
+    data["subject_test_col1"][participant_idx] = "S"
+    data["test_col2"] = ["n/a"] * len(data["participant_id"])
     orig_key_order = list(data.keys())
     _to_tsv(data, participants_tsv)
     # create corresponding json entry
-    participants_json_fpath = tmp_path / 'participants.json'
+    participants_json_fpath = tmp_path / "participants.json"
     json_field = {
-        'Description': 'trial-outcome',
-        'Levels': {
-            'S': 'success',
-            'F': 'failure'
-        }
+        "Description": "trial-outcome",
+        "Levels": {"S": "success", "F": "failure"},
     }
-    _update_sidecar(participants_json_fpath, 'subject_test_col1', json_field)
+    _update_sidecar(participants_json_fpath, "subject_test_col1", json_field)
     # bids root should still be valid because json reflects changes in tsv
     _bids_validate(tmp_path)
     write_raw_bids(raw, bids_path, overwrite=True)
     data = _from_tsv(participants_tsv)
-    with open(participants_json_fpath, 'r', encoding='utf-8') as fin:
+    with open(participants_json_fpath, "r", encoding="utf-8") as fin:
         participants_json = json.load(fin)
-    assert 'subject_test_col1' in participants_json
-    assert data['subject_test_col1'][participant_idx] == 'S'
+    assert "subject_test_col1" in participants_json
+    assert data["subject_test_col1"][participant_idx] == "S"
     # in addition assert the original ordering of the new overwritten file
     assert list(data.keys()) == orig_key_order
     # if overwrite is False, then nothing should change from the above
-    with pytest.raises(FileExistsError, match='already exists'):
+    with pytest.raises(FileExistsError, match="already exists"):
         write_raw_bids(raw, bids_path, overwrite=False)
     data = _from_tsv(participants_tsv)
-    with open(participants_json_fpath, 'r', encoding='utf-8') as fin:
+    with open(participants_json_fpath, "r", encoding="utf-8") as fin:
         participants_json = json.load(fin)
-    assert 'subject_test_col1' in participants_json
-    assert data['age'][data['participant_id'].index('sub-01')] == '1'
-    assert data['subject_test_col1'][participant_idx] == 'S'
+    assert "subject_test_col1" in participants_json
+    assert data["age"][data["participant_id"].index("sub-01")] == "1"
+    assert data["subject_test_col1"][participant_idx] == "S"
     # in addition assert the original ordering of the new overwritten file
     assert list(data.keys()) == orig_key_order

     # For empty-room data, all fields except participant_id should be 'n/a'
-    assert raw.info['subject_info']  # Ensure the following test makes sense!
+    assert raw.info["subject_info"]  # Ensure the following test makes sense!
     bids_path_er = bids_path.copy().update(
-        subject='emptyroom', task='noise',
-        session=raw.info['meas_date'].strftime('%Y%m%d')
+        subject="emptyroom",
+        task="noise",
+        session=raw.info["meas_date"].strftime("%Y%m%d"),
     )
     write_raw_bids(raw=raw, bids_path=bids_path_er, verbose=False)
     participants_tsv = _from_tsv(participants_tsv)
-    idx = participants_tsv['participant_id'].index('sub-emptyroom')
-    assert participants_tsv['hand'][idx] == 'n/a'
-    assert participants_tsv['sex'][idx] == 'n/a'
-    assert participants_tsv['age'][idx] == 'n/a'
+    idx = participants_tsv["participant_id"].index("sub-emptyroom")
+    assert participants_tsv["hand"][idx] == "n/a"
+    assert participants_tsv["sex"][idx] == "n/a"
+    assert participants_tsv["age"][idx] == "n/a"


 @testing.requires_testing_data
 def test_write_correct_inputs():
     """Test that the inputs of write_raw_bids are valid."""
-    raw_fname = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_trunc_raw.fif')
+    raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif")
     raw = _read_raw_fif(raw_fname)
-    bids_path_str = 'sub-01_ses-01_meg.fif'
-    with pytest.raises(RuntimeError, match='"bids_path" must be a '
-                                           'BIDSPath object'):
+    bids_path_str = "sub-01_ses-01_meg.fif"
+    with pytest.raises(RuntimeError, match='"bids_path" must be a ' "BIDSPath object"):
         write_raw_bids(raw, bids_path_str)
     bids_path = _bids_path.copy()
     assert bids_path.root is None
-    with pytest.raises(
-        ValueError,
-        match='The root of the "bids_path" must be set'):
+    with pytest.raises(ValueError, match='The root of the "bids_path" must be set'):
         write_raw_bids(raw=raw, bids_path=bids_path)
-    bids_path = _bids_path.copy().update(root='/foo', subject=None)
-    with pytest.raises(
-        ValueError,
-        match='The subject of the "bids_path" must be set'):
+    bids_path = _bids_path.copy().update(root="/foo", subject=None)
+    with pytest.raises(ValueError, match='The subject of the "bids_path" must be set'):
         write_raw_bids(raw=raw, bids_path=bids_path)
-    bids_path = _bids_path.copy().update(root='/foo', task=None)
-    with pytest.raises(
-        ValueError,
-        match='The task of the "bids_path" must be set'):
+    bids_path = _bids_path.copy().update(root="/foo", task=None)
+    with pytest.raises(ValueError, match='The task of the "bids_path" must be set'):
         write_raw_bids(raw=raw, bids_path=bids_path)


 def test_make_dataset_description(tmp_path, monkeypatch):
     """Test making a dataset_description.json."""
-    make_dataset_description(path=tmp_path, name='tst')
+    make_dataset_description(path=tmp_path, name="tst")
-    with open(op.join(tmp_path, 'dataset_description.json'), 'r',
-              encoding='utf-8') as fid:
+    with open(
+        op.join(tmp_path, "dataset_description.json"), "r", encoding="utf-8"
+    ) as fid:
         dataset_description_json = json.load(fid)
         assert dataset_description_json["Authors"] == ["[Unspecified]"]
     make_dataset_description(
-        path=tmp_path, name='tst', authors='MNE B., MNE P.',
-        funding='GSOC2019, GSOC2021',
-
references_and_links='https://doi.org/10.21105/joss.01896', - dataset_type='derivative', overwrite=False, verbose=True + path=tmp_path, + name="tst", + authors="MNE B., MNE P.", + funding="GSOC2019, GSOC2021", + references_and_links="https://doi.org/10.21105/joss.01896", + dataset_type="derivative", + overwrite=False, + verbose=True, ) - with open(op.join(tmp_path, 'dataset_description.json'), 'r', - encoding='utf-8') as fid: + with open( + op.join(tmp_path, "dataset_description.json"), "r", encoding="utf-8" + ) as fid: dataset_description_json = json.load(fid) assert dataset_description_json["Authors"] == ["[Unspecified]"] make_dataset_description( - path=tmp_path, name='tst2', authors='MNE B., MNE P.', - funding='GSOC2019, GSOC2021', - references_and_links='https://doi.org/10.21105/joss.01896', - dataset_type='derivative', overwrite=True, verbose=True + path=tmp_path, + name="tst2", + authors="MNE B., MNE P.", + funding="GSOC2019, GSOC2021", + references_and_links="https://doi.org/10.21105/joss.01896", + dataset_type="derivative", + overwrite=True, + verbose=True, ) - with open(op.join(tmp_path, 'dataset_description.json'), 'r', - encoding='utf-8') as fid: + with open( + op.join(tmp_path, "dataset_description.json"), "r", encoding="utf-8" + ) as fid: dataset_description_json = json.load(fid) - assert dataset_description_json["Authors"] == ['MNE B.', 'MNE P.'] + assert dataset_description_json["Authors"] == ["MNE B.", "MNE P."] # Check we raise warnings and errors where appropriate - with pytest.raises(ValueError, match='`dataset_type` must be either "raw" ' - 'or "derivative."'): - make_dataset_description(path=tmp_path, name='tst', dataset_type='src') + with pytest.raises( + ValueError, match='`dataset_type` must be either "raw" ' 'or "derivative."' + ): + make_dataset_description(path=tmp_path, name="tst", dataset_type="src") - with pytest.warns(RuntimeWarning, match='The `doi` field in.*'): - make_dataset_description(path=tmp_path, name='tst', - doi='10.5281/zenodo.3686061') + with pytest.warns(RuntimeWarning, match="The `doi` field in.*"): + make_dataset_description( + path=tmp_path, name="tst", doi="10.5281/zenodo.3686061" + ) for gen_by in [[1, 2], 12]: - with pytest.raises(ValueError, match='generated_by must be a list.*'): - make_dataset_description(path=tmp_path, name='tst', - generated_by=gen_by) + with pytest.raises(ValueError, match="generated_by must be a list.*"): + make_dataset_description(path=tmp_path, name="tst", generated_by=gen_by) with pytest.raises(ValueError, match='"Name" is a required field.*'): - make_dataset_description(path=tmp_path, name='tst', - generated_by=[{"Version": 2}]) + make_dataset_description( + path=tmp_path, name="tst", generated_by=[{"Version": 2}] + ) gen_by = [{"Name": "bla", "x": 3, "y": 1}] with pytest.raises(ValueError, match=".*in dict: {'.', '.'}"): - make_dataset_description(path=tmp_path, name='tst', - generated_by=gen_by) + make_dataset_description(path=tmp_path, name="tst", generated_by=gen_by) for s_ds in [[1, 2], 12]: - with pytest.raises(ValueError, match='source_datasets must be a.*'): - make_dataset_description(path=tmp_path, name='tst', - source_datasets=s_ds) + with pytest.raises(ValueError, match="source_datasets must be a.*"): + make_dataset_description(path=tmp_path, name="tst", source_datasets=s_ds) s_ds = [{"URL": "bla", "x": 3, "y": 1}] with pytest.raises(ValueError, match=".*in dict: {'.', '.'}"): - make_dataset_description(path=tmp_path, name='tst', - source_datasets=s_ds) + make_dataset_description(path=tmp_path, 
name="tst", source_datasets=s_ds) - monkeypatch.setattr(write, 'BIDS_VERSION', 'old') - with pytest.raises(ValueError, match='Previous BIDS version used'): - make_dataset_description(path=tmp_path, name='tst') + monkeypatch.setattr(write, "BIDS_VERSION", "old") + with pytest.raises(ValueError, match="Previous BIDS version used"): + make_dataset_description(path=tmp_path, name="tst") def test_stamp_to_dt(): """Test conversions of meas_date to datetime objects.""" meas_date = (1346981585, 835782) meas_datetime = _stamp_to_dt(meas_date) - assert (meas_datetime == datetime(2012, 9, 7, 1, 33, 5, 835782, - tzinfo=timezone.utc)) + assert meas_datetime == datetime(2012, 9, 7, 1, 33, 5, 835782, tzinfo=timezone.utc) meas_date = (1346981585,) meas_datetime = _stamp_to_dt(meas_date) - assert (meas_datetime == datetime(2012, 9, 7, 1, 33, 5, 0, - tzinfo=timezone.utc)) + assert meas_datetime == datetime(2012, 9, 7, 1, 33, 5, 0, tzinfo=timezone.utc) @testing.requires_testing_data def test_get_anonymization_daysback(): """Test daysback querying for anonymization.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname) daysback_min, daysback_max = _get_anonymization_daysback(raw) # max_val off by 1 on Windows for some reason assert abs(daysback_min - 28461) < 2 and abs(daysback_max - 36880) < 2 raw2 = raw.copy() with raw2.info._unlock(): - raw2.info['meas_date'] = (np.int32(1158942080), np.int32(720100)) + raw2.info["meas_date"] = (np.int32(1158942080), np.int32(720100)) raw3 = raw.copy() with raw3.info._unlock(): - raw3.info['meas_date'] = (np.int32(914992080), np.int32(720100)) + raw3.info["meas_date"] = (np.int32(914992080), np.int32(720100)) daysback_min, daysback_max = get_anonymization_daysback([raw, raw2, raw3]) assert abs(daysback_min - 29850) < 2 and abs(daysback_max - 35446) < 2 raw4 = raw.copy() with raw4.info._unlock(): - raw4.info['meas_date'] = (np.int32(4992080), np.int32(720100)) + raw4.info["meas_date"] = (np.int32(4992080), np.int32(720100)) raw5 = raw.copy() with raw5.info._unlock(): - raw5.info['meas_date'] = None - daysback_min2, daysback_max2 = get_anonymization_daysback([raw, raw2, - raw3, raw5]) + raw5.info["meas_date"] = None + daysback_min2, daysback_max2 = get_anonymization_daysback([raw, raw2, raw3, raw5]) assert daysback_min2 == daysback_min and daysback_max2 == daysback_max - with pytest.raises(ValueError, match='The dataset spans more time'): - daysback_min, daysback_max = \ - get_anonymization_daysback([raw, raw2, raw4]) + with pytest.raises(ValueError, match="The dataset spans more time"): + daysback_min, daysback_max = get_anonymization_daysback([raw, raw2, raw4]) def test_create_fif(_bids_validate, tmp_path): """Test functionality for very short raw file created from data.""" - out_dir = tmp_path / 'out' - bids_root = tmp_path / 'bids' + out_dir = tmp_path / "out" + bids_root = tmp_path / "bids" out_dir.mkdir() bids_path = _bids_path.copy().update(root=bids_root) - sfreq, n_points = 1024., int(1e6) - info = mne.create_info(['ch1', 'ch2', 'ch3', 'ch4', 'ch5'], sfreq, - ['seeg'] * 5) + sfreq, n_points = 1024.0, int(1e6) + info = mne.create_info(["ch1", "ch2", "ch3", "ch4", "ch5"], sfreq, ["seeg"] * 5) rng = np.random.RandomState(99) raw = mne.io.RawArray(rng.random((5, n_points)) * 1e-6, info) - raw.info['line_freq'] = 60 - raw.save(op.join(out_dir, 'test-raw.fif')) - raw = _read_raw_fif(op.join(out_dir, 'test-raw.fif')) + 
raw.info["line_freq"] = 60 + raw.save(op.join(out_dir, "test-raw.fif")) + raw = _read_raw_fif(op.join(out_dir, "test-raw.fif")) write_raw_bids(raw, bids_path, verbose=False, overwrite=True) _bids_validate(bids_root) -@pytest.mark.parametrize('line_freq', [60, None]) +@pytest.mark.parametrize("line_freq", [60, None]) def test_line_freq(line_freq, _bids_validate, tmp_path): """Test the power line frequency is written correctly.""" - out_dir = tmp_path / 'out' + out_dir = tmp_path / "out" out_dir.mkdir() - bids_root = tmp_path / 'bids' + bids_root = tmp_path / "bids" bids_path = _bids_path.copy().update(root=bids_root) - sfreq, n_points = 1024., int(1e6) - info = mne.create_info(['ch1', 'ch2', 'ch3', 'ch4', 'ch5'], sfreq, - ['eeg'] * 5) + sfreq, n_points = 1024.0, int(1e6) + info = mne.create_info(["ch1", "ch2", "ch3", "ch4", "ch5"], sfreq, ["eeg"] * 5) rng = np.random.RandomState(99) raw = mne.io.RawArray(rng.random((5, n_points)) * 1e-6, info) - raw.save(op.join(out_dir, 'test-raw.fif')) - raw = _read_raw_fif(op.join(out_dir, 'test-raw.fif')) - raw.info['line_freq'] = line_freq + raw.save(op.join(out_dir, "test-raw.fif")) + raw = _read_raw_fif(op.join(out_dir, "test-raw.fif")) + raw.info["line_freq"] = line_freq write_raw_bids(raw, bids_path, verbose=False, overwrite=True) _bids_validate(bids_root) - eeg_json_fpath = (bids_path.copy() - .update(suffix='eeg', datatype='eeg', extension='.json') - .fpath) - with open(eeg_json_fpath, 'r', encoding='utf-8') as fin: + eeg_json_fpath = ( + bids_path.copy().update(suffix="eeg", datatype="eeg", extension=".json").fpath + ) + with open(eeg_json_fpath, "r", encoding="utf-8") as fin: eeg_json = json.load(fin) if line_freq == 60: - assert eeg_json['PowerLineFrequency'] == line_freq + assert eeg_json["PowerLineFrequency"] == line_freq elif line_freq is None: - assert eeg_json['PowerLineFrequency'] == 'n/a' + assert eeg_json["PowerLineFrequency"] == "n/a" -@requires_version('pybv', PYBV_VERSION) +@requires_version("pybv", PYBV_VERSION) @testing.requires_testing_data -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) -@pytest.mark.filterwarnings(warning_str['maxshield']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) +@pytest.mark.filterwarnings(warning_str["maxshield"]) def test_fif(_bids_validate, tmp_path): """Test functionality of the write_raw_bids conversion for fif.""" - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) # Drop unknown events. 
events = mne.read_events(events_fname) events = events[events[:, 2] != 0] raw = _read_raw_fif(raw_fname) - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - overwrite=False) + write_raw_bids(raw, bids_path, events=events, event_id=event_id, overwrite=False) # Read the file back in to check that the data has come through cleanly. # Events and bad channel information was read through JSON sidecar files. with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"): - read_raw_bids(bids_path=bids_path, extra_params=dict(foo='bar')) + read_raw_bids(bids_path=bids_path, extra_params=dict(foo="bar")) raw2 = read_raw_bids(bids_path=bids_path) - assert set(raw.info['bads']) == set(raw2.info['bads']) + assert set(raw.info["bads"]) == set(raw2.info["bads"]) events, _ = mne.events_from_annotations(raw2) events2 = mne.read_events(events_fname) events2 = events2[events2[:, 2] != 0] assert_array_equal(events2[:, 0], events[:, 0]) # check if write_raw_bids works when there is no stim channel - raw.set_channel_types({raw.ch_names[i]: 'misc' - for i in - mne.pick_types(raw.info, stim=True, meg=False)}) - bids_root = tmp_path / 'bids2' + raw.set_channel_types( + { + raw.ch_names[i]: "misc" + for i in mne.pick_types(raw.info, stim=True, meg=False) + } + ) + bids_root = tmp_path / "bids2" bids_path.update(root=bids_root) - with pytest.warns(RuntimeWarning, match='No events found or provided.'): + with pytest.warns(RuntimeWarning, match="No events found or provided."): write_raw_bids(raw, bids_path, overwrite=False) _bids_validate(bids_root) # try with eeg data only (conversion to bv) - bids_root = tmp_path / 'bids3' + bids_root = tmp_path / "bids3" bids_root.mkdir() bids_path.update(root=bids_root) raw = _read_raw_fif(raw_fname) raw.load_data() raw2 = raw.pick_types(meg=False, eeg=True, stim=True, eog=True, ecg=True) - raw2.save(bids_root / 'test-raw.fif', overwrite=True) - raw2 = mne.io.Raw(op.join(bids_root, 'test-raw.fif'), preload=False) + raw2.save(bids_root / "test-raw.fif", overwrite=True) + raw2 = mne.io.Raw(op.join(bids_root, "test-raw.fif"), preload=False) events = mne.find_events(raw2) - event_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, - 'visual/right': 4, 'smiley': 5, 'button': 32} - - epochs = mne.Epochs(raw2, events, event_id=event_id, tmin=-0.2, tmax=0.5, - preload=True) - bids_path = bids_path.update(datatype='eeg') - with pytest.warns(RuntimeWarning, - match='Converting data files to BrainVision format'): - write_raw_bids(raw2, bids_path, - events=events, event_id=event_id, - verbose=True, overwrite=False) - bids_dir = op.join(bids_root, 'sub-%s' % subject_id, - 'ses-%s' % session_id, 'eeg') + event_id = { + "auditory/left": 1, + "auditory/right": 2, + "visual/left": 3, + "visual/right": 4, + "smiley": 5, + "button": 32, + } + + epochs = mne.Epochs( + raw2, events, event_id=event_id, tmin=-0.2, tmax=0.5, preload=True + ) + bids_path = bids_path.update(datatype="eeg") + with pytest.warns( + RuntimeWarning, match="Converting data files to BrainVision format" + ): + write_raw_bids( + raw2, + bids_path, + events=events, + event_id=event_id, + verbose=True, + overwrite=False, + ) + bids_dir = op.join(bids_root, "sub-%s" % subject_id, "ses-%s" % session_id, "eeg") sidecar_basename = bids_path.copy() - for sidecar in ['channels.tsv', 'eeg.eeg', 'eeg.json', 'eeg.vhdr', - 'eeg.vmrk', 'events.tsv']: - suffix, extension = sidecar.split('.') - extension = f'.{extension}' + for sidecar in [ + "channels.tsv", + "eeg.eeg", + "eeg.json", + "eeg.vhdr", + 
"eeg.vmrk", + "events.tsv", + ]: + suffix, extension = sidecar.split(".") + extension = f".{extension}" sidecar_basename.update(suffix=suffix, extension=extension) assert op.isfile(op.join(bids_dir, sidecar_basename.basename)) - bids_path.update(root=bids_root, datatype='eeg') - with pytest.warns(RuntimeWarning, match='Not setting position'): + bids_path.update(root=bids_root, datatype="eeg") + with pytest.warns(RuntimeWarning, match="Not setting position"): raw2 = read_raw_bids(bids_path=bids_path) - os.remove(op.join(bids_root, 'test-raw.fif')) + os.remove(op.join(bids_root, "test-raw.fif")) events2, _ = mne.events_from_annotations(raw2, event_id) - epochs2 = mne.Epochs(raw2, events2, event_id=event_id, tmin=-0.2, tmax=0.5, - preload=True) + epochs2 = mne.Epochs( + raw2, events2, event_id=event_id, tmin=-0.2, tmax=0.5, preload=True + ) assert_array_almost_equal(raw.get_data(), raw2.get_data()) assert_array_almost_equal(epochs.get_data(), epochs2.get_data(), decimal=4) _bids_validate(bids_root) # write the same data but pretend it is empty room data: raw = _read_raw_fif(raw_fname) - meas_date = raw.info['meas_date'] + meas_date = raw.info["meas_date"] if not isinstance(meas_date, datetime): meas_date = datetime.fromtimestamp(meas_date[0], tz=timezone.utc) - er_date = meas_date.strftime('%Y%m%d') - er_bids_path = BIDSPath(subject='emptyroom', session=er_date, - task='noise', root=bids_root) + er_date = meas_date.strftime("%Y%m%d") + er_bids_path = BIDSPath( + subject="emptyroom", session=er_date, task="noise", root=bids_root + ) write_raw_bids(raw, er_bids_path, overwrite=False) - assert op.exists(op.join( - bids_root, 'sub-emptyroom', 'ses-{0}'.format(er_date), 'meg', - 'sub-emptyroom_ses-{0}_task-noise_meg.json'.format(er_date))) + assert op.exists( + op.join( + bids_root, + "sub-emptyroom", + "ses-{0}".format(er_date), + "meg", + "sub-emptyroom_ses-{0}_task-noise_meg.json".format(er_date), + ) + ) _bids_validate(bids_root) # test that an incorrect date raises an error. 
- er_bids_basename_bad = BIDSPath(subject='emptyroom', session='19000101', - task='noise', root=bids_root) - with pytest.raises(ValueError, match='The date provided'): + er_bids_basename_bad = BIDSPath( + subject="emptyroom", session="19000101", task="noise", root=bids_root + ) + with pytest.raises(ValueError, match="The date provided"): write_raw_bids(raw, er_bids_basename_bad, overwrite=False) # test that the acquisition time was written properly scans_tsv = BIDSPath( - subject=subject_id, session=session_id, - suffix='scans', extension='.tsv', root=bids_root) + subject=subject_id, + session=session_id, + suffix="scans", + extension=".tsv", + root=bids_root, + ) data = _from_tsv(scans_tsv) - assert data['acq_time'][0] == meas_date.strftime('%Y-%m-%dT%H:%M:%S.%fZ') + assert data["acq_time"][0] == meas_date.strftime("%Y-%m-%dT%H:%M:%S.%fZ") # give the raw object some fake participant data (potentially overwriting) raw = _read_raw_fif(raw_fname) - bids_path_meg = bids_path.copy().update(datatype='meg') - write_raw_bids(raw, bids_path_meg, events=events, - event_id=event_id, overwrite=True) + bids_path_meg = bids_path.copy().update(datatype="meg") + write_raw_bids(raw, bids_path_meg, events=events, event_id=event_id, overwrite=True) # try and write preloaded data raw = _read_raw_fif(raw_fname, preload=True) - with pytest.raises(ValueError, match='allow_preload'): - write_raw_bids(raw, bids_path_meg, events=events, - event_id=event_id, allow_preload=False, overwrite=False) + with pytest.raises(ValueError, match="allow_preload"): + write_raw_bids( + raw, + bids_path_meg, + events=events, + event_id=event_id, + allow_preload=False, + overwrite=False, + ) # test anonymize raw = _read_raw_fif(raw_fname) raw.anonymize() - raw_fname2 = tmp_path / 'tmp_anon' / 'sample_audvis_raw.fif' + raw_fname2 = tmp_path / "tmp_anon" / "sample_audvis_raw.fif" raw_fname2.parent.mkdir() raw.save(raw_fname2) # add some readme text - readme = op.join(bids_root, 'README') - with open(readme, 'w', encoding='utf-8-sig') as fid: - fid.write('Welcome to my dataset\n') + readme = op.join(bids_root, "README") + with open(readme, "w", encoding="utf-8-sig") as fid: + fid.write("Welcome to my dataset\n") bids_path2 = bids_path_meg.copy().update(subject=subject_id2) raw = _read_raw_fif(raw_fname2) - bids_output_path = write_raw_bids(raw, bids_path2, - events=events, - event_id=event_id, overwrite=False) + bids_output_path = write_raw_bids( + raw, bids_path2, events=events, event_id=event_id, overwrite=False + ) # check that the overwrite parameters work correctly for the participant # data # change the gender but don't force overwrite. 
raw = _read_raw_fif(raw_fname) - raw.info['subject_info'] = {'his_id': subject_id2, - 'birthday': (1994, 1, 26), 'sex': 2, 'hand': 1} + raw.info["subject_info"] = { + "his_id": subject_id2, + "birthday": (1994, 1, 26), + "sex": 2, + "hand": 1, + } with pytest.raises(FileExistsError, match="already exists"): # noqa: F821 - write_raw_bids(raw, bids_path2, - events=events, event_id=event_id, overwrite=False) + write_raw_bids( + raw, bids_path2, events=events, event_id=event_id, overwrite=False + ) # assert README has references in it - with open(readme, 'r', encoding='utf-8-sig') as fid: + with open(readme, "r", encoding="utf-8-sig") as fid: text = fid.read() - assert 'Welcome to my dataset\n' in text - assert REFERENCES['mne-bids'] in text - assert REFERENCES['meg'] in text - assert REFERENCES['eeg'] not in text - assert REFERENCES['ieeg'] not in text + assert "Welcome to my dataset\n" in text + assert REFERENCES["mne-bids"] in text + assert REFERENCES["meg"] in text + assert REFERENCES["eeg"] not in text + assert REFERENCES["ieeg"] not in text # now force the overwrite - write_raw_bids(raw, bids_path2, events=events, event_id=event_id, - overwrite=True) + write_raw_bids(raw, bids_path2, events=events, event_id=event_id, overwrite=True) - with open(readme, 'r', encoding='utf-8-sig') as fid: + with open(readme, "r", encoding="utf-8-sig") as fid: text = fid.read() - assert 'Welcome to my dataset\n' in text - assert REFERENCES['mne-bids'] in text - assert REFERENCES['meg'] in text + assert "Welcome to my dataset\n" in text + assert REFERENCES["mne-bids"] in text + assert REFERENCES["meg"] in text - with pytest.raises(ValueError, match='raw_file must be'): - write_raw_bids('blah', bids_path) + with pytest.raises(ValueError, match="raw_file must be"): + write_raw_bids("blah", bids_path) _bids_validate(bids_root) - assert op.exists(op.join(bids_root, 'participants.tsv')) + assert op.exists(op.join(bids_root, "participants.tsv")) # asserting that single fif files do not include the split key - files = glob(op.join(bids_output_path, 'sub-' + subject_id2, - 'ses-' + subject_id2, 'meg', '*.fif')) + files = glob( + op.join( + bids_output_path, "sub-" + subject_id2, "ses-" + subject_id2, "meg", "*.fif" + ) + ) ii = 0 for ii, FILE in enumerate(files): - assert 'split' not in FILE + assert "split" not in FILE assert ii < 1 # check that split files have split key raw = _read_raw_fif(raw_fname) - raw_fname3 = tmp_path / 'test-split-key' / 'sample_audvis_raw.fif' + raw_fname3 = tmp_path / "test-split-key" / "sample_audvis_raw.fif" raw_fname3.parent.mkdir() - raw.save(raw_fname3, buffer_size_sec=1.0, split_size='10MB', - split_naming='neuromag', overwrite=True) + raw.save( + raw_fname3, + buffer_size_sec=1.0, + split_size="10MB", + split_naming="neuromag", + overwrite=True, + ) raw = _read_raw_fif(raw_fname3) - subject_id3 = '03' + subject_id3 = "03" bids_path3 = bids_path.copy().update(subject=subject_id3) - bids_output_path = write_raw_bids(raw, bids_path3, - overwrite=False) - files = glob(op.join(bids_output_path, 'sub-' + subject_id3, - 'ses-' + subject_id3, 'meg', '*.fif')) + bids_output_path = write_raw_bids(raw, bids_path3, overwrite=False) + files = glob( + op.join( + bids_output_path, "sub-" + subject_id3, "ses-" + subject_id3, "meg", "*.fif" + ) + ) for FILE in files: - assert 'split' in FILE + assert "split" in FILE # test whether extra points in raw.info['dig'] are correctly used # to set DigitizedHeadShape in the JSON sidecar # unchanged sample data includes extra points meg_json_path = Path( 
_find_matching_sidecar( - bids_path=bids_path.copy().update( - root=bids_root, datatype='meg' - ), - suffix='meg', - extension='.json' + bids_path=bids_path.copy().update(root=bids_root, datatype="meg"), + suffix="meg", + extension=".json", ) ) - meg_json = json.loads(meg_json_path.read_text(encoding='utf-8')) - assert meg_json['DigitizedHeadPoints'] is True + meg_json = json.loads(meg_json_path.read_text(encoding="utf-8")) + assert meg_json["DigitizedHeadPoints"] is True # drop extra points from raw.info['dig'] and write again raw_no_extra_points = _read_raw_fif(raw_fname) new_dig = [] - for dig_point in raw_no_extra_points.info['dig']: - if dig_point['kind'] != FIFF.FIFFV_POINT_EXTRA: + for dig_point in raw_no_extra_points.info["dig"]: + if dig_point["kind"] != FIFF.FIFFV_POINT_EXTRA: new_dig.append(dig_point) with raw_no_extra_points.info._unlock(): - raw_no_extra_points.info['dig'] = new_dig + raw_no_extra_points.info["dig"] = new_dig - write_raw_bids(raw_no_extra_points, bids_path, events=events, - event_id=event_id, overwrite=True) + write_raw_bids( + raw_no_extra_points, bids_path, events=events, event_id=event_id, overwrite=True + ) meg_json_path = Path( _find_matching_sidecar( - bids_path=bids_path.copy().update( - root=bids_root, datatype='meg' - ), - suffix='meg', - extension='.json' + bids_path=bids_path.copy().update(root=bids_root, datatype="meg"), + suffix="meg", + extension=".json", ) ) - meg_json = json.loads(meg_json_path.read_text(encoding='utf-8')) - - assert meg_json['DigitizedHeadPoints'] is False - assert 'SoftwareFilters' in meg_json - software_filters = meg_json['SoftwareFilters'] - assert 'SpatialCompensation' in software_filters - assert 'GradientOrder' in software_filters['SpatialCompensation'] - assert (software_filters['SpatialCompensation']['GradientOrder'] == - raw.compensation_grade) + meg_json = json.loads(meg_json_path.read_text(encoding="utf-8")) + + assert meg_json["DigitizedHeadPoints"] is False + assert "SoftwareFilters" in meg_json + software_filters = meg_json["SoftwareFilters"] + assert "SpatialCompensation" in software_filters + assert "GradientOrder" in software_filters["SpatialCompensation"] + assert ( + software_filters["SpatialCompensation"]["GradientOrder"] + == raw.compensation_grade + ) -@pytest.mark.parametrize('format', ('fif_no_chpi', 'fif', 'ctf', 'kit')) -@pytest.mark.filterwarnings(warning_str['maxshield']) +@pytest.mark.parametrize("format", ("fif_no_chpi", "fif", "ctf", "kit")) +@pytest.mark.filterwarnings(warning_str["maxshield"]) @testing.requires_testing_data def test_chpi(_bids_validate, tmp_path, format): """Test writing of cHPI information.""" - if format == 'fif_no_chpi': - fif_raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + if format == "fif_no_chpi": + fif_raw_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif" + ) raw = _read_raw_fif(fif_raw_fname) - elif format == 'fif': - fif_raw_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif') - raw = _read_raw_fif(fif_raw_fname, allow_maxshield='yes') - elif format == 'ctf': - ctf_raw_fname = op.join(data_path, 'CTF', 'testdata_ctf.ds') + elif format == "fif": + fif_raw_fname = op.join(data_path, "SSS", "test_move_anon_raw.fif") + raw = _read_raw_fif(fif_raw_fname, allow_maxshield="yes") + elif format == "ctf": + ctf_raw_fname = op.join(data_path, "CTF", "testdata_ctf.ds") raw = _read_raw_ctf(ctf_raw_fname) - elif format == 'kit': - kit_data_path = op.join(base_path, 'kit', 'tests', 'data') - kit_raw_fname = 
op.join(kit_data_path, 'test.sqd') - kit_hpi_fname = op.join(kit_data_path, 'test_mrk.sqd') - kit_electrode_fname = op.join(kit_data_path, 'test.elp') - kit_headshape_fname = op.join(kit_data_path, 'test.hsp') - raw = _read_raw_kit(kit_raw_fname, mrk=kit_hpi_fname, - elp=kit_electrode_fname, hsp=kit_headshape_fname) - - bids_root = tmp_path / 'bids' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') + elif format == "kit": + kit_data_path = op.join(base_path, "kit", "tests", "data") + kit_raw_fname = op.join(kit_data_path, "test.sqd") + kit_hpi_fname = op.join(kit_data_path, "test_mrk.sqd") + kit_electrode_fname = op.join(kit_data_path, "test.elp") + kit_headshape_fname = op.join(kit_data_path, "test.hsp") + raw = _read_raw_kit( + kit_raw_fname, + mrk=kit_hpi_fname, + elp=kit_electrode_fname, + hsp=kit_headshape_fname, + ) + + bids_root = tmp_path / "bids" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") write_raw_bids(raw, bids_path) _bids_validate(bids_path.root) - meg_json = bids_path.copy().update(suffix='meg', extension='.json') - meg_json_data = json.loads(meg_json.fpath.read_text(encoding='utf-8')) - - if parse_version(mne.__version__) <= parse_version('0.23'): - assert 'ContinuousHeadLocalization' not in meg_json_data - assert 'HeadCoilFrequency' not in meg_json_data - elif format in ['fif_no_chpi', 'fif']: - if format == 'fif_no_chpi': - assert meg_json_data['ContinuousHeadLocalization'] is False - assert meg_json_data['HeadCoilFrequency'] == [] - elif format == 'fif': - assert meg_json_data['ContinuousHeadLocalization'] is True - assert_array_almost_equal(meg_json_data['HeadCoilFrequency'], - [83., 143., 203., 263., 323.]) - elif format == 'kit': + meg_json = bids_path.copy().update(suffix="meg", extension=".json") + meg_json_data = json.loads(meg_json.fpath.read_text(encoding="utf-8")) + + if parse_version(mne.__version__) <= parse_version("0.23"): + assert "ContinuousHeadLocalization" not in meg_json_data + assert "HeadCoilFrequency" not in meg_json_data + elif format in ["fif_no_chpi", "fif"]: + if format == "fif_no_chpi": + assert meg_json_data["ContinuousHeadLocalization"] is False + assert meg_json_data["HeadCoilFrequency"] == [] + elif format == "fif": + assert meg_json_data["ContinuousHeadLocalization"] is True + assert_array_almost_equal( + meg_json_data["HeadCoilFrequency"], [83.0, 143.0, 203.0, 263.0, 323.0] + ) + elif format == "kit": # no cHPI info is contained in the sample data - assert meg_json_data['ContinuousHeadLocalization'] is False - assert meg_json_data['HeadCoilFrequency'] == [] - elif format == 'ctf': - assert meg_json_data['ContinuousHeadLocalization'] is True - assert meg_json_data['HeadCoilFrequency'] == [] + assert meg_json_data["ContinuousHeadLocalization"] is False + assert meg_json_data["HeadCoilFrequency"] == [] + elif format == "ctf": + assert meg_json_data["ContinuousHeadLocalization"] is True + assert meg_json_data["HeadCoilFrequency"] == [] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_fif_dtype(_bids_validate, tmp_path): """Test functionality of the write_raw_bids conversion for fif.""" - bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - desired_fmt = 'int' + bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg") + raw_fname = op.join(data_path, "MEG", "sample", 
"sample_audvis_trunc_raw.fif") + desired_fmt = "int" raw = _read_raw_fif(raw_fname) # Fiddle with raw.orig_format -- this should never be done in "real-life", @@ -809,15 +902,21 @@ def test_fif_dtype(_bids_validate, tmp_path): @testing.requires_testing_data def test_fif_anonymize(_bids_validate, tmp_path): """Test write_raw_bids() with anonymization fif.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" bids_path = _bids_path.copy().update(root=bids_root) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) # Drop unknown events. events = mne.read_events(events_fname) @@ -825,126 +924,155 @@ def test_fif_anonymize(_bids_validate, tmp_path): # test keyword mne-bids anonymize raw = _read_raw_fif(raw_fname) - with pytest.raises(ValueError, match='`daysback` argument required'): - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - anonymize=dict(), overwrite=True) + with pytest.raises(ValueError, match="`daysback` argument required"): + write_raw_bids( + raw, + bids_path, + events=events, + event_id=event_id, + anonymize=dict(), + overwrite=True, + ) - bids_root = tmp_path / 'bids2' + bids_root = tmp_path / "bids2" bids_path.update(root=bids_root) raw = _read_raw_fif(raw_fname) - with pytest.warns(RuntimeWarning, match='daysback` is too small'): - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - anonymize=dict(daysback=400), overwrite=False) + with pytest.warns(RuntimeWarning, match="daysback` is too small"): + write_raw_bids( + raw, + bids_path, + events=events, + event_id=event_id, + anonymize=dict(daysback=400), + overwrite=False, + ) - bids_root = tmp_path / 'bids3' + bids_root = tmp_path / "bids3" bids_path.update(root=bids_root) raw = _read_raw_fif(raw_fname) - with pytest.raises(ValueError, match='`daysback` exceeds maximum value'): - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - anonymize=dict(daysback=40000), overwrite=False) + with pytest.raises(ValueError, match="`daysback` exceeds maximum value"): + write_raw_bids( + raw, + bids_path, + events=events, + event_id=event_id, + anonymize=dict(daysback=40000), + overwrite=False, + ) - bids_root = tmp_path / 'bids4' + bids_root = tmp_path / "bids4" bids_path.update(root=bids_root) raw = _read_raw_fif(raw_fname) - write_raw_bids(raw, bids_path, events=events, event_id=event_id, - anonymize=dict(daysback=30000, keep_his=True), - overwrite=False) + write_raw_bids( + raw, + bids_path, + events=events, + event_id=event_id, + anonymize=dict(daysback=30000, keep_his=True), + overwrite=False, + ) scans_tsv = BIDSPath( - subject=subject_id, session=session_id, - suffix='scans', extension='.tsv', - root=bids_root) + subject=subject_id, + session=session_id, + suffix="scans", + extension=".tsv", + root=bids_root, + ) data = _from_tsv(scans_tsv) # anonymize using MNE manually - anonymized_info = anonymize_info(info=raw.info, daysback=30000, - keep_his=True) - anon_date = anonymized_info['meas_date'].strftime("%Y-%m-%dT%H:%M:%S.%fZ") - 
-    assert data['acq_time'][0] == anon_date
+    anonymized_info = anonymize_info(info=raw.info, daysback=30000, keep_his=True)
+    anon_date = anonymized_info["meas_date"].strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+    assert data["acq_time"][0] == anon_date
 
     _bids_validate(bids_root)
 
 
-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'])
+@pytest.mark.filterwarnings(warning_str["channel_unit_changed"])
 @testing.requires_testing_data
 def test_fif_ias(tmp_path):
     """Test writing FIF files with internal active shielding."""
-    raw_fname = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_trunc_raw.fif')
+    raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif")
 
     raw = _read_raw_fif(raw_fname)
-    raw.set_channel_types({raw.ch_names[0]: 'ias'})
+    raw.set_channel_types({raw.ch_names[0]: "ias"})
 
-    this_path = BIDSPath(subject='sample', task='task', root=tmp_path)
+    this_path = BIDSPath(subject="sample", task="task", root=tmp_path)
 
     write_raw_bids(raw, this_path)
     raw = read_raw_bids(this_path)
-    assert raw.info['chs'][0]['kind'] == FIFF.FIFFV_IAS_CH
+    assert raw.info["chs"][0]["kind"] == FIFF.FIFFV_IAS_CH
 
 
-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'])
+@pytest.mark.filterwarnings(warning_str["channel_unit_changed"])
 @testing.requires_testing_data
 def test_fif_exci(tmp_path):
     """Test writing FIF files with excitation channel."""
-    raw_fname = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_trunc_raw.fif')
+    raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif")
     raw = _read_raw_fif(raw_fname)
-    raw.set_channel_types({raw.ch_names[0]: 'exci'})
-    this_path = BIDSPath(subject='sample', task='task', root=tmp_path)
+    raw.set_channel_types({raw.ch_names[0]: "exci"})
+    this_path = BIDSPath(subject="sample", task="task", root=tmp_path)
 
     write_raw_bids(raw, this_path)
     raw = read_raw_bids(this_path)
-    assert raw.info['chs'][0]['kind'] == FIFF.FIFFV_EXCI_CH
+    assert raw.info["chs"][0]["kind"] == FIFF.FIFFV_EXCI_CH
 
 
 @testing.requires_testing_data
 def test_kit(_bids_validate, tmp_path):
     """Test functionality of the write_raw_bids conversion for KIT data."""
-    bids_root = tmp_path / 'bids'
-    kit_path = op.join(base_path, 'kit', 'tests', 'data')
-    raw_fname = op.join(kit_path, 'test.sqd')
-    events_fname = op.join(kit_path, 'test-eve.txt')
-    hpi_fname = op.join(kit_path, 'test_mrk.sqd')
-    hpi_pre_fname = op.join(kit_path, 'test_mrk_pre.sqd')
-    hpi_post_fname = op.join(kit_path, 'test_mrk_post.sqd')
-    electrode_fname = op.join(kit_path, 'test.elp')
-    headshape_fname = op.join(kit_path, 'test.hsp')
+    bids_root = tmp_path / "bids"
+    kit_path = op.join(base_path, "kit", "tests", "data")
+    raw_fname = op.join(kit_path, "test.sqd")
+    events_fname = op.join(kit_path, "test-eve.txt")
+    hpi_fname = op.join(kit_path, "test_mrk.sqd")
+    hpi_pre_fname = op.join(kit_path, "test_mrk_pre.sqd")
+    hpi_post_fname = op.join(kit_path, "test_mrk_post.sqd")
+    electrode_fname = op.join(kit_path, "test.elp")
+    headshape_fname = op.join(kit_path, "test.hsp")
     event_id = dict(cond=128)
 
-    kit_bids_path = _bids_path.copy().update(acquisition=None,
-                                             root=bids_root,
-                                             suffix='meg')
+    kit_bids_path = _bids_path.copy().update(
+        acquisition=None, root=bids_root, suffix="meg"
+    )
 
     raw = _read_raw_kit(
-        raw_fname, mrk=hpi_fname, elp=electrode_fname,
-        hsp=headshape_fname)
-    write_raw_bids(raw, kit_bids_path,
-                   events=events_fname,
-                   event_id=event_id, overwrite=False)
+        raw_fname, mrk=hpi_fname, elp=electrode_fname, hsp=headshape_fname
+    )
+    write_raw_bids(
+        raw, kit_bids_path, events=events_fname, event_id=event_id, overwrite=False
+    )
 
     _bids_validate(bids_root)
-    assert op.exists(bids_root / 'participants.tsv')
+    assert op.exists(bids_root / "participants.tsv")
     with pytest.warns(RuntimeWarning, match=".* changed from V to NA"):
         read_raw_bids(bids_path=kit_bids_path)
 
     # ensure the marker file is produced in the right place
     marker_fname = BIDSPath(
-        subject=subject_id, session=session_id, task=task, run=run,
-        suffix='markers', extension='.sqd', datatype='meg',
-        root=bids_root)
+        subject=subject_id,
+        session=session_id,
+        task=task,
+        run=run,
+        suffix="markers",
+        extension=".sqd",
+        datatype="meg",
+        root=bids_root,
+    )
     assert op.exists(marker_fname)
 
     # test anonymize
     output_path = _test_anonymize(
-        tmp_path / 'tmp1', raw, kit_bids_path, events_fname, event_id
+        tmp_path / "tmp1", raw, kit_bids_path, events_fname, event_id
     )
     _bids_validate(output_path)
 
     # ensure the channels file has no STI 014 channel:
-    channels_tsv = marker_fname.copy().update(datatype='meg',
-                                              suffix='channels',
-                                              extension='.tsv')
+    channels_tsv = marker_fname.copy().update(
+        datatype="meg", suffix="channels", extension=".tsv"
+    )
     data = _from_tsv(channels_tsv)
-    assert 'STI 014' not in data['name']
+    assert "STI 014" not in data["name"]
 
     # ensure the marker file is produced in the right place
     assert op.exists(marker_fname)
@@ -953,165 +1081,179 @@ def test_kit(_bids_validate, tmp_path):
     event_data = np.loadtxt(events_fname)
     # make the data the wrong number of dimensions
     event_data_3d = np.atleast_3d(event_data)
-    other_output_path = tmp_path / 'tmp2'
+    other_output_path = tmp_path / "tmp2"
     bids_path = _bids_path.copy().update(root=other_output_path)
-    with pytest.raises(ValueError, match='two dimensions'):
-        write_raw_bids(raw, bids_path, events=event_data_3d,
-                       event_id=event_id, overwrite=True)
+    with pytest.raises(ValueError, match="two dimensions"):
+        write_raw_bids(
+            raw, bids_path, events=event_data_3d, event_id=event_id, overwrite=True
+        )
 
     # remove 3rd column
     event_data = event_data[:, :2]
-    with pytest.raises(ValueError, match='second dimension'):
-        write_raw_bids(raw, bids_path, events=event_data,
-                       event_id=event_id, overwrite=True)
+    with pytest.raises(ValueError, match="second dimension"):
+        write_raw_bids(
+            raw, bids_path, events=event_data, event_id=event_id, overwrite=True
+        )
 
     # test correct naming of marker files
     raw = _read_raw_kit(
-        raw_fname, mrk=[hpi_pre_fname, hpi_post_fname], elp=electrode_fname,
-        hsp=headshape_fname)
+        raw_fname,
+        mrk=[hpi_pre_fname, hpi_post_fname],
+        elp=electrode_fname,
+        hsp=headshape_fname,
+    )
     kit_bids_path.update(subject=subject_id2)
-    write_raw_bids(raw, kit_bids_path, events=events_fname,
-                   event_id=event_id, overwrite=False)
+    write_raw_bids(
+        raw, kit_bids_path, events=events_fname, event_id=event_id, overwrite=False
+    )
 
     _bids_validate(bids_root)
     # ensure the marker files are renamed correctly
-    marker_fname.update(acquisition='pre', subject=subject_id2)
+    marker_fname.update(acquisition="pre", subject=subject_id2)
     info = get_kit_info(marker_fname, False)[0]
-    assert info['meas_date'] == get_kit_info(hpi_pre_fname,
-                                             False)[0]['meas_date']
-    marker_fname.update(acquisition='post')
+    assert info["meas_date"] == get_kit_info(hpi_pre_fname, False)[0]["meas_date"]
+    marker_fname.update(acquisition="post")
     info = get_kit_info(marker_fname, False)[0]
-    assert info['meas_date'] == get_kit_info(hpi_post_fname,
-                                             False)[0]['meas_date']
+    assert info["meas_date"] == get_kit_info(hpi_post_fname, False)[0]["meas_date"]
 
     # check that providing markers in the wrong order raises an error
     raw = _read_raw_kit(
-        raw_fname, mrk=[hpi_post_fname, hpi_pre_fname], elp=electrode_fname,
-        hsp=headshape_fname)
-    with pytest.raises(ValueError, match='Markers'):
-        write_raw_bids(raw, kit_bids_path.update(subject=subject_id2),
-                       events=events_fname, event_id=event_id,
-                       overwrite=True)
+        raw_fname,
+        mrk=[hpi_post_fname, hpi_pre_fname],
+        elp=electrode_fname,
+        hsp=headshape_fname,
+    )
+    with pytest.raises(ValueError, match="Markers"):
+        write_raw_bids(
+            raw,
+            kit_bids_path.update(subject=subject_id2),
+            events=events_fname,
+            event_id=event_id,
+            overwrite=True,
+        )
 
     # check that everything works with MRK markers, and CON files
-    kit_path = op.join(data_path, 'KIT')
-    raw_fname = op.join(kit_path, 'data_berlin.con')
-    hpi_fname = op.join(kit_path, 'MQKIT_125.mrk')
-    electrode_fname = op.join(kit_path, 'MQKIT_125.elp')
-    headshape_fname = op.join(kit_path, 'MQKIT_125.hsp')
-    bids_root = tmp_path / 'bids_kit_mrk'
-    kit_bids_path = _bids_path.copy().update(acquisition=None,
-                                             root=bids_root,
-                                             suffix='meg')
+    kit_path = op.join(data_path, "KIT")
+    raw_fname = op.join(kit_path, "data_berlin.con")
+    hpi_fname = op.join(kit_path, "MQKIT_125.mrk")
+    electrode_fname = op.join(kit_path, "MQKIT_125.elp")
+    headshape_fname = op.join(kit_path, "MQKIT_125.hsp")
+    bids_root = tmp_path / "bids_kit_mrk"
+    kit_bids_path = _bids_path.copy().update(
+        acquisition=None, root=bids_root, suffix="meg"
+    )
     raw = _read_raw_kit(
-        raw_fname, mrk=hpi_fname, elp=electrode_fname,
-        hsp=headshape_fname)
+        raw_fname, mrk=hpi_fname, elp=electrode_fname, hsp=headshape_fname
+    )
     write_raw_bids(raw, kit_bids_path)
     _bids_validate(bids_root)
-    assert op.exists(bids_root / 'participants.tsv')
+    assert op.exists(bids_root / "participants.tsv")
     read_raw_bids(bids_path=kit_bids_path)
 
     # Check that we can successfully write even when elp, hsp, and mrk are not
     # supplied
     raw = _read_raw_kit(raw_fname)
-    bids_root = tmp_path / 'no_elp_hsp_mrk'
+    bids_root = tmp_path / "no_elp_hsp_mrk"
     kit_bids_path = kit_bids_path.copy().update(root=bids_root)
     write_raw_bids(raw=raw, bids_path=kit_bids_path)
     _bids_validate(bids_root)
 
 
-@pytest.mark.filterwarnings(warning_str['meas_date_set_to_none'])
+@pytest.mark.filterwarnings(warning_str["meas_date_set_to_none"])
 @testing.requires_testing_data
 def test_ctf(_bids_validate, tmp_path):
     """Test functionality of the write_raw_bids conversion for CTF data."""
-    raw_fname = data_path / 'CTF' / 'testdata_ctf.ds'
-    bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg')
+    raw_fname = data_path / "CTF" / "testdata_ctf.ds"
+    bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg")
 
     raw = _read_raw_ctf(raw_fname)
-    raw.info['line_freq'] = 60
+    raw.info["line_freq"] = 60
     write_raw_bids(raw, bids_path)
     write_raw_bids(raw, bids_path, overwrite=True)  # test overwrite
     _bids_validate(tmp_path)
-    with pytest.warns(RuntimeWarning, match='Did not find any events'):
-        raw = read_raw_bids(bids_path=bids_path,
-                            extra_params=dict(clean_names=False))
+    with pytest.warns(RuntimeWarning, match="Did not find any events"):
+        raw = read_raw_bids(bids_path=bids_path, extra_params=dict(clean_names=False))
 
     # test to check that running again with overwrite == False raises an error
     with pytest.raises(FileExistsError, match="already exists"):  # noqa: F821
         write_raw_bids(raw, bids_path)
 
-    assert op.exists(tmp_path / 'participants.tsv')
+    assert op.exists(tmp_path / "participants.tsv")
 
     # test anonymize
     raw = _read_raw_ctf(raw_fname)
-    with pytest.warns(RuntimeWarning,
-                      match='Converting to FIF for anonymization'):
-        output_path = _test_anonymize(tmp_path / 'tmp', raw, bids_path)
+    with pytest.warns(RuntimeWarning, match="Converting to FIF for anonymization"):
+        output_path = _test_anonymize(tmp_path / "tmp", raw, bids_path)
     _bids_validate(output_path)
 
     raw.set_meas_date(None)
     raw.anonymize()
-    with pytest.raises(ValueError, match='All measurement dates are None'):
+    with pytest.raises(ValueError, match="All measurement dates are None"):
         get_anonymization_daysback(raw)
 
 
-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'])
+@pytest.mark.filterwarnings(warning_str["channel_unit_changed"])
 def test_bti(_bids_validate, tmp_path):
     """Test functionality of the write_raw_bids conversion for BTi data."""
-    bti_path = op.join(base_path, 'bti', 'tests', 'data')
-    raw_fname = op.join(bti_path, 'test_pdf_linux')
-    config_fname = op.join(bti_path, 'test_config_linux')
-    headshape_fname = op.join(bti_path, 'test_hs_linux')
+    bti_path = op.join(base_path, "bti", "tests", "data")
+    raw_fname = op.join(bti_path, "test_pdf_linux")
+    config_fname = op.join(bti_path, "test_config_linux")
+    headshape_fname = op.join(bti_path, "test_hs_linux")
 
-    raw = _read_raw_bti(raw_fname, config_fname=config_fname,
-                        head_shape_fname=headshape_fname)
+    raw = _read_raw_bti(
+        raw_fname, config_fname=config_fname, head_shape_fname=headshape_fname
+    )
 
-    bids_path = _bids_path.copy().update(root=tmp_path, datatype='meg')
+    bids_path = _bids_path.copy().update(root=tmp_path, datatype="meg")
 
     # write the BIDS dataset description, then write BIDS files
     s_ds = [{"URL": "https://mne.testing.data"}]
     gen_by = [{"Name": "mne_bids"}]
     make_dataset_description(
-        path=tmp_path, name="BTi data", source_datasets=s_ds,
-        generated_by=gen_by, authors="a,b,c")
+        path=tmp_path,
+        name="BTi data",
+        source_datasets=s_ds,
+        generated_by=gen_by,
+        authors="a,b,c",
+    )
     write_raw_bids(raw, bids_path, verbose=True)
 
-    assert op.exists(tmp_path / 'participants.tsv')
+    assert op.exists(tmp_path / "participants.tsv")
     _bids_validate(tmp_path)
 
     raw = read_raw_bids(bids_path=bids_path)
 
     with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
-        read_raw_bids(bids_path=bids_path, extra_params=dict(foo='bar'))
+        read_raw_bids(bids_path=bids_path, extra_params=dict(foo="bar"))
 
     # test anonymize
-    raw = _read_raw_bti(raw_fname, config_fname=config_fname,
-                        head_shape_fname=headshape_fname)
-    with pytest.warns(RuntimeWarning,
-                      match='Converting to FIF for anonymization'):
-        output_path = _test_anonymize(tmp_path / 'tmp', raw, bids_path)
+    raw = _read_raw_bti(
+        raw_fname, config_fname=config_fname, head_shape_fname=headshape_fname
+    )
+    with pytest.warns(RuntimeWarning, match="Converting to FIF for anonymization"):
+        output_path = _test_anonymize(tmp_path / "tmp", raw, bids_path)
     _bids_validate(output_path)
 
 
-@pytest.mark.filterwarnings(warning_str['channel_unit_changed'],
-                            warning_str['unraisable_exception'])
+@pytest.mark.filterwarnings(
+    warning_str["channel_unit_changed"], warning_str["unraisable_exception"]
+)
 @testing.requires_testing_data
 def test_vhdr(_bids_validate, tmp_path):
     """Test write_raw_bids conversion for BrainVision data."""
-    bids_root = tmp_path / 'bids1'
-    bv_path = op.join(base_path, 'brainvision', 'tests', 'data')
-    raw_fname = op.join(bv_path, 'test.vhdr')
+    bids_root = tmp_path / "bids1"
+    bv_path = op.join(base_path, "brainvision", "tests", "data")
+    raw_fname = op.join(bv_path, "test.vhdr")
 
     raw = _read_raw_brainvision(raw_fname)
     # inject a bad channel
-    assert not raw.info['bads']
-    injected_bad = ['FP1']
-    raw.info['bads'] = injected_bad
+    assert not raw.info["bads"]
+    injected_bad = ["FP1"]
+    raw.info["bads"] = injected_bad
 
     bids_path = _bids_path.copy().update(root=bids_root)
-    bids_path_minimal = _bids_path_minimal.copy().update(root=bids_root,
-                                                         datatype='eeg')
+    bids_path_minimal = _bids_path_minimal.copy().update(root=bids_root, datatype="eeg")
 
     # write with injected bad channels
     write_raw_bids(raw, bids_path_minimal, overwrite=False)
@@ -1120,84 +1262,85 @@ def test_vhdr(_bids_validate, tmp_path):
     # read and also get the bad channels
     raw = read_raw_bids(bids_path=bids_path_minimal)
     with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
-        read_raw_bids(bids_path=bids_path_minimal,
-                      extra_params=dict(foo='bar'))
+        read_raw_bids(bids_path=bids_path_minimal, extra_params=dict(foo="bar"))
 
     # Check that injected bad channel shows up in raw after reading
-    np.testing.assert_array_equal(np.asarray(raw.info['bads']),
-                                  np.asarray(injected_bad))
+    np.testing.assert_array_equal(
+        np.asarray(raw.info["bads"]), np.asarray(injected_bad)
+    )
 
     # Test that correct channel units are written ... and that bad channel
     # is in channels.tsv
-    suffix, ext = 'channels', '.tsv'
-    channels_tsv_name = bids_path_minimal.copy().update(
-        suffix=suffix, extension=ext)
+    suffix, ext = "channels", ".tsv"
+    channels_tsv_name = bids_path_minimal.copy().update(suffix=suffix, extension=ext)
 
     data = _from_tsv(channels_tsv_name)
-    assert data['units'][data['name'].index('FP1')] == 'µV'
-    assert data['units'][data['name'].index('CP5')] == 'n/a'
-    assert data['status'][data['name'].index(injected_bad[0])] == 'bad'
-    status_description = data['status_description']
-    assert status_description[data['name'].index(injected_bad[0])] == 'n/a'
+    assert data["units"][data["name"].index("FP1")] == "µV"
+    assert data["units"][data["name"].index("CP5")] == "n/a"
+    assert data["status"][data["name"].index(injected_bad[0])] == "bad"
+    status_description = data["status_description"]
+    assert status_description[data["name"].index(injected_bad[0])] == "n/a"
 
     # check events.tsv is written
-    events_tsv_fname = channels_tsv_name.update(suffix='events')
+    events_tsv_fname = channels_tsv_name.update(suffix="events")
     assert op.exists(events_tsv_fname)
 
     # test anonymize and convert
-    if check_version('pybv', PYBV_VERSION):
+    if check_version("pybv", PYBV_VERSION):
         raw = _read_raw_brainvision(raw_fname)
-        output_path = _test_anonymize(tmp_path / 'tmp', raw, bids_path)
+        output_path = _test_anonymize(tmp_path / "tmp", raw, bids_path)
         _bids_validate(output_path)
 
     # Also cover iEEG
     # We use the same data and pretend that eeg channels are ecog
     raw = _read_raw_brainvision(raw_fname)
-    raw.set_channel_types({raw.ch_names[i]: 'ecog'
-                           for i in mne.pick_types(raw.info, eeg=True)})
-    bids_root = tmp_path / 'bids2'
-    bids_path.update(root=bids_root, datatype='ieeg')
+    raw.set_channel_types(
+        {raw.ch_names[i]: "ecog" for i in mne.pick_types(raw.info, eeg=True)}
+    )
+    bids_root = tmp_path / "bids2"
+    bids_path.update(root=bids_root, datatype="ieeg")
     write_raw_bids(raw, bids_path, overwrite=False)
     _bids_validate(bids_root)
 
     # Now let's test that the same works for new channel type 'dbs'
     raw = _read_raw_brainvision(raw_fname)
-    raw.set_channel_types({raw.ch_names[i]: 'dbs'
-                           for i in mne.pick_types(raw.info, eeg=True)})
-    bids_root = tmp_path / 'bids_dbs'
+    raw.set_channel_types(
+        {raw.ch_names[i]: "dbs" for i in mne.pick_types(raw.info, eeg=True)}
+    )
"bids_dbs" bids_path.update(root=bids_root) write_raw_bids(raw, bids_path, overwrite=False) _bids_validate(bids_root) # Test coords and impedance writing # first read the data and set a montage - mon_path = op.join(data_path, 'montage') - fname_vhdr = op.join(mon_path, 'bv_dig_test.vhdr') + mon_path = op.join(data_path, "montage") + fname_vhdr = op.join(mon_path, "bv_dig_test.vhdr") raw = _read_raw_brainvision(fname_vhdr, preload=False) - raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'}) - fname_bvct = op.join(mon_path, 'captrak_coords.bvct') + raw.set_channel_types({"HEOG": "eog", "VEOG": "eog", "ECG": "ecg"}) + fname_bvct = op.join(mon_path, "captrak_coords.bvct") montage = mne.channels.read_dig_captrak(fname_bvct) raw.set_montage(montage) # convert to BIDS - bids_root = tmp_path / 'bids3' - bids_path.update(root=bids_root, datatype='eeg') + bids_root = tmp_path / "bids3" + bids_path.update(root=bids_root, datatype="eeg") write_raw_bids(raw, bids_path) # check impedances electrodes_fpath = _find_matching_sidecar( - bids_path.copy().update(root=bids_root), - suffix='electrodes', extension='.tsv') + bids_path.copy().update(root=bids_root), suffix="electrodes", extension=".tsv" + ) tsv = _from_tsv(electrodes_fpath) - assert len(tsv.get('impedance', {})) > 0 - assert tsv['impedance'][-3:] == ['n/a', 'n/a', 'n/a'] - assert tsv['impedance'][:3] == ['5.0', '2.0', '4.0'] + assert len(tsv.get("impedance", {})) > 0 + assert tsv["impedance"][-3:] == ["n/a", "n/a", "n/a"] + assert tsv["impedance"][:3] == ["5.0", "2.0", "4.0"] # check coordsystem coordsystem_fpath = _find_matching_sidecar( - bids_path.copy().update(root=bids_root), - suffix='coordsystem', extension='.json') - with open(coordsystem_fpath, 'r') as fin: + bids_path.copy().update(root=bids_root), suffix="coordsystem", extension=".json" + ) + with open(coordsystem_fpath, "r") as fin: coordsys_data = json.load(fin) descr = coordsys_data.get("EEGCoordinateSystemDescription", "") assert descr == BIDS_COORD_FRAME_DESCRIPTIONS["captrak"] @@ -1205,29 +1348,33 @@ def test_vhdr(_bids_validate, tmp_path): # electrodes file path should only contain # sub/ses/acq/space at most entities = get_entities_from_fname(electrodes_fpath) - assert all([entity is None for key, entity in entities.items() - if key not in ['subject', 'session', - 'acquisition', 'space']]) + assert all( + [ + entity is None + for key, entity in entities.items() + if key not in ["subject", "session", "acquisition", "space"] + ] + ) -@pytest.mark.parametrize('dir_name, fname, reader', test_eegieeg_data) +@pytest.mark.parametrize("dir_name, fname, reader", test_eegieeg_data) @pytest.mark.filterwarnings( - warning_str['nasion_not_found'], - warning_str['brainvision_unit'], - warning_str['channel_unit_changed'], - warning_str['cnt_warning1'], - warning_str['cnt_warning2'], - warning_str['no_hand'], - warning_str['no_montage'], + warning_str["nasion_not_found"], + warning_str["brainvision_unit"], + warning_str["channel_unit_changed"], + warning_str["cnt_warning1"], + warning_str["cnt_warning2"], + warning_str["no_hand"], + warning_str["no_montage"], ) @testing.requires_testing_data def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path): """Test write_raw_bids conversion for EEG/iEEG data formats.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" raw_fname = data_path / dir_name / fname # the BIDSPath for test datasets to get written to - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg') + bids_path = 
+    bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg")
 
     raw = reader(raw_fname)
     raw.set_montage(None)  # remove montage
@@ -1235,12 +1382,12 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
     kwargs = dict(raw=raw, bids_path=bids_path, overwrite=True)
 
     warning_to_catch = {
-        'EDF': None,
-        'curry': 'Encountered data in "int" format. Converting to float32.',
-        'NihonKohden': 'Encountered data in "short" format',
-        'CNT': 'Encountered data in "int" format. Converting to float32.',
-        'EGI': None,
-        'Persyst': 'Encountered data in "double" format'
+        "EDF": None,
+        "curry": 'Encountered data in "int" format. Converting to float32.',
+        "NihonKohden": 'Encountered data in "short" format',
+        "CNT": 'Encountered data in "int" format. Converting to float32.',
+        "EGI": None,
+        "Persyst": 'Encountered data in "double" format',
     }
 
     if warning_to_catch[dir_name] is None:
@@ -1249,13 +1396,11 @@
         with pytest.warns(RuntimeWarning, match=warning_to_catch[dir_name]):
             bids_output_path = write_raw_bids(**kwargs)
 
-    with pytest.raises(ValueError,
-                       match='You passed events, but no event_id '):
+    with pytest.raises(ValueError, match="You passed events, but no event_id "):
         write_raw_bids(raw, bids_path, events=events)
 
     # check events.tsv is written
-    events_tsv_fname = bids_output_path.copy().update(suffix='events',
-                                                      extension='.tsv')
+    events_tsv_fname = bids_output_path.copy().update(suffix="events", extension=".tsv")
     if events.size == 0:
         assert not events_tsv_fname.fpath.exists()
     else:
@@ -1267,10 +1412,10 @@
         del raw2, events2
 
     # alter some channels manually
-    raw.rename_channels({raw.ch_names[0]: 'EOGtest'})
-    raw.info['chs'][0]['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
-    raw.rename_channels({raw.ch_names[1]: 'EMG'})
-    raw.set_channel_types({'EMG': 'emg'})
+    raw.rename_channels({raw.ch_names[0]: "EOGtest"})
+    raw.info["chs"][0]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR
+    raw.rename_channels({raw.ch_names[1]: "EMG"})
+    raw.set_channel_types({"EMG": "emg"})
 
     # Test we can overwrite dataset_description.json
     kwargs = dict(raw=raw, bids_path=bids_path, overwrite=True)
@@ -1280,13 +1425,17 @@
        with pytest.warns(RuntimeWarning, match=warning_to_catch[dir_name]):
            bids_output_path = write_raw_bids(**kwargs)
 
-    make_dataset_description(path=bids_root, name="test",
-                             authors=["test1", "test2"], overwrite=True,
-                             dataset_type="raw",
-                             ethics_approvals=["approved by S."],
-                             hed_version="No HED used (just testing)")
+    make_dataset_description(
+        path=bids_root,
+        name="test",
+        authors=["test1", "test2"],
+        overwrite=True,
+        dataset_type="raw",
+        ethics_approvals=["approved by S."],
+        hed_version="No HED used (just testing)",
+    )
     dataset_description_fpath = op.join(bids_root, "dataset_description.json")
-    with open(dataset_description_fpath, 'r', encoding='utf-8') as f:
+    with open(dataset_description_fpath, "r", encoding="utf-8") as f:
         dataset_description_json = json.load(f)
         assert dataset_description_json["Authors"] == ["test1", "test2"]
 
@@ -1301,7 +1450,7 @@
 
     # dataset_description.json files should not be overwritten inside
     # write_raw_bids calls
-    with open(dataset_description_fpath, 'r', encoding='utf-8') as f:
+    with open(dataset_description_fpath, "r", encoding="utf-8") as f:
         dataset_description_json = json.load(f)
         assert dataset_description_json["Authors"] == ["test1", "test2"]
@@ -1309,54 +1458,54 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
     # some channels (there's now a mismatch between BIDS and Raw channel
     # names, and BIDS should take precedence)
     raw_read = read_raw_bids(bids_path=bids_path)
-    assert raw_read.ch_names[0] == 'EOGtest'
-    assert raw_read.ch_names[1] == 'EMG'
+    assert raw_read.ch_names[0] == "EOGtest"
+    assert raw_read.ch_names[1] == "EMG"
 
     with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
-        read_raw_bids(bids_path=bids_path, extra_params=dict(foo='bar'))
+        read_raw_bids(bids_path=bids_path, extra_params=dict(foo="bar"))
 
     bids_path = bids_path.copy().update(run=run2)
     # add data in as a montage, but .set_montage only works for some
     # channel types, so make a specific selection
-    ch_names = [ch_name
-                for ch_name, ch_type in
-                zip(raw.ch_names, raw.get_channel_types())
-                if ch_type in ['eeg', 'seeg', 'ecog', 'dbs', 'fnirs']]
+    ch_names = [
+        ch_name
+        for ch_name, ch_type in zip(raw.ch_names, raw.get_channel_types())
+        if ch_type in ["eeg", "seeg", "ecog", "dbs", "fnirs"]
+    ]
     elec_locs = np.random.random((len(ch_names), 3))
 
     # test what happens if there is some nan entries
     elec_locs[-1, :] = [np.nan, np.nan, np.nan]
     ch_pos = dict(zip(ch_names, elec_locs.tolist()))
-    eeg_montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
-                                                coord_frame='head')
+    eeg_montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="head")
     raw.set_montage(eeg_montage)
 
     # remove the 3 fiducial digitization points
     for i in range(3):
-        del raw.info['dig'][i]
+        del raw.info["dig"][i]
 
     # electrodes are not written w/o landmarks
-    with pytest.raises(RuntimeError, match="'head' coordinate frame must "
-                                           "contain nasion"):
+    with pytest.raises(
+        RuntimeError, match="'head' coordinate frame must " "contain nasion"
+    ):
         if warning_to_catch[dir_name] is None:
             write_raw_bids(**kwargs)
         else:
-            with pytest.warns(
-                RuntimeWarning, match=warning_to_catch[dir_name]
-            ):
+            with pytest.warns(RuntimeWarning, match=warning_to_catch[dir_name]):
                 write_raw_bids(**kwargs)
 
-    electrodes_fpath = _find_matching_sidecar(bids_path,
-                                              suffix='electrodes',
-                                              extension='.tsv',
-                                              on_error='ignore')
+    electrodes_fpath = _find_matching_sidecar(
+        bids_path, suffix="electrodes", extension=".tsv", on_error="ignore"
+    )
     assert electrodes_fpath is None
 
     # with landmarks, eeg montage is written
-    eeg_montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
-                                                coord_frame='head',
-                                                nasion=[1, 0, 0],
-                                                lpa=[0, 1, 0],
-                                                rpa=[0, 0, 1])
+    eeg_montage = mne.channels.make_dig_montage(
+        ch_pos=ch_pos,
+        coord_frame="head",
+        nasion=[1, 0, 0],
+        lpa=[0, 1, 0],
+        rpa=[0, 0, 1],
+    )
     raw.set_montage(eeg_montage)
     kwargs = dict(raw=raw, bids_path=bids_path, overwrite=True)
     if warning_to_catch[dir_name] is None:
@@ -1365,57 +1514,68 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
         with pytest.warns(RuntimeWarning, match=warning_to_catch[dir_name]):
             bids_output_path = write_raw_bids(**kwargs)
 
-    electrodes_fpath = _find_matching_sidecar(bids_path,
-                                              suffix='electrodes',
-                                              extension='.tsv')
+    electrodes_fpath = _find_matching_sidecar(
+        bids_path, suffix="electrodes", extension=".tsv"
+    )
     assert op.exists(electrodes_fpath)
     _bids_validate(bids_root)
 
     # ensure there is an EMG channel in the channels.tsv:
     channels_tsv = BIDSPath(
-        subject=subject_id, session=session_id, task=task, run=run,
-        suffix='channels', extension='.tsv', acquisition=acq,
-        root=bids_root, datatype='eeg')
+        subject=subject_id,
+        session=session_id,
+        task=task,
+        run=run,
+        suffix="channels",
+        extension=".tsv",
+        acquisition=acq,
+        root=bids_root,
+        datatype="eeg",
+    )
     data = _from_tsv(channels_tsv)
-    assert 'ElectroMyoGram' in data['description']
+    assert "ElectroMyoGram" in data["description"]
 
     # check that the scans list contains two scans
     scans_tsv = BIDSPath(
-        subject=subject_id, session=session_id,
-        suffix='scans', extension='.tsv',
-        root=bids_root)
+        subject=subject_id,
+        session=session_id,
+        suffix="scans",
+        extension=".tsv",
+        root=bids_root,
+    )
     data = _from_tsv(scans_tsv)
     assert len(list(data.values())[0]) == 2
 
     # check that scans list is properly converted to brainvision
-    if check_version('pybv', PYBV_VERSION) or dir_name == 'EDF':
-        if raw.info['meas_date'] is not None:
+    if check_version("pybv", PYBV_VERSION) or dir_name == "EDF":
+        if raw.info["meas_date"] is not None:
             daysback_min, daysback_max = _get_anonymization_daysback(raw)
             daysback = (daysback_min + daysback_max) // 2
         else:
             # just pass back any arbitrary number if no measurement date
             daysback = 3300
-        kwargs = dict(raw=raw, bids_path=bids_path,
-                      anonymize=dict(daysback=daysback), overwrite=True)
-        if dir_name == 'EDF':
+        kwargs = dict(
+            raw=raw,
+            bids_path=bids_path,
+            anonymize=dict(daysback=daysback),
+            overwrite=True,
+        )
+        if dir_name == "EDF":
             match = r"^EDF\/EDF\+\/BDF files contain two fields .*"
             with pytest.warns(RuntimeWarning, match=match):
                 write_raw_bids(**kwargs)
         elif warning_to_catch[dir_name] is None:
             bids_output_path = write_raw_bids(**kwargs)
         else:
-            with pytest.warns(
-                RuntimeWarning, match=warning_to_catch[dir_name]
-            ):
+            with pytest.warns(RuntimeWarning, match=warning_to_catch[dir_name]):
                 bids_output_path = write_raw_bids(**kwargs)
 
         data = _from_tsv(scans_tsv)
         bids_path = bids_path.copy()
-        if dir_name != 'EDF':
-            bids_path = bids_path.update(suffix='eeg', extension='.vhdr')
-        assert any([bids_path.basename in fname
-                    for fname in data['filename']])
+        if dir_name != "EDF":
+            bids_path = bids_path.update(suffix="eeg", extension=".vhdr")
+        assert any([bids_path.basename in fname for fname in data["filename"]])
 
     # Also cover iEEG
     # We use the same data and pretend that eeg channels are ecog
@@ -1426,10 +1586,9 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
 
     # convert channel types to ECoG and write BIDS
     eeg_picks = mne.pick_types(ieeg_raw.info, eeg=True)
-    ieeg_raw.set_channel_types({raw.ch_names[i]: 'ecog'
-                                for i in eeg_picks})
-    bids_root = tmp_path / 'bids2'
-    bids_path.update(root=bids_root, datatype='ieeg')
+    ieeg_raw.set_channel_types({raw.ch_names[i]: "ecog" for i in eeg_picks})
+    bids_root = tmp_path / "bids2"
+    bids_path.update(root=bids_root, datatype="ieeg")
     kwargs = dict(raw=ieeg_raw, bids_path=bids_path, overwrite=True)
     if warning_to_catch[dir_name] is None:
         bids_output_path = write_raw_bids(**kwargs)
@@ -1440,28 +1599,28 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
     _bids_validate(bids_root)
 
     # assert README has references in it
-    readme = op.join(bids_root, 'README')
-    with open(readme, 'r', encoding='utf-8-sig') as fid:
+    readme = op.join(bids_root, "README")
+    with open(readme, "r", encoding="utf-8-sig") as fid:
         text = fid.read()
-        assert REFERENCES['ieeg'] in text
-        assert REFERENCES['meg'] not in text
-        assert REFERENCES['eeg'] not in text
+        assert REFERENCES["ieeg"] in text
+        assert REFERENCES["meg"] not in text
+        assert REFERENCES["eeg"] not in text
 
     # test writing electrode coordinates (.tsv)
     # and coordinate system (.json)
     # .set_montage only works for some channel types -> specific selection
-    ch_names = [ch_name
-                for ch_name, ch_type in
-                zip(ieeg_raw.ch_names, ieeg_raw.get_channel_types())
-                if ch_type in ['eeg', 'seeg', 'ecog', 'dbs', 'fnirs']]
+    ch_names = [
+        ch_name
+        for ch_name, ch_type in zip(ieeg_raw.ch_names, ieeg_raw.get_channel_types())
+        if ch_type in ["eeg", "seeg", "ecog", "dbs", "fnirs"]
+    ]
     elec_locs = np.random.random((len(ch_names), 3)).tolist()
     ch_pos = dict(zip(ch_names, elec_locs))
-    ecog_montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
-                                                 coord_frame='mni_tal')
+    ecog_montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="mni_tal")
     ieeg_raw.set_montage(ecog_montage)
-    bids_root = tmp_path / 'bids3'
-    bids_path.update(root=bids_root, datatype='ieeg')
+    bids_root = tmp_path / "bids3"
+    bids_path.update(root=bids_root, datatype="ieeg")
     kwargs = dict(raw=ieeg_raw, bids_path=bids_path, overwrite=True)
     if warning_to_catch[dir_name] is None:
         bids_output_path = write_raw_bids(**kwargs)
@@ -1474,36 +1633,49 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
     # XXX: Should be improved with additional coordinate system descriptions
     # iEEG montages written from mne-python end up as "Other"
     bids_path.update(root=bids_root)
-    electrodes_path = bids_path.copy().update(
-        suffix='electrodes', extension='.tsv',
-        space='fsaverage', task=None, run=None
-    ).fpath
-    coordsystem_path = bids_path.copy().update(
-        suffix='coordsystem', extension='.json',
-        space='fsaverage', task=None, run=None
-    ).fpath
+    electrodes_path = (
+        bids_path.copy()
+        .update(
+            suffix="electrodes",
+            extension=".tsv",
+            space="fsaverage",
+            task=None,
+            run=None,
+        )
+        .fpath
+    )
+    coordsystem_path = (
+        bids_path.copy()
+        .update(
+            suffix="coordsystem",
+            extension=".json",
+            space="fsaverage",
+            task=None,
+            run=None,
+        )
+        .fpath
+    )
     assert electrodes_path.exists()
     assert coordsystem_path.exists()
 
     # Test we get the correct sidecar via _find_matching_sidecar()
-    electrodes_fname = _find_matching_sidecar(bids_path,
-                                              suffix='electrodes',
-                                              extension='.tsv')
-    coordsystem_fname = _find_matching_sidecar(bids_path,
-                                               suffix='coordsystem',
-                                               extension='.json')
+    electrodes_fname = _find_matching_sidecar(
+        bids_path, suffix="electrodes", extension=".tsv"
+    )
+    coordsystem_fname = _find_matching_sidecar(
+        bids_path, suffix="coordsystem", extension=".json"
+    )
     electrodes_fname == str(electrodes_fpath)
     coordsystem_fname == str(coordsystem_path)
 
-    coordsystem_json = json.loads(coordsystem_path.read_text(encoding='utf-8'))
-    assert coordsystem_json['iEEGCoordinateSystem'] == 'fsaverage'
+    coordsystem_json = json.loads(coordsystem_path.read_text(encoding="utf-8"))
+    assert coordsystem_json["iEEGCoordinateSystem"] == "fsaverage"
 
     # test writing to ACPC
-    ecog_montage = mne.channels.make_dig_montage(ch_pos=ch_pos,
-                                                 coord_frame='ras')
+    ecog_montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame="ras")
 
-    bids_root = tmp_path / 'bids4'
-    bids_path.update(root=bids_root, datatype='ieeg')
+    bids_root = tmp_path / "bids4"
+    bids_path.update(root=bids_root, datatype="ieeg")
     # test works if ACPC-aligned is specified
     kwargs.update(montage=ecog_montage, acpc_aligned=True)
     if warning_to_catch[dir_name] is None:
@@ -1515,108 +1687,118 @@ def test_eegieeg(dir_name, fname, reader, _bids_validate, tmp_path):
     _bids_validate(bids_root)
 
     bids_path.update(root=bids_root)
-    electrodes_path = bids_path.copy().update(
-        suffix='electrodes', extension='.tsv', space='ACPC',
-        task=None, run=None
-    ).fpath
-    coordsystem_path = bids_path.copy().update(
-        suffix='coordsystem', extension='.json', space='ACPC',
-        task=None, run=None
-    ).fpath
+    electrodes_path = (
+        bids_path.copy()
+        .update(
+            suffix="electrodes", extension=".tsv", space="ACPC", task=None, run=None
+        )
+        .fpath
+    )
+    coordsystem_path = (
+        bids_path.copy()
+        .update(
+            suffix="coordsystem", extension=".json", space="ACPC", task=None, run=None
+        )
+        .fpath
+    )
     assert electrodes_path.exists()
     assert coordsystem_path.exists()
 
     # Test we get the correct sidecar via _find_matching_sidecar()
-    electrodes_fname = _find_matching_sidecar(bids_path,
-                                              suffix='electrodes',
-                                              extension='.tsv')
-    coordsystem_fname = _find_matching_sidecar(bids_path,
-                                               suffix='coordsystem',
-                                               extension='.json')
+    electrodes_fname = _find_matching_sidecar(
+        bids_path, suffix="electrodes", extension=".tsv"
+    )
+    coordsystem_fname = _find_matching_sidecar(
+        bids_path, suffix="coordsystem", extension=".json"
+    )
     electrodes_fname == str(electrodes_fpath)
     coordsystem_fname == str(coordsystem_path)
 
-    coordsystem_json = json.loads(coordsystem_path.read_text(encoding='utf-8'))
-    assert coordsystem_json['iEEGCoordinateSystem'] == 'ACPC'
+    coordsystem_json = json.loads(coordsystem_path.read_text(encoding="utf-8"))
+    assert coordsystem_json["iEEGCoordinateSystem"] == "ACPC"
 
     kwargs.update(acpc_aligned=False)
-    with pytest.raises(RuntimeError, match='`acpc_aligned` is False'):
+    with pytest.raises(RuntimeError, match="`acpc_aligned` is False"):
         write_raw_bids(**kwargs)
 
     # test anonymize and convert
-    if check_version('pybv', PYBV_VERSION) or dir_name == 'EDF':
+    if check_version("pybv", PYBV_VERSION) or dir_name == "EDF":
         raw = reader(raw_fname)
-        bids_path.update(root=bids_root, datatype='eeg')
+        bids_path.update(root=bids_root, datatype="eeg")
         kwargs = dict(raw=raw, bids_path=bids_path, overwrite=True)
-        if dir_name == 'NihonKohden':
-            with pytest.warns(RuntimeWarning,
-                              match='Encountered data in "short" format'):
+        if dir_name == "NihonKohden":
+            with pytest.warns(
+                RuntimeWarning, match='Encountered data in "short" format'
+            ):
                 write_raw_bids(**kwargs)
-            output_path = _test_anonymize(tmp_path / 'a', raw, bids_path)
-        elif dir_name == 'EDF':
+            output_path = _test_anonymize(tmp_path / "a", raw, bids_path)
+        elif dir_name == "EDF":
            match = r"^EDF\/EDF\+\/BDF files contain two fields .*"
             with pytest.warns(RuntimeWarning, match=match):
                 write_raw_bids(**kwargs)  # Just copies.
-            output_path = _test_anonymize(tmp_path / 'b', raw, bids_path)
-        elif dir_name == 'CNT':
-            with pytest.warns(RuntimeWarning,
-                              match='Encountered data in "int" format. '
-                                    'Converting to float32.'):
+            output_path = _test_anonymize(tmp_path / "b", raw, bids_path)
+        elif dir_name == "CNT":
+            with pytest.warns(
+                RuntimeWarning,
+                match='Encountered data in "int" format. ' "Converting to float32.",
+            ):
                 write_raw_bids(**kwargs)
-            output_path = _test_anonymize(tmp_path / 'c', raw, bids_path)
-        elif dir_name == 'EGI':
            write_raw_bids(**kwargs)
-            output_path = _test_anonymize(tmp_path / 'd', raw, bids_path)
-        elif dir_name == 'curry':
-            with pytest.warns(RuntimeWarning,
-                              match='Encountered data in "int" format. '
-                                    'Converting to float32.'):
+            output_path = _test_anonymize(tmp_path / "c", raw, bids_path)
+        elif dir_name == "EGI":
+            write_raw_bids(**kwargs)
+            output_path = _test_anonymize(tmp_path / "d", raw, bids_path)
+        elif dir_name == "curry":
+            with pytest.warns(
+                RuntimeWarning,
' "Converting to float32.", + ): write_raw_bids(**kwargs) - output_path = _test_anonymize(tmp_path / 'd', raw, bids_path) + output_path = _test_anonymize(tmp_path / "d", raw, bids_path) else: - with pytest.warns(RuntimeWarning, - match='Encountered data in "double" format'): + with pytest.warns( + RuntimeWarning, match='Encountered data in "double" format' + ): write_raw_bids(**kwargs) # Converts. - output_path = _test_anonymize(tmp_path / 'e', raw, bids_path) + output_path = _test_anonymize(tmp_path / "e", raw, bids_path) _bids_validate(output_path) @testing.requires_testing_data def test_snirf(_bids_validate, tmp_path): """Test write_raw_bids conversion for SNIRF data.""" - raw_fname = op.join(data_path, 'SNIRF', 'MNE-NIRS', '20220217', - '20220217_nirx_15_3_recording.snirf') - bids_path = _bids_path.copy().update(root=tmp_path, datatype='nirs') + raw_fname = op.join( + data_path, "SNIRF", "MNE-NIRS", "20220217", "20220217_nirx_15_3_recording.snirf" + ) + bids_path = _bids_path.copy().update(root=tmp_path, datatype="nirs") raw = _read_raw_snirf(raw_fname) write_raw_bids(raw, bids_path, overwrite=False) _bids_validate(tmp_path) - subjects = get_entity_vals(tmp_path, 'subject') + subjects = get_entity_vals(tmp_path, "subject") assert len(subjects) == 1 - sessions = get_entity_vals(tmp_path, 'session') - assert sessions == ['01'] + sessions = get_entity_vals(tmp_path, "session") + assert sessions == ["01"] rawbids = read_raw_bids(bids_path) assert rawbids.annotations.onset[0] == raw.annotations.onset[0] assert rawbids.annotations.description[2] == raw.annotations.description[2] - assert rawbids.annotations.description[2] == '1.0' + assert rawbids.annotations.description[2] == "1.0" assert raw.times[-1] == rawbids.times[-1] # Test common modifications when generating BIDS-formatted data. 
     raw.annotations.duration = [2, 7, 1]
-    raw.annotations.rename({'1.0': 'Control',
-                            '2.0': 'Tapping/Left',
-                            '4.0': 'Tapping/Right'})
+    raw.annotations.rename(
+        {"1.0": "Control", "2.0": "Tapping/Left", "4.0": "Tapping/Right"}
+    )
     write_raw_bids(raw, bids_path, overwrite=True)
     _bids_validate(tmp_path)
 
     rawbids = read_raw_bids(bids_path)
     assert rawbids.annotations.onset[0] == raw.annotations.onset[0]
-    assert rawbids.annotations.description[2] == 'Control'
+    assert rawbids.annotations.description[2] == "Control"
     assert raw.times[-1] == rawbids.times[-1]
 
-    with pytest.raises(ValueError,
-                       match='The input "format" FIF is not an accepted.*'):
+    with pytest.raises(ValueError, match='The input "format" FIF is not an accepted.*'):
         write_raw_bids(raw, bids_path, overwrite=True, format="FIF")
 
     # Test with different optode coordinate frame
@@ -1625,44 +1807,44 @@ def test_snirf(_bids_validate, tmp_path):
     _bids_validate(tmp_path)
 
     raw = _read_raw_snirf(raw_fname, optode_frame="mri")
-    raw.info['dig'].pop(1)
-    with pytest.raises(RuntimeError,
-                       match="'head' coordinate frame must contain nasion"):
+    raw.info["dig"].pop(1)
+    with pytest.raises(
+        RuntimeError, match="'head' coordinate frame must contain nasion"
+    ):
         write_raw_bids(raw, bids_path, overwrite=True)
 
 
 def test_bdf(_bids_validate, tmp_path):
     """Test write_raw_bids conversion for Biosemi data."""
-    raw_fname = op.join(base_path, 'edf', 'tests', 'data', 'test.bdf')
+    raw_fname = op.join(base_path, "edf", "tests", "data", "test.bdf")
 
-    bids_path = _bids_path.copy().update(root=tmp_path, datatype='eeg')
+    bids_path = _bids_path.copy().update(root=tmp_path, datatype="eeg")
 
     raw = _read_raw_bdf(raw_fname)
-    raw.info['line_freq'] = 60
+    raw.info["line_freq"] = 60
     write_raw_bids(raw, bids_path, overwrite=False)
     _bids_validate(tmp_path)
 
     # assert README has references in it
-    readme = op.join(tmp_path, 'README')
-    with open(readme, 'r', encoding='utf-8-sig') as fid:
+    readme = op.join(tmp_path, "README")
+    with open(readme, "r", encoding="utf-8-sig") as fid:
         text = fid.read()
-        assert REFERENCES['eeg'] in text
-        assert REFERENCES['meg'] not in text
-        assert REFERENCES['ieeg'] not in text
+        assert REFERENCES["eeg"] in text
+        assert REFERENCES["meg"] not in text
+        assert REFERENCES["ieeg"] not in text
 
     # Test also the reading of channel types from channels.tsv
     # the first channel in the raw data is not MISC right now
     test_ch_idx = 0
-    assert coil_type(raw.info, test_ch_idx) != 'misc'
+    assert coil_type(raw.info, test_ch_idx) != "misc"
 
     # we will change the channel type to MISC and overwrite the channels file
-    bids_fname = bids_path.copy().update(suffix='eeg',
-                                         extension='.bdf')
-    channels_fname = _find_matching_sidecar(bids_fname,
-                                            suffix='channels',
-                                            extension='.tsv')
+    bids_fname = bids_path.copy().update(suffix="eeg", extension=".bdf")
+    channels_fname = _find_matching_sidecar(
+        bids_fname, suffix="channels", extension=".tsv"
+    )
 
     channels_dict = _from_tsv(channels_fname)
-    channels_dict['type'][test_ch_idx] = 'MISC'
+    channels_dict["type"][test_ch_idx] = "MISC"
     _to_tsv(channels_dict, channels_fname)
 
     # Now read the raw data back from BIDS, with the tampered TSV, to show
@@ -1670,104 +1852,107 @@ def test_bdf(_bids_validate, tmp_path):
     # in the raw data object
     with pytest.warns(RuntimeWarning, match="Fp1 has changed from V .*"):
         raw = read_raw_bids(bids_path=bids_path)
-    assert coil_type(raw.info, test_ch_idx) == 'misc'
+    assert coil_type(raw.info, test_ch_idx) == "misc"
 
     with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
-        read_raw_bids(bids_path=bids_path, extra_params=dict(foo='bar'))
+        read_raw_bids(bids_path=bids_path, extra_params=dict(foo="bar"))
 
     # Test errors for modified raw.times
     raw = _read_raw_bdf(raw_fname)
-    with pytest.raises(ValueError, match='fewer time points'):
-        write_raw_bids(raw.copy().crop(0, raw.times[-2]), bids_path,
-                       overwrite=True)
+    with pytest.raises(ValueError, match="fewer time points"):
+        write_raw_bids(raw.copy().crop(0, raw.times[-2]), bids_path, overwrite=True)
 
-    with pytest.raises(ValueError, match='more time points'):
-        write_raw_bids(mne.concatenate_raws([raw.copy(), raw]), bids_path,
-                       overwrite=True)
+    with pytest.raises(ValueError, match="more time points"):
+        write_raw_bids(
+            mne.concatenate_raws([raw.copy(), raw]), bids_path, overwrite=True
+        )
 
-    if hasattr(raw.info, '_unlock'):
+    if hasattr(raw.info, "_unlock"):
         with raw.info._unlock():
-            raw.info['sfreq'] -= 10  # change raw.times, but retain shape
-    elif parse_version(mne.__version__) >= parse_version('0.23'):
-        raw.info['sfreq'] -= 10
+            raw.info["sfreq"] -= 10  # change raw.times, but retain shape
+    elif parse_version(mne.__version__) >= parse_version("0.23"):
+        raw.info["sfreq"] -= 10
     else:
         raw._times = raw._times / 5
-    with pytest.raises(ValueError, match='raw.times has changed'):
+    with pytest.raises(ValueError, match="raw.times has changed"):
        write_raw_bids(raw, bids_path, overwrite=True)
 
     # test anonymize and convert
     raw = _read_raw_bdf(raw_fname)
     match = r"^EDF\/EDF\+\/BDF files contain two fields .*"
     with pytest.warns(RuntimeWarning, match=match):
-        output_path = _test_anonymize(tmp_path / 'tmp', raw, bids_path)
+        output_path = _test_anonymize(tmp_path / "tmp", raw, bids_path)
     _bids_validate(output_path)
 
 
-@pytest.mark.filterwarnings(warning_str['meas_date_set_to_none'])
+@pytest.mark.filterwarnings(warning_str["meas_date_set_to_none"])
 @testing.requires_testing_data
 def test_set(_bids_validate, tmp_path):
     """Test write_raw_bids conversion for EEGLAB data."""
     # standalone .set file with associated .fdt
-    bids_root = tmp_path / 'bids1'
-    raw_fname = data_path / 'EEGLAB' / 'test_raw.set'
+    bids_root = tmp_path / "bids1"
+    raw_fname = data_path / "EEGLAB" / "test_raw.set"
     raw = _read_raw_eeglab(raw_fname)
-    bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg')
+    bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg")
 
     # proceed with the actual test for EEGLAB data
     write_raw_bids(raw, bids_path, overwrite=False)
     read_raw_bids(bids_path=bids_path)
 
     with pytest.raises(TypeError, match="unexpected keyword argument 'foo'"):
-        read_raw_bids(bids_path=bids_path, extra_params=dict(foo='bar'))
+        read_raw_bids(bids_path=bids_path, extra_params=dict(foo="bar"))
 
     with pytest.raises(FileExistsError, match="already exists"):  # noqa: F821
         write_raw_bids(raw, bids_path, overwrite=False)
 
     _bids_validate(bids_root)
 
     # check events.tsv is written
-    events_tsv_fname = op.join(bids_root, 'sub-' + subject_id,
-                               'ses-' + session_id, 'eeg',
-                               bids_path.basename + '_events.tsv')
+    events_tsv_fname = op.join(
+        bids_root,
+        "sub-" + subject_id,
+        "ses-" + session_id,
+        "eeg",
+        bids_path.basename + "_events.tsv",
+    )
     assert op.exists(events_tsv_fname)
 
     # Also cover iEEG
     # We use the same data and pretend that eeg channels are ecog
-    raw.set_channel_types({raw.ch_names[i]: 'ecog'
-                           for i in mne.pick_types(raw.info, eeg=True)})
-    bids_root = tmp_path / 'bids2'
-    bids_path.update(root=bids_root, datatype='ieeg')
+    raw.set_channel_types(
+        {raw.ch_names[i]: "ecog" for i in mne.pick_types(raw.info, eeg=True)}
+    )
+    bids_root = tmp_path / "bids2"
+    bids_path.update(root=bids_root, datatype="ieeg")
     write_raw_bids(raw, bids_path)
     _bids_validate(bids_root)
 
     # test anonymize and convert
-    if check_version('pybv', PYBV_VERSION):
-        with pytest.warns(RuntimeWarning,
-                          match='Encountered data in "double" format'):
-            output_path = _test_anonymize(tmp_path / 'tmp', raw, bids_path)
+    if check_version("pybv", PYBV_VERSION):
+        with pytest.warns(RuntimeWarning, match='Encountered data in "double" format'):
+            output_path = _test_anonymize(tmp_path / "tmp", raw, bids_path)
         _bids_validate(output_path)
 
 
 def _check_anat_json(bids_path):
-    json_path = bids_path.copy().update(extension='.json')
+    json_path = bids_path.copy().update(extension=".json")
 
     # Validate that matching sidecar file is as expected
     assert op.exists(json_path.fpath)
-    with open(json_path, 'r', encoding='utf-8') as f:
+    with open(json_path, "r", encoding="utf-8") as f:
        json_dict = json.load(f)
 
     # We only should have AnatomicalLandmarkCoordinates as key
-    np.testing.assert_array_equal(list(json_dict.keys()),
-                                  ['AnatomicalLandmarkCoordinates'])
+    np.testing.assert_array_equal(
+        list(json_dict.keys()), ["AnatomicalLandmarkCoordinates"]
+    )
     # And within AnatomicalLandmarkCoordinates only LPA, NAS, RPA in that order
-    anat_dict = json_dict['AnatomicalLandmarkCoordinates']
-    point_list = ['LPA', 'NAS', 'RPA']
-    np.testing.assert_array_equal(list(anat_dict.keys()),
-                                  point_list)
+    anat_dict = json_dict["AnatomicalLandmarkCoordinates"]
+    point_list = ["LPA", "NAS", "RPA"]
+    np.testing.assert_array_equal(list(anat_dict.keys()), point_list)
     # test the actual values of the voxels (no floating points)
     for i, point in enumerate([(66, 51, 46), (41, 32, 74), (17, 53, 47)]):
         coords = anat_dict[point_list[i]]
-        np.testing.assert_array_equal(np.asarray(coords, dtype=int),
-                                      point)
+        np.testing.assert_array_equal(np.asarray(coords, dtype=int), point)
 
 
 @testing.requires_testing_data
@@ -1775,29 +1960,33 @@ def test_get_anat_landmarks():
     """Test getting anatomical landmarks in image space."""
     # Get the T1 weighted MRI data file
     # Needs to be converted to Nifti because we only have mgh in our test base
-    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
-    fs_subjects_dir = op.join(data_path, 'subjects')
-    raw_fname = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_trunc_raw.fif')
+    t1w_mgh = op.join(data_path, "subjects", "sample", "mri", "T1.mgz")
+    fs_subjects_dir = op.join(data_path, "subjects")
+    raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif")
     raw = _read_raw_fif(raw_fname)
 
     # Write some MRI data and supply a `trans`
-    trans_fname = raw_fname.replace('_raw.fif', '-trans.fif')
+    trans_fname = raw_fname.replace("_raw.fif", "-trans.fif")
     trans = mne.read_trans(trans_fname)
 
     # define some keyword arguments to simplify testing
-    kwargs = dict(image=t1w_mgh, info=raw.info, trans=trans,
-                  fs_subject='sample', fs_subjects_dir=fs_subjects_dir)
+    kwargs = dict(
+        image=t1w_mgh,
+        info=raw.info,
+        trans=trans,
+        fs_subject="sample",
+        fs_subjects_dir=fs_subjects_dir,
+    )
 
     # trans has a wrong type
     wrong_type = 1
-    match = f'trans must be an instance of .*, got {type(wrong_type)} '
+    match = f"trans must be an instance of .*, got {type(wrong_type)} "
     ex = TypeError
 
     with pytest.raises(ex, match=match):
         get_anat_landmarks(**dict(kwargs, trans=wrong_type))
 
     # trans is a str, but file does not exist
-    wrong_fname = 'not_a_trans'
+    wrong_fname = "not_a_trans"
     match = 'trans file "{}" not found'.format(wrong_fname)
     with pytest.raises(IOError, match=match):
         get_anat_landmarks(**dict(kwargs, trans=wrong_fname))
@@ -1807,33 +1996,34 @@ def test_get_anat_landmarks():
     # test unsupported coord_frame
     fail_info = raw.info.copy()
-    fail_info['dig'][0]['coord_frame'] = 3
-    fail_info['dig'][1]['coord_frame'] = 3
-    fail_info['dig'][2]['coord_frame'] = 3
+    fail_info["dig"][0]["coord_frame"] = 3
+    fail_info["dig"][1]["coord_frame"] = 3
+    fail_info["dig"][2]["coord_frame"] = 3
 
-    with pytest.raises(ValueError, match='must be in the head'):
+    with pytest.raises(ValueError, match="must be in the head"):
         get_anat_landmarks(**dict(kwargs, info=fail_info))
 
     # test bad freesurfer directory
-    with pytest.raises(ValueError, match='subject folder is incorrect'):
-        get_anat_landmarks(**dict(kwargs, fs_subject='bad'))
+    with pytest.raises(ValueError, match="subject folder is incorrect"):
+        get_anat_landmarks(**dict(kwargs, fs_subject="bad"))
 
     # test _get_fid_coords
     fail_landmarks = mne.channels.make_dig_montage(
-        lpa=[66.08580, 51.33362, 46.52982],
-        coord_frame='mri_voxel')
+        lpa=[66.08580, 51.33362, 46.52982], coord_frame="mri_voxel"
+    )
 
-    with pytest.raises(ValueError, match='Some fiducial points are missing'):
+    with pytest.raises(ValueError, match="Some fiducial points are missing"):
         _get_fid_coords(fail_landmarks.dig, raise_error=True)
 
     fail_landmarks = mne.channels.make_dig_montage(
         lpa=[66.08580, 51.33362, 46.52982],
         nasion=[41.87363, 32.24694, 74.55314],
         rpa=[17.23812, 53.08294, 47.01789],
-        coord_frame='mri_voxel')
-    fail_landmarks.dig[2]['coord_frame'] = 99
+        coord_frame="mri_voxel",
+    )
+    fail_landmarks.dig[2]["coord_frame"] = 99
 
-    with pytest.raises(ValueError, match='must be in the same coordinate'):
+    with pytest.raises(ValueError, match="must be in the same coordinate"):
         _get_fid_coords(fail_landmarks.dig, raise_error=True)
 
     # test main
@@ -1841,54 +2031,61 @@ def test_get_anat_landmarks():
         lpa=[66.08580, 51.33362, 46.52982],
         nasion=[41.87363, 32.24694, 74.55314],
         rpa=[17.23812, 53.08294, 47.01789],
-        coord_frame='mri_voxel')
-    coords_dict, mri_voxel_coord_frame = _get_fid_coords(
-        mri_voxel_landmarks.dig)
-    mri_voxel_landmarks = np.asarray((coords_dict['lpa'],
-                                      coords_dict['nasion'],
-                                      coords_dict['rpa']))
+        coord_frame="mri_voxel",
+    )
+    coords_dict, mri_voxel_coord_frame = _get_fid_coords(mri_voxel_landmarks.dig)
+    mri_voxel_landmarks = np.asarray(
+        (coords_dict["lpa"], coords_dict["nasion"], coords_dict["rpa"])
+    )
 
     landmarks = get_anat_landmarks(**kwargs)
     coords_dict2, coord_frame = _get_fid_coords(landmarks.dig)
-    landmarks = np.asarray((coords_dict2['lpa'],
-                            coords_dict2['nasion'],
-                            coords_dict2['rpa']))
+    landmarks = np.asarray(
+        (coords_dict2["lpa"], coords_dict2["nasion"], coords_dict2["rpa"])
+    )
 
     assert mri_voxel_coord_frame == coord_frame
-    np.testing.assert_array_almost_equal(
-        mri_voxel_landmarks, landmarks, decimal=5)
+    np.testing.assert_array_almost_equal(mri_voxel_landmarks, landmarks, decimal=5)
 
 
 @testing.requires_testing_data
 def test_write_anat(_bids_validate, tmp_path):
     """Test writing anatomical data."""
-    nib = pytest.importorskip('nibabel')
+    nib = pytest.importorskip("nibabel")
 
     # Get the MNE testing sample data
-    bids_root = tmp_path / 'bids1'
+    bids_root = tmp_path / "bids1"
 
     # Get the T1 weighted MRI data file
     # Needs to be converted to Nifti because we only have mgh in our test base
-    t1w_mgh = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
+    t1w_mgh = op.join(data_path, "subjects", "sample", "mri", "T1.mgz")
 
     # define hard-coded landmark locations in voxel and scanner RAS
     mri_voxel_landmarks = mne.channels.make_dig_montage(
         lpa=[66.08580, 51.33362, 46.52982],
         nasion=[41.87363, 32.24694, 74.55314],
         rpa=[17.23812, 53.08294, 47.01789],
-        coord_frame='mri_voxel')
+        coord_frame="mri_voxel",
+    )
     mri_scanner_ras_landmarks = mne.channels.make_dig_montage(
         lpa=[-0.07453101, 0.01962855, -0.05228882],
         nasion=[-0.00189453, 0.1036985, 0.00497122],
         rpa=[0.07201203, 0.02109275, -0.05753678],
-        coord_frame='ras')
+        coord_frame="ras",
+    )
 
     # write base bids directory
-    raw_fname = op.join(data_path, 'MEG', 'sample',
-                        'sample_audvis_trunc_raw.fif')
+    raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif")
 
-    event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
-                'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
-    events_fname = op.join(data_path, 'MEG', 'sample',
-                           'sample_audvis_trunc_raw-eve.fif')
+    event_id = {
+        "Auditory/Left": 1,
+        "Auditory/Right": 2,
+        "Visual/Left": 3,
+        "Visual/Right": 4,
+        "Smiley": 5,
+        "Button": 32,
+    }
+    events_fname = op.join(
+        data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif"
+    )
 
     # Drop unknown events.
     events = mne.read_events(events_fname)
@@ -1896,64 +2093,66 @@ def test_write_anat(_bids_validate, tmp_path):
 
     raw = _read_raw_fif(raw_fname)
     bids_path = _bids_path.copy().update(root=bids_root)
-    write_raw_bids(raw, bids_path, events=events, event_id=event_id,
-                   overwrite=False)
+    write_raw_bids(raw, bids_path, events=events, event_id=event_id, overwrite=False)
 
     # define some keyword arguments to simplify testing
-    kwargs = dict(bids_path=bids_path, landmarks=mri_voxel_landmarks,
-                  deface=True, verbose=True, overwrite=True)
+    kwargs = dict(
+        bids_path=bids_path,
+        landmarks=mri_voxel_landmarks,
+        deface=True,
+        verbose=True,
+        overwrite=True,
+    )
 
     # test writing with no sidecar
     bids_path = write_anat(t1w_mgh, **kwargs)
     anat_dir = bids_path.directory
     _bids_validate(bids_root)
-    assert op.exists(op.join(anat_dir, 'sub-01_ses-01_acq-01_T1w.nii.gz'))
+    assert op.exists(op.join(anat_dir, "sub-01_ses-01_acq-01_T1w.nii.gz"))
 
     # Validate that files are as expected
     _check_anat_json(bids_path)
 
     # Now try some anat writing that will fail
     # We already have some MRI data there
-    with pytest.raises(IOError, match='`overwrite` is set to False'):
+    with pytest.raises(IOError, match="`overwrite` is set to False"):
         write_anat(t1w_mgh, **dict(kwargs, overwrite=False))
 
     # check overwrite no JSON
-    with pytest.raises(IOError, match='it already exists'):
-        write_anat(t1w_mgh, bids_path=bids_path, verbose=True,
-                   overwrite=False)
+    with pytest.raises(IOError, match="it already exists"):
        write_anat(t1w_mgh, bids_path=bids_path, verbose=True, overwrite=False)
 
     # pass some invalid type as T1 MRI
-    with pytest.raises(ValueError, match='must be a path to an MRI'):
+    with pytest.raises(ValueError, match="must be a path to an MRI"):
         write_anat(9999999999999, **kwargs)
 
     # Return without writing sidecar
     sh.rmtree(anat_dir)
     write_anat(t1w_mgh, bids_path=bids_path)
     # Assert that we truly cannot find a sidecar
-    with pytest.raises(RuntimeError, match='Did not find any'):
-        _find_matching_sidecar(bids_path,
-                               suffix='T1w', extension='.json')
+    with pytest.raises(RuntimeError, match="Did not find any"):
+        _find_matching_sidecar(bids_path, suffix="T1w", extension=".json")
 
     # Writing without a session does NOT yield "ses-None" anywhere
     bids_path.update(session=None, acquisition=None)
     kwargs.update(bids_path=bids_path)
     bids_path = write_anat(t1w_mgh, bids_path=bids_path)
     anat_dir2 = bids_path.directory
-    assert 'ses-None' not in anat_dir2.as_posix()
op.exists(op.join(anat_dir2, 'sub-01_T1w.nii.gz')) + assert "ses-None" not in anat_dir2.as_posix() + assert op.exists(op.join(anat_dir2, "sub-01_T1w.nii.gz")) # test deface bids_path = write_anat(t1w_mgh, **kwargs) anat_dir = bids_path.directory - t1w = nib.load(op.join(anat_dir, 'sub-01_T1w.nii.gz')) + t1w = nib.load(op.join(anat_dir, "sub-01_T1w.nii.gz")) vox_sum = t1w.get_fdata().sum() _check_anat_json(bids_path) # Check that increasing inset leads to more voxels at 0 - bids_path = write_anat(t1w_mgh, **dict(kwargs, deface=dict(inset=25.))) + bids_path = write_anat(t1w_mgh, **dict(kwargs, deface=dict(inset=25.0))) anat_dir2 = bids_path.directory - t1w2 = nib.load(op.join(anat_dir2, 'sub-01_T1w.nii.gz')) + t1w2 = nib.load(op.join(anat_dir2, "sub-01_T1w.nii.gz")) vox_sum2 = t1w2.get_fdata().sum() _check_anat_json(bids_path) @@ -1963,25 +2162,26 @@ def test_write_anat(_bids_validate, tmp_path): # Check that increasing theta leads to more voxels at 0 bids_path = write_anat(t1w_mgh, **dict(kwargs, deface=dict(theta=45))) anat_dir3 = bids_path.directory - t1w3 = nib.load(op.join(anat_dir3, 'sub-01_T1w.nii.gz')) + t1w3 = nib.load(op.join(anat_dir3, "sub-01_T1w.nii.gz")) vox_sum3 = t1w3.get_fdata().sum() assert vox_sum > vox_sum3 - with pytest.raises(ValueError, match='must be provided to deface'): - write_anat(t1w_mgh, bids_path=bids_path, deface=True, - verbose=True, overwrite=True) + with pytest.raises(ValueError, match="must be provided to deface"): + write_anat( + t1w_mgh, bids_path=bids_path, deface=True, verbose=True, overwrite=True + ) - with pytest.raises(ValueError, match='inset must be numeric'): - write_anat(t1w_mgh, **dict(kwargs, deface=dict(inset='small'))) + with pytest.raises(ValueError, match="inset must be numeric"): + write_anat(t1w_mgh, **dict(kwargs, deface=dict(inset="small"))) - with pytest.raises(ValueError, match='inset should be positive'): - write_anat(t1w_mgh, **dict(kwargs, deface=dict(inset=-2.))) + with pytest.raises(ValueError, match="inset should be positive"): + write_anat(t1w_mgh, **dict(kwargs, deface=dict(inset=-2.0))) - with pytest.raises(ValueError, match='theta must be numeric'): - write_anat(t1w_mgh, **dict(kwargs, deface=dict(theta='big'))) + with pytest.raises(ValueError, match="theta must be numeric"): + write_anat(t1w_mgh, **dict(kwargs, deface=dict(theta="big"))) - with pytest.raises(ValueError, match='theta should be between 0 and 90'): + with pytest.raises(ValueError, match="theta should be between 0 and 90"): write_anat(t1w_mgh, **dict(kwargs, deface=dict(theta=100))) # test using landmarks @@ -1989,61 +2189,72 @@ def test_write_anat(_bids_validate, tmp_path): # test unsupported coord_frame fail_landmarks = mri_voxel_landmarks.copy() - fail_landmarks.dig[0]['coord_frame'] = 3 - fail_landmarks.dig[1]['coord_frame'] = 3 - fail_landmarks.dig[2]['coord_frame'] = 3 + fail_landmarks.dig[0]["coord_frame"] = 3 + fail_landmarks.dig[1]["coord_frame"] = 3 + fail_landmarks.dig[2]["coord_frame"] = 3 - with pytest.raises(ValueError, match='Coordinate frame not supported'): + with pytest.raises(ValueError, match="Coordinate frame not supported"): write_anat(t1w_mgh, **dict(kwargs, landmarks=fail_landmarks)) # Test now using FLASH - flash_mgh = \ - op.join(data_path, 'subjects', 'sample', 'mri', 'flash', 'mef05.mgz') - trans_fname = raw_fname.replace('_raw.fif', '-trans.fif') - landmarks = get_anat_landmarks(flash_mgh, raw.info, trans_fname, 'sample', - op.join(data_path, 'subjects')) - bids_path = BIDSPath(subject=subject_id, session=session_id, - 
suffix='FLASH', root=bids_root) + flash_mgh = op.join(data_path, "subjects", "sample", "mri", "flash", "mef05.mgz") + trans_fname = raw_fname.replace("_raw.fif", "-trans.fif") + landmarks = get_anat_landmarks( + flash_mgh, raw.info, trans_fname, "sample", op.join(data_path, "subjects") + ) + bids_path = BIDSPath( + subject=subject_id, session=session_id, suffix="FLASH", root=bids_root + ) kwargs.update(bids_path=bids_path, landmarks=landmarks) bids_path = write_anat(flash_mgh, **kwargs) anat_dir = bids_path.directory - assert op.exists(op.join(anat_dir, 'sub-01_ses-01_FLASH.nii.gz')) + assert op.exists(op.join(anat_dir, "sub-01_ses-01_FLASH.nii.gz")) _bids_validate(bids_root) - flash1 = nib.load(op.join(anat_dir, 'sub-01_ses-01_FLASH.nii.gz')) + flash1 = nib.load(op.join(anat_dir, "sub-01_ses-01_FLASH.nii.gz")) fvox1 = flash1.get_fdata() # test landmarks in scanner RAS coordinates bids_path = write_anat( - flash_mgh, **dict(kwargs, landmarks=mri_scanner_ras_landmarks)) + flash_mgh, **dict(kwargs, landmarks=mri_scanner_ras_landmarks) + ) anat_dir = bids_path.directory - flash2 = nib.load(op.join(anat_dir, 'sub-01_ses-01_FLASH.nii.gz')) + flash2 = nib.load(op.join(anat_dir, "sub-01_ses-01_FLASH.nii.gz")) fvox2 = flash2.get_fdata() assert_array_equal(fvox1, fvox2) # test that we can now use a BIDSPath to use the landmarks - landmarks = get_anat_landmarks(bids_path, raw.info, trans_fname, - 'sample', op.join(data_path, 'subjects')) + landmarks = get_anat_landmarks( + bids_path, raw.info, trans_fname, "sample", op.join(data_path, "subjects") + ) @testing.requires_testing_data def test_write_raw_pathlike(tmp_path): """Ensure writing pathlib.Path works.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32, - 'unknown': 0} + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + "unknown": 0, + } raw = _read_raw_fif(raw_fname) bids_root = tmp_path - events_fname = (data_path / 'MEG' / 'sample' / - 'sample_audvis_trunc_raw-eve.fif') + events_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" bids_path = _bids_path.copy().update(root=bids_root) - bids_path_ = write_raw_bids(raw=raw, bids_path=bids_path, - events=events_fname, - event_id=event_id, overwrite=False) + bids_path_ = write_raw_bids( + raw=raw, + bids_path=bids_path, + events=events_fname, + event_id=event_id, + overwrite=False, + ) # write_raw_bids() should return a BIDSPath.
assert isinstance(bids_path_, BIDSPath) @@ -2053,45 +2264,51 @@ def test_write_raw_pathlike(tmp_path): @testing.requires_testing_data def test_write_raw_no_dig(tmp_path): """Test writing without dig.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname) bids_root = tmp_path bids_path = _bids_path.copy().update(root=bids_root) - bids_path_ = write_raw_bids(raw=raw, bids_path=bids_path, - overwrite=True) + bids_path_ = write_raw_bids(raw=raw, bids_path=bids_path, overwrite=True) assert bids_path_.root == bids_root with raw.info._unlock(): - raw.info['dig'] = None - raw.save(str(bids_root / 'tmp_raw.fif')) - raw = _read_raw_fif(bids_root / 'tmp_raw.fif') - bids_path_ = write_raw_bids(raw=raw, bids_path=bids_path, - overwrite=True) + raw.info["dig"] = None + raw.save(str(bids_root / "tmp_raw.fif")) + raw = _read_raw_fif(bids_root / "tmp_raw.fif") + bids_path_ = write_raw_bids(raw=raw, bids_path=bids_path, overwrite=True) assert bids_path_.root == bids_root - assert bids_path_.suffix == 'meg' - assert bids_path_.extension == '.fif' + assert bids_path_.suffix == "meg" + assert bids_path_.extension == ".fif" @testing.requires_testing_data def test_write_anat_pathlike(tmp_path): """Test writing anatomical data with pathlib.Paths.""" - pytest.importorskip('nibabel') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - trans_fname = raw_fname.replace('_raw.fif', '-trans.fif') + pytest.importorskip("nibabel") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + trans_fname = raw_fname.replace("_raw.fif", "-trans.fif") raw = _read_raw_fif(raw_fname) trans = mne.read_trans(trans_fname) bids_root = tmp_path - t1w_mgh_fname = Path(data_path) / 'subjects' / 'sample' / 'mri' / 'T1.mgz' - bids_path = BIDSPath(subject=subject_id, session=session_id, - acquisition=acq, root=bids_root) + t1w_mgh_fname = Path(data_path) / "subjects" / "sample" / "mri" / "T1.mgz" + bids_path = BIDSPath( + subject=subject_id, session=session_id, acquisition=acq, root=bids_root + ) landmarks = get_anat_landmarks( - t1w_mgh_fname, raw.info, trans, 'sample', - fs_subjects_dir=op.join(data_path, 'subjects')) - bids_path = write_anat(t1w_mgh_fname, bids_path=bids_path, - landmarks=landmarks, deface=True, - verbose=True, overwrite=True) + t1w_mgh_fname, + raw.info, + trans, + "sample", + fs_subjects_dir=op.join(data_path, "subjects"), + ) + bids_path = write_anat( + t1w_mgh_fname, + bids_path=bids_path, + landmarks=landmarks, + deface=True, + verbose=True, + overwrite=True, + ) # write_anat() should return a BIDSPath. 
assert isinstance(bids_path, BIDSPath) @@ -2100,10 +2317,10 @@ def test_write_anat_pathlike(tmp_path): @testing.requires_testing_data def test_write_does_not_alter_events_inplace(tmp_path): """Test that writing does not modify the passed events array.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) raw = _read_raw_fif(raw_fname) events = mne.read_events(events_fname) @@ -2112,12 +2329,19 @@ def test_write_does_not_alter_events_inplace(tmp_path): events = events[events[:, 2] != 0] events_orig = events.copy() - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } bids_path = _bids_path.copy().update(root=tmp_path) - write_raw_bids(raw=raw, bids_path=bids_path, - events=events, event_id=event_id, overwrite=True) + write_raw_bids( + raw=raw, bids_path=bids_path, events=events, event_id=event_id, overwrite=True + ) assert np.array_equal(events, events_orig) @@ -2133,212 +2357,253 @@ def _ensure_list(x): @pytest.mark.parametrize( - 'ch_names, descriptions, drop_status_col, drop_description_col, ' - 'existing_ch_names, existing_descriptions', + "ch_names, descriptions, drop_status_col, drop_description_col, " + "existing_ch_names, existing_descriptions", [ # Only mark channels, do not set descriptions. - (['MEG 0112', 'MEG 0131', 'EEG 053'], None, False, False, [], []), - ('MEG 0112', None, False, False, [], []), - ('nonsense', None, False, False, [], []), + (["MEG 0112", "MEG 0131", "EEG 053"], None, False, False, [], []), + ("MEG 0112", None, False, False, [], []), + ("nonsense", None, False, False, [], []), # Now also set descriptions. - (['MEG 0112', 'MEG 0131'], ['Really bad!', 'Even worse.'], False, - False, [], []), - ('MEG 0112', 'Really bad!', False, False, [], []), + ( + ["MEG 0112", "MEG 0131"], + ["Really bad!", "Even worse."], + False, + False, + [], + [], + ), + ("MEG 0112", "Really bad!", False, False, [], []), # Should raise. - (['MEG 0112', 'MEG 0131'], ['Really bad!'], False, False, [], []), + (["MEG 0112", "MEG 0131"], ["Really bad!"], False, False, [], []), # `datatype='meg'` - (['MEG 0112'], ['Really bad!'], False, False, [], []), + (["MEG 0112"], ["Really bad!"], False, False, [], []), # Ensure we create missing columns. - ('MEG 0112', 'Really bad!', True, True, [], []), - ]) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) + ("MEG 0112", "Really bad!", True, True, [], []), + ], +) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data -def test_mark_channels(_bids_validate, - ch_names, descriptions, - drop_status_col, drop_description_col, - existing_ch_names, existing_descriptions, - tmp_path): +def test_mark_channels( + _bids_validate, + ch_names, + descriptions, + drop_status_col, + drop_description_col, + existing_ch_names, + existing_descriptions, + tmp_path, +): """Test marking channels of an existing BIDS dataset as "bad".""" # Setup: Create a fresh BIDS dataset.
- bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg', - suffix='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg", suffix="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) # Drop unknown events. events = mne.read_events(events_fname) events = events[events[:, 2] != 0] raw = _read_raw_fif(raw_fname, verbose=False) - raw.info['bads'] = [] - write_raw_bids(raw, bids_path=bids_path, events=events, - event_id=event_id, verbose=False) + raw.info["bads"] = [] + write_raw_bids( + raw, bids_path=bids_path, events=events, event_id=event_id, verbose=False + ) - channels_fname = _find_matching_sidecar(bids_path, suffix='channels', - extension='.tsv') + channels_fname = _find_matching_sidecar( + bids_path, suffix="channels", extension=".tsv" + ) if drop_status_col: # Remove `status` column from the sidecar TSV file. tsv_data = _from_tsv(channels_fname) - del tsv_data['status'] + del tsv_data["status"] _to_tsv(tsv_data, channels_fname) if drop_description_col: # Remove `status_description` column from the sidecar TSV file. tsv_data = _from_tsv(channels_fname) - del tsv_data['status_description'] + del tsv_data["status_description"] _to_tsv(tsv_data, channels_fname) # Test that we raise if number of channels doesn't match number of # descriptions. - if (descriptions is not None and - len(_ensure_list(ch_names)) != len(_ensure_list(descriptions))): - with pytest.raises(ValueError, match='must match'): - mark_channels(ch_names=ch_names, descriptions=descriptions, - bids_path=bids_path, status='bad', - verbose=False) + if descriptions is not None and len(_ensure_list(ch_names)) != len( + _ensure_list(descriptions) + ): + with pytest.raises(ValueError, match="must match"): + mark_channels( + ch_names=ch_names, + descriptions=descriptions, + bids_path=bids_path, + status="bad", + verbose=False, + ) return # Test that we raise if we encounter an unknown channel name. - if any([ch_name not in raw.ch_names - for ch_name in _ensure_list(ch_names)]): - with pytest.raises(ValueError, match='not found in dataset'): - mark_channels(ch_names=ch_names, descriptions=descriptions, - bids_path=bids_path, status='bad', verbose=False) + if any([ch_name not in raw.ch_names for ch_name in _ensure_list(ch_names)]): + with pytest.raises(ValueError, match="not found in dataset"): + mark_channels( + ch_names=ch_names, + descriptions=descriptions, + bids_path=bids_path, + status="bad", + verbose=False, + ) return # Mark `existing_ch_names` as bad in raw and sidecar TSV before we # begin our actual tests, which should then add additional channels # to the list of bads, retaining the ones we're specifying here.
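# (Note: in the parametrizations above, `existing_ch_names` is always empty, # so the call below simply resets every channel to "good"; as in the # roundtrip test further down, ch_names=[] appears to address all channels.)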
- mark_channels(ch_names=[], - bids_path=bids_path, status='good', - verbose=False) + mark_channels(ch_names=[], bids_path=bids_path, status="good", verbose=False) _bids_validate(bids_root) raw = read_raw_bids(bids_path=bids_path, verbose=False) # Order is not preserved - assert set(existing_ch_names) == set(raw.info['bads']) + assert set(existing_ch_names) == set(raw.info["bads"]) del raw - mark_channels(ch_names=ch_names, descriptions=descriptions, - bids_path=bids_path, status='bad', verbose=False) + mark_channels( + ch_names=ch_names, + descriptions=descriptions, + bids_path=bids_path, + status="bad", + verbose=False, + ) _bids_validate(bids_root) raw = read_raw_bids(bids_path=bids_path, verbose=False) # expected bad channels and descriptions just get appended - expected_bads = (_ensure_list(ch_names) + - _ensure_list(existing_ch_names)) - expected_descriptions = (_ensure_list(descriptions) + - _ensure_list(existing_descriptions)) + expected_bads = _ensure_list(ch_names) + _ensure_list(existing_ch_names) + expected_descriptions = _ensure_list(descriptions) + _ensure_list( + existing_descriptions + ) # Order is not preserved - assert len(expected_bads) == len(raw.info['bads']) - assert set(expected_bads) == set(raw.info['bads']) + assert len(expected_bads) == len(raw.info["bads"]) + assert set(expected_bads) == set(raw.info["bads"]) # Descriptions are not mapped to Raw, so let's check the TSV contents # directly. tsv_data = _from_tsv(channels_fname) - assert 'status' in tsv_data - assert 'status_description' in tsv_data + assert "status" in tsv_data + assert "status_description" in tsv_data for description in expected_descriptions: - assert description in tsv_data['status_description'] + assert description in tsv_data["status_description"] -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_mark_channel_roundtrip(tmp_path): """Test marking channels fulfills roundtrip.""" # Setup: Create a fresh BIDS dataset. - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg', - suffix='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg", suffix="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) # Drop unknown events. 
events = mne.read_events(events_fname) events = events[events[:, 2] != 0] raw = _read_raw_fif(raw_fname, verbose=False) - write_raw_bids(raw, bids_path=bids_path, events=events, - event_id=event_id, verbose=False) - channels_fname = _find_matching_sidecar(bids_path, suffix='channels', - extension='.tsv') + write_raw_bids( + raw, bids_path=bids_path, events=events, event_id=event_id, verbose=False + ) + channels_fname = _find_matching_sidecar( + bids_path, suffix="channels", extension=".tsv" + ) ch_names = raw.ch_names # first mark all channels as good - mark_channels(bids_path, ch_names=[], status='good', verbose=False) + mark_channels(bids_path, ch_names=[], status="good", verbose=False) tsv_data = _from_tsv(channels_fname) - assert all(status == 'good' for status in tsv_data['status']) + assert all(status == "good" for status in tsv_data["status"]) # now mark some bad channels - mark_channels(bids_path, ch_names=ch_names[:5], status='bad', - verbose=False) + mark_channels(bids_path, ch_names=ch_names[:5], status="bad", verbose=False) tsv_data = _from_tsv(channels_fname) - status = tsv_data['status'] - assert all(status_ == 'bad' for status_ in status[:5]) - assert all(status_ == 'good' for status_ in status[5:]) + status = tsv_data["status"] + assert all(status_ == "bad" for status_ in status[:5]) + assert all(status_ == "good" for status_ in status[5:]) # now mark them good again - mark_channels(bids_path, ch_names=ch_names[:5], status='good', - verbose=False) + mark_channels(bids_path, ch_names=ch_names[:5], status="good", verbose=False) tsv_data = _from_tsv(channels_fname) - assert all(status == 'good' for status in tsv_data['status']) + assert all(status == "good" for status in tsv_data["status"]) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_error_mark_channels(tmp_path): """Test errors when marking channels.""" # Setup: Create a fresh BIDS dataset. - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg', - suffix='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg", suffix="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) # Drop unknown events. 
events = mne.read_events(events_fname) events = events[events[:, 2] != 0] raw = _read_raw_fif(raw_fname, verbose=False) - write_raw_bids(raw, bids_path=bids_path, events=events, - event_id=event_id, verbose=False) + write_raw_bids( + raw, bids_path=bids_path, events=events, event_id=event_id, verbose=False + ) ch_names = raw.ch_names - with pytest.raises(ValueError, match='Setting the status'): - mark_channels(ch_names=ch_names, bids_path=bids_path, - status='test') + with pytest.raises(ValueError, match="Setting the status"): + mark_channels(ch_names=ch_names, bids_path=bids_path, status="test") -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_mark_channels_files(tmp_path): """Test validity of bad channel writing.""" # BV - bids_root = tmp_path / 'bids1' - raw_fname = data_path / 'montage' / 'bv_dig_test.vhdr' + bids_root = tmp_path / "bids1" + raw_fname = data_path / "montage" / "bv_dig_test.vhdr" raw = _read_raw_brainvision(raw_fname) - raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'}) + raw.set_channel_types({"HEOG": "eog", "VEOG": "eog", "ECG": "ecg"}) # inject a bad channel - assert not raw.info['bads'] - injected_bad = ['Fp1'] - raw.info['bads'] = injected_bad + assert not raw.info["bads"] + injected_bad = ["Fp1"] + raw.info["bads"] = injected_bad bids_path = _bids_path.copy().update(root=bids_root) @@ -2346,155 +2611,153 @@ def test_mark_channels_files(tmp_path): write_raw_bids(raw, bids_path, overwrite=True) # mark bad channels that get stored as uV in write_brain_vision - bads = ['CP5', 'CP6'] - mark_channels(bids_path=bids_path, ch_names=bads, status='bad') - raw.info['bads'].extend(bads) + bads = ["CP5", "CP6"] + mark_channels(bids_path=bids_path, ch_names=bads, status="bad") + raw.info["bads"].extend(bads) # the raw data should match if you drop the bads raw_2 = read_raw_bids(bids_path) - raw.drop_channels(raw.info['bads']) - raw_2.drop_channels(raw_2.info['bads']) + raw.drop_channels(raw.info["bads"]) + raw_2.drop_channels(raw_2.info["bads"]) assert_array_almost_equal(raw.get_data(), raw_2.get_data()) # test EDF too - dir_name = 'EDF' - fname = 'test_reduced.edf' - bids_root = tmp_path / 'bids2' + dir_name = "EDF" + fname = "test_reduced.edf" + bids_root = tmp_path / "bids2" bids_path = _bids_path.copy().update(root=bids_root) raw_fname = data_path / dir_name / fname raw = _read_raw_edf(raw_fname) write_raw_bids(raw, bids_path, overwrite=True) - mark_channels(bids_path=bids_path, ch_names=raw.ch_names[0], - status='bad') + mark_channels(bids_path=bids_path, ch_names=raw.ch_names[0], status="bad") @testing.requires_testing_data def test_write_meg_calibration(_bids_validate, tmp_path): """Test writing of the Elekta/Neuromag fine-calibration file.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" bids_path = _bids_path.copy().update(root=bids_root) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path=bids_path, verbose=False) - fine_cal_fname = data_path / 'SSS' / 'sss_cal_mgh.dat' + fine_cal_fname = data_path / "SSS" / "sss_cal_mgh.dat" # Test passing a filename. 
- write_meg_calibration(calibration=fine_cal_fname, - bids_path=bids_path) + write_meg_calibration(calibration=fine_cal_fname, bids_path=bids_path) _bids_validate(bids_root) # Test passing a dict. calibration = mne.preprocessing.read_fine_calibration(fine_cal_fname) - write_meg_calibration(calibration=calibration, - bids_path=bids_path) + write_meg_calibration(calibration=calibration, bids_path=bids_path) _bids_validate(bids_root) # Test passing in incompatible dict. calibration = mne.preprocessing.read_fine_calibration(fine_cal_fname) - del calibration['locs'] - with pytest.raises(ValueError, match='not .* proper fine-calibration'): - write_meg_calibration(calibration=calibration, - bids_path=bids_path) + del calibration["locs"] + with pytest.raises(ValueError, match="not .* proper fine-calibration"): + write_meg_calibration(calibration=calibration, bids_path=bids_path) # subject not set. bids_path = bids_path.copy().update(root=bids_root, subject=None) - with pytest.raises(ValueError, match='must have root and subject set'): + with pytest.raises(ValueError, match="must have root and subject set"): write_meg_calibration(fine_cal_fname, bids_path) # root not set. - bids_path = bids_path.copy().update(subject='01', root=None) - with pytest.raises(ValueError, match='must have root and subject set'): + bids_path = bids_path.copy().update(subject="01", root=None) + with pytest.raises(ValueError, match="must have root and subject set"): write_meg_calibration(fine_cal_fname, bids_path) # datatype is not 'meg'. - bids_path = bids_path.copy().update(subject='01', root=bids_root, - datatype='eeg') + bids_path = bids_path.copy().update(subject="01", root=bids_root, datatype="eeg") - with pytest.raises(ValueError, match='Can only write .* for MEG'): + with pytest.raises(ValueError, match="Can only write .* for MEG"): write_meg_calibration(fine_cal_fname, bids_path) @testing.requires_testing_data def test_write_meg_crosstalk(_bids_validate, tmp_path): """Test writing of the Elekta/Neuromag crosstalk file.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" bids_path = _bids_path.copy().update(root=bids_root) - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname, verbose=False) write_raw_bids(raw, bids_path=bids_path, verbose=False) - crosstalk_fname = data_path / 'SSS' / 'ct_sparse.fif' + crosstalk_fname = data_path / "SSS" / "ct_sparse.fif" write_meg_crosstalk(fname=crosstalk_fname, bids_path=bids_path) _bids_validate(bids_root) # subject not set. bids_path = bids_path.copy().update(root=bids_root, subject=None) - with pytest.raises(ValueError, match='must have root and subject set'): + with pytest.raises(ValueError, match="must have root and subject set"): write_meg_crosstalk(crosstalk_fname, bids_path) # root not set. - bids_path = bids_path.copy().update(subject='01', root=None) - with pytest.raises(ValueError, match='must have root and subject set'): + bids_path = bids_path.copy().update(subject="01", root=None) + with pytest.raises(ValueError, match="must have root and subject set"): write_meg_crosstalk(crosstalk_fname, bids_path) # datatype is not 'meg'.
- bids_path = bids_path.copy().update(subject='01', root=bids_root, - datatype='eeg') - with pytest.raises(ValueError, match='Can only write .* for MEG'): + bids_path = bids_path.copy().update(subject="01", root=bids_root, datatype="eeg") + with pytest.raises(ValueError, match="Can only write .* for MEG"): write_meg_crosstalk(crosstalk_fname, bids_path) -@pytest.mark.parametrize( - 'bad_segments', - [False, 'add', 'only'] -) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.parametrize("bad_segments", [False, "add", "only"]) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_annotations(_bids_validate, bad_segments, tmp_path): """Test that Annotations are stored as events.""" - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) events = mne.read_events(events_fname) - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } event_desc = dict(zip(event_id.values(), event_id.keys())) raw = _read_raw_fif(raw_fname) annotations = mne.annotations_from_events( - events=events, sfreq=raw.info['sfreq'], event_desc=event_desc, - orig_time=raw.info['meas_date'] + events=events, + sfreq=raw.info["sfreq"], + event_desc=event_desc, + orig_time=raw.info["meas_date"], ) if bad_segments: bad_annots = mne.Annotations( # Try to avoid rounding errors. 
- onset=(annotations.onset[0] + 1 / raw.info['sfreq'] * 600, - annotations.onset[0] + 1 / raw.info['sfreq'] * 3000), - duration=(1 / raw.info['sfreq'] * 750, - 1 / raw.info['sfreq'] * 550), - description=('BAD_segment', 'BAD_segment'), - orig_time=annotations.orig_time) - - if bad_segments == 'add': + onset=( + annotations.onset[0] + 1 / raw.info["sfreq"] * 600, + annotations.onset[0] + 1 / raw.info["sfreq"] * 3000, + ), + duration=(1 / raw.info["sfreq"] * 750, 1 / raw.info["sfreq"] * 550), + description=("BAD_segment", "BAD_segment"), + orig_time=annotations.orig_time, + ) + + if bad_segments == "add": annotations += bad_annots - elif bad_segments == 'only': + elif bad_segments == "only": annotations = bad_annots else: - raise ValueError('Unknown `bad_segments` test parameter passed.') + raise ValueError("Unknown `bad_segments` test parameter passed.") del bad_annots raw.set_annotations(annotations) - write_raw_bids(raw, bids_path, events=None, event_id=None, - overwrite=False) + write_raw_bids(raw, bids_path, events=None, event_id=None, overwrite=False) annotations_read = read_raw_bids(bids_path=bids_path).annotations assert_array_almost_equal(annotations.onset, annotations_read.onset) @@ -2505,48 +2768,52 @@ def test_annotations(_bids_validate, bad_segments, tmp_path): @pytest.mark.parametrize( - 'write_events', [True, False] # whether to pass "events" to write_raw_bids + "write_events", [True, False] # whether to pass "events" to write_raw_bids ) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_annotations_and_events(_bids_validate, tmp_path, write_events): """Test combined writing of Annotations and events.""" - bids_root = tmp_path / 'bids' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' - events_fname = ( - data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw-eve.fif' - ) + bids_root = tmp_path / "bids" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" + events_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" events_tsv_fname = bids_path.copy().update( - suffix='events', - extension='.tsv', + suffix="events", + extension=".tsv", ) - events_json_fname = events_tsv_fname.copy().update(extension='.json') + events_json_fname = events_tsv_fname.copy().update(extension=".json") events = mne.read_events(events_fname) events = events[events[:, 2] != 0] # drop unknown "0" events - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } raw = _read_raw_fif(raw_fname) annotations = mne.Annotations( # Try to avoid rounding errors. 
onset=( - 1 / raw.info['sfreq'] * 600, - 1 / raw.info['sfreq'] * 600, # intentional - 1 / raw.info['sfreq'] * 3000 + 1 / raw.info["sfreq"] * 600, + 1 / raw.info["sfreq"] * 600, # intentional + 1 / raw.info["sfreq"] * 3000, ), duration=( - 1 / raw.info['sfreq'], - 1 / raw.info['sfreq'], - 1 / raw.info['sfreq'] * 200 + 1 / raw.info["sfreq"], + 1 / raw.info["sfreq"], + 1 / raw.info["sfreq"] * 200, ), - description=('BAD_segment', 'EDGE_segment', 'custom'), + description=("BAD_segment", "EDGE_segment", "custom"), ) raw.set_annotations(annotations) # Write annotations while passing event_id # Should raise since annotations descriptions are missing from event_id - with pytest.raises(ValueError, match='The following entries are missing'): + with pytest.raises(ValueError, match="The following entries are missing"): write_raw_bids( raw, bids_path=bids_path, @@ -2556,11 +2823,9 @@ def test_annotations_and_events(_bids_validate, tmp_path, write_events): # Passing a complete mapping should work event_id_with_annots = event_id.copy() - event_id_with_annots.update({ - 'BAD_segment': 9999, - 'EDGE_segment': 10000, - 'custom': 2000 - }) + event_id_with_annots.update( + {"BAD_segment": 9999, "EDGE_segment": 10000, "custom": 2000} + ) write_raw_bids( raw, bids_path=bids_path, @@ -2574,32 +2839,27 @@ def test_annotations_and_events(_bids_validate, tmp_path, write_events): if write_events: n_events_expected = len(events) + len(raw.annotations) - events_json = json.loads( - events_json_fname.fpath.read_text(encoding='utf-8') - ) - assert 'value' in events_json - assert 'sample' in events_json - assert 'trial_type' in events_json + events_json = json.loads(events_json_fname.fpath.read_text(encoding="utf-8")) + assert "value" in events_json + assert "sample" in events_json + assert "trial_type" in events_json else: n_events_expected = len(raw.annotations) - assert len(events_tsv['trial_type']) == n_events_expected + assert len(events_tsv["trial_type"]) == n_events_expected -@pytest.mark.parametrize( - 'drop_undescribed_events', - [True, False] -) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.parametrize("drop_undescribed_events", [True, False]) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_undescribed_events(_bids_validate, drop_undescribed_events, tmp_path): """Test we're raising if event descriptions are missing.""" - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) events = mne.read_events(events_fname) if drop_undescribed_events: @@ -2608,16 +2868,23 @@ def test_undescribed_events(_bids_validate, drop_undescribed_events, tmp_path): events = events[mask] del mask - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } raw = _read_raw_fif(raw_fname) raw.set_annotations(None) # Make sure it's clean. 
- kwargs = dict(raw=raw, bids_path=bids_path, events=events, - event_id=event_id, overwrite=False) + kwargs = dict( + raw=raw, bids_path=bids_path, events=events, event_id=event_id, overwrite=False + ) if not drop_undescribed_events: - with pytest.raises(ValueError, match='No description was specified'): + with pytest.raises(ValueError, match="No description was specified"): write_raw_bids(**kwargs) return else: @@ -2633,18 +2900,17 @@ def test_undescribed_events(_bids_validate, drop_undescribed_events, tmp_path): _bids_validate(bids_root) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_event_storage(tmp_path): """Test we're retaining the original event IDs when storing events.""" - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') - events_tsv_fname = (bids_path.copy() - .update(suffix='events', extension='.tsv')) + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) + events_tsv_fname = bids_path.copy().update(suffix="events", extension=".tsv") events = mne.read_events(events_fname) events = events[events[:, -1] != 0] # Drop unused events @@ -2652,44 +2918,52 @@ def test_event_storage(tmp_path): idx = np.where(events[:, -1] == 1)[0] events[idx, -1] = 123 - event_id = {'Auditory/Left': 123, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + event_id = { + "Auditory/Left": 123, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } raw = _read_raw_fif(raw_fname) - write_raw_bids(raw=raw, bids_path=bids_path, events=events, - event_id=event_id, overwrite=False) + write_raw_bids( + raw=raw, bids_path=bids_path, events=events, event_id=event_id, overwrite=False + ) events_tsv = _from_tsv(events_tsv_fname) - assert set(int(e) for e in events_tsv['value']) == set(event_id.values()) + assert set(int(e) for e in events_tsv["value"]) == set(event_id.values()) @pytest.mark.parametrize( - 'dir_name, fname, reader, datatype, coord_frame', [ - ('EDF', 'test_reduced.edf', _read_raw_edf, 'ieeg', 'mni_tal'), - ('EDF', 'test_reduced.edf', _read_raw_edf, 'ieeg', 'ras'), - ('EDF', 'test_reduced.edf', _read_raw_edf, 'eeg', 'head'), - ('EDF', 'test_reduced.edf', _read_raw_edf, 'eeg', 'mri'), - ('EDF', 'test_reduced.edf', _read_raw_edf, 'eeg', 'unknown'), - ('CTF', 'testdata_ctf.ds', _read_raw_ctf, 'meg', ''), - ('MEG', 'sample/sample_audvis_trunc_raw.fif', _read_raw_fif, 'meg', ''), # noqa - ] + "dir_name, fname, reader, datatype, coord_frame", + [ + ("EDF", "test_reduced.edf", _read_raw_edf, "ieeg", "mni_tal"), + ("EDF", "test_reduced.edf", _read_raw_edf, "ieeg", "ras"), + ("EDF", "test_reduced.edf", _read_raw_edf, "eeg", "head"), + ("EDF", "test_reduced.edf", _read_raw_edf, "eeg", "mri"), + ("EDF", "test_reduced.edf", _read_raw_edf, "eeg", "unknown"), + ("CTF", "testdata_ctf.ds", _read_raw_ctf, "meg", ""), + ("MEG", "sample/sample_audvis_trunc_raw.fif", _read_raw_fif, "meg", ""), # noqa + ], ) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) 
-@pytest.mark.filterwarnings(warning_str['encountered_data_in']) -@pytest.mark.filterwarnings(warning_str['nasion_not_found']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) +@pytest.mark.filterwarnings(warning_str["encountered_data_in"]) +@pytest.mark.filterwarnings(warning_str["nasion_not_found"]) @testing.requires_testing_data def test_coordsystem_json_compliance( - dir_name, fname, reader, datatype, coord_frame, tmp_path): + dir_name, fname, reader, datatype, coord_frame, tmp_path +): """Tests that coordsystem.json contents are written correctly. Tests multiple manufacturer data formats and MEG, EEG, and iEEG. """ - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" raw_fname = data_path / dir_name / fname # the BIDSPath for test datasets to get written to - bids_path = _bids_path.copy().update(root=bids_root, - datatype=datatype) + bids_path = _bids_path.copy().update(root=bids_root, datatype=datatype) raw = reader(raw_fname) @@ -2697,231 +2971,260 @@ def test_coordsystem_json_compliance( # not transform back to "head" as it does for internal consistency landmarks = dict(nasion=[1, 0, 0], lpa=[0, 1, 0], rpa=[0, 0, 1]) - if datatype == 'eeg': - raw.set_channel_types({ch: 'eeg' for ch in raw.ch_names}) - elif datatype == 'ieeg': - raw.set_channel_types({ch: 'seeg' for ch in raw.ch_names}) + if datatype == "eeg": + raw.set_channel_types({ch: "eeg" for ch in raw.ch_names}) + elif datatype == "ieeg": + raw.set_channel_types({ch: "seeg" for ch in raw.ch_names}) - if datatype == 'meg': + if datatype == "meg": montage = None else: # alter some channels manually with electrodes to write ch_names = raw.ch_names elec_locs = np.random.random((len(ch_names), 3)).tolist() ch_pos = dict(zip(ch_names, elec_locs)) - montage = mne.channels.make_dig_montage(ch_pos=ch_pos, - coord_frame=coord_frame, - **landmarks) - if datatype == 'eeg': + montage = mne.channels.make_dig_montage( + ch_pos=ch_pos, coord_frame=coord_frame, **landmarks + ) + if datatype == "eeg": raw.set_montage(montage) montage = None # clean all events for this test - kwargs = dict(raw=raw, bids_path=bids_path, acpc_aligned=True, - montage=montage, overwrite=True, verbose=False) + kwargs = dict( + raw=raw, + bids_path=bids_path, + acpc_aligned=True, + montage=montage, + overwrite=True, + verbose=False, + ) # write to BIDS and then check the coordsystem files bids_output_path = write_raw_bids(**kwargs) - coordsystem_fname = _find_matching_sidecar(bids_output_path, - suffix='coordsystem', - extension='.json') - with open(coordsystem_fname, 'r', encoding='utf-8') as fin: + coordsystem_fname = _find_matching_sidecar( + bids_output_path, suffix="coordsystem", extension=".json" + ) + with open(coordsystem_fname, "r", encoding="utf-8") as fin: coordsystem_json = json.load(fin) # writing twice should work as long as the coordsystem # contents have not changed - kwargs.update(bids_path=bids_path.copy().update(run='02'), - overwrite=False) + kwargs.update(bids_path=bids_path.copy().update(run="02"), overwrite=False) write_raw_bids(**kwargs) - datatype_ = {'meg': 'MEG', 'eeg': 'EEG', 'ieeg': 'iEEG'}[datatype] + datatype_ = {"meg": "MEG", "eeg": "EEG", "ieeg": "iEEG"}[datatype] # if there is a change in the underlying # coordsystem.json file, then an error will occur. 
# upon changing coordsystem contents, and overwrite not True # this will fail new_coordsystem_json = coordsystem_json.copy() - new_coordsystem_json[f'{datatype_}CoordinateSystem'] = 'blah' + new_coordsystem_json[f"{datatype_}CoordinateSystem"] = "blah" _write_json(coordsystem_fname, new_coordsystem_json, overwrite=True) - kwargs.update(bids_path=bids_path.copy().update(run='03')) - with pytest.raises(RuntimeError, - match='Trying to write coordsystem.json, ' - 'but it already exists'): + kwargs.update(bids_path=bids_path.copy().update(run="03")) + with pytest.raises( + RuntimeError, match="Trying to write coordsystem.json, " "but it already exists" + ): write_raw_bids(**kwargs) _write_json(coordsystem_fname, coordsystem_json, overwrite=True) - if datatype != 'meg': - electrodes_fname = _find_matching_sidecar(bids_output_path, - suffix='electrodes', - extension='.tsv') + if datatype != "meg": + electrodes_fname = _find_matching_sidecar( + bids_output_path, suffix="electrodes", extension=".tsv" + ) elecs_tsv = _from_tsv(electrodes_fname) # if there is a change in the underlying # electrodes.tsv file, then an error will occur. # upon changing electrodes contents, and overwrite not True # this will fail new_elecs_tsv = elecs_tsv.copy() - new_elecs_tsv['name'][0] = 'blah' + new_elecs_tsv["name"][0] = "blah" _to_tsv(new_elecs_tsv, electrodes_fname) - kwargs.update(bids_path=bids_path.copy().update(run='04')) + kwargs.update(bids_path=bids_path.copy().update(run="04")) with pytest.raises( - RuntimeError, match='Trying to write electrodes.tsv, ' - 'but it already exists'): + RuntimeError, + match="Trying to write electrodes.tsv, " "but it already exists", + ): write_raw_bids(**kwargs) # perform checks on the coordsystem.json file itself - if datatype == 'eeg' and coord_frame == 'head': - assert coordsystem_json['EEGCoordinateSystem'] == 'CapTrak' - assert coordsystem_json['EEGCoordinateSystemDescription'] == \ - BIDS_COORD_FRAME_DESCRIPTIONS['captrak'] - elif datatype == 'eeg' and coord_frame == 'unknown': - assert coordsystem_json['EEGCoordinateSystem'] == 'CapTrak' - assert coordsystem_json['EEGCoordinateSystemDescription'] == \ - BIDS_COORD_FRAME_DESCRIPTIONS['captrak'] - elif datatype == 'ieeg' and coord_frame == 'mni_tal': - assert 'space-fsaverage' in str(coordsystem_fname) - assert coordsystem_json['iEEGCoordinateSystem'] == 'fsaverage' - assert coordsystem_json['iEEGCoordinateSystemDescription'] == \ - BIDS_COORD_FRAME_DESCRIPTIONS['fsaverage'] - elif datatype == 'ieeg' and coord_frame == 'mri': - assert 'space-ACPC' in str(coordsystem_fname) - assert coordsystem_json['iEEGCoordinateSystem'] == 'ACPC' - assert coordsystem_json['iEEGCoordinateSystemDescription'] == \ - BIDS_COORD_FRAME_DESCRIPTIONS['acpc'] - elif datatype == 'ieeg' and coord_frame == 'unknown': - assert coordsystem_json['iEEGCoordinateSystem'] == 'Other' - assert coordsystem_json['iEEGCoordinateSystemDescription'] == 'n/a' - elif datatype == 'meg' and dir_name == 'CTF': - assert coordsystem_json['MEGCoordinateSystem'] == 'CTF' - assert coordsystem_json['MEGCoordinateSystemDescription'] == \ - BIDS_COORD_FRAME_DESCRIPTIONS['ctf'] - elif datatype == 'meg' and dir_name == 'MEG': - assert coordsystem_json['MEGCoordinateSystem'] == 'ElektaNeuromag' - assert coordsystem_json['MEGCoordinateSystemDescription'] == \ - BIDS_COORD_FRAME_DESCRIPTIONS['elektaneuromag'] + if datatype == "eeg" and coord_frame == "head": + assert coordsystem_json["EEGCoordinateSystem"] == "CapTrak" + assert ( + coordsystem_json["EEGCoordinateSystemDescription"] + ==
BIDS_COORD_FRAME_DESCRIPTIONS["captrak"] + ) + elif datatype == "eeg" and coord_frame == "unknown": + assert coordsystem_json["EEGCoordinateSystem"] == "CapTrak" + assert ( + coordsystem_json["EEGCoordinateSystemDescription"] + == BIDS_COORD_FRAME_DESCRIPTIONS["captrak"] + ) + elif datatype == "ieeg" and coord_frame == "mni_tal": + assert "space-fsaverage" in str(coordsystem_fname) + assert coordsystem_json["iEEGCoordinateSystem"] == "fsaverage" + assert ( + coordsystem_json["iEEGCoordinateSystemDescription"] + == BIDS_COORD_FRAME_DESCRIPTIONS["fsaverage"] + ) + elif datatype == "ieeg" and coord_frame == "mri": + assert "space-ACPC" in str(coordsystem_fname) + assert coordsystem_json["iEEGCoordinateSystem"] == "ACPC" + assert ( + coordsystem_json["iEEGCoordinateSystemDescription"] + == BIDS_COORD_FRAME_DESCRIPTIONS["acpc"] + ) + elif datatype == "ieeg" and coord_frame == "unknown": + assert coordsystem_json["iEEGCoordinateSystem"] == "Other" + assert coordsystem_json["iEEGCoordinateSystemDescription"] == "n/a" + elif datatype == "meg" and dir_name == "CTF": + assert coordsystem_json["MEGCoordinateSystem"] == "CTF" + assert ( + coordsystem_json["MEGCoordinateSystemDescription"] + == BIDS_COORD_FRAME_DESCRIPTIONS["ctf"] + ) + elif datatype == "meg" and dir_name == "MEG": + assert coordsystem_json["MEGCoordinateSystem"] == "ElektaNeuromag" + assert ( + coordsystem_json["MEGCoordinateSystemDescription"] + == BIDS_COORD_FRAME_DESCRIPTIONS["elektaneuromag"] + ) @pytest.mark.parametrize( - 'subject, dir_name, fname, reader', [ - ('01', 'EDF', 'test_reduced.edf', _read_raw_edf), - ('02', 'Persyst', 'sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay', _read_raw_persyst), # noqa - ('03', 'NihonKohden', 'MB0400FU.EEG', _read_raw_nihon), - ('emptyroom', 'MEG/sample', - 'sample_audvis_trunc_raw.fif', _read_raw_fif), - ] + "subject, dir_name, fname, reader", + [ + ("01", "EDF", "test_reduced.edf", _read_raw_edf), + ( + "02", + "Persyst", + "sub-pt1_ses-02_task-monitor_acq-ecog_run-01_clip2.lay", + _read_raw_persyst, + ), # noqa + ("03", "NihonKohden", "MB0400FU.EEG", _read_raw_nihon), + ("emptyroom", "MEG/sample", "sample_audvis_trunc_raw.fif", _read_raw_fif), + ], ) @pytest.mark.filterwarnings( - warning_str['encountered_data_in'], - warning_str['channel_unit_changed'], - warning_str['edf_warning'], - warning_str['brainvision_unit'] + warning_str["encountered_data_in"], + warning_str["channel_unit_changed"], + warning_str["edf_warning"], + warning_str["brainvision_unit"], ) @testing.requires_testing_data def test_anonymize(subject, dir_name, fname, reader, tmp_path, _bids_validate): """Test writing anonymized EDF data.""" raw_fname = op.join(data_path, dir_name, fname) - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" raw = reader(raw_fname) - raw_date = raw.info['meas_date'].strftime('%Y%m%d') + raw_date = raw.info["meas_date"].strftime("%Y%m%d") bids_path = BIDSPath(subject=subject, root=bids_root) # handle different edge cases - if subject == 'emptyroom': - bids_path.update(task='noise', session=raw_date, - suffix='meg', datatype='meg') + if subject == "emptyroom": + bids_path.update(task="noise", session=raw_date, suffix="meg", datatype="meg") else: - bids_path.update(task='task', suffix='eeg', datatype='eeg') + bids_path.update(task="task", suffix="eeg", datatype="eeg") daysback_min, daysback_max = get_anonymization_daysback(raw) anonymize = dict(daysback=daysback_min + 1) orig_bids_path = bids_path.copy() - bids_path = \ - write_raw_bids(raw, bids_path, overwrite=True, - 
anonymize=anonymize, verbose=False) + bids_path = write_raw_bids( + raw, bids_path, overwrite=True, anonymize=anonymize, verbose=False + ) # emptyroom recordings' session should match the recording date - if subject == 'emptyroom': - assert ( - bids_path.session == - (raw.info['meas_date'] - - timedelta(days=anonymize['daysback'])).strftime('%Y%m%d') - ) + if subject == "emptyroom": + assert bids_path.session == ( + raw.info["meas_date"] - timedelta(days=anonymize["daysback"]) + ).strftime("%Y%m%d") raw2 = read_raw_bids(bids_path, verbose=False) - if raw_fname.endswith('.edf'): + if raw_fname.endswith(".edf"): _raw = reader(bids_path) - assert _raw.info['meas_date'].year == 1985 - assert _raw.info['meas_date'].month == 1 - assert _raw.info['meas_date'].day == 1 - assert raw2.info['meas_date'].year < 1925 + assert _raw.info["meas_date"].year == 1985 + assert _raw.info["meas_date"].month == 1 + assert _raw.info["meas_date"].day == 1 + assert raw2.info["meas_date"].year < 1925 # write without source - scans_fname = BIDSPath(subject=bids_path.subject, - session=bids_path.session, - suffix='scans', extension='.tsv', - root=bids_path.root) - anonymize['keep_source'] = False - bids_path = \ - write_raw_bids(raw, orig_bids_path, overwrite=True, - anonymize=anonymize, verbose=False) + scans_fname = BIDSPath( + subject=bids_path.subject, + session=bids_path.session, + suffix="scans", + extension=".tsv", + root=bids_path.root, + ) + anonymize["keep_source"] = False + bids_path = write_raw_bids( + raw, orig_bids_path, overwrite=True, anonymize=anonymize, verbose=False + ) scans_tsv = _from_tsv(scans_fname) - assert 'source' not in scans_tsv.keys() + assert "source" not in scans_tsv.keys() # Write with source this time and get the scans tsv bids_path = write_raw_bids( - raw, orig_bids_path, overwrite=True, + raw, + orig_bids_path, + overwrite=True, anonymize=dict(daysback=daysback_min, keep_source=True), - verbose=False) - scans_fname = BIDSPath(subject=bids_path.subject, - session=bids_path.session, - suffix='scans', extension='.tsv', - root=bids_path.root) + verbose=False, + ) + scans_fname = BIDSPath( + subject=bids_path.subject, + session=bids_path.session, + suffix="scans", + extension=".tsv", + root=bids_path.root, + ) scans_tsv = _from_tsv(scans_fname) - assert scans_tsv['source'] == [ - Path(f).name for f in raw.filenames - ] + assert scans_tsv["source"] == [Path(f).name for f in raw.filenames] _bids_validate(bids_path.root) # update the scans sidecar JSON with new information - scans_json_fpath = scans_fname.copy().update(extension='.json') - with open(scans_json_fpath, 'r') as fin: + scans_json_fpath = scans_fname.copy().update(extension=".json") + with open(scans_json_fpath, "r") as fin: scans_json = json.load(fin) - scans_json['test'] = 'New stuff...' + scans_json["test"] = "New stuff..."
update_sidecar_json(scans_json_fpath, scans_json) # write again and make sure scans json was not altered bids_path = write_raw_bids( - raw, orig_bids_path, overwrite=True, + raw, + orig_bids_path, + overwrite=True, anonymize=dict(daysback=daysback_min, keep_source=True), - verbose=False) - with open(scans_json_fpath, 'r') as fin: + verbose=False, + ) + with open(scans_json_fpath, "r") as fin: scans_json = json.load(fin) - assert 'test' in scans_json + assert "test" in scans_json -@pytest.mark.parametrize('dir_name, fname', [ - ['EDF', 'test_reduced.edf'], - ['BDF', 'test_bdf_stim_channel.bdf'] -]) +@pytest.mark.parametrize( + "dir_name, fname", + [["EDF", "test_reduced.edf"], ["BDF", "test_bdf_stim_channel.bdf"]], +) @testing.requires_testing_data def test_write_uppercase_edfbdf(tmp_path, dir_name, fname): """Test that writing an uppercase EDF/BDF extension results in lowercase.""" - subject = 'cap' - if dir_name == 'EDF': + subject = "cap" + if dir_name == "EDF": read_func = _read_raw_edf - elif dir_name == 'BDF': + elif dir_name == "BDF": read_func = _read_raw_bdf raw_fname = op.join(data_path, dir_name, fname) # capitalize the file extension - lower_case_ext = f'.{dir_name.lower()}' - upper_case_ext = f'.{dir_name.upper()}' - new_basename = (op.basename(raw_fname).split(lower_case_ext)[0] + - upper_case_ext) + lower_case_ext = f".{dir_name.lower()}" + upper_case_ext = f".{dir_name.upper()}" + new_basename = op.basename(raw_fname).split(lower_case_ext)[0] + upper_case_ext new_raw_fname = tmp_path / new_basename sh.copyfile(raw_fname, new_raw_fname) raw_fname = new_raw_fname.as_posix() # now read in the file and write to BIDS - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" raw = read_func(raw_fname) bids_path = BIDSPath(subject=subject, task=task, root=bids_root) bids_path = write_raw_bids(raw, bids_path, overwrite=True, verbose=False) @@ -2930,23 +3233,25 @@ def test_write_uppercase_edfbdf(tmp_path, dir_name, fname): assert bids_path.extension == lower_case_ext -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_sidecar_encoding(_bids_validate, tmp_path): """Test we're properly encoding text as UTF8.""" - bids_root = tmp_path / 'bids1' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') - events_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw-eve.fif') + bids_root = tmp_path / "bids1" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") + events_fname = op.join( + data_path, "MEG", "sample", "sample_audvis_trunc_raw-eve.fif" + ) raw = _read_raw_fif(raw_fname) events = mne.read_events(events_fname) - event_desc = {1: 'döner', 2: 'bøfsandwich'} + event_desc = {1: "döner", 2: "bøfsandwich"} annotations = mne.annotations_from_events( - events=events, sfreq=raw.info['sfreq'], event_desc=event_desc, - orig_time=raw.info['meas_date'] + events=events, + sfreq=raw.info["sfreq"], + event_desc=event_desc, + orig_time=raw.info["meas_date"], ) raw.set_annotations(annotations) @@ -2954,46 +3259,44 @@ def test_sidecar_encoding(_bids_validate, tmp_path): _bids_validate(bids_root) # TSV files should be written with a BOM - for tsv_file in bids_path.root.rglob('*.tsv'): - with open(tsv_file, 'r', encoding='utf-8') as f: + for tsv_file in
bids_path.root.rglob("*.tsv"): + with open(tsv_file, "r", encoding="utf-8") as f: x = f.read() - assert x[0] == codecs.BOM_UTF8.decode('utf-8') + assert x[0] == codecs.BOM_UTF8.decode("utf-8") # Readme should be written with a BOM - with open(bids_path.root / 'README', 'r', encoding='utf-8') as f: + with open(bids_path.root / "README", "r", encoding="utf-8") as f: x = f.read() - assert x[0] == codecs.BOM_UTF8.decode('utf-8') + assert x[0] == codecs.BOM_UTF8.decode("utf-8") # JSON files should be written without a BOM - for json_file in bids_path.root.rglob('*.json'): - with open(json_file, 'r', encoding='utf-8') as f: + for json_file in bids_path.root.rglob("*.json"): + with open(json_file, "r", encoding="utf-8") as f: x = f.read() - assert x[0] != codecs.BOM_UTF8.decode('utf-8') + assert x[0] != codecs.BOM_UTF8.decode("utf-8") # Unicode event names should be written correctly - events_tsv_fname = (bids_path.copy() - .update(suffix='events', extension='.tsv') - .match()[0]) - with open(str(events_tsv_fname), 'r', encoding='utf-8-sig') as f: + events_tsv_fname = ( + bids_path.copy().update(suffix="events", extension=".tsv").match()[0] + ) + with open(str(events_tsv_fname), "r", encoding="utf-8-sig") as f: x = f.read() - assert 'döner' in x - assert 'bøfsandwich' in x + assert "döner" in x + assert "bøfsandwich" in x # Read back the data raw_read = read_raw_bids(bids_path) - assert_array_equal(raw.annotations.description, - raw_read.annotations.description) + assert_array_equal(raw.annotations.description, raw_read.annotations.description) -@requires_version('pybv', PYBV_VERSION) -@pytest.mark.parametrize( - 'dir_name, format, fname, reader', test_converteeg_data) +@requires_version("pybv", PYBV_VERSION) +@pytest.mark.parametrize("dir_name, format, fname, reader", test_converteeg_data) @pytest.mark.filterwarnings( - warning_str['channel_unit_changed'], - warning_str['edfblocks'], - warning_str['cnt_warning1'], - warning_str['cnt_warning2'], - warning_str['no_hand'], + warning_str["channel_unit_changed"], + warning_str["edfblocks"], + warning_str["cnt_warning1"], + warning_str["cnt_warning2"], + warning_str["no_hand"], ) @testing.requires_testing_data def test_convert_eeg_formats(dir_name, format, fname, reader, tmp_path): @@ -3002,57 +3305,64 @@ def test_convert_eeg_formats(dir_name, format, fname, reader, tmp_path): raw_fname = data_path / dir_name / fname # the BIDSPath for test datasets to get written to - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg') + bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg") raw = reader(raw_fname) # drop 'misc' type channels when exporting raw = raw.pick_types(eeg=True) - kwargs = dict(raw=raw, format=format, bids_path=bids_path, overwrite=True, - verbose=False) + kwargs = dict( + raw=raw, format=format, bids_path=bids_path, overwrite=True, verbose=False + ) # test formatting to BrainVision, EDF, or auto (BrainVision) - if format in ['BrainVision', 'auto']: - if dir_name == 'NihonKohden': - with pytest.warns(RuntimeWarning, - match='Encountered data in "short" format'): + if format in ["BrainVision", "auto"]: + if dir_name == "NihonKohden": + with pytest.warns( + RuntimeWarning, match='Encountered data in "short" format' + ): bids_output_path = write_raw_bids(**kwargs) - elif dir_name == 'CNT': - with pytest.warns(RuntimeWarning, - match='Encountered data in "int" format. ' - 'Converting to float32.'): + elif dir_name == "CNT": + with pytest.warns( + RuntimeWarning, + match='Encountered data in "int" format. 
' "Converting to float32.", + ): bids_output_path = write_raw_bids(**kwargs) - elif dir_name == 'curry': - with pytest.warns(RuntimeWarning, - match='Encountered data in "int" format. ' - 'Converting to float32.'): + elif dir_name == "curry": + with pytest.warns( + RuntimeWarning, + match='Encountered data in "int" format. ' "Converting to float32.", + ): bids_output_path = write_raw_bids(**kwargs) else: - with pytest.warns(RuntimeWarning, - match='Encountered data in "double" format'): + with pytest.warns( + RuntimeWarning, match='Encountered data in "double" format' + ): bids_output_path = write_raw_bids(**kwargs) else: - with pytest.warns(RuntimeWarning, - match='Converting data files to EDF format'): + with pytest.warns(RuntimeWarning, match="Converting data files to EDF format"): bids_output_path = write_raw_bids(**kwargs) # channel units should stay the same raw2 = read_raw_bids(bids_output_path) - assert all([ch1['unit'] == ch2['unit'] for ch1, ch2 in - zip(raw.info['chs'], raw2.info['chs'])]) - assert raw2.info['chs'][0]['unit'] == FIFF.FIFF_UNIT_V + assert all( + [ + ch1["unit"] == ch2["unit"] + for ch1, ch2 in zip(raw.info["chs"], raw2.info["chs"]) + ] + ) + assert raw2.info["chs"][0]["unit"] == FIFF.FIFF_UNIT_V # load channels.tsv; the unit should be Volts - channels_fname = bids_output_path.copy().update( - suffix='channels', extension='.tsv') + channels_fname = bids_output_path.copy().update(suffix="channels", extension=".tsv") channels_tsv = _from_tsv(channels_fname) - assert channels_tsv['units'][0] == 'V' + assert channels_tsv["units"][0] == "V" - if format == 'BrainVision': - assert raw2.filenames[0].endswith('.eeg') - assert bids_output_path.extension == '.vhdr' - elif format == 'EDF': - assert raw2.filenames[0].endswith('.edf') - assert bids_output_path.extension == '.edf' + if format == "BrainVision": + assert raw2.filenames[0].endswith(".eeg") + assert bids_output_path.extension == ".vhdr" + elif format == "EDF": + assert raw2.filenames[0].endswith(".edf") + assert bids_output_path.extension == ".edf" orig_len = len(raw) assert_allclose(raw.times, raw2.times[:orig_len], atol=1e-5, rtol=0) @@ -3061,29 +3371,26 @@ def test_convert_eeg_formats(dir_name, format, fname, reader, tmp_path): # writing to EDF is not 100% lossless, as the resolution is determined # by the physical min/max. The precision is to 0.09 uV. 
- assert_array_almost_equal( - raw.get_data(), raw2.get_data()[:, :orig_len], decimal=6) + assert_array_almost_equal(raw.get_data(), raw2.get_data()[:, :orig_len], decimal=6) -@requires_version('pybv', PYBV_VERSION) -@pytest.mark.parametrize( - 'dir_name, format, fname, reader', test_converteeg_data) +@requires_version("pybv", PYBV_VERSION) +@pytest.mark.parametrize("dir_name, format, fname, reader", test_converteeg_data) @pytest.mark.filterwarnings( - warning_str['channel_unit_changed'], - warning_str['edfblocks'], - warning_str['cnt_warning1'], - warning_str['cnt_warning2'], - warning_str['no_hand'], + warning_str["channel_unit_changed"], + warning_str["edfblocks"], + warning_str["cnt_warning1"], + warning_str["cnt_warning2"], + warning_str["no_hand"], ) @testing.requires_testing_data -def test_format_conversion_overwrite(dir_name, format, fname, reader, - tmp_path): +def test_format_conversion_overwrite(dir_name, format, fname, reader, tmp_path): """Test that overwrite works when format is passed to write_raw_bids.""" bids_root = tmp_path / format raw_fname = data_path / dir_name / fname # the BIDSPath for test datasets to get written to - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg') + bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg") raw = reader(raw_fname) # drop 'misc' type channels when exporting @@ -3093,7 +3400,7 @@ def test_format_conversion_overwrite(dir_name, format, fname, reader, with warnings.catch_warnings(): # ignore all warnings for this case to remove verbosity # this unit test is not meant to test for warnings - warnings.filterwarnings('ignore') + warnings.filterwarnings("ignore") # writing with the 'format' parameter should always work # if overwrite is True @@ -3101,36 +3408,33 @@ def test_format_conversion_overwrite(dir_name, format, fname, reader, write_raw_bids(**kwargs, overwrite=True) -@pytest.mark.parametrize( - 'dir_name, format, fname, reader', test_converteeg_data) +@pytest.mark.parametrize("dir_name, format, fname, reader", test_converteeg_data) @pytest.mark.filterwarnings( - warning_str['channel_unit_changed'], - warning_str['cnt_warning1'], - warning_str['cnt_warning2'], - warning_str['no_hand'], + warning_str["channel_unit_changed"], + warning_str["cnt_warning1"], + warning_str["cnt_warning2"], + warning_str["no_hand"], ) @testing.requires_testing_data def test_error_write_meg_as_eeg(dir_name, format, fname, reader, tmp_path): """Test error writing as BrainVision EEG data for MEG.""" - bids_root = tmp_path / 'bids1' + bids_root = tmp_path / "bids1" raw_fname = data_path / dir_name / fname - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg', - extension='.vhdr') + bids_path = _bids_path.copy().update( + root=bids_root, datatype="eeg", extension=".vhdr" + ) raw = reader(raw_fname) - kwargs = dict(raw=raw, format='auto', - bids_path=bids_path.update(datatype='meg')) + kwargs = dict(raw=raw, format="auto", bids_path=bids_path.update(datatype="meg")) # if we accidentally add MEG channels, then an error will occur - raw.set_channel_types({raw.info['ch_names'][0]: 'mag'}) - with pytest.raises(ValueError, match='Got file extension .*' - 'for MEG data'): + raw.set_channel_types({raw.info["ch_names"][0]: "mag"}) + with pytest.raises(ValueError, match="Got file extension .*" "for MEG data"): write_raw_bids(**kwargs) -@pytest.mark.parametrize( - 'dir_name, format, fname, reader', test_convertmeg_data) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.parametrize("dir_name, 
format, fname, reader", test_convertmeg_data) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_convert_meg_formats(dir_name, format, fname, reader, tmp_path): """Test conversion of MEG manufacturer format to FIF.""" @@ -3138,11 +3442,12 @@ def test_convert_meg_formats(dir_name, format, fname, reader, tmp_path): raw_fname = data_path / dir_name / fname # the BIDSPath for test datasets to get written to - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") raw = reader(raw_fname) - kwargs = dict(raw=raw, format=format, bids_path=bids_path, overwrite=True, - verbose=False) + kwargs = dict( + raw=raw, format=format, bids_path=bids_path, overwrite=True, verbose=False + ) # test formatting to FIF, or auto (FIF) bids_output_path = write_raw_bids(**kwargs) @@ -3150,82 +3455,86 @@ def test_convert_meg_formats(dir_name, format, fname, reader, tmp_path): # channel units should stay the same raw2 = read_raw_bids(bids_output_path) - if format == 'FIF': - assert raw2.filenames[0].endswith('.fif') - assert bids_output_path.extension == '.fif' + if format == "FIF": + assert raw2.filenames[0].endswith(".fif") + assert bids_output_path.extension == ".fif" orig_len = len(raw) assert_allclose(raw.times, raw2.times[:orig_len], atol=1e-5, rtol=0) assert_array_equal(raw.ch_names, raw2.ch_names) assert raw.get_channel_types() == raw2.get_channel_types() - assert_array_almost_equal( - raw.get_data(), raw2.get_data()[:, :orig_len], decimal=3) + assert_array_almost_equal(raw.get_data(), raw2.get_data()[:, :orig_len], decimal=3) -@pytest.mark.parametrize('dir_name, fname, reader', test_convert_data) +@pytest.mark.parametrize("dir_name, fname, reader", test_convert_data) @pytest.mark.filterwarnings( - warning_str['channel_unit_changed'], - warning_str['cnt_warning1'], - warning_str['cnt_warning2'], - warning_str['cnt_warning3'], - warning_str['no_hand'], + warning_str["channel_unit_changed"], + warning_str["cnt_warning1"], + warning_str["cnt_warning2"], + warning_str["cnt_warning3"], + warning_str["no_hand"], ) @testing.requires_testing_data def test_convert_raw_errors(dir_name, fname, reader, tmp_path): """Test errors when converting raw file formats.""" - bids_root = tmp_path / 'bids_1' + bids_root = tmp_path / "bids_1" raw_fname = data_path / dir_name / fname # the BIDSPath for test datasets to get written to - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg') + bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg") # test conversion to BrainVision/FIF raw = reader(raw_fname) kwargs = dict(raw=raw, bids_path=bids_path, overwrite=True) # only accepted keywords will work for the 'format' parameter - with pytest.raises(ValueError, match='The input "format" .* is ' - 'not an accepted input format for ' - '`write_raw_bids`'): - kwargs['format'] = 'blah' + with pytest.raises( + ValueError, + match='The input "format" .* is ' + "not an accepted input format for " + "`write_raw_bids`", + ): + kwargs["format"] = "blah" write_raw_bids(**kwargs) # write should fail when trying to convert to wrong data format for # the datatype inside the file (e.g. 
EEG -> 'FIF' or MEG -> 'BrainVision') - with pytest.raises(ValueError, match='The input "format" .* is not an ' - 'accepted input format for ' - '.* datatype.'): - if dir_name == 'CTF': - new_format = 'BrainVision' + with pytest.raises( + ValueError, + match='The input "format" .* is not an ' + "accepted input format for " + ".* datatype.", + ): + if dir_name == "CTF": + new_format = "BrainVision" else: - new_format = 'FIF' - kwargs['format'] = new_format + new_format = "FIF" + kwargs["format"] = new_format write_raw_bids(**kwargs) @testing.requires_testing_data def test_write_fif_triux(tmp_path): """Test writing Triux files.""" - triux_path = op.join(data_path, 'SSS', 'TRIUX') - tri_fname = op.join(triux_path, 'triux_bmlhus_erm_raw.fif') + triux_path = op.join(data_path, "SSS", "TRIUX") + tri_fname = op.join(triux_path, "triux_bmlhus_erm_raw.fif") raw = mne.io.read_raw_fif(tri_fname) bids_path = BIDSPath( - subject="01", task="task", session="01", run="01", datatype="meg", - root=tmp_path + subject="01", task="task", session="01", run="01", datatype="meg", root=tmp_path ) write_raw_bids(raw, bids_path=bids_path, overwrite=True) -@pytest.mark.filterwarnings(warning_str['nasion_not_found']) -@pytest.mark.parametrize('datatype', ['eeg', 'ieeg']) +@pytest.mark.filterwarnings(warning_str["nasion_not_found"]) +@pytest.mark.parametrize("datatype", ["eeg", "ieeg"]) @testing.requires_testing_data def test_write_extension_case_insensitive(_bids_validate, tmp_path, datatype): """Test writing files is case insensitive.""" - dir_name, fname, reader = 'EDF', 'test_reduced.edf', _read_raw_edf + dir_name, fname, reader = "EDF", "test_reduced.edf", _read_raw_edf - bids_root = tmp_path / 'bids1' - source_path = Path(bids_root) / 'sourcedata' + bids_root = tmp_path / "bids1" + source_path = Path(bids_root) / "sourcedata" dir_path = data_path / dir_name sh.copytree(dir_path, source_path) dir_path = source_path @@ -3241,44 +3550,43 @@ def test_write_extension_case_insensitive(_bids_validate, tmp_path, datatype): # the BIDSPath for test datasets to get written to raw = reader(new_raw_fname) - bids_path = _bids_path.copy().update(root=bids_root, datatype='eeg') + bids_path = _bids_path.copy().update(root=bids_root, datatype="eeg") write_raw_bids(raw, bids_path) - raw.set_channel_types({raw.ch_names[i]: 'ecog' - for i in mne.pick_types(raw.info, eeg=True)}) - bids_path = _bids_path.copy().update(root=bids_root, datatype='ieeg') + raw.set_channel_types( + {raw.ch_names[i]: "ecog" for i in mne.pick_types(raw.info, eeg=True)} + ) + bids_path = _bids_path.copy().update(root=bids_root, datatype="ieeg") write_raw_bids(raw, bids_path) -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) @testing.requires_testing_data def test_symlink(tmp_path): """Test creation of symbolic links.""" - raw_trunc_path = (data_path / 'MEG' / 'sample' / - 'sample_audvis_trunc_raw.fif') + raw_trunc_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" # in case there are symlinks in the path, we need to .resolve() for later raw_trunc_path = raw_trunc_path.resolve(strict=True) raw = _read_raw_fif(raw_trunc_path) - root = tmp_path / 'symlink' - bids_path = _bids_path.copy().update(root=root, datatype='meg') + root = tmp_path / "symlink" + bids_path = _bids_path.copy().update(root=root, datatype="meg") kwargs = dict(raw=raw, bids_path=bids_path, symlink=True) # We currently don't support windows - if sys.platform in ('win32', 'cygwin'): - with 
pytest.raises(NotImplementedError, match='not supported'): + if sys.platform in ("win32", "cygwin"): + with pytest.raises(NotImplementedError, match="not supported"): write_raw_bids(**kwargs) return # Symlinks & anonymization don't go together - with pytest.raises(ValueError, match='Cannot create symlinks'): + with pytest.raises(ValueError, match="Cannot create symlinks"): write_raw_bids(anonymize=dict(daysback=123), **kwargs) # We currently only support FIFF - raw_eeglab_path = data_path / 'EEGLAB' / 'test_raw.set' + raw_eeglab_path = data_path / "EEGLAB" / "test_raw.set" raw_eeglab = _read_raw_eeglab(raw_eeglab_path) - bids_path_eeglab = _bids_path.copy().update(root=root, datatype='eeg') - with pytest.raises(NotImplementedError, match='only.*for FIFF'): - write_raw_bids(raw=raw_eeglab, bids_path=bids_path_eeglab, - symlink=True) + bids_path_eeglab = _bids_path.copy().update(root=root, datatype="eeg") + with pytest.raises(NotImplementedError, match="only.*for FIFF"): + write_raw_bids(raw=raw_eeglab, bids_path=bids_path_eeglab, symlink=True) p = write_raw_bids(raw=raw, bids_path=bids_path, symlink=True) assert p.fpath.is_symlink() @@ -3287,56 +3595,62 @@ def test_symlink(tmp_path): # test with split files # prepare the split files - split_raw_path = tmp_path / 'raw' / 'sample_audivis_raw.fif' + split_raw_path = tmp_path / "raw" / "sample_audivis_raw.fif" split_raw_path.parent.mkdir() - raw.save(split_raw_path, split_size='10MB', split_naming='neuromag') + raw.save(split_raw_path, split_size="10MB", split_naming="neuromag") raw = _read_raw_fif(split_raw_path) assert len(raw.filenames) == 2 # now actually test the I/O roundtrip - root = tmp_path / 'symlink-split' - bids_path = _bids_path.copy().update(root=root, datatype='meg') + root = tmp_path / "symlink-split" + bids_path = _bids_path.copy().update(root=root, datatype="meg") p = write_raw_bids(raw=raw, bids_path=bids_path, symlink=True) raw = read_raw_bids(p) assert len(raw.filenames) == 2 -@pytest.mark.filterwarnings(warning_str['channel_unit_changed']) -@pytest.mark.parametrize('empty_room_dtype', ['BIDSPath', 'raw']) +@pytest.mark.filterwarnings(warning_str["channel_unit_changed"]) +@pytest.mark.parametrize("empty_room_dtype", ["BIDSPath", "raw"]) @testing.requires_testing_data -def test_write_associated_emptyroom( - _bids_validate, tmp_path, empty_room_dtype -): +def test_write_associated_emptyroom(_bids_validate, tmp_path, empty_room_dtype): """Test functionality of the write_raw_bids conversion for fif.""" - bids_root = tmp_path / 'bids1' - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + bids_root = tmp_path / "bids1" + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname) meas_date = datetime(year=2020, month=1, day=10, tzinfo=timezone.utc) - if empty_room_dtype == 'BIDSPath': + if empty_room_dtype == "BIDSPath": # First write "empty-room" data raw.set_meas_date(meas_date) - bids_path_er = BIDSPath(subject='emptyroom', session='20200110', - task='noise', root=bids_root, datatype='meg', - suffix='meg', extension='.fif') + bids_path_er = BIDSPath( + subject="emptyroom", + session="20200110", + task="noise", + root=bids_root, + datatype="meg", + suffix="meg", + extension=".fif", + ) write_raw_bids(raw, bids_path=bids_path_er) # Now we write experimental data and associate it with the empty-room # recording - bids_path = bids_path_er.copy().update( - subject='01', session=None, task='task' - ) + bids_path = 
bids_path_er.copy().update(subject="01", session=None, task="task") write_raw_bids(raw, bids_path=bids_path, empty_room=bids_path_er) - elif empty_room_dtype == 'raw': + elif empty_room_dtype == "raw": bids_path = _bids_path.copy().update( - subject='01', session='session', task='task', suffix='meg', - extension='.fif', datatype='meg', root=bids_root + subject="01", + session="session", + task="task", + suffix="meg", + extension=".fif", + datatype="meg", + root=bids_root, ) # Should raise if no measurement date was provided raw.set_meas_date(None) - with pytest.raises(ValueError, match='empty-room .* measurement date'): + with pytest.raises(ValueError, match="empty-room .* measurement date"): write_raw_bids(raw, bids_path=bids_path, empty_room=raw) # With a proper measurement date it should work @@ -3346,51 +3660,54 @@ def test_write_associated_emptyroom( _bids_validate(bids_path.root) - meg_json_path = bids_path.copy().update(extension='.json') - with open(meg_json_path, 'r') as fin: + meg_json_path = bids_path.copy().update(extension=".json") + with open(meg_json_path, "r") as fin: meg_json_data = json.load(fin) - assert 'AssociatedEmptyRoom' in meg_json_data - assert (bids_path_er.fpath - .as_posix() # make test work on Windows, too - .endswith(meg_json_data['AssociatedEmptyRoom'])) - assert meg_json_data['AssociatedEmptyRoom'].startswith('/') + assert "AssociatedEmptyRoom" in meg_json_data + assert bids_path_er.fpath.as_posix().endswith( # make test work on Windows, too + meg_json_data["AssociatedEmptyRoom"] + ) + assert meg_json_data["AssociatedEmptyRoom"].startswith("/") def test_preload(_bids_validate, tmp_path): """Test writing custom preloaded raw objects.""" - bids_root = tmp_path / 'bids' + bids_root = tmp_path / "bids" bids_path = _bids_path.copy().update(root=bids_root) - sfreq, n_points = 1024., int(1e6) - info = mne.create_info(['ch1', 'ch2', 'ch3', 'ch4', 'ch5'], sfreq, - ['eeg'] * 5) + sfreq, n_points = 1024.0, int(1e6) + info = mne.create_info(["ch1", "ch2", "ch3", "ch4", "ch5"], sfreq, ["eeg"] * 5) rng = np.random.RandomState(99) raw = mne.io.RawArray(rng.random((5, n_points)) * 1e-6, info) - raw.orig_format = 'single' - raw.info['line_freq'] = 60 + raw.orig_format = "single" + raw.info["line_freq"] = 60 # reject preloaded by default - with pytest.raises(ValueError, match='allow_preload'): + with pytest.raises(ValueError, match="allow_preload"): write_raw_bids(raw, bids_path, verbose=False, overwrite=True) # preloaded raw must specify format - with pytest.raises(ValueError, match='format'): - write_raw_bids(raw, bids_path, allow_preload=True, - verbose=False, overwrite=True) + with pytest.raises(ValueError, match="format"): + write_raw_bids( + raw, bids_path, allow_preload=True, verbose=False, overwrite=True + ) - write_raw_bids(raw, bids_path, allow_preload=True, format='BrainVision', - verbose=False, overwrite=True) + write_raw_bids( + raw, + bids_path, + allow_preload=True, + format="BrainVision", + verbose=False, + overwrite=True, + ) _bids_validate(bids_root) -@pytest.mark.parametrize( - 'dir_name', ('tsv_test', 'json_test') -) +@pytest.mark.parametrize("dir_name", ("tsv_test", "json_test")) @testing.requires_testing_data def test_write_raw_special_paths(tmp_path, dir_name): """Test writing to locations containing strings with special meaning.""" - raw_fname = op.join(data_path, 'MEG', 'sample', - 'sample_audvis_trunc_raw.fif') + raw_fname = op.join(data_path, "MEG", "sample", "sample_audvis_trunc_raw.fif") raw = _read_raw_fif(raw_fname) root = tmp_path / dir_name 
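Before the anonymization hunks below, it is worth spelling out the calling convention that `test_preload` above pins down: an in-memory (preloaded) raw object is only written when `allow_preload=True` and an explicit `format` are both passed. A minimal, self-contained sketch; the subject, task, and root values are illustrative, and converting to BrainVision requires `pybv` to be installed:

```python
import mne
import numpy as np
from mne_bids import BIDSPath, write_raw_bids

# Build a small synthetic EEG recording entirely in memory.
info = mne.create_info(["ch1", "ch2"], sfreq=256.0, ch_types="eeg")
raw = mne.io.RawArray(np.random.randn(2, 2560) * 1e-6, info)
raw.info["line_freq"] = 50  # power line frequency is required by BIDS

bids_path = BIDSPath(subject="01", task="rest", datatype="eeg", root="./bids")
# Preloaded data has no source file to copy, so write_raw_bids refuses it
# unless we opt in and name a target format explicitly.
write_raw_bids(raw, bids_path, allow_preload=True, format="BrainVision")
```
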
@@ -3401,38 +3718,43 @@ def test_write_raw_special_paths(tmp_path, dir_name): @testing.requires_testing_data def test_anonymize_dataset(_bids_validate, tmpdir): """Test creating an anonymized copy of a dataset.""" - pytest.importorskip('nibabel') + pytest.importorskip("nibabel") # Create a non-anonymized dataset - bids_root = tmpdir / 'bids' + bids_root = tmpdir / "bids" bids_path = _bids_path.copy().update( - root=bids_root, subject='testparticipant', extension='.fif', - datatype='meg' + root=bids_root, subject="testparticipant", extension=".fif", datatype="meg" ) bids_path_er = bids_path.copy().update( - subject='emptyroom', task='noise', session='20021203', run=None, - acquisition=None + subject="emptyroom", + task="noise", + session="20021203", + run=None, + acquisition=None, ) bids_path_anat = bids_path.copy().update( - datatype='anat', suffix='T1w', extension='.nii.gz' + datatype="anat", suffix="T1w", extension=".nii.gz" ) - raw_path = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' - raw_er_path = data_path / 'MEG' / 'sample' / 'ernoise_raw.fif' - fine_cal_path = data_path / 'SSS' / 'sss_cal_mgh.dat' - crosstalk_path = data_path / 'SSS' / 'ct_sparse_mgh.fif' - t1w_path = data_path / 'subjects' / 'sample' / 'mri' / 'T1.mgz' + raw_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" + raw_er_path = data_path / "MEG" / "sample" / "ernoise_raw.fif" + fine_cal_path = data_path / "SSS" / "sss_cal_mgh.dat" + crosstalk_path = data_path / "SSS" / "ct_sparse_mgh.fif" + t1w_path = data_path / "subjects" / "sample" / "mri" / "T1.mgz" mri_landmarks = mne.channels.make_dig_montage( lpa=[66.08580, 51.33362, 46.52982], nasion=[41.87363, 32.24694, 74.55314], rpa=[17.23812, 53.08294, 47.01789], - coord_frame='mri_voxel' + coord_frame="mri_voxel", ) - events_path = (data_path / 'MEG' / 'sample' / - 'sample_audvis_trunc_raw-eve.fif') + events_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" event_id = { - 'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32, - 'unknown': 0 + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + "unknown": 0, } raw = _read_raw_fif(raw_path, verbose=False) @@ -3440,219 +3762,197 @@ def test_anonymize_dataset(_bids_validate, tmpdir): write_raw_bids(raw_er, bids_path=bids_path_er) write_raw_bids( - raw, bids_path=bids_path, empty_room=bids_path_er, - events=events_path, event_id=event_id, verbose=False - ) - write_meg_crosstalk( - fname=crosstalk_path, bids_path=bids_path, verbose=False - ) - write_meg_calibration( - calibration=fine_cal_path, bids_path=bids_path, verbose=False + raw, + bids_path=bids_path, + empty_room=bids_path_er, + events=events_path, + event_id=event_id, + verbose=False, ) + write_meg_crosstalk(fname=crosstalk_path, bids_path=bids_path, verbose=False) + write_meg_calibration(calibration=fine_cal_path, bids_path=bids_path, verbose=False) write_anat( - image=t1w_path, bids_path=bids_path_anat, landmarks=mri_landmarks, - verbose=False + image=t1w_path, bids_path=bids_path_anat, landmarks=mri_landmarks, verbose=False ) _bids_validate(bids_root) # Now run the actual anonymization - bids_root_anon = tmpdir / 'bids-anonymized' + bids_root_anon = tmpdir / "bids-anonymized" anonymize_dataset( - bids_root_in=bids_root, - bids_root_out=bids_root_anon, - random_state=42 + bids_root_in=bids_root, bids_root_out=bids_root_anon, random_state=42 ) _bids_validate(bids_root_anon) - meg_dir = bids_root_anon 
/ 'sub-1' / 'ses-01' / 'meg' - assert (meg_dir / - 'sub-1_ses-01_task-testing_acq-01_run-01_meg.fif').exists() - assert (meg_dir / 'sub-1_ses-01_acq-crosstalk_meg.fif').exists() - assert (meg_dir / 'sub-1_ses-01_acq-calibration_meg.dat').exists() - assert (bids_root_anon / 'sub-1' / 'ses-01' / 'anat' / - 'sub-1_ses-01_acq-01_T1w.nii.gz').exists() - assert (bids_root_anon / 'sub-emptyroom' / 'ses-19221211' / 'meg' / - 'sub-emptyroom_ses-19221211_task-noise_meg.fif').exists() - - events_tsv_orig_bp = bids_path.copy().update( - suffix='events', extension='.tsv' - ) + meg_dir = bids_root_anon / "sub-1" / "ses-01" / "meg" + assert (meg_dir / "sub-1_ses-01_task-testing_acq-01_run-01_meg.fif").exists() + assert (meg_dir / "sub-1_ses-01_acq-crosstalk_meg.fif").exists() + assert (meg_dir / "sub-1_ses-01_acq-calibration_meg.dat").exists() + assert ( + bids_root_anon / "sub-1" / "ses-01" / "anat" / "sub-1_ses-01_acq-01_T1w.nii.gz" + ).exists() + assert ( + bids_root_anon + / "sub-emptyroom" + / "ses-19221211" + / "meg" + / "sub-emptyroom_ses-19221211_task-noise_meg.fif" + ).exists() + + events_tsv_orig_bp = bids_path.copy().update(suffix="events", extension=".tsv") events_tsv_anonymized_bp = events_tsv_orig_bp.copy().update( - subject='1', root=bids_root_anon + subject="1", root=bids_root_anon ) events_tsv_orig = _from_tsv(events_tsv_orig_bp) events_tsv_anonymized = _from_tsv(events_tsv_anonymized_bp) assert events_tsv_orig == events_tsv_anonymized # Explicitly specify multiple data types - bids_root_anon = tmpdir / 'bids-anonymized-1' + bids_root_anon = tmpdir / "bids-anonymized-1" anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes=['meg', 'anat'], - random_state=42 + datatypes=["meg", "anat"], + random_state=42, ) _bids_validate(bids_root_anon) - assert (bids_root_anon / 'sub-1' / 'ses-01' / 'meg').exists() - assert (bids_root_anon / 'sub-1' / 'ses-01' / 'anat').exists() - assert (bids_root_anon / 'sub-emptyroom').exists() + assert (bids_root_anon / "sub-1" / "ses-01" / "meg").exists() + assert (bids_root_anon / "sub-1" / "ses-01" / "anat").exists() + assert (bids_root_anon / "sub-emptyroom").exists() # One data type, daysback, subject mapping - bids_root_anon = tmpdir / 'bids-anonymized-2' + bids_root_anon = tmpdir / "bids-anonymized-2" anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, daysback=10, - datatypes='meg', - subject_mapping={ - 'testparticipant': '123', - 'emptyroom': 'emptyroom' - } + datatypes="meg", + subject_mapping={"testparticipant": "123", "emptyroom": "emptyroom"}, ) _bids_validate(bids_root_anon) - assert (bids_root_anon / 'sub-123' / 'ses-01' / 'meg').exists() - assert not (bids_root_anon / 'sub-123' / 'ses-01' / 'anat').exists() - assert (bids_root_anon / 'sub-emptyroom' / 'ses-20021123').exists() + assert (bids_root_anon / "sub-123" / "ses-01" / "meg").exists() + assert not (bids_root_anon / "sub-123" / "ses-01" / "anat").exists() + assert (bids_root_anon / "sub-emptyroom" / "ses-20021123").exists() # Unknown subject in subject_mapping - bids_root_anon = tmpdir / 'bids-anonymized-3' - with pytest.raises(IndexError, match='does not contain an entry for'): + bids_root_anon = tmpdir / "bids-anonymized-3" + with pytest.raises(IndexError, match="does not contain an entry for"): anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - subject_mapping={ - 'foobar': '123', - 'emptyroom': 'emptyroom' - } + subject_mapping={"foobar": "123", "emptyroom": "emptyroom"}, ) # Duplicated entries in subject_mapping - 
bids_root_anon = tmpdir / 'bids-anonymized-4' - with pytest.raises(ValueError, match='dictionary contains duplicated'): + bids_root_anon = tmpdir / "bids-anonymized-4" + with pytest.raises(ValueError, match="dictionary contains duplicated"): anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, subject_mapping={ - 'testparticipant': '123', - 'foobar': '123', - 'emptyroom': 'emptyroom' - } + "testparticipant": "123", + "foobar": "123", + "emptyroom": "emptyroom", + }, ) # bids_root_in does not exist - bids_root_anon = tmpdir / 'bids-anonymized-5' - with pytest.raises(FileNotFoundError, match='directory does not exist'): - anonymize_dataset( - bids_root_in='/foobar', - bids_root_out=bids_root_anon - ) + bids_root_anon = tmpdir / "bids-anonymized-5" + with pytest.raises(FileNotFoundError, match="directory does not exist"): + anonymize_dataset(bids_root_in="/foobar", bids_root_out=bids_root_anon) # input dir == output dir - with pytest.raises(ValueError, match='directory must differ'): - anonymize_dataset( - bids_root_in=bids_root, - bids_root_out=bids_root - ) + with pytest.raises(ValueError, match="directory must differ"): + anonymize_dataset(bids_root_in=bids_root, bids_root_out=bids_root) # bids_root_out exists - bids_root_anon = tmpdir / 'bids-anonymized-6' + bids_root_anon = tmpdir / "bids-anonymized-6" bids_root_anon.mkdir() - with pytest.raises(FileExistsError, match='directory already exists'): - anonymize_dataset( - bids_root_in=bids_root, - bids_root_out=bids_root_anon - ) + with pytest.raises(FileExistsError, match="directory already exists"): + anonymize_dataset(bids_root_in=bids_root, bids_root_out=bids_root_anon) # Unsupported data type - bids_root_anon = tmpdir / 'bids-anonymized-7' - with pytest.raises(ValueError, match='Unsupported data type'): + bids_root_anon = tmpdir / "bids-anonymized-7" + with pytest.raises(ValueError, match="Unsupported data type"): anonymize_dataset( - bids_root_in=bids_root, - bids_root_out=bids_root_anon, - datatypes='func' + bids_root_in=bids_root, bids_root_out=bids_root_anon, datatypes="func" ) # subject_mapping None - bids_root_anon = tmpdir / 'bids-anonymized-8' + bids_root_anon = tmpdir / "bids-anonymized-8" anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='meg', - subject_mapping=None + datatypes="meg", + subject_mapping=None, ) _bids_validate(bids_root_anon) - assert (bids_root_anon / 'sub-testparticipant').exists() - assert (bids_root_anon / 'sub-emptyroom').exists() + assert (bids_root_anon / "sub-testparticipant").exists() + assert (bids_root_anon / "sub-emptyroom").exists() # subject_mapping callable - bids_root_anon = tmpdir / 'bids-anonymized-9' + bids_root_anon = tmpdir / "bids-anonymized-9" anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='meg', - subject_mapping=lambda x: { - 'testparticipant': '123', 'emptyroom': 'emptyroom' - } + datatypes="meg", + subject_mapping=lambda x: {"testparticipant": "123", "emptyroom": "emptyroom"}, ) _bids_validate(bids_root_anon) - assert (bids_root_anon / 'sub-123').exists() - assert (bids_root_anon / 'sub-emptyroom').exists() + assert (bids_root_anon / "sub-123").exists() + assert (bids_root_anon / "sub-emptyroom").exists() # Rename emptyroom - bids_root_anon = tmpdir / 'bids-anonymized-10' + bids_root_anon = tmpdir / "bids-anonymized-10" with pytest.warns( - RuntimeWarning, - match='requested to change the "emptyroom" subject ID' + RuntimeWarning, match='requested to change the "emptyroom" subject ID' ): 
anonymize_dataset( bids_root_in=bids_root, bids_root_out=bids_root_anon, - datatypes='meg', + datatypes="meg", subject_mapping={ - 'testparticipant': 'testparticipant', - 'emptyroom': 'emptiestroom' - } + "testparticipant": "testparticipant", + "emptyroom": "emptiestroom", + }, ) _bids_validate(bids_root) - assert (bids_root_anon / 'sub-testparticipant').exists() - assert (bids_root_anon / 'sub-emptiestroom').exists() + assert (bids_root_anon / "sub-testparticipant").exists() + assert (bids_root_anon / "sub-emptiestroom").exists() # Only anat data - bids_root_anon = tmpdir / 'bids-anonymized-11' + bids_root_anon = tmpdir / "bids-anonymized-11" anonymize_dataset( - bids_root_in=bids_root, - bids_root_out=bids_root_anon, - datatypes='anat' + bids_root_in=bids_root, bids_root_out=bids_root_anon, datatypes="anat" ) _bids_validate(bids_root_anon) - assert (bids_root_anon / 'sub-1' / 'ses-01' / 'anat').exists() - assert not (bids_root_anon / 'sub-1' / 'ses-01' / 'meg').exists() + assert (bids_root_anon / "sub-1" / "ses-01" / "anat").exists() + assert not (bids_root_anon / "sub-1" / "ses-01" / "meg").exists() # Ensure that additional JSON sidecar fields are transferred if they are # "safe", and are omitted if they are not whitelisted - bids_path.datatype = 'meg' - meg_json_path = bids_path.copy().update(suffix='meg', extension='.json') - meg_json = json.loads(meg_json_path.fpath.read_text(encoding='utf-8')) - assert 'Instructions' not in meg_json # ensure following test makes sense - meg_json['Instructions'] = 'Foo' - meg_json['UnknownKey'] = 'Bar' - meg_json_path.fpath.write_text( - data=json.dumps(meg_json), - encoding='utf-8' - ) + bids_path.datatype = "meg" + meg_json_path = bids_path.copy().update(suffix="meg", extension=".json") + meg_json = json.loads(meg_json_path.fpath.read_text(encoding="utf-8")) + assert "Instructions" not in meg_json # ensure following test makes sense + meg_json["Instructions"] = "Foo" + meg_json["UnknownKey"] = "Bar" + meg_json_path.fpath.write_text(data=json.dumps(meg_json), encoding="utf-8") # After anonymization, "Instructions" should be there and "UnknownKey" # should be gone. 
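The whitelisting behavior asserted just below (keep "Instructions", drop "UnknownKey") boils down to filtering each sidecar against a set of keys known to be free of identifying information. The following is a sketch of the idea only; the constant and helper names are hypothetical, not mne-bids internals:

```python
# Hypothetical names; mne-bids keeps its own whitelist internally.
SAFE_SIDECAR_KEYS = {"Instructions", "TaskName", "PowerLineFrequency"}

def filter_sidecar(sidecar: dict) -> dict:
    """Drop any sidecar field that is not known to be anonymization-safe."""
    return {key: val for key, val in sidecar.items() if key in SAFE_SIDECAR_KEYS}

assert filter_sidecar({"Instructions": "Foo", "UnknownKey": "Bar"}) == {
    "Instructions": "Foo"
}
```
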
- bids_root_anon = tmpdir / 'bids-anonymized-12' + bids_root_anon = tmpdir / "bids-anonymized-12" anonymize_dataset( - bids_root_in=bids_root, - bids_root_out=bids_root_anon, - datatypes='meg' + bids_root_in=bids_root, bids_root_out=bids_root_anon, datatypes="meg" + ) + path = ( + bids_root_anon + / "sub-1" + / "ses-01" + / "meg" + / "sub-1_ses-01_task-testing_acq-01_run-01_meg.json" ) - path = (bids_root_anon / 'sub-1' / 'ses-01' / 'meg' / - 'sub-1_ses-01_task-testing_acq-01_run-01_meg.json') - meg_json = json.loads(path.read_text(encoding='utf=8')) - assert 'Instructions' in meg_json - assert 'UnknownKey' not in meg_json + meg_json = json.loads(path.read_text(encoding="utf=8")) + assert "Instructions" in meg_json + assert "UnknownKey" not in meg_json @testing.requires_testing_data @@ -3661,45 +3961,43 @@ def test_anonymize_dataset_daysback(tmpdir): # Check progress bar output from mne_bids.write import _get_daysback - bids_root = tmpdir / 'bids' + bids_root = tmpdir / "bids" bids_path = _bids_path.copy().update( - root=bids_root, subject='testparticipant', datatype='meg' + root=bids_root, subject="testparticipant", datatype="meg" ) - raw_path = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' + raw_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" raw = _read_raw_fif(raw_path, verbose=False) write_raw_bids(raw, bids_path=bids_path) _get_daysback( - bids_paths=[bids_path], - rng=np.random.default_rng(), - show_progress_thresh=1 + bids_paths=[bids_path], rng=np.random.default_rng(), show_progress_thresh=1 ) # Multiple runs _get_daysback( bids_paths=[ - bids_path.copy().update(run='01'), - bids_path.copy().update(run='02') + bids_path.copy().update(run="01"), + bids_path.copy().update(run="02"), ], rng=np.random.default_rng(), - show_progress_thresh=20 + show_progress_thresh=20, ) # Multiple sessions - bids_root = tmpdir / 'bids-multisession' + bids_root = tmpdir / "bids-multisession" bids_path = _bids_path.copy().update( - root=bids_root, subject='testparticipant', datatype='meg' + root=bids_root, subject="testparticipant", datatype="meg" ) - write_raw_bids(raw, bids_path=bids_path.copy().update(session='01')) - write_raw_bids(raw, bids_path=bids_path.copy().update(session='02')) + write_raw_bids(raw, bids_path=bids_path.copy().update(session="01")) + write_raw_bids(raw, bids_path=bids_path.copy().update(session="02")) _get_daysback( bids_paths=[ - bids_path.copy().update(session='01'), - bids_path.copy().update(session='02') + bids_path.copy().update(session="01"), + bids_path.copy().update(session="02"), ], rng=np.random.default_rng(), - show_progress_thresh=20 + show_progress_thresh=20, ) @@ -3711,7 +4009,7 @@ def test_repeat_write_location(tmpdir): raw = _read_raw_edf(raw_fname) # Write as BIDS - bids_root = tmpdir.mkdir('bids2') + bids_root = tmpdir.mkdir("bids2") bids_path = _bids_path.copy().update(root=bids_root) bids_path = write_raw_bids(raw, bids_path, verbose=False) @@ -3720,78 +4018,89 @@ def test_repeat_write_location(tmpdir): raw = read_raw_bids(bids_path, verbose=False) # Re-writing with src == dest should error - with pytest.raises(FileExistsError, match='Desired output BIDSPath'): + with pytest.raises(FileExistsError, match="Desired output BIDSPath"): write_raw_bids(raw, bids_path, overwrite=True, verbose=False) @testing.requires_testing_data def test_events_data_deprecation(tmp_path): """Test that passing events_data raises a FutureWarning.""" - bids_root = tmp_path / 'bids' + bids_root = tmp_path / "bids" bids_path = 
_bids_path.copy().update(root=bids_root) - raw_path = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' - events_path = (data_path / 'MEG' / 'sample' / - 'sample_audvis_trunc_raw-eve.fif') - event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, - 'Visual/Right': 4, 'Smiley': 5, 'Button': 32} + raw_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" + events_path = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw-eve.fif" + event_id = { + "Auditory/Left": 1, + "Auditory/Right": 2, + "Visual/Left": 3, + "Visual/Right": 4, + "Smiley": 5, + "Button": 32, + } # Drop unknown events. events = mne.read_events(events_path) events = events[events[:, 2] != 0] raw = _read_raw_fif(raw_path) - with pytest.warns(FutureWarning, match='will be removed'): + with pytest.warns(FutureWarning, match="will be removed"): write_raw_bids( raw=raw, bids_path=bids_path, events_data=events, event_id=event_id ) with pytest.raises( - ValueError, - match='Only one of events and events_data can be passed' + ValueError, match="Only one of events and events_data can be passed" ): write_raw_bids( - raw=raw, bids_path=bids_path, events=events, events_data=events, - event_id=event_id + raw=raw, + bids_path=bids_path, + events=events, + events_data=events, + event_id=event_id, ) @testing.requires_testing_data def test_unknown_extension(_bids_validate, tmp_path): """Write data with unknown extension to BIDS.""" - bids_root = tmp_path / 'bids' - bids_path = _bids_path.copy().update(root=bids_root, datatype='meg') - raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' + bids_root = tmp_path / "bids" + bids_path = _bids_path.copy().update(root=bids_root, datatype="meg") + raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" raw = _read_raw_fif(raw_fname) - raw._filenames = (raw.filenames[0].replace('.fif', '.foo'),) + raw._filenames = (raw.filenames[0].replace(".fif", ".foo"),) # When data is not preloaded, we should raise an exception. - with pytest.raises(ValueError, match='file format not supported by BIDS'): + with pytest.raises(ValueError, match="file format not supported by BIDS"): write_raw_bids(raw, bids_path) # With preloaded data, writing should work. 
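The `events_data` deprecation exercised above implies the migration path for callers: pass `events` together with `event_id`. A hedged sketch, assuming `raw`, `events_path`, `event_id`, and `bids_path` are set up as in the surrounding tests:

```python
import mne
from mne_bids import write_raw_bids

# `raw`, `events_path`, `event_id`, and `bids_path` as prepared in the
# deprecation test above (assumed here, not redefined).
events = mne.read_events(events_path)
events = events[events[:, 2] != 0]  # drop unknown (id 0) events, as the test does
write_raw_bids(raw=raw, bids_path=bids_path, events=events, event_id=event_id)
```
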
- raw._filenames = (raw.filenames[0].replace('.foo', '.fif'),) + raw._filenames = (raw.filenames[0].replace(".foo", ".fif"),) raw.load_data() - raw._filenames = (raw.filenames[0].replace('.fif', '.foo'),) + raw._filenames = (raw.filenames[0].replace(".fif", ".foo"),) - write_raw_bids(raw, bids_path, allow_preload=True, format='FIF') + write_raw_bids(raw, bids_path, allow_preload=True, format="FIF") _bids_validate(bids_root) @testing.requires_testing_data def test_write_neuromag122(_bids_validate, tmp_path): """Test writing Neuromag122 data to BIDS.""" - bids_root = tmp_path / 'bids' - raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif' + bids_root = tmp_path / "bids" + raw_fname = data_path / "MEG" / "sample" / "sample_audvis_trunc_raw.fif" raw = mne.io.read_raw_fif(raw_fname, allow_maxshield=True) raw.info["line_freq"] = 50 # power line frequency as required by BIDS - raw.pick('mag') - for c in raw.info['chs']: - c['coil_type'] = FIFF.FIFFV_COIL_NM_122 - - bids_path = BIDSPath(subject="01", task="wordreport", run="01", - root=bids_root, - extension=".fif", datatype="meg") - write_raw_bids(raw, bids_path, overwrite=True, allow_preload=True, - format="FIF") + raw.pick("mag") + for c in raw.info["chs"]: + c["coil_type"] = FIFF.FIFFV_COIL_NM_122 + + bids_path = BIDSPath( + subject="01", + task="wordreport", + run="01", + root=bids_root, + extension=".fif", + datatype="meg", + ) + write_raw_bids(raw, bids_path, overwrite=True, allow_preload=True, format="FIF") _bids_validate(bids_root) diff --git a/mne_bids/tsv_handler.py b/mne_bids/tsv_handler.py index 17be1f5d7..c8a86c086 100644 --- a/mne_bids/tsv_handler.py +++ b/mne_bids/tsv_handler.py @@ -140,8 +140,10 @@ def _from_tsv(fname, dtypes=None): """ from .utils import warn # avoid circular import - data = np.loadtxt(fname, dtype=str, delimiter='\t', ndmin=2, - comments=None, encoding='utf-8-sig') + + data = np.loadtxt( + fname, dtype=str, delimiter="\t", ndmin=2, comments=None, encoding="utf-8-sig" + ) column_names = data[0, :] info = data[1:, :] data_dict = OrderedDict() @@ -150,8 +152,10 @@ def _from_tsv(fname, dtypes=None): if not isinstance(dtypes, (list, tuple)): dtypes = [dtypes] * info.shape[1] if not len(dtypes) == info.shape[1]: - raise ValueError('dtypes length mismatch. Provided: {0}, ' - 'Expected: {1}'.format(len(dtypes), info.shape[1])) + raise ValueError( + "dtypes length mismatch. Provided: {0}, " + "Expected: {1}".format(len(dtypes), info.shape[1]) + ) empty_cols = 0 for i, name in enumerate(column_names): values = info[:, i].astype(dtypes[i]).tolist() @@ -179,9 +183,9 @@ def _to_tsv(data, fname): n_rows = len(data[list(data.keys())[0]]) output = _tsv_to_str(data, n_rows) - with open(fname, 'w', encoding='utf-8-sig') as f: + with open(fname, "w", encoding="utf-8-sig") as f: f.write(output) - f.write('\n') + f.write("\n") def _tsv_to_str(data, rows=5): @@ -204,12 +208,12 @@ def _tsv_to_str(data, rows=5): n_rows = len(data[col_names[0]]) output = list() # write headings. - output.append('\t'.join(col_names)) + output.append("\t".join(col_names)) # write column data. max_rows = min(n_rows, rows) for idx in range(max_rows): row_data = list(str(data[key][idx]) for key in data) - output.append('\t'.join(row_data)) + output.append("\t".join(row_data)) - return '\n'.join(output) + return "\n".join(output) diff --git a/mne_bids/utils.py b/mne_bids/utils.py index 9a4a9b2e9..8e47b1a08 100644 --- a/mne_bids/utils.py +++ b/mne_bids/utils.py @@ -24,7 +24,7 @@ # This regex matches key-val pairs. 
Any characters are allowed in the key and # the value, except these special symbols: - _ . \ / -param_regex = re.compile(r'([^-_\.\\\/]+)-([^-_\.\\\/]+)') +param_regex = re.compile(r"([^-_\.\\\/]+)-([^-_\.\\\/]+)") def _ensure_tuple(x): @@ -37,7 +37,7 @@ def _ensure_tuple(x): return tuple(x) -def _get_ch_type_mapping(fro='mne', to='bids'): +def _get_ch_type_mapping(fro="mne", to="bids"): """Map between BIDS and MNE nomenclatures for channel types. Parameters @@ -63,33 +63,62 @@ def _get_ch_type_mapping(fro='mne', to='bids'): Bio channels are supported in mne-python and are converted to MISC because there is no "Bio" supported channel in BIDS. """ - if fro == 'mne' and to == 'bids': - mapping = dict(eeg='EEG', misc='MISC', stim='TRIG', emg='EMG', - ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', - resp='RESP', bio='MISC', dbs='DBS', gsr='GSR', - temperature='TEMP', - # NIRS - fnirs_cw_amplitude='NIRSCWAMPLITUDE', - # MEG channels - meggradaxial='MEGGRADAXIAL', megmag='MEGMAG', - megrefgradaxial='MEGREFGRADAXIAL', - meggradplanar='MEGGRADPLANAR', megrefmag='MEGREFMAG', - ias='MEGOTHER', syst='MEGOTHER', exci='MEGOTHER') - - elif fro == 'bids' and to == 'mne': - mapping = dict(EEG='eeg', MISC='misc', TRIG='stim', EMG='emg', - ECOG='ecog', SEEG='seeg', EOG='eog', ECG='ecg', - RESP='resp', GSR='gsr', TEMP='temperature', - # NIRS - NIRSCWAMPLITUDE='fnirs_cw_amplitude', - NIRS='fnirs_cw_amplitude', - # No MEG channels for now (see Notes above) - # Many to one mapping - VEOG='eog', HEOG='eog', DBS='dbs') + if fro == "mne" and to == "bids": + mapping = dict( + eeg="EEG", + misc="MISC", + stim="TRIG", + emg="EMG", + ecog="ECOG", + seeg="SEEG", + eog="EOG", + ecg="ECG", + resp="RESP", + bio="MISC", + dbs="DBS", + gsr="GSR", + temperature="TEMP", + # NIRS + fnirs_cw_amplitude="NIRSCWAMPLITUDE", + # MEG channels + meggradaxial="MEGGRADAXIAL", + megmag="MEGMAG", + megrefgradaxial="MEGREFGRADAXIAL", + meggradplanar="MEGGRADPLANAR", + megrefmag="MEGREFMAG", + ias="MEGOTHER", + syst="MEGOTHER", + exci="MEGOTHER", + ) + + elif fro == "bids" and to == "mne": + mapping = dict( + EEG="eeg", + MISC="misc", + TRIG="stim", + EMG="emg", + ECOG="ecog", + SEEG="seeg", + EOG="eog", + ECG="ecg", + RESP="resp", + GSR="gsr", + TEMP="temperature", + # NIRS + NIRSCWAMPLITUDE="fnirs_cw_amplitude", + NIRS="fnirs_cw_amplitude", + # No MEG channels for now (see Notes above) + # Many to one mapping + VEOG="eog", + HEOG="eog", + DBS="dbs", + ) else: - raise ValueError('Only two types of mappings are currently supported: ' - 'from mne to bids, or from bids to mne. However, ' - 'you specified from "{}" to "{}"'.format(fro, to)) + raise ValueError( + "Only two types of mappings are currently supported: " + "from mne to bids, or from bids to mne. However, " + 'you specified from "{}" to "{}"'.format(fro, to) + ) return mapping @@ -115,41 +144,47 @@ def _handle_datatype(raw, datatype): if datatype is not None: _check_datatype(raw, datatype) # MEG data is not supported by BrainVision or EDF files - if datatype in ['eeg', 'ieeg'] and 'meg' in raw: - logger.info(f"{os.linesep}Both {datatype} and 'meg' data found. " - f"BrainVision and EDF do not support 'meg' data. " - f"The data will therefore be stored as 'meg' data. " - f"If you wish to store your {datatype} data in " - f"BrainVision or EDF, please remove the 'meg'" - f"channels from your recording.{os.linesep}") - datatype = 'meg' + if datatype in ["eeg", "ieeg"] and "meg" in raw: + logger.info( + f"{os.linesep}Both {datatype} and 'meg' data found. 
" + f"BrainVision and EDF do not support 'meg' data. " + f"The data will therefore be stored as 'meg' data. " + f"If you wish to store your {datatype} data in " + f"BrainVision or EDF, please remove the 'meg'" + f"channels from your recording.{os.linesep}" + ) + datatype = "meg" else: datatypes = list() - ieeg_types = ['seeg', 'ecog', 'dbs'] + ieeg_types = ["seeg", "ecog", "dbs"] if any(ieeg_type in raw for ieeg_type in ieeg_types): - datatypes.append('ieeg') - if 'meg' in raw: - datatypes.append('meg') - if 'eeg' in raw: - datatypes.append('eeg') - if 'fnirs_cw_amplitude' in raw: - datatypes.append('nirs') + datatypes.append("ieeg") + if "meg" in raw: + datatypes.append("meg") + if "eeg" in raw: + datatypes.append("eeg") + if "fnirs_cw_amplitude" in raw: + datatypes.append("nirs") if len(datatypes) == 0: - raise ValueError('No MEG, EEG or iEEG channels found in data. ' - 'Please use raw.set_channel_types to set the ' - 'channel types in the data.') + raise ValueError( + "No MEG, EEG or iEEG channels found in data. " + "Please use raw.set_channel_types to set the " + "channel types in the data." + ) elif len(datatypes) > 1: - if 'meg' in datatypes and 'ieeg' not in datatypes: - datatype = 'meg' - elif 'ieeg' in datatypes and 'meg' not in datatypes: - datatype = 'ieeg' + if "meg" in datatypes and "ieeg" not in datatypes: + datatype = "meg" + elif "ieeg" in datatypes and "meg" not in datatypes: + datatype = "ieeg" else: - raise ValueError(f'Multiple data types (``{datatypes}``) were ' - 'found in the data. Please specify the ' - 'datatype using ' - '`bids_path.update(datatype="")` ' - 'or use raw.set_channel_types to set the ' - 'correct channel types in the raw object.') + raise ValueError( + f"Multiple data types (``{datatypes}``) were " + "found in the data. Please specify the " + "datatype using " + '`bids_path.update(datatype="")` ' + "or use raw.set_channel_types to set the " + "correct channel types in the raw object." + ) else: datatype = datatypes[0] return datatype @@ -167,8 +202,7 @@ def _age_on_date(bday, exp_date): """ if exp_date < bday: - raise ValueError("The experimentation date must be after the birth " - "date") + raise ValueError("The experimentation date must be after the birth " "date") if exp_date.month > bday.month: return exp_date.year - bday.year elif exp_date.month == bday.month: @@ -181,21 +215,24 @@ def _check_types(variables): """Make sure all vars are str or None.""" for var in variables: if not isinstance(var, (str, type(None))): - raise ValueError(f"You supplied a value ({var}) of type " - f"{type(var)}, where a string or None was " - f"expected.") + raise ValueError( + f"You supplied a value ({var}) of type " + f"{type(var)}, where a string or None was " + f"expected." + ) def _write_json(fname, dictionary, overwrite=False): """Write JSON to a file.""" if op.exists(fname) and not overwrite: - raise FileExistsError(f'"{fname}" already exists. ' - 'Please set overwrite to True.') + raise FileExistsError( + f'"{fname}" already exists. ' "Please set overwrite to True." 
+ ) json_output = json.dumps(dictionary, indent=4) - with open(fname, 'w', encoding='utf-8') as fid: + with open(fname, "w", encoding="utf-8") as fid: fid.write(json_output) - fid.write('\n') + fid.write("\n") logger.info(f"Writing '{fname}'...") @@ -204,8 +241,9 @@ def _write_json(fname, dictionary, overwrite=False): def _write_tsv(fname, dictionary, overwrite=False, verbose=None): """Write an ordered dictionary to a .tsv file.""" if op.exists(fname) and not overwrite: - raise FileExistsError(f'"{fname}" already exists. ' - 'Please set overwrite to True.') + raise FileExistsError( + f'"{fname}" already exists. ' "Please set overwrite to True." + ) _to_tsv(dictionary, fname) logger.info(f"Writing '{fname}'...") @@ -214,27 +252,29 @@ def _write_tsv(fname, dictionary, overwrite=False, verbose=None): def _write_text(fname, text, overwrite=False): """Write text to a file.""" if op.exists(fname) and not overwrite: - raise FileExistsError(f'"{fname}" already exists. ' - 'Please set overwrite to True.') - with open(fname, 'w', encoding='utf-8-sig') as fid: + raise FileExistsError( + f'"{fname}" already exists. ' "Please set overwrite to True." + ) + with open(fname, "w", encoding="utf-8-sig") as fid: fid.write(text) - fid.write('\n') + fid.write("\n") logger.info(f"Writing '{fname}'...") def _check_key_val(key, val): """Perform checks on a value to make sure it adheres to the spec.""" - if any(ii in val for ii in ['-', '_', '/']): - raise ValueError("Unallowed `-`, `_`, or `/` found in key/value pair" - f" {key}: {val}") + if any(ii in val for ii in ["-", "_", "/"]): + raise ValueError( + "Unallowed `-`, `_`, or `/` found in key/value pair" f" {key}: {val}" + ) return key, val def _get_mrk_meas_date(mrk): """Find the measurement date from a KIT marker file.""" info = get_kit_info(mrk, False)[0] - meas_date = info.get('meas_date', None) + meas_date = info.get("meas_date", None) if isinstance(meas_date, (tuple, list, np.ndarray)): meas_date = meas_date[0] if isinstance(meas_date, datetime): @@ -261,43 +301,45 @@ def _infer_eeg_placement_scheme(raw): extraction. """ - placement_scheme = 'n/a' + placement_scheme = "n/a" # Check if the raw data contains eeg data at all - if 'eeg' not in raw: + if "eeg" not in raw: return placement_scheme # How many of the channels in raw are based on the extended 10/20 system sel = pick_types(raw.info, meg=False, eeg=True) ch_names = [raw.ch_names[i] for i in sel] channel_names = [ch.lower() for ch in ch_names] - montage1005 = make_standard_montage('standard_1005') + montage1005 = make_standard_montage("standard_1005") montage1005_names = [ch.lower() for ch in montage1005.ch_names] if set(channel_names).issubset(set(montage1005_names)): - placement_scheme = 'based on the extended 10/20 system' + placement_scheme = "based on the extended 10/20 system" return placement_scheme def _scale_coord_to_meters(coord, unit): """Scale units to meters (mne-python default).""" - if unit == 'cm': - return np.divide(coord, 100.) - elif unit == 'mm': - return np.divide(coord, 1000.) 
+ if unit == "cm": + return np.divide(coord, 100.0) + elif unit == "mm": + return np.divide(coord, 1000.0) else: return coord -def _check_empty_room_basename(bids_path, on_invalid_er_task='raise'): +def _check_empty_room_basename(bids_path, on_invalid_er_task="raise"): # only check task entity for emptyroom when it is the sidecar/MEG file - if bids_path.suffix == 'meg': - if bids_path.task != 'noise': - msg = (f'task must be "noise" if subject is "emptyroom", but ' - f'received: {bids_path.task}') - if on_invalid_er_task == 'raise': + if bids_path.suffix == "meg": + if bids_path.task != "noise": + msg = ( + f'task must be "noise" if subject is "emptyroom", but ' + f"received: {bids_path.task}" + ) + if on_invalid_er_task == "raise": raise ValueError(msg) - elif on_invalid_er_task == 'warn': + elif on_invalid_er_task == "warn": logger.critical(msg) else: pass @@ -306,26 +348,29 @@ def _check_empty_room_basename(bids_path, on_invalid_er_task='raise'): def _check_anonymize(anonymize, raw, ext): """Check the `anonymize` dict.""" # if info['meas_date'] None, then the dates are not stored - if raw.info['meas_date'] is None: + if raw.info["meas_date"] is None: daysback = None else: - if 'daysback' not in anonymize or anonymize['daysback'] is None: - raise ValueError('`daysback` argument required to anonymize.') - daysback = anonymize['daysback'] + if "daysback" not in anonymize or anonymize["daysback"] is None: + raise ValueError("`daysback` argument required to anonymize.") + daysback = anonymize["daysback"] daysback_min, daysback_max = _get_anonymization_daysback(raw) if daysback < daysback_min: - warn('`daysback` is too small; the measurement date ' - 'is after 1925, which is not recommended by BIDS.' - 'The minimum `daysback` value for changing the ' - 'measurement date of this data to before this date ' - f'is {daysback_min}') - if ext == '.fif' and daysback > daysback_max: - raise ValueError('`daysback` exceeds maximum value MNE ' - 'is able to store in FIF format, must ' - f'be less than {daysback_max}') - keep_his = anonymize['keep_his'] if 'keep_his' in anonymize else False - keep_source = anonymize['keep_source'] if 'keep_source' in \ - anonymize else False + warn( + "`daysback` is too small; the measurement date " + "is after 1925, which is not recommended by BIDS." + "The minimum `daysback` value for changing the " + "measurement date of this data to before this date " + f"is {daysback_min}" + ) + if ext == ".fif" and daysback > daysback_max: + raise ValueError( + "`daysback` exceeds maximum value MNE " + "is able to store in FIF format, must " + f"be less than {daysback_max}" + ) + keep_his = anonymize["keep_his"] if "keep_his" in anonymize else False + keep_source = anonymize["keep_source"] if "keep_source" in anonymize else False return daysback, keep_his, keep_source @@ -344,10 +389,13 @@ def _get_anonymization_daysback(raw): daysback_max : int The maximum number of daysback that MNE can store. 
""" - this_date = _stamp_to_dt(raw.info['meas_date']).date() + this_date = _stamp_to_dt(raw.info["meas_date"]).date() daysback_min = (this_date - date(year=1924, month=12, day=31)).days - daysback_max = (this_date - datetime.fromtimestamp(0).date() + - timedelta(seconds=np.iinfo('>i4').max)).days + daysback_max = ( + this_date + - datetime.fromtimestamp(0).date() + + timedelta(seconds=np.iinfo(">i4").max) + ).days return daysback_min, daysback_max @@ -385,20 +433,23 @@ def get_anonymization_daysback(raws, verbose=None): daysback_min_list = list() daysback_max_list = list() for raw in raws: - if raw.info['meas_date'] is not None: + if raw.info["meas_date"] is not None: daysback_min, daysback_max = _get_anonymization_daysback(raw) daysback_min_list.append(daysback_min) daysback_max_list.append(daysback_max) if not daysback_min_list or not daysback_max_list: - raise ValueError('All measurement dates are None, ' - 'pass any `daysback` value to anonymize.') + raise ValueError( + "All measurement dates are None, " "pass any `daysback` value to anonymize." + ) daysback_min = max(daysback_min_list) daysback_max = min(daysback_max_list) if daysback_min > daysback_max: - raise ValueError('The dataset spans more time than can be ' - 'accomodated by MNE, you may have to ' - 'not follow BIDS recommendations and use' - 'anonymized dates after 1925') + raise ValueError( + "The dataset spans more time than can be " + "accomodated by MNE, you may have to " + "not follow BIDS recommendations and use" + "anonymized dates after 1925" + ) return daysback_min, daysback_max @@ -413,8 +464,9 @@ def _stamp_to_dt(utc_stamp): stamp = [int(s) for s in utc_stamp] if len(stamp) == 1: # In case there is no microseconds information stamp.append(0) - return (datetime.fromtimestamp(0, tz=timezone.utc) + - timedelta(0, stamp[0], stamp[1])) # day, sec, μs + return datetime.fromtimestamp(0, tz=timezone.utc) + timedelta( + 0, stamp[0], stamp[1] + ) # day, sec, μs def _check_datatype(raw, datatype): @@ -431,46 +483,52 @@ def _check_datatype(raw, datatype): ------- None """ - supported_types = ('meg', 'eeg', 'ieeg', 'nirs') + supported_types = ("meg", "eeg", "ieeg", "nirs") if datatype not in supported_types: raise ValueError( - f'The specified datatype {datatype} is currently not supported. ' - f'It should be one of either `meg`, `eeg` or `ieeg` (Got ' - f'`{datatype}`. Please specify a valid datatype using ' - f'`bids_path.update(datatype="")`.') + f"The specified datatype {datatype} is currently not supported. " + f"It should be one of either `meg`, `eeg` or `ieeg` (Got " + f"`{datatype}`. Please specify a valid datatype using " + f'`bids_path.update(datatype="")`.' + ) datatype_matches = False - if datatype == 'eeg' and datatype in raw: + if datatype == "eeg" and datatype in raw: datatype_matches = True - elif datatype == 'meg' and datatype in raw: + elif datatype == "meg" and datatype in raw: datatype_matches = True - elif datatype == 'nirs' and 'fnirs_cw_amplitude' in raw: + elif datatype == "nirs" and "fnirs_cw_amplitude" in raw: datatype_matches = True - elif datatype == 'ieeg': - ieeg_types = ('seeg', 'ecog', 'dbs') + elif datatype == "ieeg": + ieeg_types = ("seeg", "ecog", "dbs") if any(ieeg_type in raw for ieeg_type in ieeg_types): datatype_matches = True if not datatype_matches: raise ValueError( - f'The specified datatype {datatype} was not found in the raw ' - 'object. Please specify the correct datatype using ' + f"The specified datatype {datatype} was not found in the raw " + "object. 
Please specify the correct datatype using " '`bids_path.update(datatype="")` or use ' - 'raw.set_channel_types to set the correct channel types in ' - 'the raw object.') + "raw.set_channel_types to set the correct channel types in " + "the raw object." + ) -def _import_nibabel(why='work with MRI data'): +def _import_nibabel(why="work with MRI data"): try: import nibabel # noqa except ImportError as exc: raise exc.__class__( - f'nibabel is required to {why} but could not be imported, ' - f'got: {exc}') from None + f"nibabel is required to {why} but could not be imported, " f"got: {exc}" + ) from None else: return nibabel -def warn(message, category=RuntimeWarning, module='mne_bids', - ignore_namespaces=('mne', 'mne_bids')): # noqa: D103 +def warn( + message, + category=RuntimeWarning, + module="mne_bids", + ignore_namespaces=("mne", "mne_bids"), +): # noqa: D103 _warn( message, category=category, @@ -480,4 +538,4 @@ def warn(message, category=RuntimeWarning, module='mne_bids', # Some of the defaults here will be wrong but it should be close enough -warn.__doc__ = getattr(_warn, '__doc__', None) +warn.__doc__ = getattr(_warn, "__doc__", None) diff --git a/mne_bids/write.py b/mne_bids/write.py index d91701b87..e093f4efa 100644 --- a/mne_bids/write.py +++ b/mne_bids/write.py @@ -23,46 +23,78 @@ import numpy as np from scipy import linalg import mne -from mne.transforms import (_get_trans, apply_trans, rotation, translation) +from mne.transforms import _get_trans, apply_trans, rotation, translation from mne import Epochs from mne.io.constants import FIFF from mne.io.pick import channel_type, _picks_to_idx from mne.io import BaseRaw, read_fiducials -from mne.channels.channels import (_unit2human, _get_meg_system) +from mne.channels.channels import _unit2human, _get_meg_system from mne.chpi import get_chpi_info -from mne.utils import (check_version, logger, Bunch, - _validate_type, get_subjects_dir, verbose, - ProgressBar) +from mne.utils import ( + check_version, + logger, + Bunch, + _validate_type, + get_subjects_dir, + verbose, + ProgressBar, +) import mne.preprocessing from mne_bids.pick import coil_type from mne_bids.dig import _write_dig_bids, _write_coordsystem_json -from mne_bids.utils import (_write_json, _write_tsv, _write_text, - _age_on_date, _infer_eeg_placement_scheme, - _get_ch_type_mapping, _check_anonymize, - _stamp_to_dt, _handle_datatype, warn, - _import_nibabel) -from mne_bids import (BIDSPath, read_raw_bids, get_anonymization_daysback, - get_bids_path_from_fname) +from mne_bids.utils import ( + _write_json, + _write_tsv, + _write_text, + _age_on_date, + _infer_eeg_placement_scheme, + _get_ch_type_mapping, + _check_anonymize, + _stamp_to_dt, + _handle_datatype, + warn, + _import_nibabel, +) +from mne_bids import ( + BIDSPath, + read_raw_bids, + get_anonymization_daysback, + get_bids_path_from_fname, +) from mne_bids.path import _parse_ext, _mkdir_p, _path_to_str -from mne_bids.copyfiles import (copyfile_brainvision, copyfile_eeglab, - copyfile_ctf, copyfile_bti, copyfile_kit, - copyfile_edf) -from mne_bids.tsv_handler import (_from_tsv, _drop, _contains_row, - _combine_rows) +from mne_bids.copyfiles import ( + copyfile_brainvision, + copyfile_eeglab, + copyfile_ctf, + copyfile_bti, + copyfile_kit, + copyfile_edf, +) +from mne_bids.tsv_handler import _from_tsv, _drop, _contains_row, _combine_rows from mne_bids.read import _find_matching_sidecar, _read_events from mne_bids.sidecar_updates import update_sidecar_json -from mne_bids.config import (ORIENTATION, EXT_TO_UNIT_MAP, 
MANUFACTURERS, - IGNORED_CHANNELS, ALLOWED_DATATYPE_EXTENSIONS, - BIDS_VERSION, REFERENCES, _map_options, reader, - ALLOWED_INPUT_EXTENSIONS, CONVERT_FORMATS, - ANONYMIZED_JSON_KEY_WHITELIST, PYBV_VERSION, - BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS, - UNITS_MNE_TO_BIDS_MAP,) +from mne_bids.config import ( + ORIENTATION, + EXT_TO_UNIT_MAP, + MANUFACTURERS, + IGNORED_CHANNELS, + ALLOWED_DATATYPE_EXTENSIONS, + BIDS_VERSION, + REFERENCES, + _map_options, + reader, + ALLOWED_INPUT_EXTENSIONS, + CONVERT_FORMATS, + ANONYMIZED_JSON_KEY_WHITELIST, + PYBV_VERSION, + BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS, + UNITS_MNE_TO_BIDS_MAP, +) -_FIFF_SPLIT_SIZE = '2GB' # MNE-Python default; can be altered during debugging +_FIFF_SPLIT_SIZE = "2GB" # MNE-Python default; can be altered during debugging def _is_numeric(n): @@ -84,53 +116,53 @@ def _channels_tsv(raw, fname, overwrite=False): """ # Get channel type mappings between BIDS and MNE nomenclatures - map_chs = _get_ch_type_mapping(fro='mne', to='bids') + map_chs = _get_ch_type_mapping(fro="mne", to="bids") # Prepare the descriptions for each channel type - map_desc = defaultdict(lambda: 'Other type of channel') - map_desc.update(meggradaxial='Axial Gradiometer', - megrefgradaxial='Axial Gradiometer Reference', - meggradplanar='Planar Gradiometer', - megmag='Magnetometer', - megrefmag='Magnetometer Reference', - stim='Trigger', - eeg='ElectroEncephaloGram', - ecog='Electrocorticography', - seeg='StereoEEG', - ecg='ElectroCardioGram', - eog='ElectroOculoGram', - emg='ElectroMyoGram', - misc='Miscellaneous', - bio='Biological', - ias='Internal Active Shielding', - dbs='Deep Brain Stimulation', - fnirs_cw_amplitude='Near Infrared Spectroscopy ' - '(continuous wave)', - resp='Respiration', - gsr='Galvanic skin response (electrodermal activity, EDA)', - temperature='Temperature',) - get_specific = ('mag', 'ref_meg', 'grad') + map_desc = defaultdict(lambda: "Other type of channel") + map_desc.update( + meggradaxial="Axial Gradiometer", + megrefgradaxial="Axial Gradiometer Reference", + meggradplanar="Planar Gradiometer", + megmag="Magnetometer", + megrefmag="Magnetometer Reference", + stim="Trigger", + eeg="ElectroEncephaloGram", + ecog="Electrocorticography", + seeg="StereoEEG", + ecg="ElectroCardioGram", + eog="ElectroOculoGram", + emg="ElectroMyoGram", + misc="Miscellaneous", + bio="Biological", + ias="Internal Active Shielding", + dbs="Deep Brain Stimulation", + fnirs_cw_amplitude="Near Infrared Spectroscopy " "(continuous wave)", + resp="Respiration", + gsr="Galvanic skin response (electrodermal activity, EDA)", + temperature="Temperature", + ) + get_specific = ("mag", "ref_meg", "grad") # get the manufacturer from the file in the Raw object _, ext = _parse_ext(raw.filenames[0]) - manufacturer = MANUFACTURERS.get(ext, '') + manufacturer = MANUFACTURERS.get(ext, "") ignored_channels = IGNORED_CHANNELS.get(manufacturer, list()) status, ch_type, description = list(), list(), list() - for idx, ch in enumerate(raw.info['ch_names']): - status.append('bad' if ch in raw.info['bads'] else 'good') + for idx, ch in enumerate(raw.info["ch_names"]): + status.append("bad" if ch in raw.info["bads"] else "good") _channel_type = channel_type(raw.info, idx) if _channel_type in get_specific: _channel_type = coil_type(raw.info, idx, _channel_type) ch_type.append(map_chs[_channel_type]) description.append(map_desc[_channel_type]) - low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass']) + low_cutoff, high_cutoff = (raw.info["highpass"], raw.info["lowpass"]) if 
raw._orig_units:
-        units = [raw._orig_units.get(ch, 'n/a') for ch in raw.ch_names]
+        units = [raw._orig_units.get(ch, "n/a") for ch in raw.ch_names]
     else:
-        units = [_unit2human.get(ch_i['unit'], 'n/a')
-                 for ch_i in raw.info['chs']]
-        units = [u if u not in ['NA'] else 'n/a' for u in units]
+        units = [_unit2human.get(ch_i["unit"], "n/a") for ch_i in raw.info["chs"]]
+        units = [u if u not in ["NA"] else "n/a" for u in units]
 
     # Translate from MNE to BIDS unit naming
     for idx, mne_unit in enumerate(units):
@@ -138,31 +170,34 @@
             bids_unit = UNITS_MNE_TO_BIDS_MAP[mne_unit]
             units[idx] = bids_unit
 
-    n_channels = raw.info['nchan']
-    sfreq = raw.info['sfreq']
+    n_channels = raw.info["nchan"]
+    sfreq = raw.info["sfreq"]
 
     # default to 'n/a' for status description
     # XXX: improve with API to modify the description
-    status_description = ['n/a'] * len(status)
-
-    ch_data = OrderedDict([
-        ('name', raw.info['ch_names']),
-        ('type', ch_type),
-        ('units', units),
-        ('low_cutoff', np.full((n_channels), low_cutoff)),
-        ('high_cutoff', np.full((n_channels), high_cutoff)),
-        ('description', description),
-        ('sampling_frequency', np.full((n_channels), sfreq)),
-        ('status', status),
-        ('status_description', status_description)
-    ])
-    ch_data = _drop(ch_data, ignored_channels, 'name')
-
-    if 'fnirs_cw_amplitude' in raw:
-        ch_data["wavelength_nominal"] = [raw.info["chs"][i]["loc"][9] for i in
-                                         range(len(raw.ch_names))]
-
-        picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
+    status_description = ["n/a"] * len(status)
+
+    ch_data = OrderedDict(
+        [
+            ("name", raw.info["ch_names"]),
+            ("type", ch_type),
+            ("units", units),
+            ("low_cutoff", np.full((n_channels), low_cutoff)),
+            ("high_cutoff", np.full((n_channels), high_cutoff)),
+            ("description", description),
+            ("sampling_frequency", np.full((n_channels), sfreq)),
+            ("status", status),
+            ("status_description", status_description),
+        ]
+    )
+    ch_data = _drop(ch_data, ignored_channels, "name")
+
+    if "fnirs_cw_amplitude" in raw:
+        ch_data["wavelength_nominal"] = [
+            raw.info["chs"][i]["loc"][9] for i in range(len(raw.ch_names))
+        ]
+
+        picks = _picks_to_idx(raw.info, "fnirs", exclude=[], allow_empty=True)
+        sources = np.empty(picks.shape, dtype="<U9")
 
     if len(fid_coord_frames) > 0 and raise_error:
-        if set(fid_coord_frames.keys()) != set(['nasion', 'lpa', 'rpa']):
+        if set(fid_coord_frames.keys()) != set(["nasion", "lpa", "rpa"]):
             raise ValueError(
-                f'Some fiducial points are missing, got {fid_coords.keys()}')
+                f"Some fiducial points are missing, got {fid_coords.keys()}"
+            )
 
         if len(set(fid_coord_frames.values())) > 1:
             raise ValueError(
-                'All fiducial points must be in the same coordinate system, '
-                f'got {len(fid_coord_frames)})')
+                "All fiducial points must be in the same coordinate system, "
+                f"got {len(fid_coord_frames)})"
+            )
 
     coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None
 
@@ -267,24 +305,27 @@ def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False):
     """
     # Start by filling all data that we know into an ordered dictionary
     first_samp = raw.first_samp
-    sfreq = raw.info['sfreq']
+    sfreq = raw.info["sfreq"]
     events = events.copy()
     events[:, 0] -= first_samp
 
     # Onset column needs to be specified in seconds
-    data = OrderedDict([('onset', events[:, 0] / sfreq),
-                        ('duration', durations),
-                        ('trial_type', None),
-                        ('value', events[:, 2]),
-                        ('sample', events[:, 0])])
+    data = OrderedDict(
+        [
+            ("onset", events[:, 0] / sfreq),
+            ("duration", durations),
+            ("trial_type", None),
+            ("value", 
events[:, 2]), + ("sample", events[:, 0]), + ] + ) # Now check if trial_type is specified or should be removed if trial_type: trial_type_map = {v: k for k, v in trial_type.items()} - data['trial_type'] = [trial_type_map.get(i, 'n/a') for - i in events[:, 2]] + data["trial_type"] = [trial_type_map.get(i, "n/a") for i in events[:, 2]] else: - del data['trial_type'] + del data["trial_type"] _write_tsv(fname, data, overwrite) @@ -300,26 +341,21 @@ def _events_json(fname, overwrite=False): Whether to overwrite the output file if it exists. """ new_data = { - 'sample': { - 'Description': 'The event onset time in number of sampling points.' - }, - 'value': { - 'Description': ( - 'The event code (also known as trigger code or event ID) ' - 'associated with the event.' + "sample": {"Description": "The event onset time in number of sampling points."}, + "value": { + "Description": ( + "The event code (also known as trigger code or event ID) " + "associated with the event." ) }, - 'trial_type': { - 'Description': 'The type, category, or name of the event.' - }, + "trial_type": {"Description": "The type, category, or name of the event."}, } # make sure to append any JSON fields added by the user fname = Path(fname) if fname.exists(): orig_data = json.loads( - fname.read_text(encoding='utf-8'), - object_pairs_hook=OrderedDict + fname.read_text(encoding="utf-8"), object_pairs_hook=OrderedDict ) new_data = {**orig_data, **new_data} @@ -347,19 +383,21 @@ def _readme(datatype, fname, overwrite=False): already contains that citation. """ if os.path.isfile(fname) and not overwrite: - with open(fname, 'r', encoding='utf-8-sig') as fid: + with open(fname, "r", encoding="utf-8-sig") as fid: orig_data = fid.read() - mne_bids_ref = REFERENCES['mne-bids'] in orig_data + mne_bids_ref = REFERENCES["mne-bids"] in orig_data datatype_ref = REFERENCES[datatype] in orig_data if mne_bids_ref and datatype_ref: return - text = '{}References\n----------\n{}{}'.format( - orig_data + '\n\n', - '' if mne_bids_ref else REFERENCES['mne-bids'] + '\n\n', - '' if datatype_ref else REFERENCES[datatype] + '\n') + text = "{}References\n----------\n{}{}".format( + orig_data + "\n\n", + "" if mne_bids_ref else REFERENCES["mne-bids"] + "\n\n", + "" if datatype_ref else REFERENCES[datatype] + "\n", + ) else: - text = 'References\n----------\n{}{}'.format( - REFERENCES['mne-bids'] + '\n\n', REFERENCES[datatype] + '\n') + text = "References\n----------\n{}{}".format( + REFERENCES["mne-bids"] + "\n\n", REFERENCES[datatype] + "\n" + ) _write_text(fname, text, overwrite=True) @@ -385,25 +423,27 @@ def _participants_tsv(raw, subject_id, fname, overwrite=False): False, an error will be raised. 
""" - subject_age = 'n/a' - sex = 'n/a' - hand = 'n/a' - weight = 'n/a' - height = 'n/a' - subject_info = raw.info.get('subject_info', None) - - if subject_id != 'emptyroom' and subject_info is not None: + subject_age = "n/a" + sex = "n/a" + hand = "n/a" + weight = "n/a" + height = "n/a" + subject_info = raw.info.get("subject_info", None) + + if subject_id != "emptyroom" and subject_info is not None: # add sex - sex = _map_options(what='sex', key=subject_info.get('sex', 0), - fro='mne', to='bids') + sex = _map_options( + what="sex", key=subject_info.get("sex", 0), fro="mne", to="bids" + ) # add handedness - hand = _map_options(what='hand', key=subject_info.get('hand', 0), - fro='mne', to='bids') + hand = _map_options( + what="hand", key=subject_info.get("hand", 0), fro="mne", to="bids" + ) # determine the age of the participant - age = subject_info.get('birthday', None) - meas_date = raw.info.get('meas_date', None) + age = subject_info.get("birthday", None) + meas_date = raw.info.get("meas_date", None) if isinstance(meas_date, (tuple, list, np.ndarray)): meas_date = meas_date[0] @@ -412,25 +452,26 @@ def _participants_tsv(raw, subject_id, fname, overwrite=False): if isinstance(meas_date, datetime): meas_datetime = meas_date else: - meas_datetime = datetime.fromtimestamp(meas_date, - tz=timezone.utc) + meas_datetime = datetime.fromtimestamp(meas_date, tz=timezone.utc) subject_age = _age_on_date(bday, meas_datetime) else: subject_age = "n/a" # add weight and height - weight = subject_info.get('weight', 'n/a') - height = subject_info.get('height', 'n/a') + weight = subject_info.get("weight", "n/a") + height = subject_info.get("height", "n/a") - subject_id = 'sub-' + subject_id + subject_id = "sub-" + subject_id data = OrderedDict(participant_id=[subject_id]) - data.update({ - 'age': [subject_age], - 'sex': [sex], - 'hand': [hand], - 'weight': [weight], - 'height': [height] - }) + data.update( + { + "age": [subject_age], + "sex": [sex], + "hand": [hand], + "weight": [weight], + "height": [height], + } + ) if os.path.exists(fname): orig_data = _from_tsv(fname) @@ -438,22 +479,24 @@ def _participants_tsv(raw, subject_id, fname, overwrite=False): exact_included = _contains_row( data=orig_data, row_data={ - 'participant_id': subject_id, - 'age': subject_age, - 'sex': sex, - 'hand': hand, - 'weight': weight, - 'height': height - } + "participant_id": subject_id, + "age": subject_age, + "sex": sex, + "hand": hand, + "weight": weight, + "height": height, + }, ) # whether the subject id is in the previous data - sid_included = subject_id in orig_data['participant_id'] + sid_included = subject_id in orig_data["participant_id"] # if the subject data provided is different to the currently existing # data and overwrite is not True raise an error if (sid_included and not exact_included) and not overwrite: - raise FileExistsError(f'"{subject_id}" already exists in ' - f'the participant list. Please set ' - f'overwrite to True.') + raise FileExistsError( + f'"{subject_id}" already exists in ' + f"the participant list. Please set " + f"overwrite to True." + ) # Append any columns the original data did not have, and fill them with # n/a's. @@ -461,7 +504,7 @@ def _participants_tsv(raw, subject_id, fname, overwrite=False): if key in orig_data: continue - orig_data[key] = ['n/a'] * len(orig_data['participant_id']) + orig_data[key] = ["n/a"] * len(orig_data["participant_id"]) # Append any additional columns that original data had. 
# Keep the original order of the data by looping over @@ -472,13 +515,13 @@ def _participants_tsv(raw, subject_id, fname, overwrite=False): # add original value for any user-appended columns # that were not handled by mne-bids - p_id = data['participant_id'][0] - if p_id in orig_data['participant_id']: - row_idx = orig_data['participant_id'].index(p_id) + p_id = data["participant_id"][0] + if p_id in orig_data["participant_id"]: + row_idx = orig_data["participant_id"].index(p_id) data[key] = [orig_data[key][row_idx]] # otherwise add the new data as new row - data = _combine_rows(orig_data, data, 'participant_id') + data = _combine_rows(orig_data, data, "participant_id") # overwrite is forced to True as all issues with overwrite == False have # been handled by this point @@ -500,36 +543,21 @@ def _participants_json(fname, overwrite=False): """ new_data = { - 'participant_id': { - 'Description': 'Unique participant identifier' - }, - 'age': { - 'Description': 'Age of the participant at time of testing', - 'Units': 'years' - }, - 'sex': { - 'Description': 'Biological sex of the participant', - 'Levels': { - 'F': 'female', - 'M': 'male' - } + "participant_id": {"Description": "Unique participant identifier"}, + "age": { + "Description": "Age of the participant at time of testing", + "Units": "years", }, - 'hand': { - 'Description': 'Handedness of the participant', - 'Levels': { - 'R': 'right', - 'L': 'left', - 'A': 'ambidextrous' - } + "sex": { + "Description": "Biological sex of the participant", + "Levels": {"F": "female", "M": "male"}, }, - 'weight': { - 'Description': 'Body weight of the participant', - 'Units': 'kg' + "hand": { + "Description": "Handedness of the participant", + "Levels": {"R": "right", "L": "left", "A": "ambidextrous"}, }, - 'height': { - 'Description': 'Body height of the participant', - 'Units': 'm' - } + "weight": {"Description": "Body weight of the participant", "Units": "kg"}, + "height": {"Description": "Body height of the participant", "Units": "m"}, } # make sure to append any JSON fields added by the user @@ -538,8 +566,7 @@ def _participants_json(fname, overwrite=False): fname = Path(fname) if fname.exists(): orig_data = json.loads( - fname.read_text(encoding='utf-8'), - object_pairs_hook=OrderedDict + fname.read_text(encoding="utf-8"), object_pairs_hook=OrderedDict ) new_data = {**orig_data, **new_data} @@ -567,48 +594,49 @@ def _scans_tsv(raw, raw_fname, fname, keep_source, overwrite=False): """ # get measurement date in UTC from the data info - meas_date = raw.info['meas_date'] + meas_date = raw.info["meas_date"] if meas_date is None: - acq_time = 'n/a' + acq_time = "n/a" elif isinstance(meas_date, datetime): # for MNE >= v0.20 - acq_time = meas_date.strftime('%Y-%m-%dT%H:%M:%S.%fZ') + acq_time = meas_date.strftime("%Y-%m-%dT%H:%M:%S.%fZ") # for fif files check whether raw file is likely to be split raw_fnames = [raw_fname] - if raw_fname.endswith('.fif'): + if raw_fname.endswith(".fif"): # check whether fif files were split when saved # use the files in the target directory what should be written # to scans.tsv datatype, basename = raw_fname.split(os.sep) raw_dir = op.join(op.dirname(fname), datatype) - raw_files = [f for f in os.listdir(raw_dir) if f.endswith('.fif')] + raw_files = [f for f in os.listdir(raw_dir) if f.endswith(".fif")] if basename not in raw_files: raw_fnames = [] - split_base = basename.replace('_meg.fif', '_split-{}') + split_base = basename.replace("_meg.fif", "_split-{}") for raw_f in raw_files: - if len(raw_f.split('_split-')) == 2: - 
if split_base.format(raw_f.split('_split-')[1]) == raw_f: + if len(raw_f.split("_split-")) == 2: + if split_base.format(raw_f.split("_split-")[1]) == raw_f: raw_fnames.append(op.join(datatype, raw_f)) raw_fnames.sort() data = OrderedDict( - [('filename', ['{:s}'.format(raw_f.replace(os.sep, '/')) - for raw_f in raw_fnames]), - ('acq_time', [acq_time] * len(raw_fnames))]) + [ + ( + "filename", + ["{:s}".format(raw_f.replace(os.sep, "/")) for raw_f in raw_fnames], + ), + ("acq_time", [acq_time] * len(raw_fnames)), + ] + ) # add source filename if desired if keep_source: - data['source'] = [Path(src_fname).name for src_fname in raw.filenames] + data["source"] = [Path(src_fname).name for src_fname in raw.filenames] # write out a sidecar JSON if not exists - sidecar_json_path = Path(fname).with_suffix('.json') + sidecar_json_path = Path(fname).with_suffix(".json") sidecar_json_path = get_bids_path_from_fname(sidecar_json_path) - sidecar_json = { - 'source': { - 'Description': 'Original source filename.' - } - } + sidecar_json = {"source": {"Description": "Original source filename."}} if sidecar_json_path.fpath.exists(): update_sidecar_json(sidecar_json_path, sidecar_json) @@ -618,27 +646,29 @@ def _scans_tsv(raw, raw_fname, fname, keep_source, overwrite=False): if os.path.exists(fname): orig_data = _from_tsv(fname) # if the file name is already in the file raise an error - if raw_fname in orig_data['filename'] and not overwrite: - raise FileExistsError(f'"{raw_fname}" already exists in ' - f'the scans list. Please set ' - f'overwrite to True.') + if raw_fname in orig_data["filename"] and not overwrite: + raise FileExistsError( + f'"{raw_fname}" already exists in ' + f"the scans list. Please set " + f"overwrite to True." + ) for key in data.keys(): if key in orig_data: continue # add 'n/a' if any missing columns - orig_data[key] = ['n/a'] * len(next(iter(data.values()))) + orig_data[key] = ["n/a"] * len(next(iter(data.values()))) # otherwise add the new data - data = _combine_rows(orig_data, data, 'filename') + data = _combine_rows(orig_data, data, "filename") # overwrite is forced to True as all issues with overwrite == False have # been handled by this point _write_tsv(fname, data, True) -def _load_image(image, name='image'): +def _load_image(image, name="image"): nib = _import_nibabel() if type(image) not in nib.all_image_classes: try: @@ -647,9 +677,11 @@ def _load_image(image, name='image'): # image -> str conversion in the try block was successful, # so load the file from the specified location. We do this # here to keep the try block as short as possible. 
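
As an aside on the `_split-` matching in the `_scans_tsv` hunk above, the reconstruction test works roughly like this (file names invented for illustration):

```python
# A candidate file counts as a split of `basename` iff re-inserting its
# split suffix into the template reproduces the candidate exactly.
basename = "sub-01_task-rest_meg.fif"
split_base = basename.replace("_meg.fif", "_split-{}")
candidate = "sub-01_task-rest_split-01_meg.fif"
suffix = candidate.split("_split-")[1]  # "01_meg.fif"
assert split_base.format(suffix) == candidate
```
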
- raise ValueError('`{}` must be a path to an MRI data ' - 'file or a nibabel image object, but it ' - 'is of type "{}"'.format(name, type(image))) + raise ValueError( + "`{}` must be a path to an MRI data " + "file or a nibabel image object, but it " + 'is of type "{}"'.format(name, type(image)) + ) else: image = nib.load(image) @@ -657,8 +689,8 @@ def _load_image(image, name='image'): # XYZT_UNITS = NIFT_UNITS_MM (10 in binary or 2 in decimal) # seems to be the default for Nifti files # https://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields/nifti1fields_pages/xyzt_units.html - if image.header['xyzt_units'] == 0: - image.header['xyzt_units'] = np.array(10, dtype='uint8') + if image.header["xyzt_units"] == 0: + image.header["xyzt_units"] = np.array(10, dtype="uint8") return image @@ -747,8 +779,9 @@ def _mri_scanner_ras_to_mri_voxels(ras_landmarks, img_mgh): return vox_landmarks -def _sidecar_json(raw, task, manufacturer, fname, datatype, - emptyroom_fname=None, overwrite=False): +def _sidecar_json( + raw, task, manufacturer, fname, datatype, emptyroom_fname=None, overwrite=False +): """Create a sidecar json file depending on the suffix and save it. The sidecar json file provides meta data about the data @@ -775,11 +808,10 @@ def _sidecar_json(raw, task, manufacturer, fname, datatype, Defaults to False. """ - sfreq = raw.info['sfreq'] + sfreq = raw.info["sfreq"] try: - powerlinefrequency = raw.info['line_freq'] - powerlinefrequency = ('n/a' if powerlinefrequency is None else - powerlinefrequency) + powerlinefrequency = raw.info["line_freq"] + powerlinefrequency = "n/a" if powerlinefrequency is None else powerlinefrequency except KeyError: raise ValueError( "PowerLineFrequency parameter is required in the sidecar files. " @@ -789,167 +821,171 @@ def _sidecar_json(raw, task, manufacturer, fname, datatype, "in your script, or by passing: " " --line_freq 60 " "in the command line for a 60 Hz line frequency. If the frequency " - "is unknown, set it to None") + "is unknown, set it to None" + ) if isinstance(raw, BaseRaw): - rec_type = 'continuous' + rec_type = "continuous" elif isinstance(raw, Epochs): - rec_type = 'epoched' + rec_type = "epoched" else: - rec_type = 'n/a' + rec_type = "n/a" # determine whether any channels have to be ignored: - n_ignored = len([ch_name for ch_name in - IGNORED_CHANNELS.get(manufacturer, list()) if - ch_name in raw.ch_names]) + n_ignored = len( + [ + ch_name + for ch_name in IGNORED_CHANNELS.get(manufacturer, list()) + if ch_name in raw.ch_names + ] + ) # all ignored channels are trigger channels at the moment... 
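
The PowerLineFrequency requirement enforced in the `_sidecar_json` hunk above can be satisfied up front, as its error message suggests; a minimal sketch (60 Hz is only an example value, and `raw` is assumed to be the object passed to `write_raw_bids`):

```python
# Set the mains frequency before writing so the sidecar check passes;
# typically 50 Hz for European recordings, 60 Hz for North American ones.
raw.info["line_freq"] = 60
```
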
- n_megchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_MEG_CH]) - n_megrefchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_REF_MEG_CH]) - n_eegchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_EEG_CH]) - n_ecogchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_ECOG_CH]) - n_seegchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_SEEG_CH]) - n_eogchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_EOG_CH]) - n_ecgchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_ECG_CH]) - n_emgchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_EMG_CH]) - n_miscchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_MISC_CH]) - n_stimchan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_STIM_CH]) - n_ignored - n_dbschan = len([ch for ch in raw.info['chs'] - if ch['kind'] == FIFF.FIFFV_DBS_CH]) - nirs_channels = [ch for ch in raw.info['chs'] if - ch['kind'] == FIFF.FIFFV_FNIRS_CH] + n_megchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_MEG_CH]) + n_megrefchan = len( + [ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_REF_MEG_CH] + ) + n_eegchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_EEG_CH]) + n_ecogchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_ECOG_CH]) + n_seegchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_SEEG_CH]) + n_eogchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_EOG_CH]) + n_ecgchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_ECG_CH]) + n_emgchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_EMG_CH]) + n_miscchan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_MISC_CH]) + n_stimchan = ( + len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_STIM_CH]) + - n_ignored + ) + n_dbschan = len([ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_DBS_CH]) + nirs_channels = [ch for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_FNIRS_CH] n_nirscwchan = len(nirs_channels) - n_nirscwsrc = len(np.unique([ch["ch_name"].split(" ")[0].split("_")[0] - for ch in nirs_channels])) - n_nirscwdet = len(np.unique([ch["ch_name"].split(" ")[0].split("_")[1] - for ch in nirs_channels])) + n_nirscwsrc = len( + np.unique([ch["ch_name"].split(" ")[0].split("_")[0] for ch in nirs_channels]) + ) + n_nirscwdet = len( + np.unique([ch["ch_name"].split(" ")[0].split("_")[1] for ch in nirs_channels]) + ) # Set DigitizedLandmarks to True if any of LPA, RPA, NAS are found # Set DigitizedHeadPoints to True if any "Extra" points are found # (DigitizedHeadPoints done for Neuromag MEG files only) digitized_head_points = False digitized_landmark = False - if datatype == 'meg' and raw.info['dig'] is not None: - for dig_point in raw.info['dig']: - if dig_point['kind'] in [FIFF.FIFFV_POINT_NASION, - FIFF.FIFFV_POINT_RPA, - FIFF.FIFFV_POINT_LPA]: + if datatype == "meg" and raw.info["dig"] is not None: + for dig_point in raw.info["dig"]: + if dig_point["kind"] in [ + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_RPA, + FIFF.FIFFV_POINT_LPA, + ]: digitized_landmark = True - elif dig_point['kind'] == FIFF.FIFFV_POINT_EXTRA and \ - raw.filenames[0].endswith('.fif'): + elif dig_point["kind"] == FIFF.FIFFV_POINT_EXTRA and raw.filenames[ + 0 + ].endswith(".fif"): digitized_head_points = True software_filters = { - 'SpatialCompensation': { - 'GradientOrder': 
raw.compensation_grade - } + "SpatialCompensation": {"GradientOrder": raw.compensation_grade} } # Compile cHPI information, if any. system, _ = _get_meg_system(raw.info) chpi = None hpi_freqs = [] - if (datatype == 'meg' and - parse_version(mne.__version__) > parse_version('0.23')): + if datatype == "meg" and parse_version(mne.__version__) > parse_version("0.23"): # We need to handle different data formats differently - if system == 'CTF_275': + if system == "CTF_275": try: mne.chpi.extract_chpi_locs_ctf(raw) chpi = True except RuntimeError: chpi = False - logger.info('Could not find cHPI information in raw data.') - elif system == 'KIT': + logger.info("Could not find cHPI information in raw data.") + elif system == "KIT": try: mne.chpi.extract_chpi_locs_kit(raw) chpi = True except (RuntimeError, ValueError): chpi = False - logger.info('Could not find cHPI information in raw data.') - elif system in ['122m', '306m']: - n_active_hpi = mne.chpi.get_active_chpi(raw, on_missing='ignore') + logger.info("Could not find cHPI information in raw data.") + elif system in ["122m", "306m"]: + n_active_hpi = mne.chpi.get_active_chpi(raw, on_missing="ignore") chpi = bool(n_active_hpi.sum() > 0) if chpi: - hpi_freqs, _, _ = get_chpi_info(info=raw.info, - on_missing='ignore') + hpi_freqs, _, _ = get_chpi_info(info=raw.info, on_missing="ignore") hpi_freqs = list(hpi_freqs) - elif datatype == 'meg': - logger.info('Cannot check for & write continuous head localization ' - 'information: requires MNE-Python >= 0.24') + elif datatype == "meg": + logger.info( + "Cannot check for & write continuous head localization " + "information: requires MNE-Python >= 0.24" + ) # Define datatype-specific JSON dictionaries ch_info_json_common = [ - ('TaskName', task), - ('Manufacturer', manufacturer), - ('PowerLineFrequency', powerlinefrequency), - ('SamplingFrequency', sfreq), - ('SoftwareFilters', 'n/a'), - ('RecordingDuration', raw.times[-1]), - ('RecordingType', rec_type)] + ("TaskName", task), + ("Manufacturer", manufacturer), + ("PowerLineFrequency", powerlinefrequency), + ("SamplingFrequency", sfreq), + ("SoftwareFilters", "n/a"), + ("RecordingDuration", raw.times[-1]), + ("RecordingType", rec_type), + ] ch_info_json_meg = [ - ('DewarPosition', 'n/a'), - ('DigitizedLandmarks', digitized_landmark), - ('DigitizedHeadPoints', digitized_head_points), - ('MEGChannelCount', n_megchan), - ('MEGREFChannelCount', n_megrefchan), - ('SoftwareFilters', software_filters)] + ("DewarPosition", "n/a"), + ("DigitizedLandmarks", digitized_landmark), + ("DigitizedHeadPoints", digitized_head_points), + ("MEGChannelCount", n_megchan), + ("MEGREFChannelCount", n_megrefchan), + ("SoftwareFilters", software_filters), + ] if chpi is not None: - ch_info_json_meg.append(('ContinuousHeadLocalization', chpi)) - ch_info_json_meg.append(('HeadCoilFrequency', hpi_freqs)) + ch_info_json_meg.append(("ContinuousHeadLocalization", chpi)) + ch_info_json_meg.append(("HeadCoilFrequency", hpi_freqs)) if emptyroom_fname is not None: - ch_info_json_meg.append(('AssociatedEmptyRoom', str(emptyroom_fname))) + ch_info_json_meg.append(("AssociatedEmptyRoom", str(emptyroom_fname))) ch_info_json_eeg = [ - ('EEGReference', 'n/a'), - ('EEGGround', 'n/a'), - ('EEGPlacementScheme', _infer_eeg_placement_scheme(raw)), - ('Manufacturer', manufacturer)] + ("EEGReference", "n/a"), + ("EEGGround", "n/a"), + ("EEGPlacementScheme", _infer_eeg_placement_scheme(raw)), + ("Manufacturer", manufacturer), + ] ch_info_json_ieeg = [ - ('iEEGReference', 'n/a'), - ('ECOGChannelCount', 
n_ecogchan), - ('SEEGChannelCount', n_seegchan + n_dbschan)] - - ch_info_json_nirs = [ - ('Manufacturer', manufacturer) + ("iEEGReference", "n/a"), + ("ECOGChannelCount", n_ecogchan), + ("SEEGChannelCount", n_seegchan + n_dbschan), ] + ch_info_json_nirs = [("Manufacturer", manufacturer)] + ch_info_ch_counts = [ - ('EEGChannelCount', n_eegchan), - ('EOGChannelCount', n_eogchan), - ('ECGChannelCount', n_ecgchan), - ('EMGChannelCount', n_emgchan), - ('MiscChannelCount', n_miscchan), - ('TriggerChannelCount', n_stimchan)] + ("EEGChannelCount", n_eegchan), + ("EOGChannelCount", n_eogchan), + ("ECGChannelCount", n_ecgchan), + ("EMGChannelCount", n_emgchan), + ("MiscChannelCount", n_miscchan), + ("TriggerChannelCount", n_stimchan), + ] ch_info_ch_counts_nirs = [ - ('NIRSChannelCount', n_nirscwchan), - ('NIRSSourceOptodeCount', n_nirscwsrc), - ('NIRSDetectorOptodeCount', n_nirscwdet) + ("NIRSChannelCount", n_nirscwchan), + ("NIRSSourceOptodeCount", n_nirscwsrc), + ("NIRSDetectorOptodeCount", n_nirscwdet), ] # Stitch together the complete JSON dictionary ch_info_json = ch_info_json_common - if datatype == 'meg': + if datatype == "meg": append_datatype_json = ch_info_json_meg - elif datatype == 'eeg': + elif datatype == "eeg": append_datatype_json = ch_info_json_eeg - elif datatype == 'ieeg': + elif datatype == "ieeg": append_datatype_json = ch_info_json_ieeg - elif datatype == 'nirs': + elif datatype == "nirs": append_datatype_json = ch_info_json_nirs ch_info_ch_counts.extend(ch_info_ch_counts_nirs) @@ -963,39 +999,37 @@ def _sidecar_json(raw, task, manufacturer, fname, datatype, def _deface(image, landmarks, deface): - nib = _import_nibabel('deface MRIs') + nib = _import_nibabel("deface MRIs") - inset, theta = (5, 15.) + inset, theta = (5, 15.0) if isinstance(deface, dict): - if 'inset' in deface: - inset = deface['inset'] - if 'theta' in deface: - theta = deface['theta'] + if "inset" in deface: + inset = deface["inset"] + if "theta" in deface: + theta = deface["theta"] if not _is_numeric(inset): - raise ValueError('inset must be numeric (float, int). ' - 'Got %s' % type(inset)) + raise ValueError("inset must be numeric (float, int). " "Got %s" % type(inset)) if not _is_numeric(theta): - raise ValueError('theta must be numeric (float, int). ' - 'Got %s' % type(theta)) + raise ValueError("theta must be numeric (float, int). " "Got %s" % type(theta)) if inset < 0: - raise ValueError('inset should be positive, ' - 'Got %s' % inset) + raise ValueError("inset should be positive, " "Got %s" % inset) if not 0 <= theta < 90: - raise ValueError('theta should be between 0 and 90 ' - 'degrees. Got %s' % theta) + raise ValueError("theta should be between 0 and 90 " "degrees. 
Got %s" % theta) # get image data, make a copy image_data = image.get_fdata().copy() # make indices to move around so that the image doesn't have to - idxs = np.meshgrid(np.arange(image_data.shape[0]), - np.arange(image_data.shape[1]), - np.arange(image_data.shape[2]), - indexing='ij') + idxs = np.meshgrid( + np.arange(image_data.shape[0]), + np.arange(image_data.shape[1]), + np.arange(image_data.shape[2]), + indexing="ij", + ) idxs = np.array(idxs) # (3, *image_data.shape) idxs = np.transpose(idxs, [1, 2, 3, 0]) # (*image_data.shape, 3) idxs = idxs.reshape(-1, 3) # (n_voxels, 3) @@ -1010,8 +1044,8 @@ def _deface(image, landmarks, deface): idxs = apply_trans(translation(x=-x, y=-y + inset, z=-z), idxs) idxs = apply_trans(rotation(x=-np.pi / 2 + np.deg2rad(theta)), idxs) idxs = idxs.reshape(image_data.shape + (3,)) - mask = (idxs[..., 2] < 0) # z < middle - image_data[mask] = 0. + mask = idxs[..., 2] < 0 # z < middle + image_data[mask] = 0.0 # smooth decided against for potential lack of anonymizaton # https://gist.github.com/alexrockhill/15043928b716a432db3a84a050b241ae @@ -1033,8 +1067,11 @@ def _write_raw_fif(raw, bids_fname): """ raw.save( - bids_fname, fmt=raw.orig_format, split_size=_FIFF_SPLIT_SIZE, - split_naming='bids', overwrite=True + bids_fname, + fmt=raw.orig_format, + split_size=_FIFF_SPLIT_SIZE, + split_naming="bids", + overwrite=True, ) @@ -1053,54 +1090,62 @@ def _write_raw_brainvision(raw, bids_fname, events, overwrite): overwrite : bool Whether or not to overwrite existing files. """ - if not check_version('pybv', PYBV_VERSION): # pragma: no cover - raise ImportError(f'pybv >= {PYBV_VERSION} is required for converting' - ' file to BrainVision format') + if not check_version("pybv", PYBV_VERSION): # pragma: no cover + raise ImportError( + f"pybv >= {PYBV_VERSION} is required for converting" + " file to BrainVision format" + ) from pybv import write_brainvision + # Subtract raw.first_samp because brainvision marks events starting from # the first available data point and ignores the raw.first_samp if events is not None: events[:, 0] -= raw.first_samp events = events[:, [0, 2]] # reorder for pybv required order - meas_date = raw.info['meas_date'] + meas_date = raw.info["meas_date"] if meas_date is not None: meas_date = _stamp_to_dt(meas_date) # pybv needs to know the units of the data for appropriate scaling # get voltage units as micro-volts and all other units "as is" unit = [] - for chs in raw.info['chs']: - if chs['unit'] == FIFF.FIFF_UNIT_V: - unit.append('µV') + for chs in raw.info["chs"]: + if chs["unit"] == FIFF.FIFF_UNIT_V: + unit.append("µV") else: - unit.append(_unit2human.get(chs['unit'], 'n/a')) - unit = [u if u not in ['NA'] else 'n/a' for u in unit] + unit.append(_unit2human.get(chs["unit"], "n/a")) + unit = [u if u not in ["NA"] else "n/a" for u in unit] # We enforce conversion to float32 format # XXX: pybv can also write to int16, to do that, we need to get # original units of data prior to conversion, and add an optimization # function to pybv that maximizes the resolution parameter while # ensuring that int16 can represent the data in original units. - if raw.orig_format != 'single': - warn(f'Encountered data in "{raw.orig_format}" format. ' - 'Converting to float32.', RuntimeWarning) + if raw.orig_format != "single": + warn( + f'Encountered data in "{raw.orig_format}" format. 
' + "Converting to float32.", + RuntimeWarning, + ) # Writing to float32 µV with 0.1 resolution are the pybv defaults, # which guarantees accurate roundtrip for values >= 1e-7 µV - fmt = 'binary_float32' + fmt = "binary_float32" resolution = 1e-1 - write_brainvision(data=raw.get_data(), - sfreq=raw.info['sfreq'], - ch_names=raw.ch_names, - ref_ch_names=None, - fname_base=op.splitext(op.basename(bids_fname))[0], - folder_out=op.dirname(bids_fname), - overwrite=overwrite, - events=events, - resolution=resolution, - unit=unit, - fmt=fmt, - meas_date=None) + write_brainvision( + data=raw.get_data(), + sfreq=raw.info["sfreq"], + ch_names=raw.ch_names, + ref_ch_names=None, + fname_base=op.splitext(op.basename(bids_fname))[0], + folder_out=op.dirname(bids_fname), + overwrite=overwrite, + events=events, + resolution=resolution, + unit=unit, + fmt=fmt, + meas_date=None, + ) def _write_raw_edf(raw, bids_fname, overwrite): @@ -1115,18 +1160,30 @@ def _write_raw_edf(raw, bids_fname, overwrite): overwrite : bool Whether to overwrite an existing file or not. """ - assert str(bids_fname).endswith('.edf') + assert str(bids_fname).endswith(".edf") raw.export(bids_fname, overwrite=overwrite) @verbose -def make_dataset_description(*, path, name, hed_version=None, - dataset_type='raw', data_license=None, - authors=None, acknowledgements=None, - how_to_acknowledge=None, funding=None, - ethics_approvals=None, references_and_links=None, - doi=None, generated_by=None, source_datasets=None, - overwrite=False, verbose=None): +def make_dataset_description( + *, + path, + name, + hed_version=None, + dataset_type="raw", + data_license=None, + authors=None, + acknowledgements=None, + how_to_acknowledge=None, + funding=None, + ethics_approvals=None, + references_and_links=None, + doi=None, + generated_by=None, + source_datasets=None, + overwrite=False, + verbose=None, +): """Create a dataset_description.json file for a BIDS dataset. The dataset_description.json file is required in BIDS and describes @@ -1195,78 +1252,83 @@ def make_dataset_description(*, path, name, hed_version=None, """ # Convert potential string input into list of strings convert_vars = [authors, funding, references_and_links, ethics_approvals] - convert_vars = [[i.strip() for i in var.split(',')] - if isinstance(var, str) else var - for var in convert_vars] + convert_vars = [ + [i.strip() for i in var.split(",")] if isinstance(var, str) else var + for var in convert_vars + ] authors, funding, references_and_links, ethics_approvals = convert_vars # Perform input checks - if dataset_type not in ['raw', 'derivative']: - raise ValueError('`dataset_type` must be either "raw" or ' - '"derivative."') + if dataset_type not in ["raw", "derivative"]: + raise ValueError('`dataset_type` must be either "raw" or ' '"derivative."') if isinstance(doi, str): - if not doi.startswith('doi:'): - warn('The `doi` field in dataset_description should be of the ' - 'form `doi:`') + if not doi.startswith("doi:"): + warn( + "The `doi` field in dataset_description should be of the " + "form `doi:`" + ) # check generated_by and source_datasets - msg_type = '{} must be a list of dicts or None.' - msg_key = 'found unexpected key(s) in dict: {}' + msg_type = "{} must be a list of dicts or None." 
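
For reference, the comma-separated-string normalization at the top of `make_dataset_description` (the `convert_vars` lines above) behaves like this (sample names invented):

```python
# Strings are split on commas and stripped; lists pass through unchanged.
authors = "Jane Doe, John Smith"
if isinstance(authors, str):
    authors = [i.strip() for i in authors.split(",")]
assert authors == ["Jane Doe", "John Smith"]
```
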
+ msg_key = "found unexpected key(s) in dict: {}" - generated_by_keys = set([ - "Name", "Version", "Description", "CodeURL", "Container"]) + generated_by_keys = set(["Name", "Version", "Description", "CodeURL", "Container"]) if isinstance(generated_by, list): if not all([isinstance(i, dict) for i in generated_by]): - raise ValueError(msg_type.format('generated_by')) + raise ValueError(msg_type.format("generated_by")) for i in generated_by: - if 'Name' not in i: + if "Name" not in i: raise ValueError( - '"Name" is a required field for each dict in ' - 'generated_by') + '"Name" is a required field for each dict in ' "generated_by" + ) if not set(i.keys()).issubset(generated_by_keys): raise ValueError(msg_key.format(i.keys() - generated_by_keys)) else: if generated_by is not None: - raise ValueError(msg_type.format('generated_by')) + raise ValueError(msg_type.format("generated_by")) source_ds_keys = set(["URL", "DOI", "Version"]) if isinstance(source_datasets, list): if not all([isinstance(i, dict) for i in source_datasets]): - raise ValueError(msg_type.format('source_datasets')) + raise ValueError(msg_type.format("source_datasets")) for i in source_datasets: if not set(i.keys()).issubset(source_ds_keys): raise ValueError(msg_key.format(i.keys() - source_ds_keys)) else: if source_datasets is not None: - raise ValueError(msg_type.format('source_datasets')) + raise ValueError(msg_type.format("source_datasets")) # Prepare dataset_description.json - fname = op.join(path, 'dataset_description.json') - description = OrderedDict([ - ('Name', name), - ('BIDSVersion', BIDS_VERSION), - ('HEDVersion', hed_version), - ('DatasetType', dataset_type), - ('License', data_license), - ('Authors', authors), - ('Acknowledgements', acknowledgements), - ('HowToAcknowledge', how_to_acknowledge), - ('Funding', funding), - ('EthicsApprovals', ethics_approvals), - ('ReferencesAndLinks', references_and_links), - ('DatasetDOI', doi), - ('GeneratedBy', generated_by), - ('SourceDatasets', source_datasets)]) + fname = op.join(path, "dataset_description.json") + description = OrderedDict( + [ + ("Name", name), + ("BIDSVersion", BIDS_VERSION), + ("HEDVersion", hed_version), + ("DatasetType", dataset_type), + ("License", data_license), + ("Authors", authors), + ("Acknowledgements", acknowledgements), + ("HowToAcknowledge", how_to_acknowledge), + ("Funding", funding), + ("EthicsApprovals", ethics_approvals), + ("ReferencesAndLinks", references_and_links), + ("DatasetDOI", doi), + ("GeneratedBy", generated_by), + ("SourceDatasets", source_datasets), + ] + ) # Handle potentially existing file contents if op.isfile(fname): - with open(fname, 'r', encoding='utf-8-sig') as fin: + with open(fname, "r", encoding="utf-8-sig") as fin: orig_cols = json.load(fin) - if 'BIDSVersion' in orig_cols and \ - orig_cols['BIDSVersion'] != BIDS_VERSION: - raise ValueError('Previous BIDS version used, please redo the ' - 'conversion to BIDS in a new directory ' - 'after ensuring all software is updated') + if "BIDSVersion" in orig_cols and orig_cols["BIDSVersion"] != BIDS_VERSION: + raise ValueError( + "Previous BIDS version used, please redo the " + "conversion to BIDS in a new directory " + "after ensuring all software is updated" + ) for key in description: if description[key] is None or not overwrite: description[key] = orig_cols.get(key, None) @@ -1274,8 +1336,8 @@ def make_dataset_description(*, path, name, hed_version=None, # default author to make dataset description BIDS compliant # if the user passed an author don't overwrite, # if there 
was an author there, only overwrite if `overwrite=True`
-    if authors is None and (description['Authors'] is None or overwrite):
-        description['Authors'] = ["[Unspecified]"]
+    if authors is None and (description["Authors"] is None or overwrite):
+        description["Authors"] = ["[Unspecified]"]
 
     # Only write data that is not None
     pop_keys = [key for key, val in description.items() if val is None]
@@ -1292,7 +1354,7 @@ def write_raw_bids(
     event_id=None,
     *,
     anonymize=None,
-    format='auto',
+    format="auto",
     symlink=False,
     empty_room=None,
     allow_preload=False,
@@ -1300,7 +1362,7 @@ def write_raw_bids(
     acpc_aligned=False,
     overwrite=False,
     events_data=None,
-    verbose=None
+    verbose=None,
 ):
     """Save raw data to a BIDS-compliant folder structure.
@@ -1546,44 +1608,52 @@ def write_raw_bids(
     """
 
     if events_data is not None and events is not None:
-        raise ValueError('Only one of events and events_data can be passed.')
+        raise ValueError("Only one of events and events_data can be passed.")
 
     if events_data is not None:
         warn(
-            message='The events_data parameter has been deprecated in favor '
-                    'the new events parameter, to ensure better consistency '
-                    'with MNE-Python. The events_data parameter will be '
-                    'removed in MNE-BIDS 0.14. Please use the events '
-                    'parameter instead.',
-            category=FutureWarning
+            message="The events_data parameter has been deprecated in favor of "
+            "the new events parameter, to ensure better consistency "
+            "with MNE-Python. The events_data parameter will be "
+            "removed in MNE-BIDS 0.14. Please use the events "
+            "parameter instead.",
+            category=FutureWarning,
         )
         events = events_data
         del events_data
 
     if not isinstance(raw, BaseRaw):
-        raise ValueError('raw_file must be an instance of BaseRaw, '
-                         'got %s' % type(raw))
+        raise ValueError(
+            "raw_file must be an instance of BaseRaw, " "got %s" % type(raw)
+        )
 
     if raw.preload is not False and not allow_preload:
         raise ValueError(
-            'The data has already been loaded from disk. To write it to BIDS, '
+            "The data has already been loaded from disk. To write it to BIDS, "
             'pass "allow_preload=True" and the "format" parameter.'
         )
 
     if not isinstance(bids_path, BIDSPath):
-        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
-                           'instantiate using mne_bids.BIDSPath().')
+        raise RuntimeError(
+            '"bids_path" must be a BIDSPath object. Please '
+            "instantiate using mne_bids.BIDSPath()."
+        )
 
-    _validate_type(events, types=('path-like', np.ndarray, None),
-                   item_name='events',
-                   type_name='path-like, NumPy array, or None')
+    _validate_type(
+        events,
+        types=("path-like", np.ndarray, None),
+        item_name="events",
+        type_name="path-like, NumPy array, or None",
+    )
 
-    if symlink and sys.platform in ('win32', 'cygwin'):
-        raise NotImplementedError('Symbolic links are currently not supported '
-                                  'by MNE-BIDS on Windows operating systems.')
+    if symlink and sys.platform in ("win32", "cygwin"):
+        raise NotImplementedError(
+            "Symbolic links are currently not supported "
+            "by MNE-BIDS on Windows operating systems." 
+ ) if symlink and anonymize is not None: - raise ValueError('Cannot create symlinks when anonymizing data.') + raise ValueError("Cannot create symlinks when anonymizing data.") if bids_path.root is None: raise ValueError( @@ -1604,13 +1674,13 @@ def write_raw_bids( ) if events is not None and event_id is None: - raise ValueError('You passed events, but no event_id ' - 'dictionary.') + raise ValueError("You passed events, but no event_id " "dictionary.") - _validate_type(item=empty_room, item_name='empty_room', - types=(mne.io.BaseRaw, BIDSPath, None)) - _validate_type(montage, (mne.channels.DigMontage, None), 'montage') - _validate_type(acpc_aligned, bool, 'acpc_aligned') + _validate_type( + item=empty_room, item_name="empty_room", types=(mne.io.BaseRaw, BIDSPath, None) + ) + _validate_type(montage, (mne.channels.DigMontage, None), "montage") + _validate_type(acpc_aligned, bool, "acpc_aligned") raw = raw.copy() convert = False # flag if converting not copying @@ -1618,47 +1688,48 @@ def write_raw_bids( # Load file, filename, extension if not allow_preload: raw_fname = raw.filenames[0] - if '.ds' in op.dirname(raw.filenames[0]): + if ".ds" in op.dirname(raw.filenames[0]): raw_fname = op.dirname(raw.filenames[0]) # point to file containing header info for multifile systems - raw_fname = raw_fname.replace('.eeg', '.vhdr') - raw_fname = raw_fname.replace('.fdt', '.set') - raw_fname = raw_fname.replace('.dat', '.lay') + raw_fname = raw_fname.replace(".eeg", ".vhdr") + raw_fname = raw_fname.replace(".fdt", ".set") + raw_fname = raw_fname.replace(".dat", ".lay") _, ext = _parse_ext(raw_fname) # force all EDF/BDF files with upper-case extension to be written as # lower case - if ext == '.EDF': - ext = '.edf' - elif ext == '.BDF': - ext = '.bdf' + if ext == ".EDF": + ext = ".edf" + elif ext == ".BDF": + ext = ".bdf" if ext not in ALLOWED_INPUT_EXTENSIONS: raise ValueError( - f'The input data is in a file format not supported by ' + f"The input data is in a file format not supported by " f'BIDS: "{ext}". You can try to preload the data and call ' f'write_raw_bids() with the "allow_preload=True" and the ' f'"format" parameters.' ) - if symlink and ext != '.fif': - raise NotImplementedError('Symlinks are currently only supported ' - 'for FIFF files.') + if symlink and ext != ".fif": + raise NotImplementedError( + "Symlinks are currently only supported " "for FIFF files." + ) raw_orig = reader[ext](**raw._init_kwargs) else: - if format == 'BrainVision': - ext = '.vhdr' - elif format == 'EDF': - ext = '.edf' - elif format == 'FIF': - ext = '.fif' + if format == "BrainVision": + ext = ".vhdr" + elif format == "EDF": + ext = ".edf" + elif format == "FIF": + ext = ".fif" else: msg = ( 'For preloaded data, you must set the "format" parameter ' - 'to one of: BrainVision, EDF, or FIF' + "to one of: BrainVision, EDF, or FIF" ) - if format != 'auto': # the default was changed + if format != "auto": # the default was changed msg += f', but got: "{format}"' raise ValueError(msg) @@ -1668,45 +1739,54 @@ def write_raw_bids( # Check times if not np.array_equal(raw.times, raw_orig.times): if len(raw.times) == len(raw_orig.times): - msg = ("raw.times has changed since reading from disk, but " - "write_raw_bids() doesn't allow writing modified data.") + msg = ( + "raw.times has changed since reading from disk, but " + "write_raw_bids() doesn't allow writing modified data." + ) else: - msg = ("The raw data you want to write contains {comp} time " - "points than the raw data on disk. 
It is possible that you " - "{guess} your data.") + msg = ( + "The raw data you want to write contains {comp} time " + "points than the raw data on disk. It is possible that you " + "{guess} your data." + ) if len(raw.times) < len(raw_orig.times): - msg = msg.format(comp='fewer', guess='cropped') + msg = msg.format(comp="fewer", guess="cropped") elif len(raw.times) > len(raw_orig.times): - msg = msg.format(comp='more', guess='concatenated') + msg = msg.format(comp="more", guess="concatenated") - msg += (' To write the data, please preload it and pass ' - '"allow_preload=True" and the "format" parameter to ' - 'write_raw_bids().') + msg += ( + " To write the data, please preload it and pass " + '"allow_preload=True" and the "format" parameter to ' + "write_raw_bids()." + ) raise ValueError(msg) # Initialize BIDSPath datatype = _handle_datatype(raw, bids_path.datatype) - bids_path = (bids_path.copy() - .update(datatype=datatype, suffix=datatype, extension=ext)) + bids_path = bids_path.copy().update( + datatype=datatype, suffix=datatype, extension=ext + ) # Check whether provided info and raw indicates valid MEG emptyroom data data_is_emptyroom = False - if (bids_path.datatype == 'meg' and bids_path.subject == 'emptyroom' and - bids_path.task == 'noise'): + if ( + bids_path.datatype == "meg" + and bids_path.subject == "emptyroom" + and bids_path.task == "noise" + ): data_is_emptyroom = True # check the session date provided is consistent with the value in raw - meas_date = raw.info.get('meas_date', None) + meas_date = raw.info.get("meas_date", None) if meas_date is not None: if not isinstance(meas_date, datetime): - meas_date = datetime.fromtimestamp(meas_date[0], - tz=timezone.utc) + meas_date = datetime.fromtimestamp(meas_date[0], tz=timezone.utc) - if anonymize is not None and 'daysback' in anonymize: - meas_date = meas_date - timedelta(anonymize['daysback']) - er_date = meas_date.strftime('%Y%m%d') + if anonymize is not None and "daysback" in anonymize: + meas_date = meas_date - timedelta(anonymize["daysback"]) + er_date = meas_date.strftime("%Y%m%d") bids_path = bids_path.copy().update(session=er_date) else: - er_date = meas_date.strftime('%Y%m%d') + er_date = meas_date.strftime("%Y%m%d") if er_date != bids_path.session: raise ValueError( @@ -1719,18 +1799,15 @@ def write_raw_bids( associated_er_path = None if isinstance(empty_room, mne.io.BaseRaw): - er_date = empty_room.info['meas_date'] + er_date = empty_room.info["meas_date"] if not er_date: raise ValueError( - 'The empty-room raw data must have a valid measurement date ' + "The empty-room raw data must have a valid measurement date " 'set. Please update its info["meas_date"] field.' 
) er_session = er_date.strftime("%Y%m%d") er_bids_path = bids_path.copy().update( - subject='emptyroom', - session=er_session, - task='noise', - run=None + subject="emptyroom", session=er_session, task="noise", run=None ) write_raw_bids( raw=empty_room, @@ -1744,30 +1821,35 @@ def write_raw_bids( montage=montage, acpc_aligned=acpc_aligned, overwrite=overwrite, - verbose=verbose + verbose=verbose, ) associated_er_path = er_bids_path.fpath del er_bids_path, er_date, er_session elif isinstance(empty_room, BIDSPath): - if bids_path.datatype != 'meg': - raise ValueError('"empty_room" is only supported for ' - 'MEG data.') + if bids_path.datatype != "meg": + raise ValueError('"empty_room" is only supported for ' "MEG data.") if data_is_emptyroom: - raise ValueError('You cannot write empty-room data and pass ' - '"empty_room" at the same time.') + raise ValueError( + "You cannot write empty-room data and pass " + '"empty_room" at the same time.' + ) if bids_path.root != empty_room.root: - raise ValueError('The MEG data and its associated empty-room ' - 'recording must share the same BIDS root.') + raise ValueError( + "The MEG data and its associated empty-room " + "recording must share the same BIDS root." + ) associated_er_path = empty_room.fpath if associated_er_path is not None: if not associated_er_path.exists(): - raise FileNotFoundError(f'Empty-room data file not found: ' - f'{associated_er_path}') + raise FileNotFoundError( + f"Empty-room data file not found: " f"{associated_er_path}" + ) # Turn it into a path relative to the BIDS root - associated_er_path = Path(str(associated_er_path) - .replace(str(bids_path.root), '')) + associated_er_path = Path( + str(associated_er_path).replace(str(bids_path.root), "") + ) # Ensure it works on Windows too associated_er_path = associated_er_path.as_posix() @@ -1781,29 +1863,29 @@ def write_raw_bids( data_path = bids_path.mkdir().directory # create *_scans.tsv - session_path = BIDSPath(subject=bids_path.subject, - session=bids_path.session, root=bids_path.root) - scans_path = session_path.copy().update(suffix='scans', extension='.tsv') + session_path = BIDSPath( + subject=bids_path.subject, session=bids_path.session, root=bids_path.root + ) + scans_path = session_path.copy().update(suffix="scans", extension=".tsv") # create *_coordsystem.json coordsystem_path = session_path.copy().update( - acquisition=bids_path.acquisition, space=bids_path.space, - datatype=bids_path.datatype, suffix='coordsystem', extension='.json') + acquisition=bids_path.acquisition, + space=bids_path.space, + datatype=bids_path.datatype, + suffix="coordsystem", + extension=".json", + ) # For the remaining files, we can use BIDSPath to alter. 
- readme_fname = op.join(bids_path.root, 'README') - participants_tsv_fname = op.join(bids_path.root, 'participants.tsv') - participants_json_fname = participants_tsv_fname.replace('.tsv', - '.json') - - sidecar_path = bids_path.copy().update(suffix=bids_path.datatype, - extension='.json') - events_tsv_path = bids_path.copy().update( - suffix='events', extension='.tsv' - ) - events_json_path = events_tsv_path.copy().update(extension='.json') - channels_path = bids_path.copy().update( - suffix='channels', extension='.tsv') + readme_fname = op.join(bids_path.root, "README") + participants_tsv_fname = op.join(bids_path.root, "participants.tsv") + participants_json_fname = participants_tsv_fname.replace(".tsv", ".json") + + sidecar_path = bids_path.copy().update(suffix=bids_path.datatype, extension=".json") + events_tsv_path = bids_path.copy().update(suffix="events", extension=".tsv") + events_json_path = events_tsv_path.copy().update(extension=".json") + channels_path = bids_path.copy().update(suffix="channels", extension=".tsv") # Anonymize keep_source = False @@ -1811,20 +1893,19 @@ def write_raw_bids( daysback, keep_his, keep_source = _check_anonymize(anonymize, raw, ext) raw.anonymize(daysback=daysback, keep_his=keep_his) - if bids_path.datatype == 'meg' and ext != '.fif': - warn('Converting to FIF for anonymization') + if bids_path.datatype == "meg" and ext != ".fif": + warn("Converting to FIF for anonymization") convert = True - bids_path.update(extension='.fif') - elif bids_path.datatype in ['eeg', 'ieeg']: - if ext not in ['.vhdr', '.edf', '.bdf', '.EDF']: - warn('Converting data files to BrainVision format ' - 'for anonymization') + bids_path.update(extension=".fif") + elif bids_path.datatype in ["eeg", "ieeg"]: + if ext not in [".vhdr", ".edf", ".bdf", ".EDF"]: + warn("Converting data files to BrainVision format " "for anonymization") convert = True - bids_path.update(extension='.vhdr') + bids_path.update(extension=".vhdr") # Read in Raw object and extract metadata from Raw object if needed - orient = ORIENTATION.get(ext, 'n/a') - unit = EXT_TO_UNIT_MAP.get(ext, 'n/a') - manufacturer = MANUFACTURERS.get(ext, 'n/a') + orient = ORIENTATION.get(ext, "n/a") + unit = EXT_TO_UNIT_MAP.get(ext, "n/a") + manufacturer = MANUFACTURERS.get(ext, "n/a") # save readme file unless it already exists # XXX: can include README overwrite in future if using a template API @@ -1833,53 +1914,69 @@ def write_raw_bids( # save all participants meta data _participants_tsv( - raw=raw, subject_id=bids_path.subject, fname=participants_tsv_fname, - overwrite=overwrite + raw=raw, + subject_id=bids_path.subject, + fname=participants_tsv_fname, + overwrite=overwrite, ) _participants_json(participants_json_fname, True) # for MEG, we only write coordinate system - if bids_path.datatype == 'meg' and not data_is_emptyroom: + if bids_path.datatype == "meg" and not data_is_emptyroom: if bids_path.space is None: sensor_coord_system = orient - elif orient == 'n/a': + elif orient == "n/a": sensor_coord_system = bids_path.space elif bids_path.space in BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS: sensor_coord_system = bids_path.space elif orient != bids_path.space: - raise ValueError(f'BIDSPath.space {bids_path.space} conflicts ' - f'with filetype {ext} which has coordinate ' - f'frame {orient}') - _write_coordsystem_json(raw=raw, unit=unit, hpi_coord_system=orient, - sensor_coord_system=sensor_coord_system, - fname=coordsystem_path.fpath, - datatype=bids_path.datatype, - overwrite=overwrite) - _write_coordsystem_json(raw=raw, 
unit=unit, hpi_coord_system=orient,
-                                sensor_coord_system=sensor_coord_system,
-                                fname=coordsystem_path.fpath,
-                                datatype=bids_path.datatype,
-                                overwrite=overwrite)
-    elif bids_path.datatype in ['eeg', 'ieeg', 'nirs']:
+            raise ValueError(
+                f"BIDSPath.space {bids_path.space} conflicts "
+                f"with filetype {ext} which has coordinate "
+                f"frame {orient}"
+            )
+        _write_coordsystem_json(
+            raw=raw,
+            unit=unit,
+            hpi_coord_system=orient,
+            sensor_coord_system=sensor_coord_system,
+            fname=coordsystem_path.fpath,
+            datatype=bids_path.datatype,
+            overwrite=overwrite,
+        )
+    elif bids_path.datatype in ["eeg", "ieeg", "nirs"]:
         # We only write electrodes.tsv and accompanying coordsystem.json
         # if we have an available DigMontage
-        if montage is not None or \
-                (raw.info['dig'] is not None and raw.info['dig']):
-            _write_dig_bids(bids_path, raw, montage, acpc_aligned,
-                            overwrite)
+        if montage is not None or (raw.info["dig"] is not None and raw.info["dig"]):
+            _write_dig_bids(bids_path, raw, montage, acpc_aligned, overwrite)
         else:
-            logger.info(f'Writing of electrodes.tsv is not supported '
-                        f'for data type "{bids_path.datatype}". Skipping ...')
+            logger.info(
+                f"Writing of electrodes.tsv is not supported "
+                f'for data type "{bids_path.datatype}". Skipping ...'
+            )

     # Write events.
     if not data_is_emptyroom:
         events_array, event_dur, event_desc_id_map = _read_events(
-            events, event_id, raw, bids_path=bids_path)
+            events, event_id, raw, bids_path=bids_path
+        )
         if events_array.size != 0:
             _events_tsv(
-                events=events_array, durations=event_dur, raw=raw,
-                fname=events_tsv_path.fpath, trial_type=event_desc_id_map,
-                overwrite=overwrite
+                events=events_array,
+                durations=event_dur,
+                raw=raw,
+                fname=events_tsv_path.fpath,
+                trial_type=event_desc_id_map,
+                overwrite=overwrite,
             )
             _events_json(fname=events_json_path.fpath, overwrite=overwrite)
         # Keep events_array around for BrainVision writing below.
@@ -1891,9 +1988,15 @@
     # this function.
     make_dataset_description(path=bids_path.root, name=" ", overwrite=False)

-    _sidecar_json(raw, task=bids_path.task, manufacturer=manufacturer,
-                  fname=sidecar_path.fpath, datatype=bids_path.datatype,
-                  emptyroom_fname=associated_er_path, overwrite=overwrite)
+    _sidecar_json(
+        raw,
+        task=bids_path.task,
+        manufacturer=manufacturer,
+        fname=sidecar_path.fpath,
+        datatype=bids_path.datatype,
+        emptyroom_fname=associated_er_path,
+        overwrite=overwrite,
+    )
     _channels_tsv(raw, channels_path.fpath, overwrite)

     # create parent directories if needed
@@ -1906,52 +2009,62 @@

     if convert and symlink:
         raise RuntimeError(
-            'The input file format is not supported by the BIDS standard. '
-            'To store your data, MNE-BIDS would have to convert it. '
-            'However, this is not possible since you set symlink=True. '
-            'Deactivate symbolic links by passing symlink=False to allow '
-            'file format conversion.')
+            "The input file format is not supported by the BIDS standard. "
+            "To store your data, MNE-BIDS would have to convert it. "
+            "However, this is not possible since you set symlink=True. "
+            "Deactivate symbolic links by passing symlink=False to allow "
+            "file format conversion."
+        )

     # check if there is a BIDS-unsupported MEG format
-    if bids_path.datatype == 'meg' and convert and not anonymize:
+    if bids_path.datatype == "meg" and convert and not anonymize:
         raise ValueError(
             f"Got file extension {ext} for MEG data, "
             f"expected one of "
-            f"{', '.join(sorted(ALLOWED_DATATYPE_EXTENSIONS['meg']))}")
+            f"{', '.join(sorted(ALLOWED_DATATYPE_EXTENSIONS['meg']))}"
+        )

     if not convert:
-        logger.info(f'Copying data files to {bids_path.fpath.name}')
+        logger.info(f"Copying data files to {bids_path.fpath.name}")

     # If users desire a certain format, will handle auto-conversion
-    if format != 'auto':
-        if format == 'BrainVision' and bids_path.datatype in ['ieeg', 'eeg']:
+    if format != "auto":
+        if format == "BrainVision" and bids_path.datatype in ["ieeg", "eeg"]:
             convert = True
-            bids_path.update(extension='.vhdr')
-        elif format == 'EDF' and bids_path.datatype in ['ieeg', 'eeg']:
+            bids_path.update(extension=".vhdr")
+        elif format == "EDF" and bids_path.datatype in ["ieeg", "eeg"]:
             convert = True
-            bids_path.update(extension='.edf')
-        elif format == 'FIF' and bids_path.datatype == 'meg':
+            bids_path.update(extension=".edf")
+        elif format == "FIF" and bids_path.datatype == "meg":
             convert = True
-            bids_path.update(extension='.fif')
+            bids_path.update(extension=".fif")
         elif all(format not in values for values in CONVERT_FORMATS.values()):
-            raise ValueError(f'The input "format" {format} is not an '
-                             f'accepted input format for `write_raw_bids`. '
-                             f'Please use one of {CONVERT_FORMATS[datatype]} '
-                             f'for {datatype} datatype.')
+            raise ValueError(
+                f'The input "format" {format} is not an '
+                f"accepted input format for `write_raw_bids`. "
+                f"Please use one of {CONVERT_FORMATS[datatype]} "
+                f"for {datatype} datatype."
+            )
         elif format not in CONVERT_FORMATS[datatype]:
-            raise ValueError(f'The input "format" {format} is not an '
-                             f'accepted input format for {datatype} datatype. '
-                             f'Please use one of {CONVERT_FORMATS[datatype]} '
-                             f'for {datatype} datatype.')
+            raise ValueError(
+                f'The input "format" {format} is not an '
+                f"accepted input format for {datatype} datatype. "
+                f"Please use one of {CONVERT_FORMATS[datatype]} "
+                f"for {datatype} datatype."
+            )

     # raise error when trying to copy files (copyfile_*) into same location
     # (src == dest, see https://github.com/mne-tools/mne-bids/issues/867)
-    if bids_path.fpath.exists() and not convert and \
-            bids_path.fpath.as_posix() == Path(raw_fname).as_posix():
+    if (
+        bids_path.fpath.exists()
+        and not convert
+        and bids_path.fpath.as_posix() == Path(raw_fname).as_posix()
+    ):
         raise FileExistsError(
             f'Desired output BIDSPath ("{bids_path.fpath}") is the source'
-            ' file. Please pass a different output BIDSPath, or set'
-            ' `format` to something other than "auto".')
+            " file. Please pass a different output BIDSPath, or set"
+            ' `format` to something other than "auto".'
+        )

     # otherwise if the BIDSPath currently exists, check if we
     # would like to overwrite the existing dataset
@@ -1965,25 +2078,31 @@
             bids_path.fpath.unlink()
         else:
             raise FileExistsError(
-                f'"{bids_path.fpath}" already exists. '
-                'Please set overwrite to True.')
+                f'"{bids_path.fpath}" already exists. ' "Please set overwrite to True."
+            )

     # File saving branching logic
     if convert:
-        if bids_path.datatype == 'meg':
+        if bids_path.datatype == "meg":
             _write_raw_fif(
-                raw, (op.join(data_path, bids_path.basename)
-                      if ext == '.pdf' else bids_path.fpath))
-        elif bids_path.datatype in ['eeg', 'ieeg'] and format == 'EDF':
-            warn('Converting data files to EDF format')
+                raw,
+                (
+                    op.join(data_path, bids_path.basename)
+                    if ext == ".pdf"
+                    else bids_path.fpath
+                ),
+            )
+        elif bids_path.datatype in ["eeg", "ieeg"] and format == "EDF":
+            warn("Converting data files to EDF format")
             _write_raw_edf(raw, bids_path.fpath, overwrite=overwrite)
         else:
-            warn('Converting data files to BrainVision format')
-            bids_path.update(suffix=bids_path.datatype, extension='.vhdr')
+            warn("Converting data files to BrainVision format")
+            bids_path.update(suffix=bids_path.datatype, extension=".vhdr")
             # XXX Should we write durations here too?
-            _write_raw_brainvision(raw, bids_path.fpath, events=events_array,
-                                   overwrite=overwrite)
-    elif ext == '.fif':
+            _write_raw_brainvision(
+                raw, bids_path.fpath, events=events_array, overwrite=overwrite
+            )
+    elif ext == ".fif":
         if symlink:
             link_target = Path(raw.filenames[0])
             link_path = bids_path.fpath
@@ -1991,41 +2110,52 @@
         else:
             _write_raw_fif(raw, bids_path)
     # CTF data is saved and renamed in a directory
-    elif ext == '.ds':
+    elif ext == ".ds":
         copyfile_ctf(raw_fname, bids_path)
     # BrainVision is multifile, copy over all of them and fix pointers
-    elif ext == '.vhdr':
+    elif ext == ".vhdr":
         copyfile_brainvision(raw_fname, bids_path, anonymize=anonymize)
-    elif ext in ['.edf', '.EDF', '.bdf', '.BDF']:
+    elif ext in [".edf", ".EDF", ".bdf", ".BDF"]:
         if anonymize is not None:
-            warn("EDF/EDF+/BDF files contain two fields for recording dates."
-                 "Due to file format limitations, one of these fields only "
-                 "supports 2-digit years. The date for that field will be "
-                 "set to 85 (i.e., 1985), the earliest possible date. "
-                 "The true anonymized date is stored in the scans.tsv file.")
+            warn(
+                "EDF/EDF+/BDF files contain two fields for recording dates. "
+                "Due to file format limitations, one of these fields only "
+                "supports 2-digit years. The date for that field will be "
+                "set to 85 (i.e., 1985), the earliest possible date. "
+                "The true anonymized date is stored in the scans.tsv file."
+ ) copyfile_edf(raw_fname, bids_path, anonymize=anonymize) # EEGLAB .set might be accompanied by a .fdt - find out and copy it too - elif ext == '.set': + elif ext == ".set": copyfile_eeglab(raw_fname, bids_path) - elif ext == '.pdf': + elif ext == ".pdf": raw_dir = op.join(data_path, op.splitext(bids_path.basename)[0]) _mkdir_p(raw_dir) copyfile_bti(raw_orig, raw_dir) - elif ext in ['.con', '.sqd']: - copyfile_kit(raw_fname, bids_path.fpath, bids_path.subject, - bids_path.session, bids_path.task, bids_path.run, - raw._init_kwargs) + elif ext in [".con", ".sqd"]: + copyfile_kit( + raw_fname, + bids_path.fpath, + bids_path.subject, + bids_path.session, + bids_path.task, + bids_path.run, + raw._init_kwargs, + ) else: # ext may be .snirf shutil.copyfile(raw_fname, bids_path) # write to the scans.tsv file the output file written scan_relative_fpath = op.join(bids_path.datatype, bids_path.fpath.name) - _scans_tsv(raw, raw_fname=scan_relative_fpath, - fname=scans_path.fpath, keep_source=keep_source, - overwrite=overwrite) - logger.info(f'Wrote {scans_path.fpath} entry with ' - f'{scan_relative_fpath}.') + _scans_tsv( + raw, + raw_fname=scan_relative_fpath, + fname=scans_path.fpath, + keep_source=keep_source, + overwrite=overwrite, + ) + logger.info(f"Wrote {scans_path.fpath} entry with " f"{scan_relative_fpath}.") return bids_path @@ -2066,17 +2196,19 @@ def get_anat_landmarks(image, info, trans, fs_subject, fs_subjects_dir=None): landmarks : mne.channels.DigMontage A montage with the landmarks in MRI voxel space. """ - nib = _import_nibabel('get anatomical landmarks') - coords_dict, coord_frame = _get_fid_coords(info['dig']) + nib = _import_nibabel("get anatomical landmarks") + coords_dict, coord_frame = _get_fid_coords(info["dig"]) if coord_frame != FIFF.FIFFV_COORD_HEAD: - raise ValueError('Fiducial coordinates in `info` must be in ' - f'the head coordinate frame, got {coord_frame}') - landmarks = np.asarray((coords_dict['lpa'], - coords_dict['nasion'], - coords_dict['rpa'])) + raise ValueError( + "Fiducial coordinates in `info` must be in " + f"the head coordinate frame, got {coord_frame}" + ) + landmarks = np.asarray( + (coords_dict["lpa"], coords_dict["nasion"], coords_dict["rpa"]) + ) # get trans and ensure it is from head to MRI - trans, _ = _get_trans(trans, fro='head', to='mri') + trans, _ = _get_trans(trans, fro="head", to="mri") landmarks = _meg_landmarks_to_mri_landmarks(landmarks, trans) # Get FS T1 image in MGH format @@ -2091,13 +2223,13 @@ def get_anat_landmarks(image, info, trans, fs_subject, fs_subjects_dir=None): # Input image: go to T1 voxel space from T1 scanner space if isinstance(image, BIDSPath): image = image.fpath - img_nii = _load_image(image, name='image') + img_nii = _load_image(image, name="image") img_mgh = nib.MGHImage(img_nii.dataobj, img_nii.affine) landmarks = _mri_scanner_ras_to_mri_voxels(landmarks, img_mgh) landmarks = mne.channels.make_dig_montage( - lpa=landmarks[0], nasion=landmarks[1], rpa=landmarks[2], - coord_frame='mri_voxel') + lpa=landmarks[0], nasion=landmarks[1], rpa=landmarks[2], coord_frame="mri_voxel" + ) return landmarks @@ -2107,55 +2239,58 @@ def _get_t1w_mgh(fs_subject, fs_subjects_dir): import nibabel as nib fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=True) - t1_fname = Path(fs_subjects_dir) / fs_subject / 'mri' / 'T1.mgz' + t1_fname = Path(fs_subjects_dir) / fs_subject / "mri" / "T1.mgz" if not t1_fname.exists(): - raise ValueError('Freesurfer recon-all subject folder ' - 'is incorrect or improperly formatted, ' - 
f'got {Path(fs_subjects_dir) / fs_subject}')
-    t1w_img = _load_image(str(t1_fname), name='T1.mgz')
+        raise ValueError(
+            "Freesurfer recon-all subject folder "
+            "is incorrect or improperly formatted, "
+            f"got {Path(fs_subjects_dir) / fs_subject}"
+        )
+    t1w_img = _load_image(str(t1_fname), name="T1.mgz")
     t1w_mgh = nib.MGHImage(t1w_img.dataobj, t1w_img.affine)
     return t1w_mgh


-def _get_landmarks(landmarks, image_nii, kind=''):
+def _get_landmarks(landmarks, image_nii, kind=""):
     import nibabel as nib
+
     if isinstance(landmarks, (str, Path)):
         landmarks, coord_frame = read_fiducials(landmarks)
-        landmarks = np.array([landmark['r'] for landmark in
-                              landmarks], dtype=float)  # unpack
+        landmarks = np.array(
+            [landmark["r"] for landmark in landmarks], dtype=float
+        )  # unpack
     else:
         # Prepare to write the sidecar JSON, extract MEG landmarks
         coords_dict, coord_frame = _get_fid_coords(landmarks.dig)
-        landmarks = np.asarray((coords_dict['lpa'],
-                                coords_dict['nasion'],
-                                coords_dict['rpa']))
+        landmarks = np.asarray(
+            (coords_dict["lpa"], coords_dict["nasion"], coords_dict["rpa"])
+        )

     # check if coord frame is supported
-    if coord_frame not in (FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
-                           FIFF.FIFFV_MNE_COORD_RAS):
-        raise ValueError(f'Coordinate frame not supported: {coord_frame}')
+    if coord_frame not in (FIFF.FIFFV_MNE_COORD_MRI_VOXEL, FIFF.FIFFV_MNE_COORD_RAS):
+        raise ValueError(f"Coordinate frame not supported: {coord_frame}")

     # convert to voxels from scanner RAS to voxels
     if coord_frame == FIFF.FIFFV_MNE_COORD_RAS:
         # Make MGH image for header properties
         img_mgh = nib.MGHImage(image_nii.dataobj, image_nii.affine)
-        landmarks = _mri_scanner_ras_to_mri_voxels(
-            landmarks * 1e3, img_mgh)
+        landmarks = _mri_scanner_ras_to_mri_voxels(landmarks * 1e3, img_mgh)

     suffix = f"_{kind}" if kind else ""

     # Write sidecar.json
     img_json = {
-        'LPA' + suffix: list(landmarks[0, :]),
-        'NAS' + suffix: list(landmarks[1, :]),
-        'RPA' + suffix: list(landmarks[2, :])
+        "LPA" + suffix: list(landmarks[0, :]),
+        "NAS" + suffix: list(landmarks[1, :]),
+        "RPA" + suffix: list(landmarks[2, :]),
     }
     return img_json, landmarks


 @verbose
-def write_anat(image, bids_path, landmarks=None, deface=False, overwrite=False,
-               verbose=None):
+def write_anat(
+    image, bids_path, landmarks=None, deface=False, overwrite=False, verbose=None
+):
     """Put anatomical MRI data into a BIDS format.

     Given an MRI scan, format and store the MR data according to BIDS in the
@@ -2220,18 +2355,20 @@
     bids_path : BIDSPath
         Path to the written MRI data.
     """
-    nib = _import_nibabel('write anatomical MRI data')
+    nib = _import_nibabel("write anatomical MRI data")

     write_sidecar = landmarks is not None

     if deface and landmarks is None:
-        raise ValueError('`landmarks` must be provided to deface the image')
+        raise ValueError("`landmarks` must be provided to deface the image")

     # Check if the root is available
     if bids_path.root is None:
-        raise ValueError('The root of the "bids_path" must be set. '
-                         'Please use `bids_path.update(root="<root>")` '
-                         'to set the root of the BIDS folder to read.')
+        raise ValueError(
+            'The root of the "bids_path" must be set. '
+            'Please use `bids_path.update(root="<root>")` '
+            "to set the root of the BIDS folder to read."
+        )

     # create a copy
     bids_path = bids_path.copy()
@@ -2243,14 +2380,14 @@

     # this file is anat
     if bids_path.datatype is None:
-        bids_path.update(datatype='anat')
+        bids_path.update(datatype="anat")

     # default to T1w
     if not bids_path.suffix:
-        bids_path.update(suffix='T1w')
+        bids_path.update(suffix="T1w")

     # data is compressed Nifti
-    bids_path.update(extension='.nii.gz')
+    bids_path.update(extension=".nii.gz")

     # create the directory for the MRI data
     bids_path.directory.mkdir(exist_ok=True, parents=True)
@@ -2264,15 +2401,14 @@
             landmarks = {"": landmarks}
         img_json = {}
         for kind, this_landmarks in landmarks.items():
-            img_json.update(
-                _get_landmarks(this_landmarks, image_nii, kind=kind)[0]
-            )
-        img_json = {'AnatomicalLandmarkCoordinates': img_json}
-        fname = bids_path.copy().update(extension='.json')
+            img_json.update(_get_landmarks(this_landmarks, image_nii, kind=kind)[0])
+        img_json = {"AnatomicalLandmarkCoordinates": img_json}
+        fname = bids_path.copy().update(extension=".json")
         if op.isfile(fname) and not overwrite:
-            raise IOError('Wanted to write a file but it already exists and '
-                          '`overwrite` is set to False. File: "{}"'
-                          .format(fname))
+            raise IOError(
+                "Wanted to write a file but it already exists and "
+                '`overwrite` is set to False. File: "{}"'.format(fname)
+            )
         _write_json(fname, img_json, overwrite)

     if deface:
@@ -2288,8 +2424,10 @@
         if overwrite:
             os.remove(bids_path)
         else:
-            raise IOError(f'Wanted to write a file but it already exists and '
-                          f'`overwrite` is set to False. File: "{bids_path}"')
+            raise IOError(
+                f"Wanted to write a file but it already exists and "
+                f'`overwrite` is set to False. File: "{bids_path}"'
+            )

     nib.save(image_nii, bids_path.fpath)


@@ -2297,8 +2435,7 @@
 @verbose
-def mark_channels(bids_path, *, ch_names, status, descriptions=None,
-                  verbose=None):
+def mark_channels(bids_path, *, ch_names, status, descriptions=None, verbose=None):
     """Update status and description of channels in an existing BIDS dataset.

     Parameters
@@ -2348,23 +2485,28 @@
     ...                  verbose=False)
     """
     if not isinstance(bids_path, BIDSPath):
-        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
-                           'instantiate using mne_bids.BIDSPath().')
+        raise RuntimeError(
+            '"bids_path" must be a BIDSPath object. Please '
+            "instantiate using mne_bids.BIDSPath()."
+        )

     if bids_path.root is None:
-        raise ValueError('The root of the "bids_path" must be set. '
-                         'Please use `bids_path.update(root="<root>")` '
-                         'to set the root of the BIDS folder to read.')
+        raise ValueError(
+            'The root of the "bids_path" must be set. '
+            'Please use `bids_path.update(root="<root>")` '
+            "to set the root of the BIDS folder to read."
+        )

     # Read sidecar file
-    channels_fname = _find_matching_sidecar(bids_path, suffix='channels',
-                                            extension='.tsv')
+    channels_fname = _find_matching_sidecar(
+        bids_path, suffix="channels", extension=".tsv"
+    )
     tsv_data = _from_tsv(channels_fname)

     # if an empty list is passed in, then these are the entire list
     # of channels
     if ch_names == []:
-        ch_names = tsv_data['name']
+        ch_names = tsv_data["name"]
     elif isinstance(ch_names, str):
         ch_names = [ch_names]
@@ -2379,41 +2521,45 @@
         status = [status] * len(ch_names)

     if len(ch_names) != len(descriptions):
-        raise ValueError('Number of channels and descriptions must match.')
+        raise ValueError("Number of channels and descriptions must match.")

     if len(status) != len(ch_names):
-        raise ValueError(f'If status is a list of {len(status)} statuses, '
-                         f'then it must have the same length as ch_names '
-                         f'({len(ch_names)}).')
+        raise ValueError(
+            f"If status is a list of {len(status)} statuses, "
+            f"then it must have the same length as ch_names "
+            f"({len(ch_names)})."
+        )

-    if not all(status in ['good', 'bad'] for status in status):
-        raise ValueError('Setting the status of a channel must only be '
-                         '"good", or "bad".')
+    if not all(status in ["good", "bad"] for status in status):
+        raise ValueError(
+            "Setting the status of a channel must only be " '"good" or "bad".'
+        )

     # Read sidecar and create required columns if they do not exist.
-    if 'status' not in tsv_data:
+    if "status" not in tsv_data:
         logger.info('No "status" column found in input file. Creating.')
-        tsv_data['status'] = ['good'] * len(tsv_data['name'])
+        tsv_data["status"] = ["good"] * len(tsv_data["name"])

-    if 'status_description' not in tsv_data:
-        logger.info('No "status_description" column found in input file. '
-                    'Creating.')
-        tsv_data['status_description'] = ['n/a'] * len(tsv_data['name'])
+    if "status_description" not in tsv_data:
+        logger.info('No "status_description" column found in input file. ' "Creating.")
+        tsv_data["status_description"] = ["n/a"] * len(tsv_data["name"])

     # Now actually mark the user-requested channels as bad.
     for ch_name, status_, description in zip(ch_names, status, descriptions):
-        if ch_name not in tsv_data['name']:
-            raise ValueError(f'Channel {ch_name} not found in dataset!')
-
-        idx = tsv_data['name'].index(ch_name)
-        logger.info(f'Processing channel {ch_name}:\n'
-                    f'    status: bad\n'
-                    f'    description: {description}')
-        tsv_data['status'][idx] = status_
+        if ch_name not in tsv_data["name"]:
+            raise ValueError(f"Channel {ch_name} not found in dataset!")
+
+        idx = tsv_data["name"].index(ch_name)
+        logger.info(
+            f"Processing channel {ch_name}:\n"
+            f"    status: {status_}\n"
+            f"    description: {description}"
+        )
+        tsv_data["status"][idx] = status_

         # only write if the description was passed in
         if description is not None:
-            tsv_data['status_description'][idx] = description
+            tsv_data["status_description"][idx] = description

     _write_tsv(channels_fname, tsv_data, overwrite=True)
@@ -2444,36 +2590,50 @@
     Writing fine-calibration file to ...sub-01_ses-test_acq-calibration_meg.dat...
""" # noqa: E501 if bids_path.root is None or bids_path.subject is None: - raise ValueError('bids_path must have root and subject set.') - if bids_path.datatype not in (None, 'meg'): - raise ValueError('Can only write fine-calibration information for MEG ' - 'datasets.') - - _validate_type(calibration, types=('path-like', dict), - item_name='calibration', - type_name='path or dictionary') - - if (isinstance(calibration, dict) and - ('ch_names' not in calibration or - 'locs' not in calibration or - 'imb_cals' not in calibration)): - raise ValueError('The dictionary you passed does not appear to be a ' - 'proper fine-calibration dict. Please only pass the ' - 'output of ' - 'mne.preprocessing.read_fine_calibration(), or a ' - 'filename.') + raise ValueError("bids_path must have root and subject set.") + if bids_path.datatype not in (None, "meg"): + raise ValueError( + "Can only write fine-calibration information for MEG " "datasets." + ) + + _validate_type( + calibration, + types=("path-like", dict), + item_name="calibration", + type_name="path or dictionary", + ) + + if isinstance(calibration, dict) and ( + "ch_names" not in calibration + or "locs" not in calibration + or "imb_cals" not in calibration + ): + raise ValueError( + "The dictionary you passed does not appear to be a " + "proper fine-calibration dict. Please only pass the " + "output of " + "mne.preprocessing.read_fine_calibration(), or a " + "filename." + ) if not isinstance(calibration, dict): calibration = mne.preprocessing.read_fine_calibration(calibration) - out_path = BIDSPath(subject=bids_path.subject, session=bids_path.session, - acquisition='calibration', suffix='meg', - extension='.dat', datatype='meg', root=bids_path.root) + out_path = BIDSPath( + subject=bids_path.subject, + session=bids_path.session, + acquisition="calibration", + suffix="meg", + extension=".dat", + datatype="meg", + root=bids_path.root, + ) - logger.info(f'Writing fine-calibration file to {out_path}') + logger.info(f"Writing fine-calibration file to {out_path}") out_path.mkdir() - mne.preprocessing.write_fine_calibration(fname=str(out_path), - calibration=calibration) + mne.preprocessing.write_fine_calibration( + fname=str(out_path), calibration=calibration + ) @verbose @@ -2500,30 +2660,34 @@ def write_meg_crosstalk(fname, bids_path, verbose=None): Writing crosstalk file to ...sub-01_ses-test_acq-crosstalk_meg.fif """ # noqa: E501 if bids_path.root is None or bids_path.subject is None: - raise ValueError('bids_path must have root and subject set.') - if bids_path.datatype not in (None, 'meg'): - raise ValueError('Can only write fine-calibration information for MEG ' - 'datasets.') + raise ValueError("bids_path must have root and subject set.") + if bids_path.datatype not in (None, "meg"): + raise ValueError( + "Can only write fine-calibration information for MEG " "datasets." + ) - _validate_type(fname, types=('path-like',), item_name='fname') + _validate_type(fname, types=("path-like",), item_name="fname") # MNE doesn't have public reader and writer functions for crosstalk data, # so just copy the original file. Use shutil.copyfile() to only copy file # contents, but not metadata & permissions. 
- out_path = BIDSPath(subject=bids_path.subject, session=bids_path.session, - acquisition='crosstalk', suffix='meg', - extension='.fif', datatype='meg', root=bids_path.root) + out_path = BIDSPath( + subject=bids_path.subject, + session=bids_path.session, + acquisition="crosstalk", + suffix="meg", + extension=".fif", + datatype="meg", + root=bids_path.root, + ) - logger.info(f'Writing crosstalk file to {out_path}') + logger.info(f"Writing crosstalk file to {out_path}") out_path.mkdir() shutil.copyfile(src=fname, dst=str(out_path)) def _get_daysback( - *, - bids_paths: List[BIDSPath], - rng: np.random.Generator, - show_progress_thresh: int + *, bids_paths: List[BIDSPath], rng: np.random.Generator, show_progress_thresh: int ) -> int: """Try to find a suitable "daysback" for anonymization. @@ -2552,15 +2716,16 @@ def _get_daysback( continue elif session is None: # Keep any one run for each data type - if datatype not in [p.datatype - for p in bids_paths_for_daysback[subject]]: + if datatype not in [p.datatype for p in bids_paths_for_daysback[subject]]: bids_paths_for_daysback[subject].append(bids_path) elif session is not None: # Keep any one run for each data type and session if all( - [session != p.session - for p in bids_paths_for_daysback[subject] - if datatype == p.datatype] + [ + session != p.session + for p in bids_paths_for_daysback[subject] + if datatype == p.datatype + ] ): bids_paths_for_daysback[subject].append(bids_path) @@ -2570,52 +2735,56 @@ def _get_daysback( if len(bids_paths_to_consider) >= show_progress_thresh: raws = [] - logger.info('\n') + logger.info("\n") for bids_path in ProgressBar( - iterable=bids_paths_to_consider, mesg='Determining daysback' + iterable=bids_paths_to_consider, mesg="Determining daysback" ): - raw = read_raw_bids(bids_path=bids_path, verbose='error') + raw = read_raw_bids(bids_path=bids_path, verbose="error") raws.append(raw) else: - raws = [read_raw_bids(bids_path=bp, verbose='error') - for bp in bids_paths_to_consider] + raws = [ + read_raw_bids(bids_path=bp, verbose="error") + for bp in bids_paths_to_consider + ] - daysback_min, daysback_max = get_anonymization_daysback( - raws=raws, verbose=False - ) + daysback_min, daysback_max = get_anonymization_daysback(raws=raws, verbose=False) # Pick one randomly - daysback = rng.choice( - np.arange(daysback_min, daysback_max + 1, dtype=int) - ) + daysback = rng.choice(np.arange(daysback_min, daysback_max + 1, dtype=int)) daysback = int(daysback) return daysback def _check_crosstalk_path(bids_path: BIDSPath) -> bool: is_crosstalk_path = ( - bids_path.datatype == 'meg' and - bids_path.suffix == 'meg' and - bids_path.acquisition == 'crosstalk' and - bids_path.extension == '.fif' + bids_path.datatype == "meg" + and bids_path.suffix == "meg" + and bids_path.acquisition == "crosstalk" + and bids_path.extension == ".fif" ) return is_crosstalk_path def _check_finecal_path(bids_path: BIDSPath) -> bool: is_finecal_path = ( - bids_path.datatype == 'meg' and - bids_path.suffix == 'meg' and - bids_path.acquisition == 'calibration' and - bids_path.extension == '.dat' + bids_path.datatype == "meg" + and bids_path.suffix == "meg" + and bids_path.acquisition == "calibration" + and bids_path.extension == ".dat" ) return is_finecal_path @verbose -def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', - subject_mapping='auto', datatypes=None, - random_state=None, verbose=None): +def anonymize_dataset( + bids_root_in, + bids_root_out, + daysback="auto", + subject_mapping="auto", + datatypes=None, + 
random_state=None, + verbose=None, +): """Anonymize a BIDS dataset. This function creates a copy of a BIDS dataset, and tries to remove all @@ -2652,28 +2821,28 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', if not bids_root_in.is_dir(): raise FileNotFoundError( - f'The specified input directory does not exist: {bids_root_in}' + f"The specified input directory does not exist: {bids_root_in}" ) if bids_root_in == bids_root_out: - raise ValueError('Input and output directory must differ') + raise ValueError("Input and output directory must differ") if bids_root_out.exists(): raise FileExistsError( - f'The specified output directory already exists. Please remove ' - f'it to perform anonymization: {bids_root_out}' + f"The specified output directory already exists. Please remove " + f"it to perform anonymization: {bids_root_out}" ) if not isinstance(subject_mapping, dict): - participants_tsv = _from_tsv(bids_root_in / 'participants.tsv') + participants_tsv = _from_tsv(bids_root_in / "participants.tsv") participants_in = [ - participant.replace('sub-', '') - for participant in participants_tsv['participant_id'] + participant.replace("sub-", "") + for participant in participants_tsv["participant_id"] ] - if subject_mapping == 'auto': + if subject_mapping == "auto": # Don't change `emptyroom` subject ID - if 'emptyroom' in participants_in: + if "emptyroom" in participants_in: n_participants = len(participants_in) - 1 else: n_participants = len(participants_in) @@ -2687,11 +2856,11 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', participants_out = [str(p).zfill(id_len) for p in participants_out] - if 'emptyroom' in participants_in: + if "emptyroom" in participants_in: # Append empty-room at the end - participants_in.remove('emptyroom') - participants_in.append('emptyroom') - participants_out.append('emptyroom') + participants_in.remove("emptyroom") + participants_in.append("emptyroom") + participants_out.append("emptyroom") assert len(participants_in) == len(participants_out) subject_mapping = dict(zip(participants_in, participants_out)) @@ -2701,25 +2870,24 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', # identity mapping subject_mapping = dict(zip(participants_in, participants_in)) - if subject_mapping not in ('auto', None): + if subject_mapping not in ("auto", None): # Make sure we're mapping to strings for k, v in subject_mapping.items(): subject_mapping[k] = str(v) - if ('emptyroom' in subject_mapping and - subject_mapping['emptyroom'] != 'emptyroom'): + if "emptyroom" in subject_mapping and subject_mapping["emptyroom"] != "emptyroom": warn( f'You requested to change the "emptyroom" subject ID ' f'(to {subject_mapping["emptyroom"]}). It is not ' - f'recommended to do this!' + f"recommended to do this!" 
) - allowed_datatypes = ('meg', 'eeg', 'ieeg', 'anat') - allowed_suffixes = ('meg', 'eeg', 'ieeg', 'T1w', 'FLASH') + allowed_datatypes = ("meg", "eeg", "ieeg", "anat") + allowed_suffixes = ("meg", "eeg", "ieeg", "T1w", "FLASH") allowed_extensions = [] for v in ALLOWED_DATATYPE_EXTENSIONS.values(): allowed_extensions.extend(v) - allowed_extensions.extend(['.nii', '.nii.gz']) + allowed_extensions.extend([".nii", ".nii.gz"]) if isinstance(datatypes, str): requested_datatypes = [datatypes] @@ -2730,34 +2898,30 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', for datatype in requested_datatypes: if datatype not in allowed_datatypes: - raise ValueError(f'Unsupported data type: {datatype}') + raise ValueError(f"Unsupported data type: {datatype}") del datatype, datatypes # Assemble list of candidate files for conversion - matches = bids_root_in.glob('sub-*/**/sub-*.*') + matches = bids_root_in.glob("sub-*/**/sub-*.*") bids_paths_in = [] for f in matches: - bids_path = get_bids_path_from_fname(f, verbose='error') - if ( - bids_path.datatype in requested_datatypes and + bids_path = get_bids_path_from_fname(f, verbose="error") + if bids_path.datatype in requested_datatypes and ( ( - ( - bids_path.suffix in allowed_suffixes and - bids_path.extension in allowed_extensions - ) or ( - _check_finecal_path(bids_path) or - _check_crosstalk_path(bids_path) - ) + bids_path.suffix in allowed_suffixes + and bids_path.extension in allowed_extensions ) + or (_check_finecal_path(bids_path) or _check_crosstalk_path(bids_path)) ): bids_paths_in.append(bids_path) # Ensure we convert empty-room recordings first, as we'll want to pass # their anonymized path when writing the associated experimental recordings - if 'meg' in requested_datatypes: + if "meg" in requested_datatypes: bids_paths_in_er_only = [ - bp for bp in bids_paths_in - if bp.subject == 'emptyroom' and bp.task == 'noise' + bp + for bp in bids_paths_in + if bp.subject == "emptyroom" and bp.task == "noise" ] bids_paths_in_er_first = bids_paths_in_er_only.copy() for bp in bids_paths_in: @@ -2767,16 +2931,17 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', bids_paths_in = bids_paths_in_er_first del bids_paths_in_er_first, bids_paths_in_er_only - logger.info('\nAnonymizing BIDS dataset') - if daysback == 'auto': + logger.info("\nAnonymizing BIDS dataset") + if daysback == "auto": # Find recordings that can be read with MNE-Python to extract the # recording dates bids_paths = [ - bp for bp in bids_paths_in + bp + for bp in bids_paths_in if ( - bp.datatype != 'anat' and - not _check_crosstalk_path(bp) and - not _check_finecal_path(bp) + bp.datatype != "anat" + and not _check_crosstalk_path(bp) + and not _check_finecal_path(bp) ) ] if bids_paths: @@ -2792,12 +2957,11 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', # Check subject_mapping subjects_in_dataset = set([bp.subject for bp in bids_paths_in]) subjects_missing_mapping_keys = [ - s for s in subjects_in_dataset - if s not in subject_mapping + s for s in subjects_in_dataset if s not in subject_mapping ] if subjects_missing_mapping_keys: raise IndexError( - f'The subject_mapping dictionary does not contain an entry for ' + f"The subject_mapping dictionary does not contain an entry for " f'subject ID: {", ".join(subjects_missing_mapping_keys)}' ) @@ -2808,37 +2972,29 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', if non_unique_vals_idx.size > 0: keys = np.array(list(subject_mapping.values()))[non_unique_vals_idx] raise 
ValueError(
-            f'The subject_mapping dictionary contains duplicated anonymized '
+            f"The subject_mapping dictionary contains duplicated anonymized "
             f'subject IDs: {", ".join(keys)}'
         )

     # Produce some logging output
-    msg = (
-        f'\n'
-        f'    Input: {bids_root_in}\n'
-        f'    Output: {bids_root_out}\n'
-        f'\n'
-    )
+    msg = f"\n" f"    Input: {bids_root_in}\n" f"    Output: {bids_root_out}\n" f"\n"
     if daysback is None:
-        msg += 'Not shifting recording dates (found anatomical scans only).\n'
+        msg += "Not shifting recording dates (found anatomical scans only).\n"
     else:
         msg += (
-            f'Shifting recording dates by {daysback} days '
-            f'({round(daysback / 365, 1)} years).\n'
+            f"Shifting recording dates by {daysback} days "
+            f"({round(daysback / 365, 1)} years).\n"
         )
-    msg += 'Using the following subject ID anonymization mapping:\n\n'
+    msg += "Using the following subject ID anonymization mapping:\n\n"

     for orig_sub, anon_sub in subject_mapping.items():
-        msg += f'  sub-{orig_sub} → sub-{anon_sub}\n'
+        msg += f"  sub-{orig_sub} → sub-{anon_sub}\n"

     logger.info(msg)
     del msg

     # Actual processing starts here
-    for bp_in in ProgressBar(iterable=bids_paths_in, mesg='Anonymizing'):
-        bp_out = (
-            bp_in.copy().update(
-                subject=subject_mapping[bp_in.subject],
-                root=bids_root_out
-            )
+    for bp_in in ProgressBar(iterable=bids_paths_in, mesg="Anonymizing"):
+        bp_out = bp_in.copy().update(
+            subject=subject_mapping[bp_in.subject], root=bids_root_out
         )

         bp_er_in = bp_er_out = None
@@ -2846,18 +3002,16 @@
         # Handle empty-room anonymization: we need to change the session to
         # match the new date
         if (
-            bp_in.datatype == 'meg' and
-            'emptyroom' in subject_mapping and
-            not (_check_finecal_path(bp_in) or _check_crosstalk_path(bp_in))
+            bp_in.datatype == "meg"
+            and "emptyroom" in subject_mapping
+            and not (_check_finecal_path(bp_in) or _check_crosstalk_path(bp_in))
         ):
-            if bp_in.subject == 'emptyroom':
+            if bp_in.subject == "emptyroom":
                 er_session_in = bp_in.session
             else:
                 # An experimental recording, so we need to find the associated
                 # empty-room
-                bp_er_in = bp_in.find_empty_room(
-                    use_sidecar_only=True, verbose='error'
-                )
+                bp_er_in = bp_in.find_empty_room(use_sidecar_only=True, verbose="error")
                 if bp_er_in is None:
                     er_session_in = None
                 else:
@@ -2865,86 +3019,73 @@
             # Update the session entity
             if er_session_in is not None:
-                date_fmt = '%Y%m%d'
-                er_session_out = (
-                    datetime.strptime(er_session_in, date_fmt) -
-                    timedelta(days=daysback)
+                date_fmt = "%Y%m%d"
+                er_session_out = datetime.strptime(er_session_in, date_fmt) - timedelta(
+                    days=daysback
                 )
                 er_session_out = datetime.strftime(er_session_out, date_fmt)

-                if bp_in.subject == 'emptyroom':
+                if bp_in.subject == "emptyroom":
                     bp_out.session = er_session_out
                     assert bp_er_out is None
                 else:
                     bp_er_out = bp_er_in.copy().update(
-                        subject=subject_mapping['emptyroom'],
+                        subject=subject_mapping["emptyroom"],
                         session=er_session_out,
-                        root=bp_out.root
+                        root=bp_out.root,
                     )

-        if bp_in.datatype == 'anat':
-            bp_anat_json = bp_in.copy().update(extension='.json')
-            anat_json = json.loads(
-                bp_anat_json.fpath.read_text(encoding='utf-8')
-            )
-            landmarks = anat_json['AnatomicalLandmarkCoordinates']
+        if bp_in.datatype == "anat":
+            bp_anat_json = bp_in.copy().update(extension=".json")
+            anat_json = json.loads(bp_anat_json.fpath.read_text(encoding="utf-8"))
+            landmarks = anat_json["AnatomicalLandmarkCoordinates"]
             landmarks_dig = mne.channels.make_dig_montage(
-                
nasion=landmarks['NAS'], - lpa=landmarks['LPA'], - rpa=landmarks['RPA'], - coord_frame='mri_voxel' + nasion=landmarks["NAS"], + lpa=landmarks["LPA"], + rpa=landmarks["RPA"], + coord_frame="mri_voxel", ) write_anat( image=bp_in.fpath, bids_path=bp_out, landmarks=landmarks_dig, deface=True, - verbose='error' + verbose="error", ) elif _check_crosstalk_path(bp_in): - write_meg_crosstalk( - fname=bp_in.fpath, - bids_path=bp_out, - verbose='error' - ) + write_meg_crosstalk(fname=bp_in.fpath, bids_path=bp_out, verbose="error") elif _check_finecal_path(bp_in): write_meg_calibration( - calibration=bp_in.fpath, - bids_path=bp_out, - verbose='error' + calibration=bp_in.fpath, bids_path=bp_out, verbose="error" ) else: - raw = read_raw_bids(bids_path=bp_in, verbose='error') + raw = read_raw_bids(bids_path=bp_in, verbose="error") write_raw_bids( raw=raw, bids_path=bp_out, anonymize={ - 'daysback': daysback, - 'keep_his': False, - 'keep_source': False, + "daysback": daysback, + "keep_his": False, + "keep_source": False, }, empty_room=bp_er_out, - verbose='error' + verbose="error", ) # Enrich sidecars - bp_in_json = bp_in.copy().update(extension='.json') - bp_out_json = bp_out.copy().update(extension='.json') - bp_in_events = bp_in.copy().update(suffix='events', extension='.tsv') - bp_out_events = bp_out.copy().update(suffix='events', extension='.tsv') + bp_in_json = bp_in.copy().update(extension=".json") + bp_out_json = bp_out.copy().update(extension=".json") + bp_in_events = bp_in.copy().update(suffix="events", extension=".tsv") + bp_out_events = bp_out.copy().update(suffix="events", extension=".tsv") # Enrich the JSON file if bp_in_json.fpath.exists(): - json_in = json.loads( - bp_in_json.fpath.read_text(encoding='utf-8') - ) + json_in = json.loads(bp_in_json.fpath.read_text(encoding="utf-8")) else: json_in = dict() if bp_out_json.fpath.exists(): - json_out = json.loads( - bp_out_json.fpath.read_text(encoding='utf-8') - ) + json_out = json.loads(bp_out_json.fpath.read_text(encoding="utf-8")) else: json_out = dict() @@ -2959,9 +3100,7 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', if json_updates: bp_out_json.fpath.touch(exist_ok=True) update_sidecar_json( - bids_path=bp_out_json, - entries=json_updates, - verbose='error' + bids_path=bp_out_json, entries=json_updates, verbose="error" ) # Transfer trigger codes from original *_events.tsv file @@ -2970,21 +3109,21 @@ def anonymize_dataset(bids_root_in, bids_root_out, daysback='auto', events_tsv_in = _from_tsv(bp_in_events) events_tsv_out = _from_tsv(bp_out_events) - assert events_tsv_in['trial_type'] == events_tsv_out['trial_type'] - events_tsv_out['value'] = events_tsv_in['value'] + assert events_tsv_in["trial_type"] == events_tsv_out["trial_type"] + events_tsv_out["value"] = events_tsv_in["value"] _write_tsv( fname=bp_out_events.fpath, dictionary=events_tsv_out, overwrite=True, - verbose='error' + verbose="error", ) # Copy some additional files additional_files = ( - 'README', - 'CHANGES', - 'dataset_description.json', - 'participants.json' + "README", + "CHANGES", + "dataset_description.json", + "participants.json", ) for fname in additional_files: in_path = bids_root_in / fname diff --git a/setup.cfg b/setup.cfg index e629de552..03d24b6f3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,8 +58,10 @@ console_scripts = universal = true [flake8] +max-line-length = 88 exclude = __init__.py -ignore = W504,I101,I100,I201 +ignore = I101,I100,I201 +extend-ignore = W503,E203 per-file-ignores = mne_bids/commands/tests/test_*.py:E402 
mne_bids/tests/test_*.py:E402 diff --git a/setup.py b/setup.py index 244cf255c..9d8038fff 100644 --- a/setup.py +++ b/setup.py @@ -10,14 +10,14 @@ SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else [] version = None -with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid: +with open(os.path.join("mne_bids", "__init__.py"), "r") as fid: for line in fid: line = line.strip() - if line.startswith('__version__ = '): - version = line.split(' = ')[1].split('#')[0].strip('\'') + if line.startswith("__version__ = "): + version = line.split(" = ")[1].split("#")[0].strip('"') break if version is None: - raise RuntimeError('Could not determine version') + raise RuntimeError("Could not determine version") if __name__ == "__main__": diff --git a/test_requirements.txt b/test_requirements.txt index e4f01681e..8988739af 100644 --- a/test_requirements.txt +++ b/test_requirements.txt @@ -14,3 +14,4 @@ pytest-sugar check-manifest pydocstyle flake8 +black