diff --git a/.github/workflows/contrib.yml b/.github/workflows/contrib.yml
new file mode 100644
index 00000000..5e48a0b0
--- /dev/null
+++ b/.github/workflows/contrib.yml
@@ -0,0 +1,25 @@
+name: Contribution checks
+on: [push, pull_request]
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ stable:
+ name: Run ruff
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: recursive
+ fetch-depth: 0
+ - name: Set up Python 3
+ uses: actions/setup-python@v5
+ with:
+ python-version: 3
+ - name: Lint nireports
+ run: pipx run ruff check --diff
+ - name: Format nireports
+ run: pipx run ruff format --diff
diff --git a/.maint/update_authors.py b/.maint/update_authors.py
index 66448a29..b270be0c 100644
--- a/.maint/update_authors.py
+++ b/.maint/update_authors.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python3
"""Update and sort the creators list of the zenodo record."""
+
+import json
import sys
from pathlib import Path
-import json
+
import click
from fuzzywuzzy import fuzz, process
@@ -36,10 +38,7 @@ def read_md_table(md_text):
retval = []
for line in md_text.splitlines():
if line.strip().startswith("| --- |"):
- keys = (
- k.replace("*", "").strip()
- for k in prev.split("|")
- )
+ keys = (k.replace("*", "").strip() for k in prev.split("|"))
keys = [k.lower() for k in keys if k]
continue
elif not keys:
@@ -60,19 +59,13 @@ def sort_contributors(entries, git_lines, exclude=None, last=None):
last = last or []
sorted_authors = sorted(entries, key=lambda i: i["name"])
- first_last = [
- " ".join(val["name"].split(",")[::-1]).strip() for val in sorted_authors
- ]
- first_last_excl = [
- " ".join(val["name"].split(",")[::-1]).strip() for val in exclude or []
- ]
+ first_last = [" ".join(val["name"].split(",")[::-1]).strip() for val in sorted_authors]
+ first_last_excl = [" ".join(val["name"].split(",")[::-1]).strip() for val in exclude or []]
unmatched = []
author_matches = []
for ele in git_lines:
- matches = process.extract(
- ele, first_last, scorer=fuzz.token_sort_ratio, limit=2
- )
+ matches = process.extract(ele, first_last, scorer=fuzz.token_sort_ratio, limit=2)
# matches is a list [('First match', % Match), ('Second match', % Match)]
if matches[0][1] > 80:
val = sorted_authors[first_last.index(matches[0][0])]
@@ -152,8 +145,9 @@ def cli():
@cli.command()
@click.option("-z", "--zenodo-file", type=click.Path(exists=True), default=".zenodo.json")
@click.option("-m", "--maintainers", type=click.Path(exists=True), default=".maint/MAINTAINERS.md")
-@click.option("-c", "--contributors", type=click.Path(exists=True),
- default=".maint/CONTRIBUTORS.md")
+@click.option(
+ "-c", "--contributors", type=click.Path(exists=True), default=".maint/CONTRIBUTORS.md"
+)
@click.option("--pi", type=click.Path(exists=True), default=".maint/PIs.md")
@click.option("-f", "--former-file", type=click.Path(exists=True), default=".maint/FORMER.md")
def zenodo(
@@ -176,15 +170,13 @@ def zenodo(
)
zen_contributors, miss_contributors = sort_contributors(
- _namelast(read_md_table(Path(contributors).read_text())),
- data,
- exclude=former
+ _namelast(read_md_table(Path(contributors).read_text())), data, exclude=former
)
zen_pi = _namelast(
sorted(
read_md_table(Path(pi).read_text()),
- key=lambda v: (int(v.get("position", -1)), v.get("lastname"))
+ key=lambda v: (int(v.get("position", -1)), v.get("lastname")),
)
)
@@ -194,8 +186,7 @@ def zenodo(
misses = set(miss_creators).intersection(miss_contributors)
if misses:
print(
- "Some people made commits, but are missing in .maint/ "
- f"files: {', '.join(misses)}",
+ "Some people made commits, but are missing in .maint/ " f"files: {', '.join(misses)}",
file=sys.stderr,
)
@@ -214,15 +205,14 @@ def zenodo(
if isinstance(creator["affiliation"], list):
creator["affiliation"] = creator["affiliation"][0]
- Path(zenodo_file).write_text(
- "%s\n" % json.dumps(zenodo, indent=2)
- )
+ Path(zenodo_file).write_text("%s\n" % json.dumps(zenodo, indent=2))
@cli.command()
@click.option("-m", "--maintainers", type=click.Path(exists=True), default=".maint/MAINTAINERS.md")
-@click.option("-c", "--contributors", type=click.Path(exists=True),
- default=".maint/CONTRIBUTORS.md")
+@click.option(
+ "-c", "--contributors", type=click.Path(exists=True), default=".maint/CONTRIBUTORS.md"
+)
@click.option("--pi", type=click.Path(exists=True), default=".maint/PIs.md")
@click.option("-f", "--former-file", type=click.Path(exists=True), default=".maint/FORMER.md")
def publication(
@@ -232,9 +222,8 @@ def publication(
former_file,
):
"""Generate the list of authors and affiliations for papers."""
- members = (
- _namelast(read_md_table(Path(maintainers).read_text()))
- + _namelast(read_md_table(Path(contributors).read_text()))
+ members = _namelast(read_md_table(Path(maintainers).read_text())) + _namelast(
+ read_md_table(Path(contributors).read_text())
)
hits, misses = sort_contributors(
@@ -246,15 +235,12 @@ def publication(
pi_hits = _namelast(
sorted(
read_md_table(Path(pi).read_text()),
- key=lambda v: (int(v.get("position", -1)), v.get("lastname"))
+ key=lambda v: (int(v.get("position", -1)), v.get("lastname")),
)
)
pi_names = [pi["name"] for pi in pi_hits]
- hits = [
- hit for hit in hits
- if hit["name"] not in pi_names
- ] + pi_hits
+ hits = [hit for hit in hits if hit["name"] not in pi_names] + pi_hits
def _aslist(value):
if isinstance(value, (list, tuple)):
@@ -281,27 +267,19 @@ def _aslist(value):
if misses:
print(
- "Some people made commits, but are missing in .maint/ "
- f"files: {', '.join(misses)}",
+ "Some people made commits, but are missing in .maint/ " f"files: {', '.join(misses)}",
file=sys.stderr,
)
print("Authors (%d):" % len(hits))
print(
"%s."
- % "; ".join(
- [
- "%s \\ :sup:`%s`\\ " % (i["name"], idx)
- for i, idx in zip(hits, aff_indexes)
- ]
- )
+ % "; ".join(["%s \\ :sup:`%s`\\ " % (i["name"], idx) for i, idx in zip(hits, aff_indexes)])
)
print(
"\n\nAffiliations:\n%s"
- % "\n".join(
- ["{0: >2}. {1}".format(i + 1, a) for i, a in enumerate(affiliations)]
- )
+ % "\n".join(["{0: >2}. {1}".format(i + 1, a) for i, a in enumerate(affiliations)])
)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 31c2bcbf..73e2ce76 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,23 +1,26 @@
+# To install the git pre-commit hook run:
+# pre-commit install
+# To update the pre-commit hooks to their latest versions run:
+# pre-commit autoupdate
+
repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.1.0
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.2.0
hooks:
- - id: trailing-whitespace
- - id: end-of-file-fixer
- - id: check-yaml
- - id: check-json
- - id: check-toml
- - id: check-case-conflict
- - id: check-docstring-first
- - id: check-merge-conflict
- - id: check-vcs-permalinks
- - id: pretty-format-json
- args: ['--autofix']
-- repo: https://github.com/psf/black
- rev: 22.3.0
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: debug-statements
+ - id: check-yaml
+ - id: check-json
+ - id: check-toml
+ - id: check-case-conflict
+ - id: check-docstring-first
+ - id: check-merge-conflict
+ - id: check-vcs-permalinks
+ - id: pretty-format-json
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.3.4
hooks:
- - id: black
-- repo: https://github.com/pycqa/isort
- rev: 5.10.1
- hooks:
- - id: isort
+ - id: ruff
+ args: [ --fix ]
+ - id: ruff-format
diff --git a/nireports/__init__.py b/nireports/__init__.py
index c71de6b1..97d3f1fe 100644
--- a/nireports/__init__.py
+++ b/nireports/__init__.py
@@ -21,13 +21,15 @@
# https://www.nipreps.org/community/licensing/
#
"""Add metadata on import."""
+
__packagename__ = "nireports"
__copyright__ = "2023, The NiPreps developers"
try:
from ._version import __version__
except ModuleNotFoundError:
- from importlib.metadata import version, PackageNotFoundError
+ from importlib.metadata import PackageNotFoundError, version
+
try:
__version__ = version(__packagename__)
except PackageNotFoundError:
diff --git a/nireports/assembler/data/__init__.py b/nireports/assembler/data/__init__.py
index fab31927..5f59bfb6 100644
--- a/nireports/assembler/data/__init__.py
+++ b/nireports/assembler/data/__init__.py
@@ -10,6 +10,7 @@
.. autoclass:: Loader
"""
+
from __future__ import annotations
import atexit
diff --git a/nireports/assembler/misc.py b/nireports/assembler/misc.py
index d5f6439a..6677fb35 100644
--- a/nireports/assembler/misc.py
+++ b/nireports/assembler/misc.py
@@ -23,10 +23,11 @@
# STATEMENT OF CHANGES: This file was ported carrying over full git history from niworkflows,
# another NiPreps project licensed under the Apache-2.0 terms, and has been changed since.
"""Miscellaneous utilities."""
+
from collections import defaultdict
from pathlib import Path
-from bids.utils import listify
+from bids.utils import listify
from nipype.utils.filemanip import loadcrash
@@ -242,7 +243,7 @@ def unfold_columns(indict, prefix=None, delimiter="_"):
"""
prefix = listify(prefix) if prefix is not None else []
- keys = sorted(set(list(indict.keys())))
+ keys = sorted(set(indict.keys()))
data = []
subdict = defaultdict(dict, {})
@@ -254,7 +255,7 @@ def unfold_columns(indict, prefix=None, delimiter="_"):
subdict[col[0]][col[1]] = indict[key]
if subdict:
- for skey in sorted(list(subdict.keys())):
+ for skey in sorted(subdict.keys()):
sskeys = list(subdict[skey].keys())
# If there is only one subkey, merge back
diff --git a/nireports/assembler/report.py b/nireports/assembler/report.py
index eb858601..0555fc6c 100644
--- a/nireports/assembler/report.py
+++ b/nireports/assembler/report.py
@@ -23,6 +23,7 @@
# STATEMENT OF CHANGES: This file was ported carrying over full git history from niworkflows,
# another NiPreps project licensed under the Apache-2.0 terms, and has been changed since.
"""Core objects representing reports."""
+
import re
from collections import defaultdict
from itertools import compress
@@ -36,7 +37,6 @@
from nireports.assembler import data
from nireports.assembler.reportlet import Reportlet
-
# Add a new figures spec
try:
add_config_paths(figures=data.load("nipreps.json"))
@@ -270,8 +270,7 @@ def __init__(
metadata = metadata or {}
if "filename" not in metadata:
metadata["filename"] = Path(out_filename).name.replace(
- "".join(Path(out_filename).suffixes),
- ""
+ "".join(Path(out_filename).suffixes), ""
)
# Initialize structuring elements
@@ -287,9 +286,7 @@ def __init__(
"out_dir": str(out_dir),
"reportlets_dir": str(root),
}
- meta_repl.update({
- kk: vv for kk, vv in metadata.items() if isinstance(vv, str)
- })
+ meta_repl.update({kk: vv for kk, vv in metadata.items() if isinstance(vv, str)})
meta_repl.update(bids_filters)
expr = re.compile(f'{{({"|".join(meta_repl.keys())})}}')
@@ -308,7 +305,8 @@ def __init__(
# Path to the Jinja2 template
self.template_path = (
- Path(settings["template_path"]) if "template_path" in settings
+ Path(settings["template_path"])
+ if "template_path" in settings
else data.load("report.tpl").absolute()
)
@@ -383,7 +381,8 @@ def index(self, config):
# do not display entities with the value None.
c_filt = [
f'{key} {c_value}'
- for key, c_value in zip(entities, c) if c_value is not None
+ for key, c_value in zip(entities, c)
+ if c_value is not None
]
# Set a common title for this particular combination c
title = "Reports for: %s." % ", ".join(c_filt)
@@ -420,11 +419,11 @@ def process_plugins(self, config, metadata=None):
self.footer = []
plugins = config.get("plugins", None)
- for plugin in (plugins or []):
+ for plugin in plugins or []:
env = jinja2.Environment(
- loader=jinja2.FileSystemLoader(searchpath=str(
- Path(__file__).parent / "data" / f"{plugin['type']}"
- )),
+ loader=jinja2.FileSystemLoader(
+ searchpath=str(Path(__file__).parent / "data" / f"{plugin['type']}")
+ ),
trim_blocks=True,
lstrip_blocks=True,
autoescape=False,
@@ -434,12 +433,17 @@ def process_plugins(self, config, metadata=None):
plugin_meta.update((metadata or {}).get(plugin["type"], {}))
for member in ("header", "navbar", "footer"):
old_value = getattr(self, member)
- setattr(self, member, old_value + [
- env.get_template(f"{member}.tpl").render(
- config=plugin,
- metadata=plugin_meta,
- )
- ])
+ setattr(
+ self,
+ member,
+ old_value
+ + [
+ env.get_template(f"{member}.tpl").render(
+ config=plugin,
+ metadata=plugin_meta,
+ )
+ ],
+ )
def generate_report(self):
"""Once the Report has been indexed, the final HTML can be generated"""
diff --git a/nireports/assembler/reportlet.py b/nireports/assembler/reportlet.py
index a69c421a..d31d1231 100644
--- a/nireports/assembler/reportlet.py
+++ b/nireports/assembler/reportlet.py
@@ -23,14 +23,16 @@
# STATEMENT OF CHANGES: This file was ported carrying over full git history from niworkflows,
# another NiPreps project licensed under the Apache-2.0 terms, and has been changed since.
"""The reporting visualization unit or *reportlet*."""
+
+import re
from pathlib import Path
from uuid import uuid4
-import re
+
from nipype.utils.filemanip import copyfile
+
from nireports.assembler import data
from nireports.assembler.misc import dict2html, read_crashfile
-
SVG_SNIPPET = [
"""\
@@ -104,7 +106,7 @@
"""
-HTML_BOILER_STYLE = ' font-family: \'Bitstream Charter\', \'Georgia\', Times;'
+HTML_BOILER_STYLE = " font-family: 'Bitstream Charter', 'Georgia', Times;"
class Reportlet:
@@ -231,7 +233,6 @@ def __init__(self, layout, config=None, out_dir=None, bids_filters=None, metadat
if ext == ".html":
contents = src.read_text().strip()
elif ext == ".svg":
-
entities = dict(bidsfile.entities)
if desc_text:
desc_text = desc_text.format(**entities)
diff --git a/nireports/reportlets/utils.py b/nireports/reportlets/utils.py
--- a/nireports/reportlets/utils.py
+++ b/nireports/reportlets/utils.py
@@ -258,9 +259,7 @@ def extract_svg(display_object, dpi=300):
 end_tag = "</svg>"
end_idx = image_svg.rfind(end_tag)
if start_idx == -1 or end_idx == -1:
- warnings.warn("svg tags not found in extract_svg")
+ warnings.warn("svg tags not found in extract_svg", stacklevel=2)
# rfind gives the start index of the substr. We want this substr
# included in our return value so we add its length to the index.
end_idx += len(end_tag)
@@ -340,7 +339,7 @@ def compose_view(bg_svgs, fg_svgs, ref=0, out_file="report.svg"):
def _compose_view(bg_svgs, fg_svgs, ref=0):
from svgutils.compose import Unit
- from svgutils.transform import SVGFigure, GroupElement
+ from svgutils.transform import GroupElement, SVGFigure
if fg_svgs is None:
fg_svgs = []
diff --git a/nireports/reportlets/xca.py b/nireports/reportlets/xca.py
index af0b1ef1..2476465a 100644
--- a/nireports/reportlets/xca.py
+++ b/nireports/reportlets/xca.py
@@ -23,11 +23,11 @@
# STATEMENT OF CHANGES: This file was ported carrying over full git history from niworkflows,
# another NiPreps project licensed under the Apache-2.0 terms, and has been changed since.
"""Plotting results of component decompositions (xCA -- P/I-CA)."""
-import numpy as np
-import nibabel as nb
-import pandas as pd
import matplotlib.pyplot as plt
+import nibabel as nb
+import numpy as np
+import pandas as pd
from nilearn.plotting.cm import cold_white_hot
from nireports.reportlets.utils import transform_to_2d
@@ -77,11 +77,13 @@ def plot_melodic_components(
"""
import os
+
import numpy as np
- from matplotlib.gridspec import GridSpec
import pylab as plt
import seaborn as sns
+ from matplotlib.gridspec import GridSpec
from nilearn.image import index_img, iter_img
+
try:
from nilearn.maskers import NiftiMasker
except ImportError: # nilearn < 0.9
@@ -152,7 +154,7 @@ def plot_melodic_components(
textcoords="axes fraction",
size=12,
color="#ea8800",
- bbox=dict(boxstyle="round", fc="#f7dcb7", ec="#FC990E"),
+ bbox={"boxstyle": "round", "fc": "#f7dcb7", "ec": "#FC990E"},
)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
@@ -163,7 +165,6 @@ def plot_melodic_components(
if ICs.ndim == 3:
ICs = ICs.slicer[..., None]
for i, img in enumerate(iter_img(ICs)):
-
col = i % 2
row = i // 2
l_row = row * 2 + warning_row
diff --git a/nireports/tests/conftest.py b/nireports/tests/conftest.py
index 9077f2e3..0a8d706b 100644
--- a/nireports/tests/conftest.py
+++ b/nireports/tests/conftest.py
@@ -20,7 +20,8 @@
#
# https://www.nipreps.org/community/licensing/
#
-""" py.test configuration file """
+"""py.test configuration file"""
+
import os
import pytest
diff --git a/nireports/tests/generate_data.py b/nireports/tests/generate_data.py
index dd8ecf81..f0da774d 100644
--- a/nireports/tests/generate_data.py
+++ b/nireports/tests/generate_data.py
@@ -10,12 +10,12 @@ def _create_dtseries_cifti(timepoints, models):
def create_series_map():
return ci.Cifti2MatrixIndicesMap(
(0,),
- 'CIFTI_INDEX_TYPE_SERIES',
+ "CIFTI_INDEX_TYPE_SERIES",
number_of_series_points=timepoints,
series_exponent=0,
series_start=0,
series_step=1,
- series_unit='SECOND',
+ series_unit="SECOND",
)
def create_geometry_map():
@@ -41,7 +41,7 @@ def create_geometry_map():
setattr(bm, attr, indices)
if model_type == "CIFTI_MODEL_TYPE_SURFACE":
# define total vertices for surface models
- setattr(bm, "surface_number_of_vertices", 32492)
+ bm.surface_number_of_vertices = 32492
index_offset += len(data)
brain_models.append(bm)
timeseries = np.column_stack((timeseries, data.T))
diff --git a/nireports/tests/test_dwi.py b/nireports/tests/test_dwi.py
index efb6980e..6e43fea4 100644
--- a/nireports/tests/test_dwi.py
+++ b/nireports/tests/test_dwi.py
@@ -22,11 +22,9 @@
#
"""Test DWI reportlets."""
-import pytest
-from pathlib import Path
-
import nibabel as nb
import numpy as np
+import pytest
from matplotlib import pyplot as plt
from nireports.reportlets.modality.dwi import plot_dwi, plot_gradients
@@ -35,12 +33,12 @@
def test_plot_dwi(tmp_path, testdata_path, outdir):
"""Check the plot of DWI data."""
- stem = 'ds000114_sub-01_ses-test_desc-trunc_dwi'
- dwi_img = nb.load(testdata_path / f'{stem}.nii.gz')
+ stem = "ds000114_sub-01_ses-test_desc-trunc_dwi"
+ dwi_img = nb.load(testdata_path / f"{stem}.nii.gz")
affine = dwi_img.affine
- bvecs = np.loadtxt(testdata_path / f'{stem}.bvec').T
- bvals = np.loadtxt(testdata_path / f'{stem}.bval')
+ bvecs = np.loadtxt(testdata_path / f"{stem}.bvec").T
+ bvals = np.loadtxt(testdata_path / f"{stem}.bval")
gradients = np.hstack([bvecs, bvals[:, None]])
@@ -51,18 +49,18 @@ def test_plot_dwi(tmp_path, testdata_path, outdir):
_ = plot_dwi(dwi_img.get_fdata()[..., idx], affine, gradient=gradients[idx])
if outdir is not None:
- plt.savefig(outdir / f'{stem}.svg', bbox_inches='tight')
+ plt.savefig(outdir / f"{stem}.svg", bbox_inches="tight")
@pytest.mark.parametrize(
- 'dwi_btable',
- ['ds000114_singleshell', 'hcph_multishell', 'ds004737_dsi'],
+ "dwi_btable",
+ ["ds000114_singleshell", "hcph_multishell", "ds004737_dsi"],
)
def test_plot_gradients(tmp_path, testdata_path, dwi_btable, outdir):
"""Check the plot of DWI gradients."""
- bvecs = np.loadtxt(testdata_path / f'{dwi_btable}.bvec').T
- bvals = np.loadtxt(testdata_path / f'{dwi_btable}.bval')
+ bvecs = np.loadtxt(testdata_path / f"{dwi_btable}.bvec").T
+ bvals = np.loadtxt(testdata_path / f"{dwi_btable}.bval")
b0s_mask = bvals < 50
@@ -70,4 +68,4 @@ def test_plot_gradients(tmp_path, testdata_path, dwi_btable, outdir):
_ = plot_gradients(gradients)
if outdir is not None:
- plt.savefig(outdir / f'{dwi_btable}.svg', bbox_inches='tight')
+ plt.savefig(outdir / f"{dwi_btable}.svg", bbox_inches="tight")
diff --git a/nireports/tests/test_interfaces.py b/nireports/tests/test_interfaces.py
index 0db618c4..d361dd6b 100644
--- a/nireports/tests/test_interfaces.py
+++ b/nireports/tests/test_interfaces.py
@@ -21,6 +21,7 @@
# https://www.nipreps.org/community/licensing/
#
"""Tests plotting interfaces."""
+
import os
from shutil import copy
@@ -45,7 +46,7 @@ def test_CompCorVariancePlot(datadir):
_smoke_test_report(cc_rpt, "compcor_variance.svg")
-@pytest.mark.parametrize('ignore_initial_volumes', (0, 1))
+@pytest.mark.parametrize("ignore_initial_volumes", (0, 1))
def test_ConfoundsCorrelationPlot(datadir, ignore_initial_volumes):
"""confounds correlation report test"""
confounds_file = os.path.join(datadir, "confounds_test.tsv")
diff --git a/nireports/tests/test_reportlets.py b/nireports/tests/test_reportlets.py
index 85779baa..671334e0 100644
--- a/nireports/tests/test_reportlets.py
+++ b/nireports/tests/test_reportlets.py
@@ -21,22 +21,22 @@
# https://www.nipreps.org/community/licensing/
#
"""Test reportlets module."""
+
import os
-from pathlib import Path
-from itertools import permutations
from functools import partial
+from itertools import permutations
+from pathlib import Path
import nibabel as nb
import numpy as np
import pandas as pd
import pytest
-
from templateflow.api import get
from nireports.reportlets.modality.func import fMRIPlot
+from nireports.reportlets.mosaic import plot_mosaic
from nireports.reportlets.nuisance import plot_carpet
from nireports.reportlets.surface import cifti_surfaces_plot
-from nireports.reportlets.mosaic import plot_mosaic
from nireports.reportlets.xca import compcor_variance_plot, plot_melodic_components
from nireports.tools.timeseries import cifti_timeseries as _cifti_timeseries
from nireports.tools.timeseries import get_tr as _get_tr
@@ -270,8 +270,8 @@ def create_surface_dtseries():
out_file = _create_dtseries_cifti(
timepoints=10,
models=[
- ('CIFTI_STRUCTURE_CORTEX_LEFT', np.random.rand(29696, 10)),
- ('CIFTI_STRUCTURE_CORTEX_RIGHT', np.random.rand(29716, 10)),
+ ("CIFTI_STRUCTURE_CORTEX_LEFT", np.random.rand(29696, 10)),
+ ("CIFTI_STRUCTURE_CORTEX_RIGHT", np.random.rand(29716, 10)),
],
)
yield str(out_file)
@@ -328,10 +328,9 @@ def test_nifti_carpetplot(tmp_path, testdata_path, outdir):
)
-_views = (
- list(permutations(("axial", "sagittal", "coronal", None), 3))
- + [(v, None, None) for v in ("axial", "sagittal", "coronal")]
-)
+_views = list(permutations(("axial", "sagittal", "coronal", None), 3)) + [
+ (v, None, None) for v in ("axial", "sagittal", "coronal")
+]
@pytest.mark.parametrize("views", _views)
@@ -339,9 +338,7 @@ def test_nifti_carpetplot(tmp_path, testdata_path, outdir):
def test_mriqc_plot_mosaic(tmp_path, testdata_path, outdir, views, plot_sagittal):
"""Exercise the generation of mosaics."""
- fname = (
- f"mosaic_{'_'.join(v or 'none' for v in views)}_{plot_sagittal:d}.svg"
- )
+ fname = f"mosaic_{'_'.join(v or 'none' for v in views)}_{plot_sagittal:d}.svg"
testfunc = partial(
plot_mosaic,
diff --git a/nireports/tools/timeseries.py b/nireports/tools/timeseries.py
index 5f2e46f2..e184efff 100644
--- a/nireports/tools/timeseries.py
+++ b/nireports/tools/timeseries.py
@@ -26,8 +26,9 @@
# https://github.com/nipreps/niworkflows/blob/fa273d004c362d9562616253180e95694f07be3b/
# niworkflows/utils/timeseries.py
"""Extracting signals from NIfTI and CIFTI2 files."""
-import numpy as np
+
import nibabel as nb
+import numpy as np
def get_tr(img):
@@ -72,13 +73,8 @@ def cifti_timeseries(dataset):
}
seg = {label: [] for label in list(labels.values()) + ["Other"]}
for bm in matrix.get_index_map(1).brain_models:
- label = (
- "Other" if bm.brain_structure not in labels else
- labels[bm.brain_structure]
- )
- seg[label] += list(range(
- bm.index_offset, bm.index_offset + bm.index_count
- ))
+ label = "Other" if bm.brain_structure not in labels else labels[bm.brain_structure]
+ seg[label] += list(range(bm.index_offset, bm.index_offset + bm.index_count))
return dataset.get_fdata(dtype="float32").T, seg
@@ -108,9 +104,9 @@ def nifti_timeseries(
if lut is None:
lut = np.zeros((256,), dtype="uint8")
lut[100:201] = 1 # Ctx GM
- lut[30:99] = 2 # dGM
- lut[1:11] = 3 # WM+CSF
- lut[255] = 4 # Cerebellum
+ lut[30:99] = 2 # dGM
+ lut[1:11] = 3 # WM+CSF
+ lut[255] = 4 # Cerebellum
# Apply lookup table
segmentation = lut[segmentation]
diff --git a/pyproject.toml b/pyproject.toml
index 93aa541e..2db40460 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -50,10 +50,8 @@ doc = [
]
dev = [
- "black ~= 22.3.0",
+ "ruff",
"pre-commit",
- "isort ~= 5.10.1",
- "flake8-pyproject",
]
test = [
@@ -101,40 +99,49 @@ version-file = "nireports/_version.py"
# Developer tool configurations
#
-[tool.black]
+[tool.ruff]
line-length = 99
-target-version = ['py39']
-skip-string-normalization = true
-exclude = '''
-# Directories
-/(
- \.eggs
- | \.git
- | \.hg
- | \.mypy_cache
- | \.tox
- | \.venv
- | venv
- | _build
- | build
- | dist
-)/
-'''
-
-[tool.isort]
-profile = 'black'
-skip_gitignore = true
-
-[tool.flake8]
-max-line-length = "99"
-doctests = "False"
-exclude = "*build/"
-ignore = ["W503", "E203"]
-per-file-ignores = [
- "**/__init__.py : F401",
- "docs/conf.py : E265",
+target-version = "py39"
+exclude = [
+ ".eggs",
+ ".git",
+ ".hg",
+ ".mypy_cache",
+ ".tox",
+ ".venv",
+ "venv",
+ "_build",
+ "build",
+ "dist",
+]
+
+[tool.ruff.lint]
+select = [
+ "F",
+ "E",
+ "C",
+ "W",
+ "B",
+ "I",
+]
+ignore = [
+ "E203",
]
+[tool.ruff.lint.flake8-quotes]
+inline-quotes = "double"
+
+[tool.ruff.lint.extend-per-file-ignores]
+"*/__init__.py" = ["F401"]
+"docs/conf.py" = ["E265"]
+"/^\\s*\\.\\. _.*?: http/" = ["E501"]
+
+[tool.ruff.format]
+quote-style = "double"
+
+[tool.ruff.lint.isort]
+known-first-party=["nireports"]
+
[tool.pytest.ini_options]
norecursedirs = [".git"]
addopts = "-svx --doctest-modules -n auto"
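For contributors, the checks introduced above can be reproduced locally before pushing. This is a minimal sketch, assuming pipx and pre-commit are available on the PATH; it mirrors the commands in contrib.yml and .pre-commit-config.yaml:

    # lint and show the fixes ruff would apply, without modifying files
    pipx run ruff check --diff
    # show formatting differences without rewriting files
    pipx run ruff format --diff
    # install the git hook so the same checks run on every commit
    pre-commit install
    pre-commit run --all-files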