From 478ebb08b2fb536b1861398aec548e5eec6a5dc2 Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Fri, 22 Jul 2022 23:13:25 -0400
Subject: [PATCH 01/12] MNT: Add blue configuration

---
 pyproject.toml | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index b5b6c0e52d..f6730e4152 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -55,3 +55,13 @@ find = {}
 
 [tool.setuptools.package-data]
 nibabel = ["tests/data/*", "*/tests/data/*", "benchmarks/pytest.benchmark.ini"]
+
+[tool.blue]
+line_length = 99
+target-version = ['py37']
+extend-exclude = '''
+(
+  _version.py
+  | nibabel/externals/
+)
+'''

From 5104cae50c640cc3df1bc8dd7b3e3b9cb35e1593 Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Fri, 22 Jul 2022 23:14:45 -0400
Subject: [PATCH 02/12] MNT: Add isort configuration

---
 pyproject.toml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index f6730e4152..47ee66b024 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -65,3 +65,8 @@ extend-exclude = '''
   | nibabel/externals/
 )
 '''
+
+[tool.isort]
+profile = "black"
+line_length = 99
+extend_skip = ["_version.py", "externals"]

From cc9a0bf92bf4025a8bf0d300d9de8bfb2c245c05 Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Fri, 22 Jul 2022 22:38:55 -0400
Subject: [PATCH 03/12] STY: Simplify flake8 rules; fix issue found

---
 nibabel/nifti1.py | 3 ---
 setup.cfg         | 5 +++--
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 1bffac10ce..a951522c8d 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -524,9 +524,6 @@ def get_sizeondisk(self):
     def __repr__(self):
         return "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self)
 
-    def __cmp__(self, other):
-        return cmp(list(self), list(other))
-
     def write_to(self, fileobj, byteswap):
         """ Write header extensions to fileobj
 
diff --git a/setup.cfg b/setup.cfg
index 336958c605..0374c54f98 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -40,12 +40,13 @@ all =
 
 [flake8]
 max-line-length = 100
-ignore = D100,D101,D102,D103,D104,D105,D200,D201,D202,D204,D205,D208,D209,D210,D300,D301,D400,D401,D403,E24,E121,E123,E126,E226,E266,E402,E704,E731,F821,I100,I101,I201,N802,N803,N804,N806,W503,W504,W605
+extend-ignore = E203,E266,E402,E731
 exclude =
     *test*
     *sphinx*
     nibabel/externals/*
-    */__init__.py
+per-file-ignores =
+    */__init__.py: F401
 
 [versioneer]
 VCS = git

From 29c733d558c25237dac2dd1adb975f040be1f669 Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Fri, 22 Jul 2022 23:21:41 -0400
Subject: [PATCH 04/12] MNT: Add make rule for building an ignore file for
 git-blame

---
 Makefile | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 2190f815fc..093e177c36 100644
--- a/Makefile
+++ b/Makefile
@@ -78,6 +78,9 @@ distclean: clean
 
 $(WWW_DIR):
 	if [ ! -d $(WWW_DIR) ]; then mkdir -p $(WWW_DIR); fi
 
+.git-blame-ignore-revs:
+	git log --grep "\[git-blame-ignore-rev\]" --pretty=format:"# %ad - %ae - %s%n%H" \
+		> .git-blame-ignore-revs
 #
 # Tests
@@ -288,4 +291,4 @@ rm-orig:
 	# Remove .orig temporary diff files generated by git
 	find . -name "*.orig" -print | grep -v "fsaverage" | xargs rm
 
-.PHONY: orig-src pylint all build
+.PHONY: orig-src pylint all build .git-blame-ignore-revs
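The rule above is designed to pair with git's `blame.ignoreRevsFile` setting.
Assuming a checkout of the repository, commits tagged [git-blame-ignore-rev]
(such as PATCH 05 below) can then be hidden from blame output with:

    make .git-blame-ignore-revs
    git config blame.ignoreRevsFile .git-blame-ignore-revs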
nibabel/nicom/structreader.py | 10 +- nibabel/nicom/tests/__init__.py | 4 +- nibabel/nicom/tests/data_pkgs.py | 12 +- nibabel/nicom/tests/test_ascconv.py | 32 +- nibabel/nicom/tests/test_csareader.py | 11 +- nibabel/nicom/tests/test_dicomreaders.py | 16 +- nibabel/nicom/tests/test_dicomwrappers.py | 144 ++-- nibabel/nicom/tests/test_dwiparams.py | 11 +- nibabel/nicom/tests/test_structreader.py | 6 +- nibabel/nicom/tests/test_utils.py | 4 +- nibabel/nicom/utils.py | 4 +- nibabel/nifti1.py | 710 +++++++++--------- nibabel/nifti2.py | 39 +- nibabel/onetime.py | 6 +- nibabel/openers.py | 42 +- nibabel/optpkg.py | 10 +- nibabel/orientations.py | 38 +- nibabel/parrec.py | 358 +++++---- nibabel/pkg_info.py | 13 +- nibabel/processing.py | 99 ++- nibabel/pydicom_compat.py | 9 +- nibabel/quaternions.py | 68 +- nibabel/rstutils.py | 44 +- nibabel/spaces.py | 10 +- nibabel/spatialimages.py | 128 ++-- nibabel/spm2analyze.py | 29 +- nibabel/spm99analyze.py | 59 +- nibabel/streamlines/__init__.py | 26 +- nibabel/streamlines/array_sequence.py | 185 +++-- nibabel/streamlines/header.py | 31 +- nibabel/streamlines/tck.py | 117 +-- .../streamlines/tests/test_array_sequence.py | 123 ++- nibabel/streamlines/tests/test_streamlines.py | 192 +++-- nibabel/streamlines/tests/test_tck.py | 87 +-- nibabel/streamlines/tests/test_tractogram.py | 557 +++++++------- .../streamlines/tests/test_tractogram_file.py | 8 +- nibabel/streamlines/tests/test_trk.py | 224 +++--- nibabel/streamlines/tractogram.py | 171 +++-- nibabel/streamlines/tractogram_file.py | 24 +- nibabel/streamlines/trk.py | 261 ++++--- nibabel/streamlines/utils.py | 6 +- nibabel/testing/__init__.py | 40 +- nibabel/testing/helpers.py | 13 +- nibabel/testing/np_features.py | 5 +- nibabel/tests/data/check_parrec_reslice.py | 15 +- nibabel/tests/data/gen_standard.py | 23 +- nibabel/tests/data/make_moved_anat.py | 6 +- nibabel/tests/nibabel_data.py | 11 +- nibabel/tests/scriptrunner.py | 35 +- nibabel/tests/test_affines.py | 94 +-- nibabel/tests/test_analyze.py | 156 ++-- nibabel/tests/test_api_validators.py | 26 +- nibabel/tests/test_arrayproxy.py | 101 ++- nibabel/tests/test_arraywriters.py | 135 ++-- nibabel/tests/test_batteryrunners.py | 2 +- nibabel/tests/test_brikhead.py | 54 +- nibabel/tests/test_casting.py | 64 +- nibabel/tests/test_data.py | 20 +- nibabel/tests/test_dataobj_images.py | 6 +- nibabel/tests/test_deprecated.py | 14 +- nibabel/tests/test_deprecator.py | 65 +- nibabel/tests/test_dft.py | 23 +- nibabel/tests/test_diff.py | 19 +- nibabel/tests/test_ecat.py | 104 +-- nibabel/tests/test_ecat_data.py | 4 +- nibabel/tests/test_endiancodes.py | 4 +- nibabel/tests/test_environment.py | 4 +- nibabel/tests/test_euler.py | 51 +- nibabel/tests/test_filebasedimages.py | 17 +- nibabel/tests/test_filehandles.py | 2 +- nibabel/tests/test_fileholders.py | 2 +- nibabel/tests/test_filename_parser.py | 54 +- nibabel/tests/test_files_interface.py | 16 +- nibabel/tests/test_fileslice.py | 469 +++++++----- nibabel/tests/test_fileutils.py | 2 +- nibabel/tests/test_floating.py | 102 ++- nibabel/tests/test_funcs.py | 32 +- nibabel/tests/test_image_api.py | 126 ++-- nibabel/tests/test_image_load_save.py | 24 +- nibabel/tests/test_image_types.py | 93 ++- nibabel/tests/test_imageclasses.py | 2 +- nibabel/tests/test_imageglobals.py | 2 +- nibabel/tests/test_imagestats.py | 2 +- nibabel/tests/test_init.py | 39 +- nibabel/tests/test_loadsave.py | 78 +- nibabel/tests/test_minc1.py | 83 +- nibabel/tests/test_minc2.py | 83 +- nibabel/tests/test_minc2_data.py | 56 +- 
nibabel/tests/test_mriutils.py | 5 +- nibabel/tests/test_nibabel_data.py | 2 +- nibabel/tests/test_nifti1.py | 336 +++++---- nibabel/tests/test_nifti2.py | 33 +- nibabel/tests/test_onetime.py | 1 + nibabel/tests/test_openers.py | 179 +++-- nibabel/tests/test_optpkg.py | 9 +- nibabel/tests/test_orientations.py | 269 +++---- nibabel/tests/test_parrec.py | 374 +++++---- nibabel/tests/test_parrec_data.py | 2 +- nibabel/tests/test_pkg_info.py | 75 +- nibabel/tests/test_processing.py | 107 ++- nibabel/tests/test_proxy_api.py | 87 ++- nibabel/tests/test_quaternions.py | 18 +- nibabel/tests/test_recoder.py | 6 +- nibabel/tests/test_removalschedule.py | 167 ++-- nibabel/tests/test_round_trip.py | 47 +- nibabel/tests/test_rstutils.py | 65 +- nibabel/tests/test_scaling.py | 107 +-- nibabel/tests/test_scripts.py | 237 +++--- nibabel/tests/test_spaces.py | 74 +- nibabel/tests/test_spatialimages.py | 115 +-- nibabel/tests/test_spm2analyze.py | 32 +- nibabel/tests/test_spm99analyze.py | 166 ++-- nibabel/tests/test_testing.py | 69 +- nibabel/tests/test_tmpdirs.py | 2 +- nibabel/tests/test_tripwire.py | 5 +- nibabel/tests/test_viewers.py | 9 +- nibabel/tests/test_volumeutils.py | 362 +++++---- nibabel/tests/test_wrapstruct.py | 38 +- nibabel/tmpdirs.py | 10 +- nibabel/tripwire.py | 11 +- nibabel/viewers.py | 118 +-- nibabel/volumeutils.py | 195 ++--- nibabel/wrapstruct.py | 67 +- nibabel/xmlutils.py | 14 +- 207 files changed, 8734 insertions(+), 7556 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index f96e80f0eb..ad14fc52dc 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -11,6 +11,7 @@ from .pkg_info import __version__ from .info import long_description as __doc__ + __doc__ += """ Quickstart ========== @@ -42,6 +43,7 @@ from . import spm2analyze as spm2 from . import nifti1 as ni1 from . import ecat + # object imports from .fileholders import FileHolder, FileHolderError from .loadsave import load, save @@ -56,10 +58,14 @@ from .cifti2 import Cifti2Header, Cifti2Image from .gifti import GiftiImage from .freesurfer import MGHImage -from .funcs import (squeeze_image, concat_images, four_to_three, - as_closest_canonical) -from .orientations import (io_orientation, flip_axis, OrientationError, - apply_orientation, aff2axcodes) +from .funcs import squeeze_image, concat_images, four_to_three, as_closest_canonical +from .orientations import ( + io_orientation, + flip_axis, + OrientationError, + apply_orientation, + aff2axcodes, +) from .imageclasses import all_image_classes from . import mriutils from . 
import streamlines @@ -72,9 +78,15 @@ def get_info(): return _get_pkg_info(os.path.dirname(__file__)) -def test(label=None, verbose=1, extra_argv=None, - doctests=False, coverage=False, raise_warnings=None, - timer=False): +def test( + label=None, + verbose=1, + extra_argv=None, + doctests=False, + coverage=False, + raise_warnings=None, + timer=False, +): """ Run tests for nibabel using pytest @@ -107,29 +119,30 @@ def test(label=None, verbose=1, extra_argv=None, Returns the result of running the tests as a ``pytest.ExitCode`` enum """ import pytest + args = [] if label is not None: - raise NotImplementedError("Labels cannot be set at present") + raise NotImplementedError('Labels cannot be set at present') verbose = int(verbose) if verbose > 0: - args.append("-" + "v" * verbose) + args.append('-' + 'v' * verbose) elif verbose < 0: - args.append("-" + "q" * -verbose) + args.append('-' + 'q' * -verbose) if extra_argv: args.extend(extra_argv) if doctests: - args.append("--doctest-modules") + args.append('--doctest-modules') if coverage: - args.extend(["--cov", "nibabel"]) + args.extend(['--cov', 'nibabel']) if raise_warnings is not None: - raise NotImplementedError("Warning filters are not implemented") + raise NotImplementedError('Warning filters are not implemented') if timer: - raise NotImplementedError("Timing is not implemented") + raise NotImplementedError('Timing is not implemented') - args.extend(["--pyargs", "nibabel"]) + args.extend(['--pyargs', 'nibabel']) return pytest.main(args=args) @@ -157,9 +170,10 @@ def bench(label=None, verbose=1, extra_argv=None): Returns the result of running the tests as a ``pytest.ExitCode`` enum """ from pkg_resources import resource_filename - config = resource_filename("nibabel", "benchmarks/pytest.benchmark.ini") + + config = resource_filename('nibabel', 'benchmarks/pytest.benchmark.ini') args = [] if extra_argv is not None: args.extend(extra_argv) - args.extend(["-c", config]) + args.extend(['-c', config]) return test(label, verbose, extra_argv=args) diff --git a/nibabel/affines.py b/nibabel/affines.py index 9fd141a8b7..c8bc586aa7 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Utility routines for working with points and affine transforms +"""Utility routines for working with points and affine transforms """ import numpy as np @@ -8,14 +8,15 @@ class AffineError(ValueError): - """ Errors in calculating or using affines """ + """Errors in calculating or using affines""" + # Inherits from ValueError to keep compatibility with ValueError previously # raised in append_diag pass def apply_affine(aff, pts, inplace=False): - """ Apply affine matrix `aff` to points `pts` + """Apply affine matrix `aff` to points `pts` Returns result of application of `aff` to the *right* of `pts`. The coordinate dimension of `pts` should be the last. @@ -142,7 +143,7 @@ def to_matvec(transform): def from_matvec(matrix, vector=None): - """ Combine a matrix and vector into an homogeneous affine + """Combine a matrix and vector into an homogeneous affine Combine a rotation / scaling / shearing matrix and translation vector into a transform in homogeneous coordinates. @@ -185,14 +186,14 @@ def from_matvec(matrix, vector=None): nin, nout = matrix.shape t = np.zeros((nin + 1, nout + 1), matrix.dtype) t[0:nin, 0:nout] = matrix - t[nin, nout] = 1. 
+ t[nin, nout] = 1.0 if vector is not None: t[0:nin, nout] = vector return t def append_diag(aff, steps, starts=()): - """ Add diagonal elements `steps` and translations `starts` to affine + """Add diagonal elements `steps` and translations `starts` to affine Typical use is in expanding 4x4 affines to larger dimensions. Nipy is the main consumer because it uses NxM affines, whereas we generally only use @@ -236,8 +237,7 @@ def append_diag(aff, steps, starts=()): raise AffineError('Steps should have same length as starts') old_n_out, old_n_in = aff.shape[0] - 1, aff.shape[1] - 1 # make new affine - aff_plus = np.zeros((old_n_out + n_steps + 1, - old_n_in + n_steps + 1), dtype=aff.dtype) + aff_plus = np.zeros((old_n_out + n_steps + 1, old_n_in + n_steps + 1), dtype=aff.dtype) # Get stuff from old affine aff_plus[:old_n_out, :old_n_in] = aff[:old_n_out, :old_n_in] aff_plus[:old_n_out, -1] = aff[:old_n_out, -1] @@ -250,7 +250,7 @@ def append_diag(aff, steps, starts=()): def dot_reduce(*args): - r""" Apply numpy dot product function from right to left on arrays + r"""Apply numpy dot product function from right to left on arrays For passed arrays :math:`A, B, C, ... Z` returns :math:`A \dot B \dot C ... \dot Z` where "." is the numpy array dot product. @@ -270,7 +270,7 @@ def dot_reduce(*args): def voxel_sizes(affine): - r""" Return voxel size for each input axis given `affine` + r"""Return voxel size for each input axis given `affine` The `affine` is the mapping between array (voxel) coordinates and mm (world) coordinates. @@ -308,7 +308,7 @@ def voxel_sizes(affine): but in general has length (N-1) where input `affine` is shape (M, N). """ top_left = affine[:-1, :-1] - return np.sqrt(np.sum(top_left ** 2, axis=0)) + return np.sqrt(np.sum(top_left**2, axis=0)) def obliquity(affine): @@ -340,7 +340,7 @@ def obliquity(affine): def rescale_affine(affine, shape, zooms, new_shape=None): - """ Return a new affine matrix with updated voxel sizes (zooms) + """Return a new affine matrix with updated voxel sizes (zooms) This function preserves the rotations and shears of the original affine, as well as the RAS location of the central voxel of the diff --git a/nibabel/analyze.py b/nibabel/analyze.py index a1c1cf1d2f..648c75d68a 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to the basic Mayo Analyze format +"""Read / write access to the basic Mayo Analyze format =========================== The Analyze header format @@ -84,14 +84,18 @@ import numpy as np -from .volumeutils import (native_code, swapped_code, make_dt_codes, - shape_zoom_affine, array_from_file, seek_tell, - apply_read_scaling) -from .arraywriters import (make_array_writer, get_slope_inter, WriterError, - ArrayWriter) +from .volumeutils import ( + native_code, + swapped_code, + make_dt_codes, + shape_zoom_affine, + array_from_file, + seek_tell, + apply_read_scaling, +) +from .arraywriters import make_array_writer, get_slope_inter, WriterError, ArrayWriter from .wrapstruct import LabeledWrapStruct -from .spatialimages import (HeaderDataError, HeaderTypeError, - SpatialImage) +from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage from .fileholders import copy_file_map from .batteryrunners import Report from .arrayproxy import ArrayProxy @@ -105,7 +109,7 @@ ('extents', 'i4'), ('session_error', 'i2'), ('regular', 'S1'), - ('hkey_un0', 'S1') + ('hkey_un0', 'S1'), ] image_dimension_dtd = [ ('dim', 'i2', (8,)), @@ -125,7 +129,7 @@ ('compressed', 'i4'), ('verified', 'i4'), ('glmax', 'i4'), - ('glmin', 'i4') + ('glmin', 'i4'), ] data_history_dtd = [ ('descrip', 'S80'), @@ -145,12 +149,11 @@ ('omax', 'i4'), ('omin', 'i4'), ('smax', 'i4'), - ('smin', 'i4') + ('smin', 'i4'), ] # Full header numpy dtype combined across sub-fields -header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + - data_history_dtd) +header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + data_history_dtd) _dtdefs = ( # code, conversion function, equivalent dtype, aliases (0, 'none', np.void), @@ -161,21 +164,21 @@ (16, 'float32', np.float32), (32, 'complex64', np.complex64), # numpy complex format? 
(64, 'float64', np.float64), - (128, 'RGB', np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1')])), - (255, 'all', np.void)) + (128, 'RGB', np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])), + (255, 'all', np.void), +) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) class AnalyzeHeader(LabeledWrapStruct): - """ Class for basic analyze header + """Class for basic analyze header Implements zoom-only setting of affine transform, and no image scaling """ + # Copies of module-level definitions template_dtype = header_dtype _data_type_codes = data_type_codes @@ -190,11 +193,8 @@ class AnalyzeHeader(LabeledWrapStruct): sizeof_hdr = 348 - def __init__(self, - binaryblock=None, - endianness=None, - check=True): - """ Initialize header from binary data block + def __init__(self, binaryblock=None, endianness=None, check=True): + """Initialize header from binary data block Parameters ---------- @@ -252,7 +252,7 @@ def __init__(self, @classmethod def guessed_endian(klass, hdr): - """ Guess intended endianness from mapping-like ``hdr`` + """Guess intended endianness from mapping-like ``hdr`` Parameters ---------- @@ -335,8 +335,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - """ Return header data for empty header with given endianness - """ + """Return header data for empty header with given endianness""" hdr_data = super(AnalyzeHeader, klass).default_structarr(endianness) hdr_data['sizeof_hdr'] = klass.sizeof_hdr hdr_data['dim'] = 1 @@ -348,7 +347,7 @@ def default_structarr(klass, endianness=None): @classmethod def from_header(klass, header=None, check=True): - """ Class method to create header from another header + """Class method to create header from another header Parameters ---------- @@ -394,9 +393,11 @@ def from_header(klass, header=None, check=True): try: obj.set_data_dtype(orig_code) except HeaderDataError: - raise HeaderDataError(f"Input header {header.__class__} has " - f"datatype {header.get_value_label('datatype')} " - f"but output header {klass} does not support it") + raise HeaderDataError( + f'Input header {header.__class__} has ' + f"datatype {header.get_value_label('datatype')} " + f'but output header {klass} does not support it' + ) obj.set_data_dtype(header.get_data_dtype()) obj.set_data_shape(header.get_data_shape()) obj.set_zooms(header.get_zooms()) @@ -405,7 +406,7 @@ def from_header(klass, header=None, check=True): return obj def _clean_after_mapping(self): - """ Set format-specific stuff after converting header from mapping + """Set format-specific stuff after converting header from mapping This routine cleans up Analyze-type headers that have had their fields set from an Analyze map returned by the ``as_analyze_map`` method. @@ -426,7 +427,7 @@ def _clean_after_mapping(self): pass def raw_data_from_fileobj(self, fileobj): - """ Read unscaled data array from `fileobj` + """Read unscaled data array from `fileobj` Parameters ---------- @@ -444,7 +445,7 @@ def raw_data_from_fileobj(self, fileobj): return array_from_file(shape, dtype, fileobj, offset) def data_from_fileobj(self, fileobj): - """ Read scaled data array from `fileobj` + """Read scaled data array from `fileobj` Use this routine to get the scaled image data from an image file `fileobj`, given a header `self`. 
"Scaled" means, with any header @@ -478,7 +479,7 @@ def data_from_fileobj(self, fileobj): return apply_read_scaling(data, slope, inter) def data_to_fileobj(self, data, fileobj, rescale=True): - """ Write `data` to `fileobj`, maybe rescaling data, modifying `self` + """Write `data` to `fileobj`, maybe rescaling data, modifying `self` In writing the data, we match the header to the written data, by setting the header scaling factors, iff `rescale` is True. Thus we @@ -512,15 +513,13 @@ def data_to_fileobj(self, data, fileobj, rescale=True): data = np.asanyarray(data) shape = self.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % - ', '.join(str(s) for s in shape)) + raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) out_dtype = self.get_data_dtype() if rescale: try: - arr_writer = make_array_writer(data, - out_dtype, - self.has_data_slope, - self.has_data_intercept) + arr_writer = make_array_writer( + data, out_dtype, self.has_data_slope, self.has_data_intercept + ) except WriterError as e: raise HeaderTypeError(str(e)) else: @@ -530,7 +529,7 @@ def data_to_fileobj(self, data, fileobj, rescale=True): self.set_slope_inter(*get_slope_inter(arr_writer)) def get_data_dtype(self): - """ Get numpy dtype for data + """Get numpy dtype for data For examples see ``set_data_dtype`` """ @@ -539,7 +538,7 @@ def get_data_dtype(self): return dtype.newbyteorder(self.endianness) def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code or dtype or type + """Set numpy dtype for data from code or dtype or type Examples -------- @@ -568,22 +567,19 @@ def set_data_dtype(self, datatype): try: dt = np.dtype(dt) except TypeError: - raise HeaderDataError( - f'data dtype "{datatype}" not recognized') + raise HeaderDataError(f'data dtype "{datatype}" not recognized') if dt not in self._data_type_codes: - raise HeaderDataError( - f'data dtype "{datatype}" not supported') + raise HeaderDataError(f'data dtype "{datatype}" not supported') code = self._data_type_codes[dt] dtype = self._data_type_codes.dtype[code] # test for void, being careful of user-defined types if dtype.type is np.void and not dtype.fields: - raise HeaderDataError( - f'data dtype "{datatype}" known but not supported') + raise HeaderDataError(f'data dtype "{datatype}" known but not supported') self._structarr['datatype'] = code self._structarr['bitpix'] = dtype.itemsize * 8 def get_data_shape(self): - """ Get shape of data + """Get shape of data Examples -------- @@ -602,11 +598,11 @@ def get_data_shape(self): dims = self._structarr['dim'] ndims = dims[0] if ndims == 0: - return 0, - return tuple(int(d) for d in dims[1:ndims + 1]) + return (0,) + return tuple(int(d) for d in dims[1 : ndims + 1]) def set_data_shape(self, shape): - """ Set shape of data + """Set shape of data If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -621,20 +617,20 @@ def set_data_shape(self, shape): dims[:] = 1 dims[0] = ndims try: - dims[1:ndims + 1] = shape + dims[1 : ndims + 1] = shape except (ValueError, OverflowError): # numpy 1.4.1 at least generates a ValueError from trying to set a # python long into an int64 array (dims are int64 for nifti2) values_fit = False else: - values_fit = np.all(dims[1:ndims + 1] == shape) + values_fit = np.all(dims[1 : ndims + 1] == shape) # Error if we did not succeed setting dimensions if not values_fit: raise HeaderDataError(f'shape {shape} does not fit in dim datatype') - 
self._structarr['pixdim'][ndims + 1:] = 1.0 + self._structarr['pixdim'][ndims + 1 :] = 1.0 def get_base_affine(self): - """ Get affine from basic (shared) header fields + """Get affine from basic (shared) header fields Note that we get the translations from the center of the image. @@ -655,14 +651,14 @@ def get_base_affine(self): hdr = self._structarr dims = hdr['dim'] ndim = dims[0] - return shape_zoom_affine(hdr['dim'][1:ndim + 1], - hdr['pixdim'][1:ndim + 1], - self.default_x_flip) + return shape_zoom_affine( + hdr['dim'][1 : ndim + 1], hdr['pixdim'][1 : ndim + 1], self.default_x_flip + ) get_best_affine = get_base_affine def get_zooms(self): - """ Get zooms from header + """Get zooms from header Returns ------- @@ -687,10 +683,10 @@ def get_zooms(self): if ndim == 0: return (1.0,) pixdims = hdr['pixdim'] - return tuple(pixdims[1:ndim + 1]) + return tuple(pixdims[1 : ndim + 1]) def set_zooms(self, zooms): - """ Set zooms into header fields + """Set zooms into header fields See docstring for ``get_zooms`` for examples """ @@ -699,15 +695,14 @@ def set_zooms(self, zooms): ndim = dims[0] zooms = np.asarray(zooms) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' - % (ndim, ndim)) + raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) if np.any(zooms < 0): raise HeaderDataError('zooms must be positive') pixdims = hdr['pixdim'] - pixdims[1:ndim + 1] = zooms[:] + pixdims[1 : ndim + 1] = zooms[:] def as_analyze_map(self): - """ Return header as mapping for conversion to Analyze types + """Return header as mapping for conversion to Analyze types Collect data from custom header type to fill in fields for Analyze and derived header types (such as Nifti1 and Nifti2). @@ -746,12 +741,11 @@ def as_analyze_map(self): return self def set_data_offset(self, offset): - """ Set offset into data file to read data - """ + """Set offset into data file to read data""" self._structarr['vox_offset'] = offset def get_data_offset(self): - """ Return offset into data file to read data + """Return offset into data file to read data Examples -------- @@ -765,14 +759,14 @@ def get_data_offset(self): return int(self._structarr['vox_offset']) def get_slope_inter(self): - """ Get scalefactor and intercept + """Get scalefactor and intercept These are not implemented for basic Analyze """ return None, None def set_slope_inter(self, slope, inter=None): - """ Set slope and / or intercept into header + """Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -789,19 +783,14 @@ def set_slope_inter(self, slope, inter=None): inter : None or float, optional If float, value must be 0.0 or we raise a ``HeaderTypeError`` """ - if ((slope in (None, 1) or np.isnan(slope)) and - (inter in (None, 0) or np.isnan(inter))): + if (slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter)): return - raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' - 'for Analyze headers') + raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' 'for Analyze headers') @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ - return (klass._chk_sizeof_hdr, - klass._chk_datatype, - klass._chk_bitpix, - klass._chk_pixdims) + """Return sequence of check functions for this class""" + return (klass._chk_sizeof_hdr, klass._chk_datatype, klass._chk_bitpix, klass._chk_pixdims) """ Check functions in format 
expected by BatteryRunner class """ @@ -893,15 +882,16 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) bs_hdr_struct = hdr_struct.byteswap() return 348 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr']) class AnalyzeImage(SpatialImage): - """ Class for basic Analyze format image - """ + """Class for basic Analyze format image""" + header_class = AnalyzeHeader _meta_sniff_len = header_class.sizeof_hdr files_types = (('image', '.img'), ('header', '.hdr')) @@ -913,16 +903,15 @@ class AnalyzeImage(SpatialImage): ImageArrayProxy = ArrayProxy - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None, dtype=None): - super(AnalyzeImage, self).__init__( - dataobj, affine, header, extra, file_map) + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None): + super(AnalyzeImage, self).__init__(dataobj, affine, header, extra, file_map) # Reset consumable values self._header.set_data_offset(0) self._header.set_slope_inter(None, None) if dtype is not None: self.set_data_dtype(dtype) + __init__.__doc__ = SpatialImage.__init__.__doc__ def get_data_dtype(self): @@ -933,7 +922,7 @@ def set_data_dtype(self, dtype): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -971,20 +960,21 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): imgf = img_fh.fileobj if imgf is None: imgf = img_fh.filename - data = klass.ImageArrayProxy(imgf, hdr_copy, mmap=mmap, - keep_file_open=keep_file_open) + data = klass.ImageArrayProxy(imgf, hdr_copy, mmap=mmap, keep_file_open=keep_file_open) # Initialize without affine to allow header to pass through unmodified img = klass(data, None, header, file_map=file_map) # set affine from header though img._affine = header.get_best_affine() - img._load_cache = {'header': hdr_copy, - 'affine': img._affine.copy(), - 'file_map': copy_file_map(file_map)} + img._load_cache = { + 'header': hdr_copy, + 'affine': img._affine.copy(), + 'file_map': copy_file_map(file_map), + } return img @staticmethod def _get_fileholders(file_map): - """ Return fileholder for header and image + """Return fileholder for header and image Allows single-file image types to return one fileholder for both types. 
For Analyze there are two fileholders, one for the header, one for the @@ -993,7 +983,7 @@ def _get_fileholders(file_map): return file_map['header'], file_map['image'] def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -1022,10 +1012,9 @@ def to_file_map(self, file_map=None, dtype=None): scale_me = np.all(np.isnan((slope, inter))) try: if scale_me: - arr_writer = make_array_writer(data, - out_dtype, - hdr.has_data_slope, - hdr.has_data_intercept) + arr_writer = make_array_writer( + data, out_dtype, hdr.has_data_slope, hdr.has_data_intercept + ) else: arr_writer = ArrayWriter(data, out_dtype, check_scaling=False) except WriterError: diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index dc9b171c0b..bb97b8efb0 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Array proxy base class +"""Array proxy base class The proxy API is - at minimum: @@ -55,7 +55,7 @@ class ArrayProxy: - """ Class to act as proxy for the array that can be read from a file + """Class to act as proxy for the array that can be read from a file The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. @@ -84,6 +84,7 @@ class ArrayProxy: See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for examples. """ + _default_order = 'F' def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=None): @@ -138,25 +139,30 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non self.file_like = file_like if hasattr(spec, 'get_data_shape'): slope, inter = spec.get_slope_inter() - par = (spec.get_data_shape(), - spec.get_data_dtype(), - spec.get_data_offset(), - 1. if slope is None else slope, - 0. if inter is None else inter) + par = ( + spec.get_data_shape(), + spec.get_data_dtype(), + spec.get_data_offset(), + 1.0 if slope is None else slope, + 0.0 if inter is None else inter, + ) elif 2 <= len(spec) <= 5: - optional = (0, 1., 0.) - par = spec + optional[len(spec) - 2:] + optional = (0, 1.0, 0.0) + par = spec + optional[len(spec) - 2 :] else: raise TypeError('spec must be tuple of length 2-5 or header object') # Warn downstream users that the class variable order is going away if hasattr(self.__class__, 'order'): - warnings.warn(f'Class {self.__class__} has an `order` class variable. ' - 'ArrayProxy subclasses should rename this variable to `_default_order` ' - 'to avoid conflict with instance variables.\n' - '* deprecated in version: 5.0\n' - '* will raise error in version: 7.0\n', - DeprecationWarning, stacklevel=2) + warnings.warn( + f'Class {self.__class__} has an `order` class variable. ' + 'ArrayProxy subclasses should rename this variable to `_default_order` ' + 'to avoid conflict with instance variables.\n' + '* deprecated in version: 5.0\n' + '* will raise error in version: 7.0\n', + DeprecationWarning, + stacklevel=2, + ) # Override _default_order with order, to follow intent of subclasser self._default_order = self.order @@ -170,8 +176,9 @@ def __init__(self, file_like, spec, *, mmap=True, order=None, keep_file_open=Non self.order = order # Flags to keep track of whether a single ImageOpener is created, and # whether a single underlying file handle is created. 
- self._keep_file_open, self._persist_opener = \ - self._should_keep_file_open(file_like, keep_file_open) + self._keep_file_open, self._persist_opener = self._should_keep_file_open( + file_like, keep_file_open + ) self._lock = RLock() def __del__(self): @@ -183,13 +190,13 @@ def __del__(self): self._opener = None def __getstate__(self): - """Returns the state of this ``ArrayProxy`` during pickling. """ + """Returns the state of this ``ArrayProxy`` during pickling.""" state = self.__dict__.copy() state.pop('_lock', None) return state def __setstate__(self, state): - """Sets the state of this ``ArrayProxy`` during unpickling. """ + """Sets the state of this ``ArrayProxy`` during unpickling.""" self.__dict__.update(state) self._lock = RLock() @@ -260,8 +267,10 @@ def _should_keep_file_open(self, file_like, keep_file_open): if keep_file_open is None: keep_file_open = KEEP_FILE_OPEN_DEFAULT if keep_file_open not in (True, False): - raise ValueError("nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT " - f"must be boolean. Found: {keep_file_open}") + raise ValueError( + 'nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT ' + f'must be boolean. Found: {keep_file_open}' + ) elif keep_file_open not in (True, False): raise ValueError('keep_file_open must be one of {None, True, False}') @@ -317,32 +326,35 @@ def _get_fileobj(self): """ if self._persist_opener: if not hasattr(self, '_opener'): - self._opener = openers.ImageOpener( - self.file_like, keep_open=self._keep_file_open) + self._opener = openers.ImageOpener(self.file_like, keep_open=self._keep_file_open) yield self._opener else: - with openers.ImageOpener( - self.file_like, keep_open=False) as opener: + with openers.ImageOpener(self.file_like, keep_open=False) as opener: yield opener def _get_unscaled(self, slicer): - if canonical_slicers(slicer, self._shape, False) == \ - canonical_slicers((), self._shape, False): + if canonical_slicers(slicer, self._shape, False) == canonical_slicers( + (), self._shape, False + ): with self._get_fileobj() as fileobj, self._lock: - return array_from_file(self._shape, - self._dtype, - fileobj, - offset=self._offset, - order=self.order, - mmap=self._mmap) + return array_from_file( + self._shape, + self._dtype, + fileobj, + offset=self._offset, + order=self.order, + mmap=self._mmap, + ) with self._get_fileobj() as fileobj: - return fileslice(fileobj, - slicer, - self._shape, - self._dtype, - self._offset, - order=self.order, - lock=self._lock) + return fileslice( + fileobj, + slicer, + self._shape, + self._dtype, + self._offset, + order=self.order, + lock=self._lock, + ) def _get_scaled(self, dtype, slicer): # Ensure scale factors have dtypes @@ -361,14 +373,14 @@ def _get_scaled(self, dtype, slicer): return scaled def get_unscaled(self): - """ Read data from file + """Read data from file This is an optional part of the proxy API """ return self._get_unscaled(slicer=()) def __array__(self, dtype=None): - """ Read data from file and apply scaling, casting to ``dtype`` + """Read data from file and apply scaling, casting to ``dtype`` If ``dtype`` is unspecified, the dtype of the returned array is the narrowest dtype that can represent the data without overflow. 
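# A minimal sketch (not part of the patch) of how the ArrayProxy methods in the
# hunks above are typically exercised; 'example.nii' is a hypothetical file,
# assumed here to be a 4D NIfTI image.
import numpy as np
import nibabel as nib

img = nib.load('example.nii')
proxy = img.dataobj                           # ArrayProxy; no data read yet
one_vol = proxy[..., 0]                       # __getitem__ fileslices one volume
scaled = np.asarray(proxy, dtype=np.float32)  # __array__: full read, scaled and cast
raw = proxy.get_unscaled()                    # on-disk values, slope/inter not applied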
@@ -397,31 +409,32 @@ def __getitem__(self, slicer): return self._get_scaled(dtype=None, slicer=slicer) def reshape(self, shape): - """ Return an ArrayProxy with a new shape, without modifying data """ + """Return an ArrayProxy with a new shape, without modifying data""" size = np.prod(self._shape) # Calculate new shape if not fully specified from operator import mul from functools import reduce + n_unknowns = len([e for e in shape if e == -1]) if n_unknowns > 1: - raise ValueError("can only specify one unknown dimension") + raise ValueError('can only specify one unknown dimension') elif n_unknowns == 1: known_size = reduce(mul, shape, -1) unknown_size = size // known_size shape = tuple(unknown_size if e == -1 else e for e in shape) if np.prod(shape) != size: - raise ValueError(f"cannot reshape array of size {size:d} into shape {shape!s}") - return self.__class__(file_like=self.file_like, - spec=(shape, self._dtype, self._offset, - self._slope, self._inter), - mmap=self._mmap) + raise ValueError(f'cannot reshape array of size {size:d} into shape {shape!s}') + return self.__class__( + file_like=self.file_like, + spec=(shape, self._dtype, self._offset, self._slope, self._inter), + mmap=self._mmap, + ) def is_proxy(obj): - """ Return True if `obj` is an array proxy - """ + """Return True if `obj` is an array proxy""" try: return obj.is_proxy except AttributeError: @@ -429,19 +442,17 @@ def is_proxy(obj): def reshape_dataobj(obj, shape): - """ Use `obj` reshape method if possible, else numpy reshape function - """ - return (obj.reshape(shape) if hasattr(obj, 'reshape') - else np.reshape(obj, shape)) + """Use `obj` reshape method if possible, else numpy reshape function""" + return obj.reshape(shape) if hasattr(obj, 'reshape') else np.reshape(obj, shape) def get_obj_dtype(obj): - """ Get the effective dtype of an array-like object """ + """Get the effective dtype of an array-like object""" if is_proxy(obj): # Read and potentially apply scaling to one value idx = (0,) * len(obj.shape) return obj[idx].dtype - elif hasattr(obj, "dtype"): + elif hasattr(obj, 'dtype'): # Trust the dtype (probably an ndarray) return obj.dtype else: diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index c2bbb2912c..1a80bcfa98 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -1,4 +1,4 @@ -""" Array writer objects +"""Array writer objects Array writers have init signature:: @@ -31,8 +31,15 @@ def __init__(self, array, out_dtype=None) import numpy as np -from .casting import (int_to_float, as_int, int_abs, type_info, floor_exact, - best_float, shared_range) +from .casting import ( + int_to_float, + as_int, + int_abs, + type_info, + floor_exact, + best_float, + shared_range, +) from .volumeutils import finite_range, array_to_file @@ -45,9 +52,8 @@ class ScalingError(WriterError): class ArrayWriter: - def __init__(self, array, out_dtype=None, **kwargs): - r""" Initialize array writer + r"""Initialize array writer Parameters ---------- @@ -92,10 +98,10 @@ def __init__(self, array, out_dtype=None, **kwargs): self._has_nan = None self._nan2zero = nan2zero if check_scaling and self.scaling_needed(): - raise WriterError("Scaling needed but cannot scale") + raise WriterError('Scaling needed but cannot scale') def scaling_needed(self): - """ Checks if scaling is needed for input array + """Checks if scaling is needed for input array Raises WriterError if no scaling possible. 
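# A minimal sketch (not part of the patch) of the writer API being restyled in
# this file, using only the names defined above.
from io import BytesIO

import numpy as np

from nibabel.arraywriters import get_slope_inter, make_array_writer

data = np.linspace(-1.0, 1.0, 100).reshape(10, 10)
aw = make_array_writer(data, np.int16, True, True)  # slope and intercept allowed
fobj = BytesIO()
aw.to_fileobj(fobj)                 # writes int16 bytes, scaled to span the type
slope, inter = get_slope_inter(aw)  # recover data as raw * slope + inter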
@@ -155,18 +161,17 @@ def scaling_needed(self): @property def array(self): - """ Return array from arraywriter """ + """Return array from arraywriter""" return self._array @property def out_dtype(self): - """ Return `out_dtype` from arraywriter """ + """Return `out_dtype` from arraywriter""" return self._out_dtype @property def has_nan(self): - """ True if array has NaNs - """ + """True if array has NaNs""" # Structured types raise an error for finite range; don't run finite # range unless we have to. if self._has_nan is None: @@ -177,7 +182,7 @@ def has_nan(self): return self._has_nan def finite_range(self): - """ Return (maybe cached) finite range of data array """ + """Return (maybe cached) finite range of data array""" if self._finite_range is None: mn, mx, has_nan = finite_range(self._array, True) self._finite_range = (mn, mx) @@ -185,14 +190,16 @@ def finite_range(self): return self._finite_range def _needs_nan2zero(self): - """ True if nan2zero check needed for writing array """ - return (self._nan2zero and - self._array.dtype.kind in 'fc' and - self.out_dtype.kind in 'iu' and - self.has_nan) + """True if nan2zero check needed for writing array""" + return ( + self._nan2zero + and self._array.dtype.kind in 'fc' + and self.out_dtype.kind in 'iu' + and self.has_nan + ) def to_fileobj(self, fileobj, order='F'): - """ Write array into `fileobj` + """Write array into `fileobj` Parameters ---------- @@ -200,18 +207,20 @@ def to_fileobj(self, fileobj, order='F'): order : {'F', 'C'} order (Fortran or C) to which to write array """ - array_to_file(self._array, - fileobj, - self._out_dtype, - offset=None, - mn=None, - mx=None, - order=order, - nan2zero=self._needs_nan2zero()) + array_to_file( + self._array, + fileobj, + self._out_dtype, + offset=None, + mn=None, + mx=None, + order=order, + nan2zero=self._needs_nan2zero(), + ) class SlopeArrayWriter(ArrayWriter): - """ ArrayWriter that can use scalefactor for writing arrays + """ArrayWriter that can use scalefactor for writing arrays The scalefactor allows the array writer to write floats to int output types, and rescale larger ints to smaller. It can therefore lose @@ -227,9 +236,8 @@ class SlopeArrayWriter(ArrayWriter): * calc_scale() - calculate slope to best write self.array """ - def __init__(self, array, out_dtype=None, calc_scale=True, - scaler_dtype=np.float32, **kwargs): - r""" Initialize array writer + def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float32, **kwargs): + r"""Initialize array writer Parameters ---------- @@ -286,7 +294,7 @@ def __init__(self, array, out_dtype=None, calc_scale=True, self.calc_scale() def scaling_needed(self): - """ Checks if scaling is needed for input array + """Checks if scaling is needed for input array Raises WriterError if no scaling possible. 
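# A minimal sketch (not part of the patch): SlopeArrayWriter, whose hunks
# continue below, scales by a slope alone (no intercept), so float data can be
# packed into integer types. The printed value is approximate.
import numpy as np

from nibabel.arraywriters import SlopeArrayWriter

aw = SlopeArrayWriter(np.array([-1.0, 0.0, 1.0]), np.int16)
print(aw.slope)  # roughly 1/32767; stored ints are data / slope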
@@ -312,7 +320,7 @@ def scaling_needed(self): return (mn, mx) != (np.inf, -np.inf) def reset(self): - """ Set object to values before any scaling calculation """ + """Set object to values before any scaling calculation""" self.slope = 1.0 self._finite_range = None self._scale_calced = False @@ -322,11 +330,11 @@ def _get_slope(self): def _set_slope(self, val): self._slope = np.squeeze(self.scaler_dtype.type(val)) + slope = property(_get_slope, _set_slope, None, 'get/set slope') def calc_scale(self, force=False): - """ Calculate / set scaling for floats/(u)ints to (u)ints - """ + """Calculate / set scaling for floats/(u)ints to (u)ints""" # If we've run already, return unless told otherwise if not force and self._scale_calced: return @@ -337,7 +345,7 @@ def calc_scale(self, force=False): self._scale_calced = True def _writing_range(self): - """ Finite range for thresholding on write """ + """Finite range for thresholding on write""" if self._out_dtype.kind in 'iu' and self._array.dtype.kind == 'f': mn, mx = self.finite_range() if (mn, mx) == (np.inf, -np.inf): # no finite data @@ -346,7 +354,7 @@ def _writing_range(self): return None, None def to_fileobj(self, fileobj, order='F'): - """ Write array into `fileobj` + """Write array into `fileobj` Parameters ---------- @@ -355,15 +363,17 @@ def to_fileobj(self, fileobj, order='F'): order (Fortran or C) to which to write array """ mn, mx = self._writing_range() - array_to_file(self._array, - fileobj, - self._out_dtype, - offset=None, - divslope=self.slope, - mn=mn, - mx=mx, - order=order, - nan2zero=self._needs_nan2zero()) + array_to_file( + self._array, + fileobj, + self._out_dtype, + offset=None, + divslope=self.slope, + mn=mn, + mx=mx, + order=order, + nan2zero=self._needs_nan2zero(), + ) def _do_scaling(self): arr = self._array @@ -383,7 +393,7 @@ def _do_scaling(self): out_max, out_min = info.max, info.min # If left as int64, uint64, comparisons will default to floats, and # these are inexact for > 2**53 - so convert to int - if (as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min)): + if as_int(mx) <= as_int(out_max) and as_int(mn) >= as_int(out_min): # already in range return # (u)int to (u)int scaling @@ -408,7 +418,7 @@ def _iu2iu(self): self._range_scale(mn, mx) def _range_scale(self, in_min, in_max): - """ Calculate scaling based on data range and output type """ + """Calculate scaling based on data range and output type""" out_dtype = self._out_dtype info = type_info(out_dtype) out_min, out_max = info['min'], info['max'] @@ -418,12 +428,12 @@ def _range_scale(self, in_min, in_max): # not lose precision because min/max are of fp type. 
out_min, out_max = np.array((out_min, out_max), dtype=big_float) else: # (u)int - out_min, out_max = [int_to_float(v, big_float) - for v in (out_min, out_max)] + out_min, out_max = [int_to_float(v, big_float) for v in (out_min, out_max)] if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: - raise WriterError('Cannot scale negative and positive ' - 'numbers to uint without intercept') + raise WriterError( + 'Cannot scale negative and positive ' 'numbers to uint without intercept' + ) if in_max <= 0: # All input numbers <= 0 self.slope = in_min / out_max else: # All input numbers > 0 @@ -438,7 +448,7 @@ def _range_scale(self, in_min, in_max): class SlopeInterArrayWriter(SlopeArrayWriter): - """ Array writer that can use slope and intercept to scale array + """Array writer that can use slope and intercept to scale array The writer can subtract an intercept, and divided by a slope, in order to be able to convert floating point values into a (u)int range, or to convert @@ -455,9 +465,8 @@ class SlopeInterArrayWriter(SlopeArrayWriter): * calc_scale() - calculate inter, slope to best write self.array """ - def __init__(self, array, out_dtype=None, calc_scale=True, - scaler_dtype=np.float32, **kwargs): - r""" Initialize array writer + def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float32, **kwargs): + r"""Initialize array writer Parameters ---------- @@ -498,14 +507,12 @@ def __init__(self, array, out_dtype=None, calc_scale=True, >>> (aw.slope, aw.inter) == (1.0, 128) True """ - super(SlopeInterArrayWriter, self).__init__(array, - out_dtype, - calc_scale, - scaler_dtype, - **kwargs) + super(SlopeInterArrayWriter, self).__init__( + array, out_dtype, calc_scale, scaler_dtype, **kwargs + ) def reset(self): - """ Set object to values before any scaling calculation """ + """Set object to values before any scaling calculation""" super(SlopeInterArrayWriter, self).reset() self.inter = 0.0 @@ -514,10 +521,11 @@ def _get_inter(self): def _set_inter(self, val): self._inter = np.squeeze(self.scaler_dtype.type(val)) + inter = property(_get_inter, _set_inter, None, 'get/set inter') def to_fileobj(self, fileobj, order='F'): - """ Write array into `fileobj` + """Write array into `fileobj` Parameters ---------- @@ -526,16 +534,18 @@ def to_fileobj(self, fileobj, order='F'): order (Fortran or C) to which to write array """ mn, mx = self._writing_range() - array_to_file(self._array, - fileobj, - self._out_dtype, - offset=None, - intercept=self.inter, - divslope=self.slope, - mn=mn, - mx=mx, - order=order, - nan2zero=self._needs_nan2zero()) + array_to_file( + self._array, + fileobj, + self._out_dtype, + offset=None, + intercept=self.inter, + divslope=self.slope, + mn=mn, + mx=mx, + order=order, + nan2zero=self._needs_nan2zero(), + ) def _iu2iu(self): # (u)int to (u)int @@ -546,8 +556,7 @@ def _iu2iu(self): # Options in this method are scaling using intercept only. These will # have to pass through ``self.scaler_dtype`` (because the intercept is # in this type). - o_min, o_max = [as_int(v) - for v in shared_range(self.scaler_dtype, out_dtype)] + o_min, o_max = [as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)] type_range = o_max - o_min mn2mx = mx - mn if mn2mx <= type_range: # might offset be enough? 
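# A minimal sketch (not part of the patch): _iu2iu above checks whether an
# intercept alone can shift integer data into the output range. Exact values
# depend on the calculation being reformatted here; this illustrates the idea.
import numpy as np

from nibabel.arraywriters import SlopeInterArrayWriter

arr = np.array([32000, 32100, 32200], dtype=np.int32)  # range of 200 fits uint8
aw = SlopeInterArrayWriter(arr, np.uint8)
print(aw.slope, aw.inter)  # slope stays 1.0; inter absorbs the ~32000 offset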
@@ -573,10 +582,9 @@ def _iu2iu(self): super(SlopeInterArrayWriter, self)._iu2iu() def _range_scale(self, in_min, in_max): - """ Calculate scaling, intercept based on data range and output type - """ + """Calculate scaling, intercept based on data range and output type""" if in_max == in_min: # Only one number in array - self.slope = 1. + self.slope = 1.0 self.inter = in_min return big_float = best_float() @@ -596,8 +604,7 @@ def _range_scale(self, in_min, in_max): in_min, in_max = as_int(in_min), as_int(in_max) in_range = int_to_float(in_max - in_min, big_float) # Cast to float for later processing. - in_min, in_max = [int_to_float(v, big_float) - for v in (in_min, in_max)] + in_min, in_max = [int_to_float(v, big_float) for v in (in_min, in_max)] if out_dtype.kind == 'f': # Type range, these are also floats info = type_info(out_dtype) @@ -676,7 +683,7 @@ def _range_scale(self, in_min, in_max): self.inter = inter self.slope = slope if not np.all(np.isfinite([self.slope, self.inter])): - raise ScalingError("Slope / inter not both finite") + raise ScalingError('Slope / inter not both finite') # Check nan fill value if not (0 in (in_min, in_max) and self._nan2zero and self.has_nan): return @@ -691,7 +698,7 @@ def _range_scale(self, in_min, in_max): def get_slope_inter(writer): - """ Return slope, intercept from array writer object + """Return slope, intercept from array writer object Parameters ---------- @@ -725,9 +732,8 @@ def get_slope_inter(writer): return slope, inter -def make_array_writer(data, out_type, has_slope=True, has_intercept=True, - **kwargs): - r""" Make array writer instance for array `data` and output type `out_type` +def make_array_writer(data, out_type, has_slope=True, has_intercept=True, **kwargs): + r"""Make array writer instance for array `data` and output type `out_type` Parameters ---------- diff --git a/nibabel/batteryrunners.py b/nibabel/batteryrunners.py index a860ba3778..50650b1647 100644 --- a/nibabel/batteryrunners.py +++ b/nibabel/batteryrunners.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Battery runner classes and Report classes +"""Battery runner classes and Report classes These classes / objects are for generic checking / fixing batteries @@ -104,15 +104,14 @@ def chk_pixdims(hdr, fix=True): hdr['pixdim'][1:4] = np.abs(hdr['pixdim'][1:4]) rep.fix_msg = 'setting to abs of pixdim values' return hdr, rep - """ class BatteryRunner: - """ Class to run set of checks """ + """Class to run set of checks""" def __init__(self, checks): - """ Initialize instance from sequence of `checks` + """Initialize instance from sequence of `checks` Parameters ---------- @@ -130,7 +129,7 @@ def __init__(self, checks): self._checks = checks def check_only(self, obj): - """ Run checks on `obj` returning reports + """Run checks on `obj` returning reports Parameters ---------- @@ -150,7 +149,7 @@ def check_only(self, obj): return reports def check_fix(self, obj): - """ Run checks, with fixes, on `obj` returning `obj`, reports + """Run checks, with fixes, on `obj` returning `obj`, reports Parameters ---------- @@ -175,13 +174,8 @@ def __len__(self): class Report: - - def __init__(self, - error=Exception, - problem_level=0, - problem_msg='', - fix_msg=''): - """ Initialize report with values + def __init__(self, error=Exception, problem_level=0, problem_msg='', fix_msg=''): + """Initialize report with values Parameters ---------- @@ -214,7 +208,7 @@ def __init__(self, self.fix_msg = fix_msg def __getstate__(self): - """ State that defines object + """State that defines object Returns ------- @@ -223,7 +217,7 @@ def __getstate__(self): return self.error, self.problem_level, self.problem_msg, self.fix_msg def __eq__(self, other): - """ are two BatteryRunner-like objects equal? + """are two BatteryRunner-like objects equal? Parameters ---------- @@ -243,26 +237,25 @@ def __eq__(self, other): return self.__getstate__() == other.__getstate__() def __ne__(self, other): - """ are two BatteryRunner-like objects not equal? + """are two BatteryRunner-like objects not equal? 
See docstring for __eq__ """ return not self == other def __str__(self): - """ Printable string for object """ + """Printable string for object""" return self.__dict__.__str__() @property def message(self): - """ formatted message string, including fix message if present - """ + """formatted message string, including fix message if present""" if self.fix_msg: return '; '.join((self.problem_msg, self.fix_msg)) return self.problem_msg def log_raise(self, logger, error_level=40): - """ Log problem, raise error if problem >= `error_level` + """Log problem, raise error if problem >= `error_level` Parameters ---------- @@ -277,7 +270,7 @@ def log_raise(self, logger, error_level=40): raise self.error(self.problem_msg) def write_raise(self, stream, error_level=40, log_level=30): - """ Write report to `stream` + """Write report to `stream` Parameters ---------- diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index ee0d25044d..7b59fbcaec 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -1,4 +1,4 @@ -""" Benchmarks for array_to_file routine +"""Benchmarks for array_to_file routine Run benchmarks with:: @@ -28,7 +28,7 @@ def bench_array_to_file(): img_shape = (128, 128, 64, 10) arr = rng.normal(size=img_shape) sys.stdout.flush() - print_git_title("\nArray to file") + print_git_title('\nArray to file') mtime = measure('array_to_file(arr, BytesIO(), np.float32)', repeat) print('%30s %6.2f' % ('Save float64 to float32', mtime)) mtime = measure('array_to_file(arr, BytesIO(), np.int16)', repeat) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index fb037eec29..71ea801756 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -124,7 +124,7 @@ def fmt_sliceobj(sliceobj): results = [] # We use the same random seed for each slice object, - seeds = [np.random.randint(0, 2 ** 32) for s in SLICEOBJS] + seeds = [np.random.randint(0, 2**32) for s in SLICEOBJS] for ti, test in enumerate(tests): @@ -144,8 +144,7 @@ def basefunc(): img.dataobj[fix_sliceobj(sliceobj)] def testfunc(): - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', - have_igzip): + with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', have_igzip): imggz.dataobj[fix_sliceobj(sliceobj)] # make sure nothing is floating around from the previous test @@ -167,8 +166,7 @@ def testfunc(): np.random.seed(seed) basetime = float(timeit(basefunc, number=NITERS)) / float(NITERS) - results.append((label, keep_open, sliceobj, testtime, basetime, - testmem, basemem)) + results.append((label, keep_open, sliceobj, testtime, basetime, testmem, basemem)) data = np.zeros((len(results), 4)) data[:, 0] = [r[3] for r in results] diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index 1c531f9113..59b6aa9314 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -1,4 +1,4 @@ -""" Benchmarks for fileslicing +"""Benchmarks for fileslicing import nibabel as nib nib.bench() @@ -22,20 +22,13 @@ SHAPE = (64, 64, 32, 100) ROW_NAMES = [f'axis {i}, len {dim}' for i, dim in enumerate(SHAPE)] -COL_NAMES = ['mid int', - 'step 1', - 'half step 1', - 'step mid int'] -HAVE_ZSTD = optional_package("pyzstd")[1] +COL_NAMES = ['mid int', 'step 1', 'half step 1', 'step mid int'] +HAVE_ZSTD = optional_package('pyzstd')[1] def _slices_for_len(L): # Example slices for a dimension of 
length L - return ( - L // 2, - slice(None, None, 1), - slice(None, L // 2, 1), - slice(None, None, L // 2)) + return (L // 2, slice(None, None, 1), slice(None, L // 2, 1), slice(None, None, L // 2)) def run_slices(file_like, repeat=3, offset=0, order='F'): @@ -53,63 +46,48 @@ def run_slices(file_like, repeat=3, offset=0, order='F'): sliceobj[i] = slicer def f(): - fileslice(fobj, - tuple(sliceobj), - arr.shape, - arr.dtype, - offset, - order) + fileslice(fobj, tuple(sliceobj), arr.shape, arr.dtype, offset, order) + times_arr[i, j] = timeit(f, number=repeat) def g(): fobj.seek(offset) data = fobj.read() np.ndarray(SHAPE, arr.dtype, buffer=data, order=order) + base_time = timeit(g, number=repeat) return times_arr, base_time -def bench_fileslice(bytes=True, - file_=True, - gz=True, - bz2=False, - zst=True): +def bench_fileslice(bytes=True, file_=True, gz=True, bz2=False, zst=True): sys.stdout.flush() repeat = 2 def my_table(title, times, base): print() - print(rst_table(times, ROW_NAMES, COL_NAMES, title, - val_fmt='{0[0]:3.2f} ({0[1]:3.2f})')) + print(rst_table(times, ROW_NAMES, COL_NAMES, title, val_fmt='{0[0]:3.2f} ({0[1]:3.2f})')) print(f'Base time: {base:3.2f}') + if bytes: fobj = BytesIO() times, base = run_slices(fobj, repeat) - my_table('Bytes slice - raw (ratio)', - np.dstack((times, times / base)), - base) + my_table('Bytes slice - raw (ratio)', np.dstack((times, times / base)), base) if file_: with InTemporaryDirectory(): file_times, file_base = run_slices('data.bin', repeat) - my_table('File slice - raw (ratio)', - np.dstack((file_times, file_times / file_base)), - file_base) + my_table( + 'File slice - raw (ratio)', np.dstack((file_times, file_times / file_base)), file_base + ) if gz: with InTemporaryDirectory(): gz_times, gz_base = run_slices('data.gz', repeat) - my_table('gz slice - raw (ratio)', - np.dstack((gz_times, gz_times / gz_base)), - gz_base) + my_table('gz slice - raw (ratio)', np.dstack((gz_times, gz_times / gz_base)), gz_base) if bz2: with InTemporaryDirectory(): bz2_times, bz2_base = run_slices('data.bz2', repeat) - my_table('bz2 slice - raw (ratio)', - np.dstack((bz2_times, bz2_times / bz2_base)), - bz2_base) + my_table('bz2 slice - raw (ratio)', np.dstack((bz2_times, bz2_times / bz2_base)), bz2_base) if zst and HAVE_ZSTD: with InTemporaryDirectory(): zst_times, zst_base = run_slices('data.zst', repeat) - my_table('zst slice - raw (ratio)', - np.dstack((zst_times, zst_times / zst_base)), - zst_base) + my_table('zst slice - raw (ratio)', np.dstack((zst_times, zst_times / zst_base)), zst_base) sys.stdout.flush() diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index 1ca2bf95d0..0a6ff576fa 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -1,4 +1,4 @@ -""" Benchmarks for finite_range routine +"""Benchmarks for finite_range routine Run benchmarks with:: @@ -28,7 +28,7 @@ def bench_finite_range(): img_shape = (128, 128, 64, 10) arr = rng.normal(size=img_shape) sys.stdout.flush() - print_git_title("\nFinite range") + print_git_title('\nFinite range') mtime = measure('finite_range(arr)', repeat) print('%30s %6.2f' % ('float64 all finite', mtime)) arr[:, :, :, 1] = np.nan diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py index 46118df43e..d9c6461959 100644 --- a/nibabel/benchmarks/bench_load_save.py +++ b/nibabel/benchmarks/bench_load_save.py @@ -1,4 +1,4 @@ -""" Benchmarks for load and save of image arrays +"""Benchmarks for load 
and save of image arrays Run benchmarks with:: @@ -34,7 +34,7 @@ def bench_load_save(): hdr = img.header sys.stdout.flush() print() - print_git_title("Image load save") + print_git_title('Image load save') hdr.set_data_dtype(np.float32) mtime = measure('sio.truncate(0); img.to_file_map()', repeat) print('%30s %6.2f' % ('Save float64 to float32', mtime)) diff --git a/nibabel/benchmarks/butils.py b/nibabel/benchmarks/butils.py index 4cc521ab66..01d6931eba 100644 --- a/nibabel/benchmarks/butils.py +++ b/nibabel/benchmarks/butils.py @@ -1,12 +1,11 @@ -""" Benchmarking utilities +"""Benchmarking utilities """ from .. import get_info def print_git_title(title): - """ Prints title string with git hash if possible, and underline - """ + """Prints title string with git hash if possible, and underline""" title = f"{title} for git revision {get_info()['commit_hash']}" print(title) print('-' * len(title)) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 666ff11251..4a330893b3 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -36,23 +36,14 @@ from .arrayproxy import ArrayProxy from .fileslice import strided_scalar -from .spatialimages import ( - SpatialImage, - SpatialHeader, - HeaderDataError, - ImageDataError -) +from .spatialimages import SpatialImage, SpatialHeader, HeaderDataError, ImageDataError from .volumeutils import Recoder # used for doc-tests filepath = os.path.dirname(os.path.realpath(__file__)) datadir = os.path.realpath(os.path.join(filepath, 'tests/data')) -_attr_dic = { - 'string': str, - 'integer': int, - 'float': float -} +_attr_dic = {'string': str, 'integer': int, 'float': float} _endian_dict = { 'LSB_FIRST': '<', @@ -66,11 +57,10 @@ 5: 'D', } -space_codes = Recoder(( - (0, 'unknown', ''), - (1, 'scanner', 'ORIG'), - (3, 'talairach', 'TLRC'), - (4, 'mni', 'MNI')), fields=('code', 'label', 'space')) +space_codes = Recoder( + ((0, 'unknown', ''), (1, 'scanner', 'ORIG'), (3, 'talairach', 'TLRC'), (4, 'mni', 'MNI')), + fields=('code', 'label', 'space'), +) class AFNIImageError(ImageDataError): @@ -114,8 +104,9 @@ def _unpack_var(var): TEMPLATE_SPACE ORIG """ - err_msg = ('Please check HEAD file to ensure it is AFNI compliant. ' - f'Offending attribute:\n{var}') + err_msg = ( + 'Please check HEAD file to ensure it is AFNI compliant. ' f'Offending attribute:\n{var}' + ) atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') @@ -127,13 +118,15 @@ def _unpack_var(var): try: attr = [atype(f) for f in attr.split()] except ValueError: - raise AFNIHeaderError('Failed to read variable from HEAD file ' - f'due to improper type casting. {err_msg}') + raise AFNIHeaderError( + 'Failed to read variable from HEAD file ' + f'due to improper type casting. {err_msg}' + ) else: # AFNI string attributes will always start with open single quote and # end with a tilde (NUL). 
These attributes CANNOT contain tildes (so # stripping is safe), but can contain single quotes (so we replace) - attr = attr.replace('\'', '', 1).rstrip('~') + attr = attr.replace("'", '', 1).rstrip('~') return aname[0], attr[0] if len(attr) == 1 else attr @@ -165,12 +158,12 @@ def _get_datatype(info): bt = info['BRICK_TYPES'] if isinstance(bt, list): if np.unique(bt).size > 1: - raise AFNIImageError('Can\'t load file with multiple data types.') + raise AFNIImageError("Can't load file with multiple data types.") bt = bt[0] bo = _endian_dict.get(bo, '=') bt = _dtype_dict.get(bt, None) if bt is None: - raise AFNIImageError('Can\'t deduce image data type.') + raise AFNIImageError("Can't deduce image data type.") return np.dtype(bo + bt) @@ -208,7 +201,7 @@ def parse_AFNI_header(fobj): class AFNIArrayProxy(ArrayProxy): - """ Proxy object for AFNI image array. + """Proxy object for AFNI image array. Attributes ---------- @@ -244,10 +237,9 @@ def __init__(self, file_like, header, *, mmap=True, keep_file_open=None): effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. """ - super(AFNIArrayProxy, self).__init__(file_like, - header, - mmap=mmap, - keep_file_open=keep_file_open) + super(AFNIArrayProxy, self).__init__( + file_like, header, mmap=mmap, keep_file_open=keep_file_open + ) self._scaling = header.get_data_scaling() @property @@ -299,9 +291,9 @@ def __init__(self, info): """ self.info = info dt = _get_datatype(self.info) - super(AFNIHeader, self).__init__(data_dtype=dt, - shape=self._calc_data_shape(), - zooms=self._calc_zooms()) + super(AFNIHeader, self).__init__( + data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() + ) @classmethod def from_header(klass, header=None): @@ -337,7 +329,7 @@ def _calc_data_shape(self): j, k. """ dset_rank = self.info['DATASET_RANK'] - shape = tuple(self.info['DATASET_DIMENSIONS'][:dset_rank[0]]) + shape = tuple(self.info['DATASET_DIMENSIONS'][: dset_rank[0]]) n_vols = dset_rank[1] return shape + (n_vols,) @@ -362,7 +354,13 @@ def _calc_zooms(self): origin", and second giving "Time step (TR)". """ xyz_step = tuple(np.abs(self.info['DELTA'])) - t_step = self.info.get('TAXIS_FLOATS', (0, 0,)) + t_step = self.info.get( + 'TAXIS_FLOATS', + ( + 0, + 0, + ), + ) if len(t_step) > 0: t_step = (t_step[1],) return xyz_step + t_step @@ -402,8 +400,7 @@ def get_affine(self): # AFNI default is RAI- == LPS+ == DICOM order. 
We need to flip RA sign # to align with nibabel RAS+ system affine = np.asarray(self.info['IJK_TO_DICOM_REAL']).reshape(3, 4) - affine = np.row_stack((affine * [[-1], [-1], [1]], - [0, 0, 0, 1])) + affine = np.row_stack((affine * [[-1], [-1], [1]], [0, 0, 0, 1])) return affine def get_data_scaling(self): @@ -526,10 +523,8 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): hdr = klass.header_class.from_fileobj(hdr_fobj) imgf = file_map['image'].fileobj imgf = file_map['image'].filename if imgf is None else imgf - data = klass.ImageArrayProxy(imgf, hdr.copy(), mmap=mmap, - keep_file_open=keep_file_open) - return klass(data, hdr.get_affine(), header=hdr, extra=None, - file_map=file_map) + data = klass.ImageArrayProxy(imgf, hdr.copy(), mmap=mmap, keep_file_open=keep_file_open) + return klass(data, hdr.get_affine(), header=hdr, extra=None, file_map=file_map) @classmethod def filespec_to_file_map(klass, filespec): @@ -568,7 +563,7 @@ def filespec_to_file_map(klass, filespec): fname = fholder.filename if key == 'header' and not os.path.exists(fname): for ext in klass._compressed_suffixes: - fname = fname[:-len(ext)] if fname.endswith(ext) else fname + fname = fname[: -len(ext)] if fname.endswith(ext) else fname elif key == 'image' and not os.path.exists(fname): for ext in klass._compressed_suffixes: if os.path.exists(fname + ext): diff --git a/nibabel/caret.py b/nibabel/caret.py index 9f05585cb2..e142922f26 100644 --- a/nibabel/caret.py +++ b/nibabel/caret.py @@ -12,7 +12,7 @@ class CaretMetaData(xml.XmlSerializable, MutableMapping): - """ A list of name-value pairs used in various Caret-based XML formats + """A list of name-value pairs used in various Caret-based XML formats * Description - Provides a simple method for user-supplied metadata that associates names with values. 
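# Usage sketch (illustrative, not applied by this patch): because
# CaretMetaData is a MutableMapping, as the hunks below show, it behaves
# like a plain dict while remaining XML-serializable via to_xml().
from nibabel.caret import CaretMetaData

md = CaretMetaData({'key': 'val'})  # dict-style initialization
md['other'] = 'value'               # __setitem__
del md['other']                     # __delitem__
assert list(md) == ['key'] and len(md) == 1
xml_bytes = md.to_xml()             # serializes to a <MetaData> element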
@@ -44,18 +44,18 @@ class CaretMetaData(xml.XmlSerializable, MutableMapping): >>> md.to_xml() b'keyval' """ + def __init__(self, *args, **kwargs): args, kwargs = self._sanitize(args, kwargs) self._data = dict(*args, **kwargs) @staticmethod def _sanitize(args, kwargs): - """ Override in subclasses to accept and warn on previous invocations - """ + """Override in subclasses to accept and warn on previous invocations""" return args, kwargs def __getitem__(self, key): - """ Get metadata entry by name + """Get metadata entry by name >>> md = CaretMetaData({'key': 'val'}) >>> md['key'] @@ -64,7 +64,7 @@ def __getitem__(self, key): return self._data[key] def __setitem__(self, key, value): - """ Set metadata entry by name + """Set metadata entry by name >>> md = CaretMetaData({'key': 'val'}) >>> dict(md) @@ -79,7 +79,7 @@ def __setitem__(self, key, value): self._data[key] = value def __delitem__(self, key): - """ Delete metadata entry by name + """Delete metadata entry by name >>> md = CaretMetaData({'key': 'val'}) >>> dict(md) @@ -91,7 +91,7 @@ def __delitem__(self, key): del self._data[key] def __len__(self): - """ Get length of metadata list + """Get length of metadata list >>> md = CaretMetaData({'key': 'val'}) >>> len(md) @@ -100,7 +100,7 @@ def __len__(self): return len(self._data) def __iter__(self): - """ Iterate over metadata entries + """Iterate over metadata entries >>> md = CaretMetaData({'key': 'val'}) >>> for key in md: @@ -110,7 +110,7 @@ def __iter__(self): return iter(self._data) def __repr__(self): - return f"<{self.__class__.__name__} {self._data!r}>" + return f'<{self.__class__.__name__} {self._data!r}>' def _to_xml_element(self): metadata = xml.Element('MetaData') diff --git a/nibabel/casting.py b/nibabel/casting.py index 45c2c5bd36..c2bceeaf0f 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -1,4 +1,4 @@ -""" Utilities for casting numpy values in various ways +"""Utilities for casting numpy values in various ways Most routines work round some numpy oddities in floating point precision and casting. Others work round numpy casting to and from python ints @@ -24,7 +24,7 @@ class CastingError(Exception): def float_to_int(arr, int_type, nan2zero=True, infmax=False): - """ Convert floating point array `arr` to type `int_type` + """Convert floating point array `arr` to type `int_type` * Rounds numbers to nearest integer * Clips values to prevent overflows when casting @@ -114,7 +114,7 @@ def float_to_int(arr, int_type, nan2zero=True, infmax=False): def shared_range(flt_type, int_type): - """ Min and max in float type that are >=min, <=max in integer type + """Min and max in float type that are >=min, <=max in integer type This is not as easy as it sounds, because the float type may not be able to exactly represent the max or min integer values, so we have to find the @@ -172,12 +172,13 @@ def shared_range(flt_type, int_type): # types. # ---------------------------------------------------------------------------- + class FloatingError(Exception): pass def on_powerpc(): - """ True if we are running on a Power PC platform + """True if we are running on a Power PC platform Has to deal with older Macs and IBM POWER7 series among others """ @@ -185,7 +186,7 @@ def on_powerpc(): def type_info(np_type): - """ Return dict with min, max, nexp, nmant, width for numpy type `np_type` + """Return dict with min, max, nexp, nmant, width for numpy type `np_type` Type can be integer in which case nexp and nmant are None. 
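# Usage sketch (illustrative, not applied by this patch) for the casting
# helpers documented above, exercised on IEEE binary32:
import numpy as np
from nibabel.casting import shared_range, type_info

info = type_info(np.float32)
assert (info['nmant'], info['nexp']) == (23, 8)  # IEEE single precision

mn, mx = shared_range(np.float32, np.int16)      # safe casting bounds
assert mn >= np.iinfo(np.int16).min
assert mx <= np.iinfo(np.int16).max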
@@ -225,20 +226,28 @@ def type_info(np_type): except ValueError: pass else: - return dict(min=np_type(info.min), max=np_type(info.max), minexp=None, - maxexp=None, nmant=None, nexp=None, width=width) + return dict( + min=np_type(info.min), + max=np_type(info.max), + minexp=None, + maxexp=None, + nmant=None, + nexp=None, + width=width, + ) info = np.finfo(dt) # Trust the standard IEEE types nmant, nexp = info.nmant, info.nexp - ret = dict(min=np_type(info.min), - max=np_type(info.max), - nmant=nmant, - nexp=nexp, - minexp=info.minexp, - maxexp=info.maxexp, - width=width) - if np_type in (np.float16, np.float32, np.float64, - np.complex64, np.complex128): + ret = dict( + min=np_type(info.min), + max=np_type(info.max), + nmant=nmant, + nexp=nexp, + minexp=info.minexp, + maxexp=info.maxexp, + width=width, + ) + if np_type in (np.float16, np.float32, np.float64, np.complex64, np.complex128): return ret info_64 = np.finfo(np.float64) if dt.kind == 'c': @@ -247,16 +256,18 @@ def type_info(np_type): else: assert np_type is np.longdouble vals = (nmant, nexp, width) - if vals in ((112, 15, 16), # binary128 - (info_64.nmant, info_64.nexp, 8), # float64 - (63, 15, 12), (63, 15, 16)): # Intel extended 80 + if vals in ( + (112, 15, 16), # binary128 + (info_64.nmant, info_64.nexp, 8), # float64 + (63, 15, 12), + (63, 15, 16), + ): # Intel extended 80 return ret # these are OK without modification # The remaining types are longdoubles with bad finfo values. Some we # correct, others we wait to hear of errors. # We start with float64 as basis ret = type_info(np.float64) - if vals in ((52, 15, 12), # windows float96 - (52, 15, 16)): # windows float128? + if vals in ((52, 15, 12), (52, 15, 16)): # windows float96 # windows float128? # On windows 32 bit at least, float96 is Intel 80 storage but operating # at float64 precision. The finfo values give nexp == 15 (as for intel # 80) but in calculations nexp in fact appears to be 11 as for float64 @@ -270,39 +281,32 @@ def type_info(np_type): # their complex equivalent. if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32): raise FloatingError(f'We had not expected type {np_type}') - if (vals == (1, 1, 16) and on_powerpc() and - _check_maxexp(np.longdouble, 1024)): + if vals == (1, 1, 16) and on_powerpc() and _check_maxexp(np.longdouble, 1024): # double pair on PPC. The _check_nmant routine does not work for this # type, hence the powerpc platform check instead ret.update(dict(nmant=106, width=width)) - elif (_check_nmant(np.longdouble, 52) and - _check_maxexp(np.longdouble, 11)): + elif _check_nmant(np.longdouble, 52) and _check_maxexp(np.longdouble, 11): # Got float64 despite everything pass - elif (_check_nmant(np.longdouble, 112) and - _check_maxexp(np.longdouble, 16384)): + elif _check_nmant(np.longdouble, 112) and _check_maxexp(np.longdouble, 16384): # binary 128, but with some busted type information. 
np.longcomplex # seems to break here too, so we need to use np.longdouble and # complexify two = np.longdouble(2) # See: https://matthew-brett.github.io/pydagogue/floating_point.html - max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383 + max_val = (two**113 - 1) / (two**112) * two**16383 if np_type is np.longcomplex: max_val += 0j - ret = dict(min=-max_val, - max=max_val, - nmant=112, - nexp=15, - minexp=-16382, - maxexp=16384, - width=width) + ret = dict( + min=-max_val, max=max_val, nmant=112, nexp=15, minexp=-16382, maxexp=16384, width=width + ) else: # don't recognize the type raise FloatingError(f'We had not expected long double type {np_type} with info {info}') return ret def _check_nmant(np_type, nmant): - """ True if fp type `np_type` seems to have `nmant` significand digits + """True if fp type `np_type` seems to have `nmant` significand digits Note 'digits' does not include implicit digits. And in fact if there are no implicit digits, the `nmant` number is one less than the actual digits. @@ -328,7 +332,7 @@ def _check_nmant(np_type, nmant): def _check_maxexp(np_type, maxexp): - """ True if fp type `np_type` seems to have `maxexp` maximum exponent + """True if fp type `np_type` seems to have `maxexp` maximum exponent We're testing "maxexp" as returned by numpy. This value is set to one greater than the maximum power of 2 that `np_type` can represent. @@ -351,12 +355,12 @@ def _check_maxexp(np_type, maxexp): np_type = dt.type two = np_type(2).reshape((1,)) # to avoid upcasting with warnings.catch_warnings(): - warnings.simplefilter("ignore", RuntimeWarning) # Expected overflow warning - return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two ** maxexp) + warnings.simplefilter('ignore', RuntimeWarning) # Expected overflow warning + return np.isfinite(two ** (maxexp - 1)) and not np.isfinite(two**maxexp) def as_int(x, check=True): - """ Return python integer representation of number + """Return python integer representation of number This is useful because the numpy int(val) mechanism is broken for large values in np.longdouble. @@ -417,7 +421,7 @@ def as_int(x, check=True): def int_to_float(val, flt_type): - """ Convert integer `val` to floating point type `flt_type` + """Convert integer `val` to floating point type `flt_type` Why is this so complicated? 
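# Usage sketch (illustrative, not applied by this patch): int_to_float() can
# be exact beyond float64's 2**53 integer range when the platform longdouble
# is wider than float64 (e.g. Intel 80-bit extended precision).
import numpy as np
from nibabel.casting import as_int, int_to_float, longdouble_lte_float64

val = 2**53 + 1                        # would round if cast via float64
fv = int_to_float(val, np.longdouble)
if not longdouble_lte_float64():       # skip on double-precision longdouble
    assert as_int(fv) == val           # exact round-trip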
@@ -454,7 +458,7 @@ def int_to_float(val, flt_type): def floor_exact(val, flt_type): - """ Return nearest exact integer <= `val` in float type `flt_type` + """Return nearest exact integer <= `val` in float type `flt_type` Parameters ---------- @@ -508,14 +512,14 @@ def floor_exact(val, flt_type): if diff >= 0: # floating point value <= val return fval # Float casting made the value go up - biggest_gap = 2**(floor_log2(val) - info['nmant']) + biggest_gap = 2 ** (floor_log2(val) - info['nmant']) assert biggest_gap > 1 fval -= flt_type(biggest_gap) return fval def ceil_exact(val, flt_type): - """ Return nearest exact integer >= `val` in float type `flt_type` + """Return nearest exact integer >= `val` in float type `flt_type` Parameters ---------- @@ -559,7 +563,7 @@ def ceil_exact(val, flt_type): def int_abs(arr): - """ Absolute values of array taking care of max negative int values + """Absolute values of array taking care of max negative int values Parameters ---------- @@ -599,7 +603,7 @@ def int_abs(arr): def floor_log2(x): - """ floor of log2 of abs(`x`) + """floor of log2 of abs(`x`) Embarrassingly, from https://en.wikipedia.org/wiki/Binary_logarithm @@ -639,7 +643,7 @@ def floor_log2(x): def best_float(): - """ Floating point type with best precision + """Floating point type with best precision This is nearly always np.longdouble, except on Windows, where np.longdouble is Intel80 storage, but with float64 precision for calculations. In that @@ -662,15 +666,15 @@ def best_float(): long_info = type_info(np.longdouble) except FloatingError: return np.float64 - if (long_info['nmant'] > type_info(np.float64)['nmant'] and - machine() != 'sparc64'): # sparc has crazy-slow float128 + if ( + long_info['nmant'] > type_info(np.float64)['nmant'] and machine() != 'sparc64' + ): # sparc has crazy-slow float128 return np.longdouble return np.float64 def longdouble_lte_float64(): - """ Return True if longdouble appears to have the same precision as float64 - """ + """Return True if longdouble appears to have the same precision as float64""" return np.longdouble(2**53) == np.longdouble(2**53) + 1 @@ -679,7 +683,7 @@ def longdouble_lte_float64(): def longdouble_precision_improved(): - """ True if longdouble precision increased since initial import + """True if longdouble precision increased since initial import This can happen on Windows compiled with MSVC. It may be because libraries compiled with mingw (longdouble is Intel80) get linked to numpy compiled @@ -689,8 +693,7 @@ def longdouble_precision_improved(): def have_binary128(): - """ True if we have a binary128 IEEE longdouble - """ + """True if we have a binary128 IEEE longdouble""" try: ti = type_info(np.longdouble) except FloatingError: @@ -699,7 +702,7 @@ def have_binary128(): def ok_floats(): - """ Return floating point types sorted by precision + """Return floating point types sorted by precision Remove longdouble if it has no higher precision than float64 """ @@ -714,7 +717,7 @@ def ok_floats(): def able_int_type(values): - """ Find the smallest integer numpy type to contain sequence `values` + """Find the smallest integer numpy type to contain sequence `values` Prefers uint to int if minimum is >= 0 @@ -751,7 +754,7 @@ def able_int_type(values): def ulp(val=np.float64(1.0)): - """ Return gap between `val` and nearest representable number of same type + """Return gap between `val` and nearest representable number of same type This is the value of a unit in the last place (ULP), and is similar in meaning to the MATLAB eps function. 
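# Usage sketch (illustrative, not applied by this patch): ulp() agrees with
# numpy's eps at 1.0 and grows with the magnitude of its argument.
import numpy as np
from nibabel.casting import ulp

assert ulp() == np.finfo(np.float64).eps                 # 2**-52 at 1.0
assert ulp(np.float32(1.0)) == np.finfo(np.float32).eps  # 2**-23 at 1.0
assert ulp(np.float64(2.0**52)) == 1.0                   # spacing of 1 at 2**52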
@@ -785,4 +788,4 @@ def ulp(val=np.float64(1.0)): if fl2 is None or fl2 < info['minexp']: # subnormal fl2 = info['minexp'] # 'nmant' value does not include implicit first bit - return 2**(fl2 - info['nmant']) + return 2 ** (fl2 - info['nmant']) diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index c0933c9041..e7c999b6cd 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -18,12 +18,27 @@ """ from .parse_cifti2 import Cifti2Extension -from .cifti2 import (Cifti2MetaData, Cifti2Header, Cifti2Image, Cifti2Label, - Cifti2LabelTable, Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, Cifti2BrainModel, Cifti2Matrix, - Cifti2MatrixIndicesMap, Cifti2NamedMap, Cifti2Parcel, - Cifti2Surface, - Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, - Cifti2Vertices, Cifti2Volume, CIFTI_BRAIN_STRUCTURES, - Cifti2HeaderError, CIFTI_MODEL_TYPES, load, save) -from .cifti2_axes import (Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis) +from .cifti2 import ( + Cifti2MetaData, + Cifti2Header, + Cifti2Image, + Cifti2Label, + Cifti2LabelTable, + Cifti2VertexIndices, + Cifti2VoxelIndicesIJK, + Cifti2BrainModel, + Cifti2Matrix, + Cifti2MatrixIndicesMap, + Cifti2NamedMap, + Cifti2Parcel, + Cifti2Surface, + Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2Vertices, + Cifti2Volume, + CIFTI_BRAIN_STRUCTURES, + Cifti2HeaderError, + CIFTI_MODEL_TYPES, + load, + save, +) +from .cifti2_axes import Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 31d631bb5f..4b6fd3df25 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to CIFTI-2 image format +"""Read / write access to CIFTI-2 image format Format of the NIFTI2 container format described here: @@ -41,74 +41,74 @@ def _float_01(val): class Cifti2HeaderError(Exception): - """ Error in CIFTI-2 header - """ + """Error in CIFTI-2 header""" _dtdefs = ( # code, label, dtype definition, niistring - (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"), - (4, 'int16', np.int16, "NIFTI_TYPE_INT16"), - (8, 'int32', np.int32, "NIFTI_TYPE_INT32"), - (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"), - (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"), - (256, 'int8', np.int8, "NIFTI_TYPE_INT8"), - (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"), - (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"), - (1024, 'int64', np.int64, "NIFTI_TYPE_INT64"), - (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"), + (2, 'uint8', np.uint8, 'NIFTI_TYPE_UINT8'), + (4, 'int16', np.int16, 'NIFTI_TYPE_INT16'), + (8, 'int32', np.int32, 'NIFTI_TYPE_INT32'), + (16, 'float32', np.float32, 'NIFTI_TYPE_FLOAT32'), + (64, 'float64', np.float64, 'NIFTI_TYPE_FLOAT64'), + (256, 'int8', np.int8, 'NIFTI_TYPE_INT8'), + (512, 'uint16', np.uint16, 'NIFTI_TYPE_UINT16'), + (768, 'uint32', np.uint32, 'NIFTI_TYPE_UINT32'), + (1024, 'int64', np.int64, 'NIFTI_TYPE_INT64'), + (1280, 'uint64', np.uint64, 'NIFTI_TYPE_UINT64'), ) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) -CIFTI_MAP_TYPES = ('CIFTI_INDEX_TYPE_BRAIN_MODELS', - 'CIFTI_INDEX_TYPE_PARCELS', - 'CIFTI_INDEX_TYPE_SERIES', - 'CIFTI_INDEX_TYPE_SCALARS', - 'CIFTI_INDEX_TYPE_LABELS') +CIFTI_MAP_TYPES = ( + 'CIFTI_INDEX_TYPE_BRAIN_MODELS', + 'CIFTI_INDEX_TYPE_PARCELS', + 'CIFTI_INDEX_TYPE_SERIES', + 
'CIFTI_INDEX_TYPE_SCALARS', + 'CIFTI_INDEX_TYPE_LABELS', +) CIFTI_MODEL_TYPES = ( 'CIFTI_MODEL_TYPE_SURFACE', # Modeled using surface vertices - 'CIFTI_MODEL_TYPE_VOXELS' # Modeled using voxels. + 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. ) -CIFTI_SERIESUNIT_TYPES = ('SECOND', - 'HERTZ', - 'METER', - 'RADIAN') - -CIFTI_BRAIN_STRUCTURES = ('CIFTI_STRUCTURE_ACCUMBENS_LEFT', - 'CIFTI_STRUCTURE_ACCUMBENS_RIGHT', - 'CIFTI_STRUCTURE_ALL_WHITE_MATTER', - 'CIFTI_STRUCTURE_ALL_GREY_MATTER', - 'CIFTI_STRUCTURE_AMYGDALA_LEFT', - 'CIFTI_STRUCTURE_AMYGDALA_RIGHT', - 'CIFTI_STRUCTURE_BRAIN_STEM', - 'CIFTI_STRUCTURE_CAUDATE_LEFT', - 'CIFTI_STRUCTURE_CAUDATE_RIGHT', - 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_LEFT', - 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_RIGHT', - 'CIFTI_STRUCTURE_CEREBELLUM', - 'CIFTI_STRUCTURE_CEREBELLUM_LEFT', - 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT', - 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_LEFT', - 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_RIGHT', - 'CIFTI_STRUCTURE_CORTEX', - 'CIFTI_STRUCTURE_CORTEX_LEFT', - 'CIFTI_STRUCTURE_CORTEX_RIGHT', - 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', - 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', - 'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', - 'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', - 'CIFTI_STRUCTURE_OTHER', - 'CIFTI_STRUCTURE_OTHER_GREY_MATTER', - 'CIFTI_STRUCTURE_OTHER_WHITE_MATTER', - 'CIFTI_STRUCTURE_PALLIDUM_LEFT', - 'CIFTI_STRUCTURE_PALLIDUM_RIGHT', - 'CIFTI_STRUCTURE_PUTAMEN_LEFT', - 'CIFTI_STRUCTURE_PUTAMEN_RIGHT', - 'CIFTI_STRUCTURE_THALAMUS_LEFT', - 'CIFTI_STRUCTURE_THALAMUS_RIGHT') +CIFTI_SERIESUNIT_TYPES = ('SECOND', 'HERTZ', 'METER', 'RADIAN') + +CIFTI_BRAIN_STRUCTURES = ( + 'CIFTI_STRUCTURE_ACCUMBENS_LEFT', + 'CIFTI_STRUCTURE_ACCUMBENS_RIGHT', + 'CIFTI_STRUCTURE_ALL_WHITE_MATTER', + 'CIFTI_STRUCTURE_ALL_GREY_MATTER', + 'CIFTI_STRUCTURE_AMYGDALA_LEFT', + 'CIFTI_STRUCTURE_AMYGDALA_RIGHT', + 'CIFTI_STRUCTURE_BRAIN_STEM', + 'CIFTI_STRUCTURE_CAUDATE_LEFT', + 'CIFTI_STRUCTURE_CAUDATE_RIGHT', + 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_LEFT', + 'CIFTI_STRUCTURE_CEREBELLAR_WHITE_MATTER_RIGHT', + 'CIFTI_STRUCTURE_CEREBELLUM', + 'CIFTI_STRUCTURE_CEREBELLUM_LEFT', + 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT', + 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_LEFT', + 'CIFTI_STRUCTURE_CEREBRAL_WHITE_MATTER_RIGHT', + 'CIFTI_STRUCTURE_CORTEX', + 'CIFTI_STRUCTURE_CORTEX_LEFT', + 'CIFTI_STRUCTURE_CORTEX_RIGHT', + 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', + 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', + 'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', + 'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', + 'CIFTI_STRUCTURE_OTHER', + 'CIFTI_STRUCTURE_OTHER_GREY_MATTER', + 'CIFTI_STRUCTURE_OTHER_WHITE_MATTER', + 'CIFTI_STRUCTURE_PALLIDUM_LEFT', + 'CIFTI_STRUCTURE_PALLIDUM_RIGHT', + 'CIFTI_STRUCTURE_PUTAMEN_LEFT', + 'CIFTI_STRUCTURE_PUTAMEN_RIGHT', + 'CIFTI_STRUCTURE_THALAMUS_LEFT', + 'CIFTI_STRUCTURE_THALAMUS_RIGHT', +) def _value_if_klass(val, klass): @@ -118,7 +118,7 @@ def _value_if_klass(val, klass): def _underscore(string): - """ Convert a string from CamelCase to underscored """ + """Convert a string from CamelCase to underscored""" string = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', string) return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', string).lower() @@ -128,7 +128,7 @@ class LimitedNifti2Header(Nifti2Header): class Cifti2MetaData(CaretMetaData): - """ A list of name-value pairs + """A list of name-value pairs * Description - Provides a simple method for user-supplied metadata that associates names with values. 
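# Usage sketch (illustrative, not applied by this patch; the keys are made
# up): Cifti2MetaData takes dict-style initializers, while the legacy
# ``metadata=`` keyword handled by _sanitize() below now warns.
from nibabel.cifti2 import Cifti2MetaData

md = Cifti2MetaData({'Provenance': 'example'})  # preferred, dict-like
md['Description'] = 'illustration only'
# Cifti2MetaData(metadata={...}) still works but emits a FutureWarning.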
@@ -146,9 +146,10 @@ class Cifti2MetaData(CaretMetaData): ---------- data : list of (name, value) tuples """ + @staticmethod def _sanitize(args, kwargs): - """ Sanitize and warn on deprecated arguments + """Sanitize and warn on deprecated arguments Accept metadata positional/keyword argument that can take ``None`` to indicate no initialization. @@ -175,20 +176,26 @@ def _sanitize(args, kwargs): >>> Cifti2MetaData(metadata='val') """ - if not args and list(kwargs) == ["metadata"]: - if not isinstance(kwargs["metadata"], str): - warn("Cifti2MetaData now has a dict-like interface and will " - "no longer accept the ``metadata`` keyword argument in " - "NiBabel 6.0. See ``pydoc dict`` for initialization options.", - FutureWarning, stacklevel=3) - md = kwargs.pop("metadata") + if not args and list(kwargs) == ['metadata']: + if not isinstance(kwargs['metadata'], str): + warn( + 'Cifti2MetaData now has a dict-like interface and will ' + 'no longer accept the ``metadata`` keyword argument in ' + 'NiBabel 6.0. See ``pydoc dict`` for initialization options.', + FutureWarning, + stacklevel=3, + ) + md = kwargs.pop('metadata') if md is not None: args = (md,) if args == (None,): - warn("Cifti2MetaData now has a dict-like interface and will no longer " - "accept the positional argument ``None`` in NiBabel 6.0. " - "See ``pydoc dict`` for initialization options.", - FutureWarning, stacklevel=3) + warn( + 'Cifti2MetaData now has a dict-like interface and will no longer ' + 'accept the positional argument ``None`` in NiBabel 6.0. ' + 'See ``pydoc dict`` for initialization options.', + FutureWarning, + stacklevel=3, + ) args = () return args, kwargs @@ -216,7 +223,7 @@ def difference_update(self, metadata): class Cifti2LabelTable(xml.XmlSerializable, MutableMapping): - r""" CIFTI-2 label table: a sequence of ``Cifti2Label``\s + r"""CIFTI-2 label table: a sequence of ``Cifti2Label``\s * Description - Used by NamedMap when IndicesMapToDataType is "CIFTI_INDEX_TYPE_LABELS" in order to associate names and display colors @@ -255,8 +262,10 @@ def __setitem__(self, key, value): try: self._labels[key] = Cifti2Label(*([key] + list(value))) except ValueError: - raise ValueError('Key should be int, value should be sequence ' - 'of str and 4 floats between 0 and 1') + raise ValueError( + 'Key should be int, value should be sequence ' + 'of str and 4 floats between 0 and 1' + ) def __delitem__(self, key): del self._labels[key] @@ -274,7 +283,7 @@ def _to_xml_element(self): class Cifti2Label(xml.XmlSerializable): - """ CIFTI-2 label: association of integer key with a name and RGBA values + """CIFTI-2 label: association of integer key with a name and RGBA values For all color components, value is floating point with range 0.0 to 1.0. @@ -311,7 +320,8 @@ class Cifti2Label(xml.XmlSerializable): alpha : float, optional Alpha color component for label (between 0 and 1). 
""" - def __init__(self, key=0, label='', red=0., green=0., blue=0., alpha=0.): + + def __init__(self, key=0, label='', red=0.0, green=0.0, blue=0.0, alpha=0.0): self.key = int(key) self.label = str(label) self.red = _float_01(red) @@ -321,7 +331,7 @@ def __init__(self, key=0, label='', red=0., green=0., blue=0., alpha=0.): @property def rgba(self): - """ Returns RGBA as tuple """ + """Returns RGBA as tuple""" return (self.red, self.green, self.blue, self.alpha) def _to_xml_element(self): @@ -377,6 +387,7 @@ class Cifti2NamedMap(xml.XmlSerializable): label_table : None or Cifti2LabelTable Label table associated with named map """ + def __init__(self, map_name=None, metadata=None, label_table=None): self.map_name = map_name self.metadata = metadata @@ -388,7 +399,7 @@ def metadata(self): @metadata.setter def metadata(self, metadata): - """ Set the metadata for this NamedMap + """Set the metadata for this NamedMap Parameters ---------- @@ -406,7 +417,7 @@ def label_table(self): @label_table.setter def label_table(self, label_table): - """ Set the label_table for this NamedMap + """Set the label_table for this NamedMap Parameters ---------- @@ -455,6 +466,7 @@ class Cifti2Surface(xml.XmlSerializable): surface_number_of_vertices : int Number of vertices on surface """ + def __init__(self, brain_structure=None, surface_number_of_vertices=None): self.brain_structure = brain_structure self.surface_number_of_vertices = surface_number_of_vertices @@ -486,6 +498,7 @@ class Cifti2VoxelIndicesIJK(xml.XmlSerializable, MutableSequence): Each element of this sequence is a triple of integers. """ + def __init__(self, indices=None): self._indices = [] if indices is not None: @@ -545,8 +558,7 @@ def _to_xml_element(self): raise Cifti2HeaderError('VoxelIndicesIJK element require an index table') vox_ind = xml.Element('VoxelIndicesIJK') - vox_ind.text = '\n'.join(' '.join([str(v) for v in row]) - for row in self._indices) + vox_ind.text = '\n'.join(' '.join([str(v) for v in row]) for row in self._indices) return vox_ind @@ -575,6 +587,7 @@ class Cifti2Vertices(xml.XmlSerializable, MutableSequence): A string from the BrainStructure list to identify what surface this vertex list is from (usually left cortex, right cortex, or cerebellum). 
""" + def __init__(self, brain_structure=None, vertices=None): self._vertices = [] if vertices is not None: @@ -642,14 +655,14 @@ class Cifti2Parcel(xml.XmlSerializable): vertices : list of Cifti2Vertices Vertices associated with parcel """ + def __init__(self, name=None, voxel_indices_ijk=None, vertices=None): self.name = name self._voxel_indices_ijk = voxel_indices_ijk self.vertices = vertices if vertices is not None else [] for val in self.vertices: if not isinstance(val, Cifti2Vertices): - raise ValueError('Cifti2Parcel vertices must be instances of ' - 'Cifti2Vertices') + raise ValueError('Cifti2Parcel vertices must be instances of ' 'Cifti2Vertices') @property def voxel_indices_ijk(self): @@ -660,18 +673,18 @@ def voxel_indices_ijk(self, value): self._voxel_indices_ijk = _value_if_klass(value, Cifti2VoxelIndicesIJK) def append_cifti_vertices(self, vertices): - """ Appends a Cifti2Vertices element to the Cifti2Parcel + """Appends a Cifti2Vertices element to the Cifti2Parcel Parameters ---------- vertices : Cifti2Vertices """ if not isinstance(vertices, Cifti2Vertices): - raise TypeError("Not a valid Cifti2Vertices instance") + raise TypeError('Not a valid Cifti2Vertices instance') self.vertices.append(vertices) def pop_cifti2_vertices(self, ith): - """ Pops the ith vertices element from the Cifti2Parcel """ + """Pops the ith vertices element from the Cifti2Parcel""" self.vertices.pop(ith) def _to_xml_element(self): @@ -712,6 +725,7 @@ class Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(xml.XmlSerializable): matrix : array-like shape (4, 4) Affine transformation matrix from voxel indices to RAS space. """ + # meterExponent = int # matrix = np.array @@ -726,8 +740,7 @@ def _to_xml_element(self): ) trans = xml.Element('TransformationMatrixVoxelIndicesIJKtoXYZ') trans.attrib['MeterExponent'] = str(self.meter_exponent) - trans.text = '\n'.join(' '.join(map('{:.10f}'.format, row)) - for row in self.matrix) + trans.text = '\n'.join(' '.join(map('{:.10f}'.format, row)) for row in self.matrix) return trans @@ -759,6 +772,7 @@ class Cifti2Volume(xml.XmlSerializable): : Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ Matrix that translates voxel indices to spatial coordinates """ + def __init__(self, volume_dimensions=None, transform_matrix=None): self.volume_dimensions = volume_dimensions self.transformation_matrix_voxel_indices_ijk_to_xyz = transform_matrix @@ -768,8 +782,7 @@ def _to_xml_element(self): raise Cifti2HeaderError('Volume element requires dimensions') volume = xml.Element('Volume') - volume.attrib['VolumeDimensions'] = ','.join( - [str(val) for val in self.volume_dimensions]) + volume.attrib['VolumeDimensions'] = ','.join([str(val) for val in self.volume_dimensions]) volume.append(self.transformation_matrix_voxel_indices_ijk_to_xyz._to_xml_element()) return volume @@ -792,6 +805,7 @@ class Cifti2VertexIndices(xml.XmlSerializable, MutableSequence): content. * Parent Element - BrainModel """ + def __init__(self, indices=None): self._indices = [] if indices is not None: @@ -830,7 +844,7 @@ def _to_xml_element(self): class Cifti2BrainModel(xml.XmlSerializable): - """ Element representing a mapping of the dimension to vertex or voxels. + """Element representing a mapping of the dimension to vertex or voxels. Mapping to vertices of voxels must be specified. 
@@ -886,9 +900,16 @@ class Cifti2BrainModel(xml.XmlSerializable): Indices of the vertices towards where the array indices are mapped """ - def __init__(self, index_offset=None, index_count=None, model_type=None, - brain_structure=None, n_surface_vertices=None, - voxel_indices_ijk=None, vertex_indices=None): + def __init__( + self, + index_offset=None, + index_count=None, + model_type=None, + brain_structure=None, + n_surface_vertices=None, + voxel_indices_ijk=None, + vertex_indices=None, + ): self.index_offset = index_offset self.index_count = index_count self.model_type = model_type @@ -917,8 +938,13 @@ def vertex_indices(self, value): def _to_xml_element(self): brain_model = xml.Element('BrainModel') - for key in ['IndexOffset', 'IndexCount', 'ModelType', 'BrainStructure', - 'SurfaceNumberOfVertices']: + for key in [ + 'IndexOffset', + 'IndexCount', + 'ModelType', + 'BrainStructure', + 'SurfaceNumberOfVertices', + ]: attr = _underscore(key) value = getattr(self, attr) if value is not None: @@ -987,23 +1013,26 @@ class Cifti2MatrixIndicesMap(xml.XmlSerializable, MutableSequence): series_unit : str, optional If it is a series, units """ + _valid_type_mappings_ = { Cifti2BrainModel: ('CIFTI_INDEX_TYPE_BRAIN_MODELS',), Cifti2Parcel: ('CIFTI_INDEX_TYPE_PARCELS',), Cifti2NamedMap: ('CIFTI_INDEX_TYPE_LABELS',), Cifti2Volume: ('CIFTI_INDEX_TYPE_SCALARS', 'CIFTI_INDEX_TYPE_SERIES'), - Cifti2Surface: ('CIFTI_INDEX_TYPE_SCALARS', 'CIFTI_INDEX_TYPE_SERIES') + Cifti2Surface: ('CIFTI_INDEX_TYPE_SCALARS', 'CIFTI_INDEX_TYPE_SERIES'), } - def __init__(self, applies_to_matrix_dimension, - indices_map_to_data_type, - number_of_series_points=None, - series_exponent=None, - series_start=None, - series_step=None, - series_unit=None, - maps=[], - ): + def __init__( + self, + applies_to_matrix_dimension, + indices_map_to_data_type, + number_of_series_points=None, + series_exponent=None, + series_start=None, + series_step=None, + series_unit=None, + maps=[], + ): self.applies_to_matrix_dimension = applies_to_matrix_dimension self.indices_map_to_data_type = indices_map_to_data_type self.number_of_series_points = number_of_series_points @@ -1025,22 +1054,15 @@ def __getitem__(self, index): return self._maps[index] def __setitem__(self, index, value): - if ( - isinstance(value, Cifti2Volume) and - ( - self.volume is not None and - not isinstance(self._maps[index], Cifti2Volume) - ) + if isinstance(value, Cifti2Volume) and ( + self.volume is not None and not isinstance(self._maps[index], Cifti2Volume) ): - raise Cifti2HeaderError("Only one Volume can be in a MatrixIndicesMap") + raise Cifti2HeaderError('Only one Volume can be in a MatrixIndicesMap') self._maps[index] = value def insert(self, index, value): - if ( - isinstance(value, Cifti2Volume) and - self.volume is not None - ): - raise Cifti2HeaderError("Only one Volume can be in a MatrixIndicesMap") + if isinstance(value, Cifti2Volume) and self.volume is not None: + raise Cifti2HeaderError('Only one Volume can be in a MatrixIndicesMap') self._maps.insert(index, value) @@ -1072,7 +1094,7 @@ def volume(self): @volume.setter def volume(self, volume): if not isinstance(volume, Cifti2Volume): - raise ValueError("You can only set a volume with a volume") + raise ValueError('You can only set a volume with a volume') for i, v in enumerate(self): if isinstance(v, Cifti2Volume): break @@ -1087,7 +1109,7 @@ def volume(self): if isinstance(v, Cifti2Volume): break else: - raise ValueError("No Cifti2Volume element") + raise ValueError('No Cifti2Volume element') del self[i] 
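# Usage sketch (illustrative, not applied by this patch): the checks above
# enforce at most one Cifti2Volume per MatrixIndicesMap.
import numpy as np
from nibabel.cifti2 import (
    Cifti2HeaderError,
    Cifti2MatrixIndicesMap,
    Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ,
    Cifti2Volume,
)

xform = Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, np.eye(4))
mim = Cifti2MatrixIndicesMap((1,), 'CIFTI_INDEX_TYPE_BRAIN_MODELS')
mim.append(Cifti2Volume((2, 2, 2), xform))      # a first Volume is accepted
try:
    mim.append(Cifti2Volume((2, 2, 2), xform))  # a second one is rejected
except Cifti2HeaderError:
    pass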
@property @@ -1105,8 +1127,14 @@ def _to_xml_element(self): mat_ind_map = xml.Element('MatrixIndicesMap') dims_as_strings = [str(dim) for dim in self.applies_to_matrix_dimension] mat_ind_map.attrib['AppliesToMatrixDimension'] = ','.join(dims_as_strings) - for key in ['IndicesMapToDataType', 'NumberOfSeriesPoints', 'SeriesExponent', - 'SeriesStart', 'SeriesStep', 'SeriesUnit']: + for key in [ + 'IndicesMapToDataType', + 'NumberOfSeriesPoints', + 'SeriesExponent', + 'SeriesStart', + 'SeriesStep', + 'SeriesUnit', + ]: attr = _underscore(key) value = getattr(self, attr) if value is not None: @@ -1118,7 +1146,7 @@ def _to_xml_element(self): class Cifti2Matrix(xml.XmlSerializable, MutableSequence): - """ CIFTI-2 Matrix object + """CIFTI-2 Matrix object This is a list-like container where the elements are instances of :class:`Cifti2MatrixIndicesMap`. @@ -1137,6 +1165,7 @@ class Cifti2Matrix(xml.XmlSerializable, MutableSequence): For each matrix (data) dimension, exactly one MatrixIndicesMap element must list it in the AppliesToMatrixDimension attribute. """ + def __init__(self): self._mims = [] self.metadata = None @@ -1147,7 +1176,7 @@ def metadata(self): @metadata.setter def metadata(self, meta): - """ Set the metadata for this Cifti2Header + """Set the metadata for this Cifti2Header Parameters ---------- @@ -1161,10 +1190,7 @@ def metadata(self, meta): def _get_indices_from_mim(self, mim): applies_to_matrix_dimension = mim.applies_to_matrix_dimension - if not isinstance( - applies_to_matrix_dimension, - Iterable - ): + if not isinstance(applies_to_matrix_dimension, Iterable): applies_to_matrix_dimension = (int(applies_to_matrix_dimension),) return applies_to_matrix_dimension @@ -1200,24 +1226,23 @@ def get_index_map(self, index): a2md = self._get_indices_from_mim(v) if index in a2md: return v - raise Cifti2HeaderError("Index not mapped") + raise Cifti2HeaderError('Index not mapped') def _validate_new_mim(self, value): if value.applies_to_matrix_dimension is None: raise Cifti2HeaderError( - "Cifti2MatrixIndicesMap needs to have " - "the applies_to_matrix_dimension attribute set" + 'Cifti2MatrixIndicesMap needs to have ' + 'the applies_to_matrix_dimension attribute set' ) a2md = self._get_indices_from_mim(value) if not set(self.mapped_indices).isdisjoint(a2md): raise Cifti2HeaderError( - "Indices in this Cifti2MatrixIndicesMap " - "already mapped in this matrix" + 'Indices in this Cifti2MatrixIndicesMap ' 'already mapped in this matrix' ) def __setitem__(self, key, value): if not isinstance(value, Cifti2MatrixIndicesMap): - raise TypeError("Not a valid Cifti2MatrixIndicesMap instance") + raise TypeError('Not a valid Cifti2MatrixIndicesMap instance') self._validate_new_mim(value) self._mims[key] = value @@ -1232,7 +1257,7 @@ def __len__(self): def insert(self, index, value): if not isinstance(value, Cifti2MatrixIndicesMap): - raise TypeError("Not a valid Cifti2MatrixIndicesMap instance") + raise TypeError('Not a valid Cifti2MatrixIndicesMap instance') self._validate_new_mim(value) self._mims.insert(index, value) @@ -1261,6 +1286,7 @@ def get_axis(self, index): axis : :class:`.cifti2_axes.Axis` """ from . import cifti2_axes + return cifti2_axes.from_index_mapping(self.get_index_map(index)) def get_data_shape(self): @@ -1270,6 +1296,7 @@ def get_data_shape(self): Any dimensions omitted in the CIFTI-2 header will be given a default size of None. """ from . 
import cifti2_axes + if len(self.mapped_indices) == 0: return () base_shape = [None] * (max(self.mapped_indices) + 1) @@ -1281,9 +1308,9 @@ def get_data_shape(self): class Cifti2Header(FileBasedHeader, xml.XmlSerializable): - """ Class for CIFTI-2 header extension """ + """Class for CIFTI-2 header extension""" - def __init__(self, matrix=None, version="2.0"): + def __init__(self, matrix=None, version='2.0'): FileBasedHeader.__init__(self) xml.XmlSerializable.__init__(self) if matrix is None: @@ -1305,6 +1332,7 @@ def __eq__(self, other): @classmethod def may_contain_header(klass, binaryblock): from .parse_cifti2 import _Cifti2AsNiftiHeader + return _Cifti2AsNiftiHeader.may_contain_header(binaryblock) @property @@ -1370,26 +1398,23 @@ def from_axes(cls, axes): new header describing the rows/columns in a format consistent with Cifti2 """ from . import cifti2_axes + return cifti2_axes.to_header(axes) class Cifti2Image(DataobjImage, SerializableImage): - """ Class for single file CIFTI-2 format image - """ + """Class for single file CIFTI-2 format image""" + header_class = Cifti2Header valid_exts = Nifti2Image.valid_exts files_types = Nifti2Image.files_types makeable = False rw = True - def __init__(self, - dataobj=None, - header=None, - nifti_header=None, - extra=None, - file_map=None, - dtype=None): - """ Initialize image + def __init__( + self, dataobj=None, header=None, nifti_header=None, extra=None, file_map=None, dtype=None + ): + """Initialize image The image is a combination of (dataobj, header), with optional metadata in `nifti_header` (a NIfTI2 header). There may be more metadata in the @@ -1415,8 +1440,7 @@ def __init__(self, """ if not isinstance(header, Cifti2Header) and header: header = Cifti2Header.from_axes(header) - super(Cifti2Image, self).__init__(dataobj, header=header, - extra=extra, file_map=file_map) + super(Cifti2Image, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) self._nifti_header = LimitedNifti2Header.from_header(nifti_header) # if NIfTI header not specified, get data type from input array @@ -1427,8 +1451,10 @@ def __init__(self, self.update_headers() if self._dataobj.shape != self.header.matrix.get_data_shape(): - warn(f"Dataobj shape {self._dataobj.shape} does not match shape " - f"expected from CIFTI-2 header {self.header.matrix.get_data_shape()}") + warn( + f'Dataobj shape {self._dataobj.shape} does not match shape ' + f'expected from CIFTI-2 header {self.header.matrix.get_data_shape()}' + ) @property def nifti_header(self): @@ -1436,7 +1462,7 @@ def nifti_header(self): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Load a CIFTI-2 image from a file_map + """Load a CIFTI-2 image from a file_map Parameters ---------- @@ -1446,10 +1472,12 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): ------- img : Cifti2Image Returns a Cifti2Image - """ + """ from .parse_cifti2 import _Cifti2AsNiftiImage, Cifti2Extension - nifti_img = _Cifti2AsNiftiImage.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) + + nifti_img = _Cifti2AsNiftiImage.from_file_map( + file_map, mmap=mmap, keep_file_open=keep_file_open + ) # Get cifti2 header for item in nifti_img.header.extensions: @@ -1457,20 +1485,21 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): cifti_header = item.get_content() break else: - raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' - 'extension') + raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' 'extension') # 
Construct cifti image. # Use array proxy object where possible dataobj = nifti_img.dataobj - return Cifti2Image(reshape_dataobj(dataobj, dataobj.shape[4:]), - header=cifti_header, - nifti_header=nifti_img.header, - file_map=file_map) + return Cifti2Image( + reshape_dataobj(dataobj, dataobj.shape[4:]), + header=cifti_header, + nifti_header=nifti_img.header, + file_map=file_map, + ) @classmethod def from_image(klass, img): - """ Class method to create new instance of own class from `img` + """Class method to create new instance of own class from `img` Parameters ---------- @@ -1487,7 +1516,7 @@ def from_image(klass, img): raise NotImplementedError def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -1500,6 +1529,7 @@ def to_file_map(self, file_map=None, dtype=None): None """ from .parse_cifti2 import Cifti2Extension + self.update_headers() header = self._nifti_header extension = Cifti2Extension(content=self.header.to_xml()) @@ -1509,13 +1539,13 @@ def to_file_map(self, file_map=None, dtype=None): header.extensions.append(extension) if self._dataobj.shape != self.header.matrix.get_data_shape(): raise ValueError( - f"Dataobj shape {self._dataobj.shape} does not match shape " - f"expected from CIFTI-2 header {self.header.matrix.get_data_shape()}") + f'Dataobj shape {self._dataobj.shape} does not match shape ' + f'expected from CIFTI-2 header {self.header.matrix.get_data_shape()}' + ) # if intent code is not set, default to unknown CIFTI if header.get_intent()[0] == 'none': header.set_intent('NIFTI_INTENT_CONNECTIVITY_UNKNOWN') - data = reshape_dataobj(self.dataobj, - (1, 1, 1, 1) + self.dataobj.shape) + data = reshape_dataobj(self.dataobj, (1, 1, 1, 1) + self.dataobj.shape) # If qform not set, reset pixdim values so Nifti2 does not complain if header['qform_code'] == 0: header['pixdim'][:4] = 1 @@ -1523,7 +1553,7 @@ def to_file_map(self, file_map=None, dtype=None): img.to_file_map(file_map or self.file_map) def update_headers(self): - """ Harmonize NIfTI headers with image data + """Harmonize NIfTI headers with image data Ensures that the NIfTI-2 header records the data shape in the last three ``dim`` fields. Per the spec: diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 174222e189..31e4ab55ab 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -136,11 +136,13 @@ def from_index_mapping(mim): ------- axis : subclass of :class:`Axis` """ - return_type = {'CIFTI_INDEX_TYPE_SCALARS': ScalarAxis, - 'CIFTI_INDEX_TYPE_LABELS': LabelAxis, - 'CIFTI_INDEX_TYPE_SERIES': SeriesAxis, - 'CIFTI_INDEX_TYPE_BRAIN_MODELS': BrainModelAxis, - 'CIFTI_INDEX_TYPE_PARCELS': ParcelsAxis} + return_type = { + 'CIFTI_INDEX_TYPE_SCALARS': ScalarAxis, + 'CIFTI_INDEX_TYPE_LABELS': LabelAxis, + 'CIFTI_INDEX_TYPE_SERIES': SeriesAxis, + 'CIFTI_INDEX_TYPE_BRAIN_MODELS': BrainModelAxis, + 'CIFTI_INDEX_TYPE_PARCELS': ParcelsAxis, + } return return_type[mim.indices_map_to_data_type].from_index_mapping(mim) @@ -242,8 +244,9 @@ class BrainModelAxis(Axis): This Axis describes which vertex/voxel is represented by each row/column. 
""" - def __init__(self, name, voxel=None, vertex=None, affine=None, - volume_shape=None, nvertices=None): + def __init__( + self, name, voxel=None, vertex=None, affine=None, volume_shape=None, nvertices=None + ): """ New BrainModelAxis axes can be constructed by passing on the greyordinate brain-structure names and voxel/vertex indices to the constructor or by one of the @@ -275,7 +278,7 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, """ if voxel is None: if vertex is None: - raise ValueError("At least one of voxel or vertex indices should be defined") + raise ValueError('At least one of voxel or vertex indices should be defined') nelements = len(vertex) self.voxel = np.full((nelements, 3), fill_value=-1, dtype=int) else: @@ -294,8 +297,10 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, if nvertices is None: self.nvertices = {} else: - self.nvertices = {self.to_cifti_brain_structure_name(name): number - for name, number in nvertices.items()} + self.nvertices = { + self.to_cifti_brain_structure_name(name): number + for name, number in nvertices.items() + } for name in list(self.nvertices.keys()): if name not in self.name: @@ -307,8 +312,10 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, self.volume_shape = None else: if affine is None or volume_shape is None: - raise ValueError("Affine and volume shape should be defined " - "for BrainModelAxis containing voxels") + raise ValueError( + 'Affine and volume shape should be defined ' + 'for BrainModelAxis containing voxels' + ) self.affine = np.asanyarray(affine) self.volume_shape = volume_shape @@ -318,10 +325,12 @@ def __init__(self, name, voxel=None, vertex=None, affine=None, raise ValueError('Undefined voxel indices found for volumetric elements') for check_name in ('name', 'voxel', 'vertex'): - shape = (self.size, 3) if check_name == 'voxel' else (self.size, ) + shape = (self.size, 3) if check_name == 'voxel' else (self.size,) if getattr(self, check_name).shape != shape: - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for BrainModelAxis axis") + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for BrainModelAxis axis' + ) @classmethod def from_mask(cls, mask, name='other', affine=None): @@ -348,7 +357,9 @@ def from_mask(cls, mask, name='other', affine=None): else: affine = np.asanyarray(affine) if affine.shape != (4, 4): - raise ValueError(f"Affine transformation should be a 4x4 array or None, not {affine!r}") + raise ValueError( + f'Affine transformation should be a 4x4 array or None, not {affine!r}' + ) mask = np.asanyarray(mask) if mask.ndim == 1: @@ -357,8 +368,10 @@ def from_mask(cls, mask, name='other', affine=None): voxels = np.array(np.where(mask != 0)).T return cls(name, voxel=voxels, affine=affine, volume_shape=mask.shape) else: - raise ValueError("Mask should be either 1-dimensional (for surfaces) or " - "3-dimensional (for volumes), not %i-dimensional" % mask.ndim) + raise ValueError( + 'Mask should be either 1-dimensional (for surfaces) or ' + '3-dimensional (for volumes), not %i-dimensional' % mask.ndim + ) @classmethod def from_surface(cls, vertices, nvertex, name='Other'): @@ -379,8 +392,7 @@ def from_surface(cls, vertices, nvertex, name='Other'): BrainModelAxis which covers (part of) the surface """ cifti_name = cls.to_cifti_brain_structure_name(name) - return cls(cifti_name, vertex=vertices, - nvertices={cifti_name: nvertex}) + return cls(cifti_name, 
vertex=vertices, nvertices={cifti_name: nvertex}) @classmethod def from_index_mapping(cls, mim): @@ -407,10 +419,10 @@ def from_index_mapping(cls, mim): is_surface = bm.model_type == 'CIFTI_MODEL_TYPE_SURFACE' name.extend([bm.brain_structure] * bm.index_count) if is_surface: - vertex[bm.index_offset: index_end] = bm.vertex_indices + vertex[bm.index_offset : index_end] = bm.vertex_indices nvertices[bm.brain_structure] = bm.surface_number_of_vertices else: - voxel[bm.index_offset: index_end, :] = bm.voxel_indices_ijk + voxel[bm.index_offset : index_end, :] = bm.voxel_indices_ijk if affine is None: shape = mim.volume.volume_dimensions affine = mim.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix @@ -444,9 +456,13 @@ def to_mapping(self, dim): affine = cifti2.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, self.affine) mim.volume = cifti2.Cifti2Volume(self.volume_shape, affine) cifti_bm = cifti2.Cifti2BrainModel( - to_slice.start, len(bm), - 'CIFTI_MODEL_TYPE_SURFACE' if is_surface else 'CIFTI_MODEL_TYPE_VOXELS', - name, nvertex, voxels, vertices + to_slice.start, + len(bm), + 'CIFTI_MODEL_TYPE_SURFACE' if is_surface else 'CIFTI_MODEL_TYPE_VOXELS', + name, + nvertex, + voxels, + vertices, ) mim.append(cifti_bm) return mim @@ -466,7 +482,7 @@ def iter_structures(self): start_name = self.name[idx_start] for idx_current, name in enumerate(self.name): if start_name != name: - yield start_name, slice(idx_start, idx_current), self[idx_start: idx_current] + yield start_name, slice(idx_start, idx_current), self[idx_start:idx_current] idx_start = idx_current start_name = self.name[idx_start] yield start_name, slice(idx_start, None), self[idx_start:] @@ -518,14 +534,14 @@ def to_cifti_brain_structure_name(name): if poss_orient == name.lower()[:idx]: orientation = poss_orient if name[idx] in '_ ': - structure = name[idx + 1:] + structure = name[idx + 1 :] else: structure = name[idx:] break if poss_orient == name.lower()[-idx:]: orientation = poss_orient if name[-idx - 1] in '_ ': - structure = name[:-idx - 1] + structure = name[: -idx - 1] else: structure = name[:-idx] break @@ -537,8 +553,10 @@ def to_cifti_brain_structure_name(name): else: proposed_name = f'CIFTI_STRUCTURE_{structure.upper()}_{orientation.upper()}' if proposed_name not in cifti2.CIFTI_BRAIN_STRUCTURES: - raise ValueError(f'{name} was interpreted as {proposed_name}, which is not ' - 'a valid CIFTI brain structure') + raise ValueError( + f'{name} was interpreted as {proposed_name}, which is not ' + 'a valid CIFTI brain structure' + ) return proposed_name @property @@ -586,17 +604,16 @@ def volume_shape(self, value): if value is not None: value = tuple(value) if len(value) != 3: - raise ValueError("Volume shape should be a tuple of length 3") + raise ValueError('Volume shape should be a tuple of length 3') if not all(isinstance(v, int) for v in value): - raise ValueError("All elements of the volume shape should be integers") + raise ValueError('All elements of the volume shape should be integers') self._volume_shape = value _name = None @property def name(self): - """The brain structure to which the voxel/vertices of belong - """ + """The brain structure to which the voxel/vertices of belong""" return self._name @name.setter @@ -612,13 +629,15 @@ def __eq__(self, other): if xor(self.affine is None, other.affine is None): return False return ( - (self.affine is None or - np.allclose(self.affine, other.affine) and - self.volume_shape == other.volume_shape) and - self.nvertices == other.nvertices and - 
np.array_equal(self.name, other.name) and - np.array_equal(self.voxel[self.volume_mask], other.voxel[other.volume_mask]) and - np.array_equal(self.vertex[self.surface_mask], other.vertex[other.surface_mask]) + ( + self.affine is None + or np.allclose(self.affine, other.affine) + and self.volume_shape == other.volume_shape + ) + and self.nvertices == other.nvertices + and np.array_equal(self.name, other.name) + and np.array_equal(self.voxel[self.volume_mask], other.voxel[other.volume_mask]) + and np.array_equal(self.vertex[self.surface_mask], other.vertex[other.surface_mask]) ) def __add__(self, other): @@ -641,23 +660,27 @@ def __add__(self, other): else: affine, shape = self.affine, self.volume_shape if other.affine is not None and ( - not np.allclose(other.affine, affine) or - other.volume_shape != shape + not np.allclose(other.affine, affine) or other.volume_shape != shape ): - raise ValueError("Trying to concatenate two BrainModels defined " - "in a different brain volume") + raise ValueError( + 'Trying to concatenate two BrainModels defined ' 'in a different brain volume' + ) nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError("Trying to concatenate two BrainModels with " - f"inconsistent number of vertices for {name}") + raise ValueError( + 'Trying to concatenate two BrainModels with ' + f'inconsistent number of vertices for {name}' + ) nvertices[name] = value return self.__class__( - np.append(self.name, other.name), - np.concatenate((self.voxel, other.voxel), 0), - np.append(self.vertex, other.vertex), - affine, shape, nvertices + np.append(self.name, other.name), + np.concatenate((self.voxel, other.voxel), 0), + np.append(self.vertex, other.vertex), + affine, + shape, + nvertices, ) def __getitem__(self, item): @@ -680,9 +703,15 @@ def __getitem__(self, item): if isinstance(item, int): return self.get_element(item) if isinstance(item, str): - raise IndexError("Can not index an Axis with a string (except for ParcelsAxis)") - return self.__class__(self.name[item], self.voxel[item], self.vertex[item], - self.affine, self.volume_shape, self.nvertices) + raise IndexError('Can not index an Axis with a string (except for ParcelsAxis)') + return self.__class__( + self.name[item], + self.voxel[item], + self.vertex[item], + self.affine, + self.volume_shape, + self.nvertices, + ) def get_element(self, index): """ @@ -758,13 +787,17 @@ def __init__(self, name, voxels, vertices, affine=None, volume_shape=None, nvert if nvertices is None: self.nvertices = {} else: - self.nvertices = {BrainModelAxis.to_cifti_brain_structure_name(name): number - for name, number in nvertices.items()} + self.nvertices = { + BrainModelAxis.to_cifti_brain_structure_name(name): number + for name, number in nvertices.items() + } for check_name in ('name', 'voxels', 'vertices'): - if getattr(self, check_name).shape != (self.size, ): - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for Parcel axis") + if getattr(self, check_name).shape != (self.size,): + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for Parcel axis' + ) @classmethod def from_brain_models(cls, named_brain_models): @@ -796,16 +829,20 @@ def from_brain_models(cls, named_brain_models): affine = bm.affine volume_shape = bm.volume_shape elif not np.allclose(affine, bm.affine) or (volume_shape != bm.volume_shape): - raise ValueError("Can not combine 
brain models defined in different " - "volumes into a single Parcel axis") + raise ValueError( + 'Can not combine brain models defined in different ' + 'volumes into a single Parcel axis' + ) all_voxels[idx_parcel] = voxels vertices = {} for name, _, bm_part in bm.iter_structures(): if name in bm.nvertices.keys(): if name in nvertices.keys() and nvertices[name] != bm.nvertices[name]: - raise ValueError("Got multiple conflicting number of " - f"vertices for surface structure {name}") + raise ValueError( + 'Got multiple conflicting number of ' + f'vertices for surface structure {name}' + ) nvertices[name] = bm.nvertices[name] vertices[name] = bm_part.vertex all_vertices[idx_parcel] = vertices @@ -846,7 +883,9 @@ def from_index_mapping(cls, mim): name = vertex.brain_structure vertices[vertex.brain_structure] = np.array(vertex) if name not in nvertices.keys(): - raise ValueError(f"Number of vertices for surface structure {name} not defined") + raise ValueError( + f'Number of vertices for surface structure {name} not defined' + ) all_voxels[idx_parcel] = voxels all_vertices[idx_parcel] = vertices all_names.append(parcel.name) @@ -910,25 +949,28 @@ def volume_shape(self, value): if value is not None: value = tuple(value) if len(value) != 3: - raise ValueError("Volume shape should be a tuple of length 3") + raise ValueError('Volume shape should be a tuple of length 3') if not all(isinstance(v, int) for v in value): - raise ValueError("All elements of the volume shape should be integers") + raise ValueError('All elements of the volume shape should be integers') self._volume_shape = value def __len__(self): return self.name.size def __eq__(self, other): - if (self.__class__ != other.__class__ or len(self) != len(other) or - not np.array_equal(self.name, other.name) or self.nvertices != other.nvertices or - any(not np.array_equal(vox1, vox2) - for vox1, vox2 in zip(self.voxels, other.voxels))): + if ( + self.__class__ != other.__class__ + or len(self) != len(other) + or not np.array_equal(self.name, other.name) + or self.nvertices != other.nvertices + or any(not np.array_equal(vox1, vox2) for vox1, vox2 in zip(self.voxels, other.voxels)) + ): return False if self.affine is not None: if ( - other.affine is None or - not np.allclose(self.affine, other.affine) or - self.volume_shape != other.volume_shape + other.affine is None + or not np.allclose(self.affine, other.affine) + or self.volume_shape != other.volume_shape ): return False elif other.affine is not None: @@ -960,21 +1002,27 @@ def __add__(self, other): affine, shape = other.affine, other.volume_shape else: affine, shape = self.affine, self.volume_shape - if other.affine is not None and (not np.allclose(other.affine, affine) or - other.volume_shape != shape): - raise ValueError("Trying to concatenate two ParcelsAxis defined " - "in a different brain volume") + if other.affine is not None and ( + not np.allclose(other.affine, affine) or other.volume_shape != shape + ): + raise ValueError( + 'Trying to concatenate two ParcelsAxis defined ' 'in a different brain volume' + ) nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): if name in nvertices.keys() and nvertices[name] != value: - raise ValueError("Trying to concatenate two ParcelsAxis with " - f"inconsistent number of vertices for {name}") + raise ValueError( + 'Trying to concatenate two ParcelsAxis with ' + f'inconsistent number of vertices for {name}' + ) nvertices[name] = value return self.__class__( - np.append(self.name, other.name), - np.append(self.voxels, 
other.voxels), - np.append(self.vertices, other.vertices), - affine, shape, nvertices + np.append(self.name, other.name), + np.append(self.voxels, other.voxels), + np.append(self.vertices, other.vertices), + affine, + shape, + nvertices, ) def __getitem__(self, item): @@ -988,14 +1036,20 @@ def __getitem__(self, item): if isinstance(item, str): idx = np.where(self.name == item)[0] if len(idx) == 0: - raise IndexError(f"Parcel {item} not found") + raise IndexError(f'Parcel {item} not found') if len(idx) > 1: - raise IndexError(f"Multiple parcels with name {item} found") + raise IndexError(f'Multiple parcels with name {item} found') return self.voxels[idx[0]], self.vertices[idx[0]] if isinstance(item, int): return self.get_element(item) - return self.__class__(self.name[item], self.voxels[item], self.vertices[item], - self.affine, self.volume_shape, self.nvertices) + return self.__class__( + self.name[item], + self.voxels[item], + self.vertices[item], + self.affine, + self.volume_shape, + self.nvertices, + ) def get_element(self, index): """ @@ -1039,9 +1093,11 @@ def __init__(self, name, meta=None): self.meta = np.asanyarray(meta, dtype='object') for check_name in ('name', 'meta'): - if getattr(self, check_name).shape != (self.size, ): - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for ScalarAxis axis") + if getattr(self, check_name).shape != (self.size,): + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for ScalarAxis axis' + ) @classmethod def from_index_mapping(cls, mim): @@ -1115,8 +1171,8 @@ def __add__(self, other): if not isinstance(other, ScalarAxis): return NotImplemented return ScalarAxis( - np.append(self.name, other.name), - np.append(self.meta, other.meta), + np.append(self.name, other.name), + np.append(self.meta, other.meta), ) def __getitem__(self, item): @@ -1172,9 +1228,11 @@ def __init__(self, name, label, meta=None): self.meta = np.asanyarray(meta, dtype='object') for check_name in ('name', 'meta', 'label'): - if getattr(self, check_name).shape != (self.size, ): - raise ValueError(f"Input {check_name} has incorrect shape " - f"({getattr(self, check_name).shape}) for LabelAxis axis") + if getattr(self, check_name).shape != (self.size,): + raise ValueError( + f'Input {check_name} has incorrect shape ' + f'({getattr(self, check_name).shape}) for LabelAxis axis' + ) @classmethod def from_index_mapping(cls, mim): @@ -1189,8 +1247,10 @@ def from_index_mapping(cls, mim): ------- LabelAxis """ - tables = [{key: (value.label, value.rgba) for key, value in nm.label_table.items()} - for nm in mim.named_maps] + tables = [ + {key: (value.label, value.rgba) for key, value in nm.label_table.items()} + for nm in mim.named_maps + ] rest = ScalarAxis.from_index_mapping(mim) return LabelAxis(rest.name, tables, rest.meta) @@ -1212,8 +1272,7 @@ def to_mapping(self, dim): label_table = cifti2.Cifti2LabelTable() for key, value in label.items(): label_table[key] = (value[0],) + tuple(value[1]) - named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), - label_table) + named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), label_table) mim.append(named_map) return mim @@ -1236,9 +1295,9 @@ def __eq__(self, other): if not isinstance(other, LabelAxis) or self.size != other.size: return False return ( - np.array_equal(self.name, other.name) and - np.array_equal(self.meta, other.meta) and - np.array_equal(self.label, other.label) + np.array_equal(self.name, 
other.name) + and np.array_equal(self.meta, other.meta) + and np.array_equal(self.label, other.label) ) def __add__(self, other): @@ -1257,9 +1316,9 @@ def __add__(self, other): if not isinstance(other, LabelAxis): return NotImplemented return LabelAxis( - np.append(self.name, other.name), - np.append(self.label, other.label), - np.append(self.meta, other.meta), + np.append(self.name, other.name), + np.append(self.label, other.label), + np.append(self.meta, other.meta), ) def __getitem__(self, item): @@ -1292,9 +1351,10 @@ class SeriesAxis(Axis): This Axis describes the time point of each row/column. """ + size = None - def __init__(self, start, step, size, unit="SECOND"): + def __init__(self, start, step, size, unit='SECOND'): """ Creates a new SeriesAxis axis @@ -1331,8 +1391,8 @@ def from_index_mapping(cls, mim): ------- SeriesAxis """ - start = mim.series_start * 10 ** mim.series_exponent - step = mim.series_step * 10 ** mim.series_exponent + start = mim.series_start * 10**mim.series_exponent + step = mim.series_step * 10**mim.series_exponent return cls(start, step, mim.number_of_series_points, mim.series_unit) def to_mapping(self, dim): @@ -1364,9 +1424,10 @@ def unit(self): @unit.setter def unit(self, value): - if value.upper() not in ("SECOND", "HERTZ", "METER", "RADIAN"): - raise ValueError("SeriesAxis unit should be one of " + - "('second', 'hertz', 'meter', or 'radian'") + if value.upper() not in ('SECOND', 'HERTZ', 'METER', 'RADIAN'): + raise ValueError( + 'SeriesAxis unit should be one of ' + "('second', 'hertz', 'meter', or 'radian'" + ) self._unit = value.upper() def __len__(self): @@ -1377,11 +1438,11 @@ def __eq__(self, other): True if start, step, size, and unit are the same. """ return ( - isinstance(other, SeriesAxis) and - self.start == other.start and - self.step == other.step and - self.size == other.size and - self.unit == other.unit + isinstance(other, SeriesAxis) + and self.start == other.start + and self.step == other.step + and self.size == other.size + and self.unit == other.unit ) def __add__(self, other): @@ -1415,12 +1476,16 @@ def __add__(self, other): def __getitem__(self, item): if isinstance(item, slice): step = 1 if item.step is None else item.step - idx_start = ((self.size - 1 if step < 0 else 0) - if item.start is None else - (item.start if item.start >= 0 else self.size + item.start)) - idx_end = ((-1 if step < 0 else self.size) - if item.stop is None else - (item.stop if item.stop >= 0 else self.size + item.stop)) + idx_start = ( + (self.size - 1 if step < 0 else 0) + if item.start is None + else (item.start if item.start >= 0 else self.size + item.start) + ) + idx_end = ( + (-1 if step < 0 else self.size) + if item.stop is None + else (item.stop if item.stop >= 0 else self.size + item.stop) + ) if idx_start > self.size and step < 0: idx_start = self.size - 1 if idx_end > self.size: @@ -1428,12 +1493,15 @@ def __getitem__(self, item): nelements = (idx_end - idx_start) // step if nelements < 0: nelements = 0 - return SeriesAxis(idx_start * self.step + self.start, self.step * step, - nelements, self.unit) + return SeriesAxis( + idx_start * self.step + self.start, self.step * step, nelements, self.unit + ) elif isinstance(item, int): return self.get_element(item) - raise IndexError('SeriesAxis can only be indexed with integers or slices ' - 'without breaking the regular structure') + raise IndexError( + 'SeriesAxis can only be indexed with integers or slices ' + 'without breaking the regular structure' + ) def get_element(self, index): """ @@ -1452,6 
+1520,8 @@ def get_element(self, index): if index < 0: index = self.size + index if index >= self.size or index < 0: - raise IndexError("index %i is out of range for SeriesAxis with size %i" % - (original_index, self.size)) + raise IndexError( + 'index %i is out of range for SeriesAxis with size %i' + % (original_index, self.size) + ) return self.start + self.step * index diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index a3ed49711d..36db0fa290 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -12,13 +12,27 @@ from packaging.version import Version, parse -from .cifti2 import (Cifti2MetaData, Cifti2Header, Cifti2Label, - Cifti2LabelTable, Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, Cifti2BrainModel, Cifti2Matrix, - Cifti2MatrixIndicesMap, Cifti2NamedMap, Cifti2Parcel, - Cifti2Surface, Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, - Cifti2Vertices, Cifti2Volume, CIFTI_BRAIN_STRUCTURES, - CIFTI_MODEL_TYPES, _underscore, Cifti2HeaderError) +from .cifti2 import ( + Cifti2MetaData, + Cifti2Header, + Cifti2Label, + Cifti2LabelTable, + Cifti2VertexIndices, + Cifti2VoxelIndicesIJK, + Cifti2BrainModel, + Cifti2Matrix, + Cifti2MatrixIndicesMap, + Cifti2NamedMap, + Cifti2Parcel, + Cifti2Surface, + Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2Vertices, + Cifti2Volume, + CIFTI_BRAIN_STRUCTURES, + CIFTI_MODEL_TYPES, + _underscore, + Cifti2HeaderError, +) from .. import xmlutils as xml from ..spatialimages import HeaderDataError from ..batteryrunners import Report @@ -44,51 +58,42 @@ def _mangle(self, value): return value.to_xml() -extension_codes.add_codes(( - (Cifti2Extension.code, 'cifti', Cifti2Extension),)) - -intent_codes.add_codes(( - # The codes below appear on the CIFTI-2 standard - # http://www.nitrc.org/plugins/mwiki/index.php/cifti:ConnectivityMatrixFileFormats - # https://www.nitrc.org/forum/attachment.php?attachid=341&group_id=454&forum_id=1955 - (3000, 'ConnUnknown', (), 'NIFTI_INTENT_CONNECTIVITY_UNKNOWN'), - (3001, 'ConnDense', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE'), - (3002, 'ConnDenseSeries', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES'), - (3003, 'ConnParcels', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED'), - (3004, 'ConnParcelSries', (), - "NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SERIES"), - (3006, 'ConnDenseScalar', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_SCALARS'), - (3007, 'ConnDenseLabel', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_LABELS'), - (3008, 'ConnParcelScalr', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SCALAR'), - (3009, 'ConnParcelDense', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_DENSE'), - (3010, 'ConnDenseParcel', (), - 'NIFTI_INTENT_CONNECTIVITY_DENSE_PARCELLATED'), - (3011, 'ConnPPSr', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SERIES'), - (3012, 'ConnPPSc', (), - 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SCALAR'))) +extension_codes.add_codes(((Cifti2Extension.code, 'cifti', Cifti2Extension),)) + +intent_codes.add_codes( + ( + # The codes below appear on the CIFTI-2 standard + # http://www.nitrc.org/plugins/mwiki/index.php/cifti:ConnectivityMatrixFileFormats + # https://www.nitrc.org/forum/attachment.php?attachid=341&group_id=454&forum_id=1955 + (3000, 'ConnUnknown', (), 'NIFTI_INTENT_CONNECTIVITY_UNKNOWN'), + (3001, 'ConnDense', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE'), + (3002, 'ConnDenseSeries', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES'), + (3003, 'ConnParcels', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED'), + (3004, 'ConnParcelSries', (), 
'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SERIES'), + (3006, 'ConnDenseScalar', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_SCALARS'), + (3007, 'ConnDenseLabel', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_LABELS'), + (3008, 'ConnParcelScalr', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_SCALAR'), + (3009, 'ConnParcelDense', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_DENSE'), + (3010, 'ConnDenseParcel', (), 'NIFTI_INTENT_CONNECTIVITY_DENSE_PARCELLATED'), + (3011, 'ConnPPSr', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SERIES'), + (3012, 'ConnPPSc', (), 'NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SCALAR'), + ) +) class _Cifti2AsNiftiHeader(Nifti2Header): - """ Class for Cifti2 header extension """ + """Class for Cifti2 header extension""" @classmethod def _valid_intent_code(klass, intent_code): - """ Return True if `intent_code` matches our class `klass` - """ + """Return True if `intent_code` matches our class `klass`""" return intent_code >= 3000 and intent_code < 3100 @classmethod def may_contain_header(klass, binaryblock): if not super(_Cifti2AsNiftiHeader, klass).may_contain_header(binaryblock): return False - hdr = klass(binaryblock=binaryblock[:klass.sizeof_hdr]) + hdr = klass(binaryblock=binaryblock[: klass.sizeof_hdr]) return klass._valid_intent_code(hdr.get_intent('code')[0]) @staticmethod @@ -120,17 +125,19 @@ def _chk_pixdims(hdr, fix=False): class _Cifti2AsNiftiImage(Nifti2Image): - """ Load a NIfTI2 image with a Cifti2 header """ + """Load a NIfTI2 image with a Cifti2 header""" + header_class = _Cifti2AsNiftiHeader makeable = False class Cifti2Parser(xml.XmlParser): """Class to parse an XML string into a CIFTI-2 header object""" + def __init__(self, encoding=None, buffer_size=3500000, verbose=0): - super(Cifti2Parser, self).__init__(encoding=encoding, - buffer_size=buffer_size, - verbose=verbose) + super(Cifti2Parser, self).__init__( + encoding=encoding, buffer_size=buffer_size, verbose=verbose + ) self.fsm_state = [] self.struct_state = [] @@ -152,7 +159,7 @@ def StartElementHandler(self, name, attrs): # create cifti2 image self.header = Cifti2Header() self.header.version = ver = attrs['Version'] - if parse(ver) < Version("2"): + if parse(ver) < Version('2'): raise ValueError(f'Only CIFTI-2 files are supported; found version {ver}') self.fsm_state.append('CIFTI') self.struct_state.append(self.header) @@ -193,15 +200,18 @@ def StartElementHandler(self, name, attrs): elif name == 'MatrixIndicesMap': self.fsm_state.append('MatrixIndicesMap') - dimensions = [int(value) for value in attrs["AppliesToMatrixDimension"].split(',')] + dimensions = [int(value) for value in attrs['AppliesToMatrixDimension'].split(',')] mim = Cifti2MatrixIndicesMap( applies_to_matrix_dimension=dimensions, - indices_map_to_data_type=attrs["IndicesMapToDataType"]) - for key, dtype in [("NumberOfSeriesPoints", int), - ("SeriesExponent", int), - ("SeriesStart", float), - ("SeriesStep", float), - ("SeriesUnit", str)]: + indices_map_to_data_type=attrs['IndicesMapToDataType'], + ) + for key, dtype in [ + ('NumberOfSeriesPoints', int), + ('SeriesExponent', int), + ('SeriesStart', float), + ('SeriesStep', float), + ('SeriesUnit', str), + ]: if key in attrs: setattr(mim, _underscore(key), dtype(attrs[key])) matrix = self.struct_state[-1] @@ -226,7 +236,7 @@ def StartElementHandler(self, name, attrs): elif name == 'LabelTable': named_map = self.struct_state[-1] mim = self.struct_state[-2] - if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_LABELS": + if mim.indices_map_to_data_type != 'CIFTI_INDEX_TYPE_LABELS': 
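                # A LabelTable is only valid under a MatrixIndicesMap of type
                # CIFTI_INDEX_TYPE_LABELS; any other map type is rejected below.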
raise Cifti2HeaderError( 'LabelTable element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_LABELS type' @@ -247,16 +257,16 @@ def StartElementHandler(self, name, attrs): 'Label element can only be a child of the CIFTI-2 LabelTable element' ) label = Cifti2Label() - label.key = int(attrs["Key"]) - label.red = float(attrs["Red"]) - label.green = float(attrs["Green"]) - label.blue = float(attrs["Blue"]) - label.alpha = float(attrs["Alpha"]) + label.key = int(attrs['Key']) + label.red = float(attrs['Red']) + label.green = float(attrs['Green']) + label.blue = float(attrs['Blue']) + label.alpha = float(attrs['Alpha']) self.write_to = 'Label' self.fsm_state.append('Label') self.struct_state.append(label) - elif name == "MapName": + elif name == 'MapName': named_map = self.struct_state[-1] if not isinstance(named_map, Cifti2NamedMap): raise Cifti2HeaderError( @@ -266,52 +276,50 @@ def StartElementHandler(self, name, attrs): self.fsm_state.append('MapName') self.write_to = 'MapName' - elif name == "Surface": + elif name == 'Surface': surface = Cifti2Surface() mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'Surface element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_PARCELS": + if mim.indices_map_to_data_type != 'CIFTI_INDEX_TYPE_PARCELS': raise Cifti2HeaderError( 'Surface element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_PARCELS type' ) - surface.brain_structure = attrs["BrainStructure"] - surface.surface_number_of_vertices = int(attrs["SurfaceNumberOfVertices"]) + surface.brain_structure = attrs['BrainStructure'] + surface.surface_number_of_vertices = int(attrs['SurfaceNumberOfVertices']) mim.append(surface) - elif name == "Parcel": + elif name == 'Parcel': parcel = Cifti2Parcel() mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'Parcel element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - parcel.name = attrs["Name"] + parcel.name = attrs['Name'] mim.append(parcel) self.fsm_state.append('Parcel') self.struct_state.append(parcel) - elif name == "Vertices": + elif name == 'Vertices': vertices = Cifti2Vertices() parcel = self.struct_state[-1] if not isinstance(parcel, Cifti2Parcel): raise Cifti2HeaderError( 'Vertices element can only be a child of the CIFTI-2 Parcel element' ) - vertices.brain_structure = attrs["BrainStructure"] + vertices.brain_structure = attrs['BrainStructure'] if vertices.brain_structure not in CIFTI_BRAIN_STRUCTURES: - raise Cifti2HeaderError( - 'BrainStructure for this Vertices element is not valid' - ) + raise Cifti2HeaderError('BrainStructure for this Vertices element is not valid') parcel.append_cifti_vertices(vertices) self.fsm_state.append('Vertices') self.struct_state.append(vertices) self.write_to = 'Vertices' - elif name == "VoxelIndicesIJK": + elif name == 'VoxelIndicesIJK': parent = self.struct_state[-1] if not isinstance(parent, (Cifti2Parcel, Cifti2BrainModel)): raise Cifti2HeaderError( @@ -321,20 +329,19 @@ def StartElementHandler(self, name, attrs): parent.voxel_indices_ijk = Cifti2VoxelIndicesIJK() self.write_to = 'VoxelIndices' - elif name == "Volume": + elif name == 'Volume': mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): raise Cifti2HeaderError( 'Volume element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - dimensions = tuple([int(val) for val in - 
attrs["VolumeDimensions"].split(',')]) + dimensions = tuple([int(val) for val in attrs['VolumeDimensions'].split(',')]) volume = Cifti2Volume(volume_dimensions=dimensions) mim.append(volume) self.fsm_state.append('Volume') self.struct_state.append(volume) - elif name == "TransformationMatrixVoxelIndicesIJKtoXYZ": + elif name == 'TransformationMatrixVoxelIndicesIJKtoXYZ': volume = self.struct_state[-1] if not isinstance(volume, Cifti2Volume): raise Cifti2HeaderError( @@ -342,13 +349,13 @@ def StartElementHandler(self, name, attrs): 'of the CIFTI-2 Volume element' ) transform = Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ() - transform.meter_exponent = int(attrs["MeterExponent"]) + transform.meter_exponent = int(attrs['MeterExponent']) volume.transformation_matrix_voxel_indices_ijk_to_xyz = transform self.fsm_state.append('TransformMatrix') self.struct_state.append(transform) self.write_to = 'TransformMatrix' - elif name == "BrainModel": + elif name == 'BrainModel': model = Cifti2BrainModel() mim = self.struct_state[-1] if not isinstance(mim, Cifti2MatrixIndicesMap): @@ -356,31 +363,29 @@ def StartElementHandler(self, name, attrs): 'BrainModel element can only be a child ' 'of the CIFTI-2 MatrixIndicesMap element' ) - if mim.indices_map_to_data_type != "CIFTI_INDEX_TYPE_BRAIN_MODELS": + if mim.indices_map_to_data_type != 'CIFTI_INDEX_TYPE_BRAIN_MODELS': raise Cifti2HeaderError( 'BrainModel element can only be a child of a MatrixIndicesMap ' 'with CIFTI_INDEX_TYPE_BRAIN_MODELS type' ) - for key, dtype in [("IndexOffset", int), - ("IndexCount", int), - ("ModelType", str), - ("BrainStructure", str), - ("SurfaceNumberOfVertices", int)]: + for key, dtype in [ + ('IndexOffset', int), + ('IndexCount', int), + ('ModelType', str), + ('BrainStructure', str), + ('SurfaceNumberOfVertices', int), + ]: if key in attrs: setattr(model, _underscore(key), dtype(attrs[key])) if model.brain_structure not in CIFTI_BRAIN_STRUCTURES: - raise Cifti2HeaderError( - 'BrainStructure for this BrainModel element is not valid' - ) + raise Cifti2HeaderError('BrainStructure for this BrainModel element is not valid') if model.model_type not in CIFTI_MODEL_TYPES: - raise Cifti2HeaderError( - 'ModelType for this BrainModel element is not valid' - ) + raise Cifti2HeaderError('ModelType for this BrainModel element is not valid') mim.append(model) self.fsm_state.append('BrainModel') self.struct_state.append(model) - elif name == "VertexIndices": + elif name == 'VertexIndices': index = Cifti2VertexIndices() model = self.struct_state[-1] if not isinstance(model, Cifti2BrainModel): @@ -391,7 +396,7 @@ def StartElementHandler(self, name, attrs): self.fsm_state.append('VertexIndices') model.vertex_indices = index self.struct_state.append(index) - self.write_to = "VertexIndices" + self.write_to = 'VertexIndices' def EndElementHandler(self, name): self.flush_chardata() @@ -444,42 +449,42 @@ def EndElementHandler(self, name): lata.append(label) self.write_to = None - elif name == "MapName": + elif name == 'MapName': self.fsm_state.pop() self.write_to = None - elif name == "Parcel": + elif name == 'Parcel': self.fsm_state.pop() self.struct_state.pop() - elif name == "Vertices": + elif name == 'Vertices': self.fsm_state.pop() self.struct_state.pop() self.write_to = None - elif name == "VoxelIndicesIJK": + elif name == 'VoxelIndicesIJK': self.write_to = None - elif name == "Volume": + elif name == 'Volume': self.fsm_state.pop() self.struct_state.pop() - elif name == "TransformationMatrixVoxelIndicesIJKtoXYZ": + elif name == 
'TransformationMatrixVoxelIndicesIJKtoXYZ': self.fsm_state.pop() self.struct_state.pop() self.write_to = None - elif name == "BrainModel": + elif name == 'BrainModel': self.fsm_state.pop() self.struct_state.pop() - elif name == "VertexIndices": + elif name == 'VertexIndices': self.fsm_state.pop() self.struct_state.pop() self.write_to = None def CharacterDataHandler(self, data): - """ Collect character data chunks pending collation + """Collect character data chunks pending collation The parser breaks the data up into chunks of size depending on the buffer_size of the parser. A large bit of character data, with standard @@ -492,8 +497,7 @@ def CharacterDataHandler(self, data): self._char_blocks.append(data) def flush_chardata(self): - """ Collate and process collected character data - """ + """Collate and process collected character data""" if self._char_blocks is None: return # Just join the strings to get the data. Maybe there are some memory @@ -552,7 +556,7 @@ def flush_chardata(self): @property def pending_data(self): - " True if there is character data pending for processing " + """True if there is character data pending for processing""" return self._char_blocks is not None diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index 21cd83e80e..ecb6be272b 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -43,7 +43,9 @@ def get_parcels(): Parcel axis """ bml = list(get_brain_models()) - return axes.ParcelsAxis.from_brain_models([('mixed', bml[0] + bml[2]), ('volume', bml[1]), ('surface', bml[3])]) + return axes.ParcelsAxis.from_brain_models( + [('mixed', bml[0] + bml[2]), ('volume', bml[1]), ('surface', bml[3])] + ) def get_scalar(): @@ -79,7 +81,7 @@ def get_series(): yield axes.SeriesAxis(3, 10, 4) yield axes.SeriesAxis(8, 10, 3) yield axes.SeriesAxis(3, 2, 4) - yield axes.SeriesAxis(5, 10, 5, "HERTZ") + yield axes.SeriesAxis(5, 10, 5, 'HERTZ') def get_axes(): @@ -123,8 +125,9 @@ def test_brain_models(): assert (bml[4].voxel == -1).all() assert (bml[4].vertex == [2, 9, 14]).all() - for bm, label, is_surface in zip(bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], - (False, False, True, True)): + for bm, label, is_surface in zip( + bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], (False, False, True, True) + ): assert np.all(bm.surface_mask == ~bm.volume_mask) structures = list(bm.iter_structures()) assert len(structures) == 1 @@ -162,7 +165,7 @@ def test_brain_models(): bmt.volume_shape = (5, 3, 1) with pytest.raises(ValueError): - bmt.volume_shape = (5., 3, 1) + bmt.volume_shape = (5.0, 3, 1) with pytest.raises(ValueError): bmt.volume_shape = (5, 3, 1, 4) @@ -170,7 +173,9 @@ def test_brain_models(): bmt['thalamus_left'] # Test the constructor - bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + ) assert np.all(bm_vox.name == ['CIFTI_STRUCTURE_THALAMUS_LEFT'] * 5) assert np.array_equal(bm_vox.vertex, np.full(5, -1)) assert np.array_equal(bm_vox.voxel, np.full((5, 3), 1)) @@ -179,30 +184,53 @@ def test_brain_models(): axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4)) with pytest.raises(ValueError): # no affine - axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), 
dtype=int), volume_shape=(2, 3, 4) + ) with pytest.raises(ValueError): # incorrect name - axes.BrainModelAxis('random_name', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'random_name', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): # negative voxel indices - axes.BrainModelAxis('thalamus_left', voxel=-np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', + voxel=-np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): # no voxels or vertices axes.BrainModelAxis('thalamus_left', affine=np.eye(4), volume_shape=(2, 3, 4)) with pytest.raises(ValueError): # incorrect voxel shape - axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 2), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', + voxel=np.ones((5, 2), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) - bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + bm_vertex = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) assert np.array_equal(bm_vertex.name, ['CIFTI_STRUCTURE_CORTEX_LEFT'] * 5) assert np.array_equal(bm_vertex.vertex, np.full(5, 1)) assert np.array_equal(bm_vertex.voxel, np.full((5, 3), -1)) with pytest.raises(ValueError): axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int)) with pytest.raises(ValueError): - axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20}) + axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20} + ) with pytest.raises(ValueError): - axes.BrainModelAxis('cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + axes.BrainModelAxis( + 'cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) # test from_mask errors with pytest.raises(ValueError): @@ -213,11 +241,12 @@ def test_brain_models(): axes.BrainModelAxis.from_mask(np.ones((5, 3))) # tests error in adding together or combining as ParcelsAxis - bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + ) bm_vox + bm_vox - assert (bm_vertex + bm_vox)[:bm_vertex.size] == bm_vertex - assert (bm_vox + bm_vertex)[:bm_vox.size] == bm_vox + assert (bm_vertex + bm_vox)[: bm_vertex.size] == bm_vertex + assert (bm_vox + bm_vertex)[: bm_vox.size] == bm_vox for bm_added in (bm_vox + bm_vertex, bm_vertex + bm_vox): assert bm_added.nvertices == bm_vertex.nvertices assert np.all(bm_added.affine == bm_vox.affine) @@ -227,29 +256,39 @@ def test_brain_models(): with pytest.raises(Exception): bm_vox + get_label() - bm_other_shape = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - affine=np.eye(4), volume_shape=(4, 3, 4)) + bm_other_shape = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(4, 3, 4) + ) with pytest.raises(ValueError): bm_vox + bm_other_shape with pytest.raises(ValueError): axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_shape)]) - bm_other_affine = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - 
affine=np.eye(4) * 2, volume_shape=(2, 3, 4)) + bm_other_affine = axes.BrainModelAxis( + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4) * 2, + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): bm_vox + bm_other_affine with pytest.raises(ValueError): axes.ParcelsAxis.from_brain_models([('a', bm_vox), ('b', bm_other_affine)]) - bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) - bm_other_number = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 30}) + bm_vertex = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) + bm_other_number = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 30} + ) with pytest.raises(ValueError): bm_vertex + bm_other_number with pytest.raises(ValueError): axes.ParcelsAxis.from_brain_models([('a', bm_vertex), ('b', bm_other_number)]) # test equalities - bm_vox = axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), - affine=np.eye(4), volume_shape=(2, 3, 4)) + bm_vox = axes.BrainModelAxis( + 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + ) bm_other = deepcopy(bm_vox) assert bm_vox == bm_other bm_other.voxel[1, 0] = 0 @@ -276,7 +315,9 @@ def test_brain_models(): bm_other.volume_shape = (10, 3, 4) assert bm_vox != bm_other - bm_vertex = axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20}) + bm_vertex = axes.BrainModelAxis( + 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + ) bm_other = deepcopy(bm_vertex) assert bm_vertex == bm_other bm_other.voxel[1, 0] = 0 @@ -308,31 +349,31 @@ def test_parcels(): """ prc = get_parcels() assert isinstance(prc, axes.ParcelsAxis) - assert prc[0] == ('mixed', ) + prc['mixed'] + assert prc[0] == ('mixed',) + prc['mixed'] assert prc['mixed'][0].shape == (3, 3) assert len(prc['mixed'][1]) == 1 - assert prc['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3, ) + assert prc['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3,) - assert prc[1] == ('volume', ) + prc['volume'] + assert prc[1] == ('volume',) + prc['volume'] assert prc['volume'][0].shape == (4, 3) assert len(prc['volume'][1]) == 0 - assert prc[2] == ('surface', ) + prc['surface'] + assert prc[2] == ('surface',) + prc['surface'] assert prc['surface'][0].shape == (0, 3) assert len(prc['surface'][1]) == 1 - assert prc['surface'][1]['CIFTI_STRUCTURE_OTHER'].shape == (4, ) + assert prc['surface'][1]['CIFTI_STRUCTURE_OTHER'].shape == (4,) prc2 = prc + prc assert len(prc2) == 6 assert (prc2.affine == prc.affine).all() - assert (prc2.nvertices == prc.nvertices) - assert (prc2.volume_shape == prc.volume_shape) + assert prc2.nvertices == prc.nvertices + assert prc2.volume_shape == prc.volume_shape assert prc2[:3] == prc assert prc2[3:] == prc assert prc2[3:]['mixed'][0].shape == (3, 3) assert len(prc2[3:]['mixed'][1]) == 1 - assert prc2[3:]['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3, ) + assert prc2[3:]['mixed'][1]['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (3,) with pytest.raises(IndexError): prc['non_existent'] @@ -351,7 +392,7 @@ def test_parcels(): prc.volume_shape = (5, 3, 1) with pytest.raises(ValueError): - prc.volume_shape = (5., 3, 1) + prc.volume_shape = (5.0, 3, 1) with pytest.raises(ValueError): prc.volume_shape = (5, 3, 1, 4) @@ -412,7 +453,7 @@ def test_parcels(): assert prc != prc_other prc_other = 
deepcopy(prc) - prc_other.vertices[0]['CIFTI_STRUCTURE_CORTEX_LEFT'] = np.ones((8, ), dtype='i4') + prc_other.vertices[0]['CIFTI_STRUCTURE_CORTEX_LEFT'] = np.ones((8,), dtype='i4') assert prc != prc_other prc_other = deepcopy(prc) @@ -425,20 +466,20 @@ def test_parcels(): # test direct initialisation axes.ParcelsAxis( - voxels=[np.ones((3, 2), dtype=int)], - vertices=[{}], - name=['single_voxel'], - affine=np.eye(4), - volume_shape=(2, 3, 4), + voxels=[np.ones((3, 2), dtype=int)], + vertices=[{}], + name=['single_voxel'], + affine=np.eye(4), + volume_shape=(2, 3, 4), ) with pytest.raises(ValueError): axes.ParcelsAxis( - voxels=[np.ones((3, 2), dtype=int)], - vertices=[{}], - name=[['single_voxel']], # wrong shape name array - affine=np.eye(4), - volume_shape=(2, 3, 4), + voxels=[np.ones((3, 2), dtype=int)], + vertices=[{}], + name=[['single_voxel']], # wrong shape name array + affine=np.eye(4), + volume_shape=(2, 3, 4), ) @@ -609,10 +650,10 @@ def test_series(): assert sr == sr[:] for key, value in ( - ('start', 20), - ('step', 7), - ('size', 14), - ('unit', 'HERTZ'), + ('start', 20), + ('step', 7), + ('size', 14), + ('unit', 'HERTZ'), ): sr_other = deepcopy(sr) assert sr == sr_other @@ -638,11 +679,10 @@ def test_common_interface(): assert axis1 == axis2 concatenated = axis1 + axis2 assert axis1 != concatenated - assert axis1 == concatenated[:axis1.size] + assert axis1 == concatenated[: axis1.size] if isinstance(axis1, axes.SeriesAxis): - assert axis2 != concatenated[axis1.size:] + assert axis2 != concatenated[axis1.size :] else: - assert axis2 == concatenated[axis1.size:] + assert axis2 == concatenated[axis1.size :] assert len(axis1) == axis1.size - diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index b04d1db585..be10f8b0e0 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -1,4 +1,4 @@ -""" Testing CIFTI-2 objects +"""Testing CIFTI-2 objects """ import collections from xml.etree import ElementTree @@ -81,7 +81,10 @@ def test_cifti2_metadata(): with pytest.raises(KeyError): md.difference_update({'a': 'aval', 'd': 'dval'}) - assert md.to_xml().decode('utf-8') == 'bbval' + assert ( + md.to_xml().decode('utf-8') + == 'bbval' + ) def test__float_01(): @@ -108,7 +111,6 @@ def test_cifti2_labeltable(): lt.to_xml() with pytest.raises(ci.Cifti2HeaderError): lt._to_xml_element() - label = ci.Cifti2Label(label='Test', key=0) lt[0] = label @@ -132,9 +134,9 @@ def test_cifti2_labeltable(): with pytest.raises(ValueError): lt[0] = test_tuple[:-1] - + with pytest.raises(ValueError): - lt[0] = ('foo', 1.1, 0, 0, 1) + lt[0] = ('foo', 1.1, 0, 0, 1) with pytest.raises(ValueError): lt[0] = ('foo', 1.0, -1, 0, 1) @@ -143,14 +145,15 @@ def test_cifti2_labeltable(): lt[0] = ('foo', 1.0, 0, -0.1, 1) - def test_cifti2_label(): lb = ci.Cifti2Label() lb.label = 'Test' lb.key = 0 assert lb.rgba == (0, 0, 0, 0) - assert compare_xml_leaf(lb.to_xml().decode('utf-8'), - "") + assert compare_xml_leaf( + lb.to_xml().decode('utf-8'), + "", + ) lb.red = 0 lb.green = 0.1 @@ -158,8 +161,10 @@ def test_cifti2_label(): lb.alpha = 0.3 assert lb.rgba == (0, 0.1, 0.2, 0.3) - assert compare_xml_leaf(lb.to_xml().decode('utf-8'), - "") + assert compare_xml_leaf( + lb.to_xml().decode('utf-8'), + "", + ) lb.red = 10 with pytest.raises(ci.Cifti2HeaderError): @@ -176,20 +181,25 @@ def test_cifti2_parcel(): pl = ci.Cifti2Parcel() with pytest.raises(ci.Cifti2HeaderError): pl.to_xml() - + with pytest.raises(TypeError): pl.append_cifti_vertices(None) - + with 
pytest.raises(ValueError): ci.Cifti2Parcel(vertices=[1, 2, 3]) - pl = ci.Cifti2Parcel(name='region', - voxel_indices_ijk=ci.Cifti2VoxelIndicesIJK([[1, 2, 3]]), - vertices=[ci.Cifti2Vertices([0, 1, 2])]) + pl = ci.Cifti2Parcel( + name='region', + voxel_indices_ijk=ci.Cifti2VoxelIndicesIJK([[1, 2, 3]]), + vertices=[ci.Cifti2Vertices([0, 1, 2])], + ) pl.pop_cifti2_vertices(0) assert len(pl.vertices) == 0 - assert pl.to_xml().decode('utf-8') == '1 2 3' + assert ( + pl.to_xml().decode('utf-8') + == '1 2 3' + ) def test_cifti2_vertices(): @@ -209,7 +219,10 @@ def test_cifti2_vertices(): with pytest.raises(ValueError): vs.insert(1, 'a') - assert vs.to_xml().decode('utf-8') == '0 1 2' + assert ( + vs.to_xml().decode('utf-8') + == '0 1 2' + ) vs[0] = 10 assert vs[0] == 10 @@ -244,7 +257,7 @@ def test_cifti2_vertexindices(): vi.extend(np.array([0, 1, 2])) assert len(vi) == 3 assert vi.to_xml().decode('utf-8') == '0 1 2' - + with pytest.raises(ValueError): vi[0] = 'a' @@ -296,17 +309,17 @@ def test_cifti2_voxelindicesijk(): assert vi[0, 1] == 10 vi[0, 1] = 1 - #test for vi[:, 0] and other slices + # test for vi[:, 0] and other slices with pytest.raises(NotImplementedError): vi[:, 0] with pytest.raises(NotImplementedError): vi[:, 0] = 0 with pytest.raises(NotImplementedError): # Don't know how to use remove with slice - del vi[:, 0] + del vi[:, 0] with pytest.raises(ValueError): vi[0, 0, 0] - + with pytest.raises(ValueError): vi[0, 0, 0] = 0 @@ -328,11 +341,10 @@ def test_matrixindicesmap(): assert mim.volume is None mim.extend((volume, parcel)) - assert mim.volume == volume with pytest.raises(ci.Cifti2HeaderError): mim.insert(0, volume) - + with pytest.raises(ci.Cifti2HeaderError): mim[1] = volume @@ -361,7 +373,7 @@ def test_matrix(): with pytest.raises(TypeError): m[0] = ci.Cifti2Parcel() - + with pytest.raises(TypeError): m.insert(0, ci.Cifti2Parcel()) @@ -382,7 +394,7 @@ def test_matrix(): assert h.number_of_mapped_indices == 1 with pytest.raises(ci.Cifti2HeaderError): m.insert(0, mim_0) - + with pytest.raises(ci.Cifti2HeaderError): m.insert(0, mim_01) @@ -400,23 +412,24 @@ def test_matrix(): def test_underscoring(): # Pairs taken from inflection tests # https://github.com/jpvanhal/inflection/blob/663982e/test_inflection.py#L113-L125 - pairs = (("Product", "product"), - ("SpecialGuest", "special_guest"), - ("ApplicationController", "application_controller"), - ("Area51Controller", "area51_controller"), - ("HTMLTidy", "html_tidy"), - ("HTMLTidyGenerator", "html_tidy_generator"), - ("FreeBSD", "free_bsd"), - ("HTML", "html"), - ) + pairs = ( + ('Product', 'product'), + ('SpecialGuest', 'special_guest'), + ('ApplicationController', 'application_controller'), + ('Area51Controller', 'area51_controller'), + ('HTMLTidy', 'html_tidy'), + ('HTMLTidyGenerator', 'html_tidy_generator'), + ('FreeBSD', 'free_bsd'), + ('HTML', 'html'), + ) for camel, underscored in pairs: assert ci.cifti2._underscore(camel) == underscored class TestCifti2ImageAPI(_TDA, SerializeMixin, DtypeOverrideMixin): - """ Basic validation for Cifti2Image instances - """ + """Basic validation for Cifti2Image instances""" + # A callable returning an image from ``image_maker(data, header)`` image_maker = ci.Cifti2Image # A callable returning a header from ``header_maker()`` @@ -425,14 +438,22 @@ class TestCifti2ImageAPI(_TDA, SerializeMixin, DtypeOverrideMixin): ni_header_maker = Nifti2Header example_shapes = ((2,), (2, 3), (2, 3, 4)) standard_extension = '.nii' - storable_dtypes = (np.int8, np.uint8, np.int16, np.uint16, np.int32, 
np.uint32, - np.int64, np.uint64, np.float32, np.float64) + storable_dtypes = ( + np.int8, + np.uint8, + np.int16, + np.uint16, + np.int32, + np.uint32, + np.int64, + np.uint64, + np.float32, + np.float64, + ) def make_imaker(self, arr, header=None, ni_header=None): for idx, sz in enumerate(arr.shape): maps = [ci.Cifti2NamedMap(str(value)) for value in range(sz)] - mim = ci.Cifti2MatrixIndicesMap( - (idx, ), 'CIFTI_INDEX_TYPE_SCALARS', maps=maps - ) + mim = ci.Cifti2MatrixIndicesMap((idx,), 'CIFTI_INDEX_TYPE_SCALARS', maps=maps) header.matrix.append(mim) return lambda: self.image_maker(arr.copy(), header, ni_header) diff --git a/nibabel/cifti2/tests/test_cifti2io_axes.py b/nibabel/cifti2/tests/test_cifti2io_axes.py index fb5a485d98..756b0f6c9f 100644 --- a/nibabel/cifti2/tests/test_cifti2io_axes.py +++ b/nibabel/cifti2/tests/test_cifti2io_axes.py @@ -7,28 +7,68 @@ test_directory = os.path.join(get_nibabel_data(), 'nitest-cifti2') -hcp_labels = ['CortexLeft', 'CortexRight', 'AccumbensLeft', 'AccumbensRight', 'AmygdalaLeft', 'AmygdalaRight', - 'brain_stem', 'CaudateLeft', 'CaudateRight', 'CerebellumLeft', 'CerebellumRight', - 'Diencephalon_ventral_left', 'Diencephalon_ventral_right', 'HippocampusLeft', 'HippocampusRight', - 'PallidumLeft', 'PallidumRight', 'PutamenLeft', 'PutamenRight', 'ThalamusLeft', 'ThalamusRight'] - -hcp_n_elements = [29696, 29716, 135, 140, 315, 332, 3472, 728, 755, 8709, 9144, 706, - 712, 764, 795, 297, 260, 1060, 1010, 1288, 1248] - -hcp_affine = np.array([[ -2., 0., 0., 90.], - [ 0., 2., 0., -126.], - [ 0., 0., 2., -72.], - [ 0., 0., 0., 1.]]) +hcp_labels = [ + 'CortexLeft', + 'CortexRight', + 'AccumbensLeft', + 'AccumbensRight', + 'AmygdalaLeft', + 'AmygdalaRight', + 'brain_stem', + 'CaudateLeft', + 'CaudateRight', + 'CerebellumLeft', + 'CerebellumRight', + 'Diencephalon_ventral_left', + 'Diencephalon_ventral_right', + 'HippocampusLeft', + 'HippocampusRight', + 'PallidumLeft', + 'PallidumRight', + 'PutamenLeft', + 'PutamenRight', + 'ThalamusLeft', + 'ThalamusRight', +] + +hcp_n_elements = [ + 29696, + 29716, + 135, + 140, + 315, + 332, + 3472, + 728, + 755, + 8709, + 9144, + 706, + 712, + 764, + 795, + 297, + 260, + 1060, + 1010, + 1288, + 1248, +] + +hcp_affine = np.array( + [[-2.0, 0.0, 0.0, 90.0], [0.0, 2.0, 0.0, -126.0], [0.0, 0.0, 2.0, -72.0], [0.0, 0.0, 0.0, 1.0]] +) def check_hcp_grayordinates(brain_model): - """Checks that a BrainModelAxis matches the expected 32k HCP grayordinates - """ + """Checks that a BrainModelAxis matches the expected 32k HCP grayordinates""" assert isinstance(brain_model, cifti2_axes.BrainModelAxis) structures = list(brain_model.iter_structures()) assert len(structures) == len(hcp_labels) idx_start = 0 - for idx, (name, _, bm), label, nel in zip(range(len(structures)), structures, hcp_labels, hcp_n_elements): + for idx, (name, _, bm), label, nel in zip( + range(len(structures)), structures, hcp_labels, hcp_n_elements + ): if idx < 2: assert name in bm.nvertices.keys() assert (bm.voxel == -1).all() @@ -42,9 +82,9 @@ def check_hcp_grayordinates(brain_model): assert bm.volume_shape == (91, 109, 91) assert name == cifti2_axes.BrainModelAxis.to_cifti_brain_structure_name(label) assert len(bm) == nel - assert (bm.name == brain_model.name[idx_start:idx_start + nel]).all() - assert (bm.voxel == brain_model.voxel[idx_start:idx_start + nel]).all() - assert (bm.vertex == brain_model.vertex[idx_start:idx_start + nel]).all() + assert (bm.name == brain_model.name[idx_start : idx_start + nel]).all() + assert (bm.voxel == 
brain_model.voxel[idx_start : idx_start + nel]).all() + assert (bm.vertex == brain_model.vertex[idx_start : idx_start + nel]).all() idx_start += nel assert idx_start == len(brain_model) @@ -60,8 +100,7 @@ def check_hcp_grayordinates(brain_model): def check_Conte69(brain_model): - """Checks that the BrainModelAxis matches the expected Conte69 surface coordinates - """ + """Checks that the BrainModelAxis matches the expected Conte69 surface coordinates""" assert isinstance(brain_model, cifti2_axes.BrainModelAxis) structures = list(brain_model.iter_structures()) assert len(structures) == 2 @@ -96,7 +135,7 @@ def check_rewrite(arr, axes, extension='.nii'): arr2 = img.get_fdata() assert np.allclose(arr, arr2) for idx in range(len(img.shape)): - assert (axes[idx] == img.header.get_axis(idx)) + assert axes[idx] == img.header.get_axis(idx) return img @@ -117,21 +156,27 @@ def test_read_ones(): @needs_nibabel_data('nitest-cifti2') def test_read_conte69_dscalar(): - img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.ScalarAxis) assert len(axes[0]) == 2 assert axes[0].name[0] == 'MyelinMap_BC_decurv' assert axes[0].name[1] == 'corrThickness' - assert axes[0].meta[0] == {'PaletteColorMapping': '\n MODE_AUTO_SCALE_PERCENTAGE\n 98.000000 2.000000 2.000000 98.000000\n -100.000000 0.000000 0.000000 100.000000\n ROY-BIG-BL\n true\n true\n false\n true\n THRESHOLD_TEST_SHOW_OUTSIDE\n THRESHOLD_TYPE_OFF\n false\n -1.000000 1.000000\n -1.000000 1.000000\n -1.000000 1.000000\n \n PALETTE_THRESHOLD_RANGE_MODE_MAP\n'} + assert axes[0].meta[0] == { + 'PaletteColorMapping': '\n MODE_AUTO_SCALE_PERCENTAGE\n 98.000000 2.000000 2.000000 98.000000\n -100.000000 0.000000 0.000000 100.000000\n ROY-BIG-BL\n true\n true\n false\n true\n THRESHOLD_TEST_SHOW_OUTSIDE\n THRESHOLD_TYPE_OFF\n false\n -1.000000 1.000000\n -1.000000 1.000000\n -1.000000 1.000000\n \n PALETTE_THRESHOLD_RANGE_MODE_MAP\n' + } check_Conte69(axes[1]) check_rewrite(arr, axes) @needs_nibabel_data('nitest-cifti2') def test_read_conte69_dtseries(): - img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.SeriesAxis) @@ -146,13 +191,21 @@ def test_read_conte69_dtseries(): @needs_nibabel_data('nitest-cifti2') def test_read_conte69_dlabel(): - img = nib.load(os.path.join(test_directory, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.LabelAxis) assert len(axes[0]) == 3 - assert (axes[0].name == ['Composite Parcellation-lh (FRB08_OFP03_retinotopic)', - 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', 'MEDIAL WALL lh (fs_LR)']).all() + assert ( + axes[0].name + == [ + 'Composite Parcellation-lh (FRB08_OFP03_retinotopic)', + 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', + 'MEDIAL WALL lh (fs_LR)', + ] + ).all() assert axes[0].label[1][70] == ('19_B05', (1.0, 0.867, 0.467, 1.0)) assert (axes[0].meta 
== [{}] * 3).all() check_Conte69(axes[1]) @@ -161,7 +214,9 @@ def test_read_conte69_dlabel(): @needs_nibabel_data('nitest-cifti2') def test_read_conte69_ptseries(): - img = nib.load(os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii')) + img = nib.load( + os.path.join(test_directory, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii') + ) arr = img.get_fdata() axes = [img.header.get_axis(dim) for dim in range(2)] assert isinstance(axes[0], cifti2_axes.SeriesAxis) @@ -175,6 +230,6 @@ def test_read_conte69_ptseries(): voxels, vertices = axes[1]['ER_FRB08'] assert voxels.shape == (0, 3) assert len(vertices) == 2 - assert vertices['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (206 // 2, ) - assert vertices['CIFTI_STRUCTURE_CORTEX_RIGHT'].shape == (206 // 2, ) + assert vertices['CIFTI_STRUCTURE_CORTEX_LEFT'].shape == (206 // 2,) + assert vertices['CIFTI_STRUCTURE_CORTEX_RIGHT'].shape == (206 // 2,) check_rewrite(arr, axes) diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 541ceaa30c..3497ec413f 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -29,21 +29,16 @@ CIFTI2_DATA = pjoin(get_nibabel_data(), 'nitest-cifti2') DATA_FILE1 = pjoin(CIFTI2_DATA, '') -DATA_FILE2 = pjoin(CIFTI2_DATA, - 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii') -DATA_FILE3 = pjoin(CIFTI2_DATA, - 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii') -DATA_FILE4 = pjoin(CIFTI2_DATA, - 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii') -DATA_FILE5 = pjoin(CIFTI2_DATA, - 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii') +DATA_FILE2 = pjoin(CIFTI2_DATA, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dscalar.nii') +DATA_FILE3 = pjoin(CIFTI2_DATA, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.dtseries.nii') +DATA_FILE4 = pjoin(CIFTI2_DATA, 'Conte69.MyelinAndCorrThickness.32k_fs_LR.ptseries.nii') +DATA_FILE5 = pjoin(CIFTI2_DATA, 'Conte69.parcellations_VGD11b.32k_fs_LR.dlabel.nii') DATA_FILE6 = pjoin(CIFTI2_DATA, 'ones.dscalar.nii') datafiles = [DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6] def test_space_separated_affine(): - img = ci.Cifti2Image.from_filename( - pjoin(NIBABEL_TEST_DATA, "row_major.dconn.nii")) + img = ci.Cifti2Image.from_filename(pjoin(NIBABEL_TEST_DATA, 'row_major.dconn.nii')) def test_read_nifti2(): @@ -92,12 +87,9 @@ def test_readwritedata(): img2 = ci.load('test.nii') assert len(img.header.matrix) == len(img2.header.matrix) # Order should be preserved in load/save - for mim1, mim2 in zip(img.header.matrix, - img2.header.matrix): - named_maps1 = [m_ for m_ in mim1 - if isinstance(m_, ci.Cifti2NamedMap)] - named_maps2 = [m_ for m_ in mim2 - if isinstance(m_, ci.Cifti2NamedMap)] + for mim1, mim2 in zip(img.header.matrix, img2.header.matrix): + named_maps1 = [m_ for m_ in mim1 if isinstance(m_, ci.Cifti2NamedMap)] + named_maps2 = [m_ for m_ in mim2 if isinstance(m_, ci.Cifti2NamedMap)] assert len(named_maps1) == len(named_maps2) for map1, map2 in zip(named_maps1, named_maps2): assert map1.map_name == map2.map_name @@ -118,12 +110,9 @@ def test_nibabel_readwritedata(): img2 = nib.load('test.nii') assert len(img.header.matrix) == len(img2.header.matrix) # Order should be preserved in load/save - for mim1, mim2 in zip(img.header.matrix, - img2.header.matrix): - named_maps1 = [m_ for m_ in mim1 - if isinstance(m_, ci.Cifti2NamedMap)] - named_maps2 = [m_ for m_ in mim2 - if isinstance(m_, ci.Cifti2NamedMap)] + for mim1, mim2 in 
zip(img.header.matrix, img2.header.matrix): + named_maps1 = [m_ for m_ in mim1 if isinstance(m_, ci.Cifti2NamedMap)] + named_maps2 = [m_ for m_ in mim2 if isinstance(m_, ci.Cifti2NamedMap)] assert len(named_maps1) == len(named_maps2) for map1, map2 in zip(named_maps1, named_maps2): assert map1.map_name == map2.map_name @@ -138,19 +127,20 @@ def test_nibabel_readwritedata(): def test_cifti2types(): """Check that we instantiate Cifti2 classes correctly, and that our test files exercise all classes""" - counter = {ci.Cifti2LabelTable: 0, - ci.Cifti2Label: 0, - ci.Cifti2NamedMap: 0, - ci.Cifti2Surface: 0, - ci.Cifti2VoxelIndicesIJK: 0, - ci.Cifti2Vertices: 0, - ci.Cifti2Parcel: 0, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ: 0, - ci.Cifti2Volume: 0, - ci.Cifti2VertexIndices: 0, - ci.Cifti2BrainModel: 0, - ci.Cifti2MatrixIndicesMap: 0, - } + counter = { + ci.Cifti2LabelTable: 0, + ci.Cifti2Label: 0, + ci.Cifti2NamedMap: 0, + ci.Cifti2Surface: 0, + ci.Cifti2VoxelIndicesIJK: 0, + ci.Cifti2Vertices: 0, + ci.Cifti2Parcel: 0, + ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ: 0, + ci.Cifti2Volume: 0, + ci.Cifti2VertexIndices: 0, + ci.Cifti2BrainModel: 0, + ci.Cifti2MatrixIndicesMap: 0, + } for name in datafiles: hdr = ci.load(name).header @@ -166,8 +156,7 @@ def test_cifti2types(): counter[ci.Cifti2BrainModel] += 1 if isinstance(map_.vertex_indices, ci.Cifti2VertexIndices): counter[ci.Cifti2VertexIndices] += 1 - if isinstance(map_.voxel_indices_ijk, - ci.Cifti2VoxelIndicesIJK): + if isinstance(map_.voxel_indices_ijk, ci.Cifti2VoxelIndicesIJK): counter[ci.Cifti2VoxelIndicesIJK] += 1 elif isinstance(map_, ci.Cifti2NamedMap): counter[ci.Cifti2NamedMap] += 1 @@ -179,8 +168,7 @@ def test_cifti2types(): counter[ci.Cifti2Label] += 1 elif isinstance(map_, ci.Cifti2Parcel): counter[ci.Cifti2Parcel] += 1 - if isinstance(map_.voxel_indices_ijk, - ci.Cifti2VoxelIndicesIJK): + if isinstance(map_.voxel_indices_ijk, ci.Cifti2VoxelIndicesIJK): counter[ci.Cifti2VoxelIndicesIJK] += 1 assert isinstance(map_.vertices, list) for vtcs in map_.vertices: @@ -190,18 +178,24 @@ def test_cifti2types(): counter[ci.Cifti2Surface] += 1 elif isinstance(map_, ci.Cifti2Volume): counter[ci.Cifti2Volume] += 1 - if isinstance(map_.transformation_matrix_voxel_indices_ijk_to_xyz, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ): + if isinstance( + map_.transformation_matrix_voxel_indices_ijk_to_xyz, + ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + ): counter[ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ] += 1 assert list(mim.named_maps) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2NamedMap)] assert list(mim.surfaces) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Surface)] assert list(mim.parcels) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Parcel)] - assert list(mim.brain_models) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2BrainModel)] - assert ([mim.volume] if mim.volume else []) == [m_ for m_ in mim if isinstance(m_, ci.Cifti2Volume)] + assert list(mim.brain_models) == [ + m_ for m_ in mim if isinstance(m_, ci.Cifti2BrainModel) + ] + assert ([mim.volume] if mim.volume else []) == [ + m_ for m_ in mim if isinstance(m_, ci.Cifti2Volume) + ] for klass, count in counter.items(): - assert count > 0, "No exercise of " + klass.__name__ + assert count > 0, 'No exercise of ' + klass.__name__ @needs_nibabel_data('nitest-cifti2') @@ -211,30 +205,32 @@ def test_read_geometry(): # For every brain model in ones.dscalar.nii defines: # brain structure name, number of grayordinates, first vertex or voxel, last 
vertex or voxel - expected_geometry = [('CIFTI_STRUCTURE_CORTEX_LEFT', 29696, 0, 32491), - ('CIFTI_STRUCTURE_CORTEX_RIGHT', 29716, 0, 32491), - ('CIFTI_STRUCTURE_ACCUMBENS_LEFT', 135, [49, 66, 28], [48, 72, 35]), - ('CIFTI_STRUCTURE_ACCUMBENS_RIGHT', 140, [40, 66, 29], [43, 66, 36]), - ('CIFTI_STRUCTURE_AMYGDALA_LEFT', 315, [55, 61, 21], [56, 58, 31]), - ('CIFTI_STRUCTURE_AMYGDALA_RIGHT', 332, [34, 62, 20], [36, 61, 31]), - ('CIFTI_STRUCTURE_BRAIN_STEM', 3472, [42, 41, 0], [46, 50, 36]), - ('CIFTI_STRUCTURE_CAUDATE_LEFT', 728, [50, 72, 32], [53, 60, 49]), - ('CIFTI_STRUCTURE_CAUDATE_RIGHT', 755, [40, 68, 33], [37, 62, 49]), - ('CIFTI_STRUCTURE_CEREBELLUM_LEFT', 8709, [49, 35, 4], [46, 37, 37]), - ('CIFTI_STRUCTURE_CEREBELLUM_RIGHT', 9144, [38, 35, 4], [44, 38, 36]), - ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', 706, [52, 53, 26], [56, 49, 35]), - ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', 712, [39, 54, 26], [35, 49, 36]), - ('CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', 764, [55, 60, 21], [54, 44, 39]), - ('CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', 795, [33, 60, 21], [38, 45, 39]), - ('CIFTI_STRUCTURE_PALLIDUM_LEFT', 297, [56, 59, 32], [55, 61, 39]), - ('CIFTI_STRUCTURE_PALLIDUM_RIGHT', 260, [36, 62, 32], [35, 62, 39]), - ('CIFTI_STRUCTURE_PUTAMEN_LEFT', 1060, [51, 66, 28], [58, 64, 43]), - ('CIFTI_STRUCTURE_PUTAMEN_RIGHT', 1010, [34, 66, 29], [31, 62, 43]), - ('CIFTI_STRUCTURE_THALAMUS_LEFT', 1288, [55, 47, 33], [52, 53, 46]), - ('CIFTI_STRUCTURE_THALAMUS_RIGHT', 1248, [32, 47, 34], [38, 55, 46])] + expected_geometry = [ + ('CIFTI_STRUCTURE_CORTEX_LEFT', 29696, 0, 32491), + ('CIFTI_STRUCTURE_CORTEX_RIGHT', 29716, 0, 32491), + ('CIFTI_STRUCTURE_ACCUMBENS_LEFT', 135, [49, 66, 28], [48, 72, 35]), + ('CIFTI_STRUCTURE_ACCUMBENS_RIGHT', 140, [40, 66, 29], [43, 66, 36]), + ('CIFTI_STRUCTURE_AMYGDALA_LEFT', 315, [55, 61, 21], [56, 58, 31]), + ('CIFTI_STRUCTURE_AMYGDALA_RIGHT', 332, [34, 62, 20], [36, 61, 31]), + ('CIFTI_STRUCTURE_BRAIN_STEM', 3472, [42, 41, 0], [46, 50, 36]), + ('CIFTI_STRUCTURE_CAUDATE_LEFT', 728, [50, 72, 32], [53, 60, 49]), + ('CIFTI_STRUCTURE_CAUDATE_RIGHT', 755, [40, 68, 33], [37, 62, 49]), + ('CIFTI_STRUCTURE_CEREBELLUM_LEFT', 8709, [49, 35, 4], [46, 37, 37]), + ('CIFTI_STRUCTURE_CEREBELLUM_RIGHT', 9144, [38, 35, 4], [44, 38, 36]), + ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT', 706, [52, 53, 26], [56, 49, 35]), + ('CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT', 712, [39, 54, 26], [35, 49, 36]), + ('CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT', 764, [55, 60, 21], [54, 44, 39]), + ('CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT', 795, [33, 60, 21], [38, 45, 39]), + ('CIFTI_STRUCTURE_PALLIDUM_LEFT', 297, [56, 59, 32], [55, 61, 39]), + ('CIFTI_STRUCTURE_PALLIDUM_RIGHT', 260, [36, 62, 32], [35, 62, 39]), + ('CIFTI_STRUCTURE_PUTAMEN_LEFT', 1060, [51, 66, 28], [58, 64, 43]), + ('CIFTI_STRUCTURE_PUTAMEN_RIGHT', 1010, [34, 66, 29], [31, 62, 43]), + ('CIFTI_STRUCTURE_THALAMUS_LEFT', 1288, [55, 47, 33], [52, 53, 46]), + ('CIFTI_STRUCTURE_THALAMUS_RIGHT', 1248, [32, 47, 34], [38, 55, 46]), + ] current_index = 0 for from_file, expected in zip(geometry_mapping.brain_models, expected_geometry): - assert from_file.model_type in ("CIFTI_MODEL_TYPE_SURFACE", "CIFTI_MODEL_TYPE_VOXELS") + assert from_file.model_type in ('CIFTI_MODEL_TYPE_SURFACE', 'CIFTI_MODEL_TYPE_VOXELS') assert from_file.brain_structure == expected[0] assert from_file.index_offset == current_index assert from_file.index_count == expected[1] @@ -254,13 +250,12 @@ def test_read_geometry(): assert from_file.voxel_indices_ijk[-1] == expected[3] assert 
current_index == img.shape[1] - expected_affine = [[-2, 0, 0, 90], - [ 0, 2, 0, -126], - [ 0, 0, 2, -72], - [ 0, 0, 0, 1]] + expected_affine = [[-2, 0, 0, 90], [0, 2, 0, -126], [0, 0, 2, -72], [0, 0, 0, 1]] expected_dimensions = (91, 109, 91) - assert (geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == - expected_affine).all() + assert ( + geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix + == expected_affine + ).all() assert geometry_mapping.volume.volume_dimensions == expected_dimensions @@ -269,60 +264,62 @@ def test_read_parcels(): img = ci.Cifti2Image.from_filename(DATA_FILE4) parcel_mapping = img.header.matrix.get_index_map(1) - expected_parcels = [('MEDIAL.WALL', ((719, 20, 28550), (810, 21, 28631))), - ('BA2_FRB08', ((516, 6757, 17888), (461, 6757, 17887))), - ('BA1_FRB08', ((211, 5029, 17974), (214, 3433, 17934))), - ('BA3b_FRB08', ((444, 3436, 18065), (397, 3436, 18065))), - ('BA4p_FRB08', ((344, 3445, 18164), (371, 3443, 18175))), - ('BA3a_FRB08', ((290, 3441, 18140), (289, 3440, 18140))), - ('BA4a_FRB08', ((471, 3446, 18181), (455, 3446, 19759))), - ('BA6_FRB08', ((1457, 2, 30951), (1400, 2, 30951))), - ('BA17_V1_FRB08', ((629, 23155, 25785), (635, 23155, 25759))), - ('BA45_FRB08', ((245, 10100, 18774), (214, 10103, 18907))), - ('BA44_FRB08', ((226, 10118, 19240), (273, 10119, 19270))), - ('hOc5_MT_FRB08', ((104, 15019, 23329), (80, 15023, 23376))), - ('BA18_V2_FRB08', ((702, 95, 25902), (651, 98, 25903))), - ('V3A_SHM07', ((82, 4, 25050), (82, 4, 25050))), - ('V3B_SHM07', ((121, 13398, 23303), (121, 13398, 23303))), - ('LO1_KPO10', ((54, 15007, 23543), (54, 15007, 23543))), - ('LO2_KPO10', ((79, 15013, 23636), (79, 15013, 23636))), - ('PITd_KPO10', ((53, 15018, 23769), (65, 15018, 23769))), - ('PITv_KPO10', ((72, 23480, 23974), (72, 23480, 23974))), - ('OP1_BSW08', ((470, 8421, 18790), (470, 8421, 18790))), - ('OP2_BSW08', ((67, 10, 31060), (67, 10, 31060))), - ('OP3_BSW08', ((119, 10137, 18652), (119, 10137, 18652))), - ('OP4_BSW08', ((191, 16613, 19429), (192, 16613, 19429))), - ('IPS1_SHM07', ((54, 11775, 14496), (54, 11775, 14496))), - ('IPS2_SHM07', ((71, 11771, 14587), (71, 11771, 14587))), - ('IPS3_SHM07', ((114, 11764, 14783), (114, 11764, 14783))), - ('IPS4_SHM07', ((101, 11891, 12653), (101, 11891, 12653))), - ('V7_SHM07', ((140, 11779, 14002), (140, 11779, 14002))), - ('V4v_SHM07', ((81, 23815, 24557), (90, 23815, 24557))), - ('V3d_KPO10', ((90, 23143, 25192), (115, 23143, 25192))), - ('14c_OFP03', ((22, 19851, 21311), (22, 19851, 21311))), - ('13a_OFP03', ((20, 20963, 21154), (20, 20963, 21154))), - ('47s_OFP03', ((211, 10182, 20343), (211, 10182, 20343))), - ('14r_OFP03', ((54, 21187, 21324), (54, 21187, 21324))), - ('13m_OFP03', ((103, 20721, 21075), (103, 20721, 21075))), - ('13l_OFP03', ((101, 20466, 20789), (101, 20466, 20789))), - ('32pl_OFP03', ((14, 19847, 21409), (14, 19847, 21409))), - ('25_OFP03', ((8, 19844, 27750), (8, 19844, 27750))), - ('47m_OFP03', ((200, 10174, 20522), (200, 10174, 20522))), - ('47l_OFP03', ((142, 10164, 19969), (160, 10164, 19969))), - ('Iai_OFP03', ((153, 10188, 20199), (153, 10188, 20199))), - ('10r_OFP03', ((138, 19811, 28267), (138, 19811, 28267))), - ('11m_OFP03', ((92, 20850, 21165), (92, 20850, 21165))), - ('11l_OFP03', ((200, 20275, 21029), (200, 20275, 21029))), - ('47r_OFP03', ((259, 10094, 20535), (259, 10094, 20535))), - ('10m_OFP03', ((102, 19825, 21411), (102, 19825, 21411))), - ('Iam_OFP03', ((15, 20346, 20608), (15, 20346, 20608))), - ('Ial_OFP03', ((89, 
10194, 11128), (89, 10194, 11128))), - ('24_OFP03', ((39, 19830, 28279), (36, 19830, 28279))), - ('Iapm_OFP03', ((7, 20200, 20299), (7, 20200, 20299))), - ('10p_OFP03', ((480, 19780, 28640), (480, 19780, 28640))), - ('V6_PHG06', ((72, 12233, 12869), (72, 12233, 12869))), - ('ER_FRB08', ((103, 21514, 26470), (103, 21514, 26470))), - ('13b_OFP03', ((60, 21042, 21194), (71, 21040, 21216)))] + expected_parcels = [ + ('MEDIAL.WALL', ((719, 20, 28550), (810, 21, 28631))), + ('BA2_FRB08', ((516, 6757, 17888), (461, 6757, 17887))), + ('BA1_FRB08', ((211, 5029, 17974), (214, 3433, 17934))), + ('BA3b_FRB08', ((444, 3436, 18065), (397, 3436, 18065))), + ('BA4p_FRB08', ((344, 3445, 18164), (371, 3443, 18175))), + ('BA3a_FRB08', ((290, 3441, 18140), (289, 3440, 18140))), + ('BA4a_FRB08', ((471, 3446, 18181), (455, 3446, 19759))), + ('BA6_FRB08', ((1457, 2, 30951), (1400, 2, 30951))), + ('BA17_V1_FRB08', ((629, 23155, 25785), (635, 23155, 25759))), + ('BA45_FRB08', ((245, 10100, 18774), (214, 10103, 18907))), + ('BA44_FRB08', ((226, 10118, 19240), (273, 10119, 19270))), + ('hOc5_MT_FRB08', ((104, 15019, 23329), (80, 15023, 23376))), + ('BA18_V2_FRB08', ((702, 95, 25902), (651, 98, 25903))), + ('V3A_SHM07', ((82, 4, 25050), (82, 4, 25050))), + ('V3B_SHM07', ((121, 13398, 23303), (121, 13398, 23303))), + ('LO1_KPO10', ((54, 15007, 23543), (54, 15007, 23543))), + ('LO2_KPO10', ((79, 15013, 23636), (79, 15013, 23636))), + ('PITd_KPO10', ((53, 15018, 23769), (65, 15018, 23769))), + ('PITv_KPO10', ((72, 23480, 23974), (72, 23480, 23974))), + ('OP1_BSW08', ((470, 8421, 18790), (470, 8421, 18790))), + ('OP2_BSW08', ((67, 10, 31060), (67, 10, 31060))), + ('OP3_BSW08', ((119, 10137, 18652), (119, 10137, 18652))), + ('OP4_BSW08', ((191, 16613, 19429), (192, 16613, 19429))), + ('IPS1_SHM07', ((54, 11775, 14496), (54, 11775, 14496))), + ('IPS2_SHM07', ((71, 11771, 14587), (71, 11771, 14587))), + ('IPS3_SHM07', ((114, 11764, 14783), (114, 11764, 14783))), + ('IPS4_SHM07', ((101, 11891, 12653), (101, 11891, 12653))), + ('V7_SHM07', ((140, 11779, 14002), (140, 11779, 14002))), + ('V4v_SHM07', ((81, 23815, 24557), (90, 23815, 24557))), + ('V3d_KPO10', ((90, 23143, 25192), (115, 23143, 25192))), + ('14c_OFP03', ((22, 19851, 21311), (22, 19851, 21311))), + ('13a_OFP03', ((20, 20963, 21154), (20, 20963, 21154))), + ('47s_OFP03', ((211, 10182, 20343), (211, 10182, 20343))), + ('14r_OFP03', ((54, 21187, 21324), (54, 21187, 21324))), + ('13m_OFP03', ((103, 20721, 21075), (103, 20721, 21075))), + ('13l_OFP03', ((101, 20466, 20789), (101, 20466, 20789))), + ('32pl_OFP03', ((14, 19847, 21409), (14, 19847, 21409))), + ('25_OFP03', ((8, 19844, 27750), (8, 19844, 27750))), + ('47m_OFP03', ((200, 10174, 20522), (200, 10174, 20522))), + ('47l_OFP03', ((142, 10164, 19969), (160, 10164, 19969))), + ('Iai_OFP03', ((153, 10188, 20199), (153, 10188, 20199))), + ('10r_OFP03', ((138, 19811, 28267), (138, 19811, 28267))), + ('11m_OFP03', ((92, 20850, 21165), (92, 20850, 21165))), + ('11l_OFP03', ((200, 20275, 21029), (200, 20275, 21029))), + ('47r_OFP03', ((259, 10094, 20535), (259, 10094, 20535))), + ('10m_OFP03', ((102, 19825, 21411), (102, 19825, 21411))), + ('Iam_OFP03', ((15, 20346, 20608), (15, 20346, 20608))), + ('Ial_OFP03', ((89, 10194, 11128), (89, 10194, 11128))), + ('24_OFP03', ((39, 19830, 28279), (36, 19830, 28279))), + ('Iapm_OFP03', ((7, 20200, 20299), (7, 20200, 20299))), + ('10p_OFP03', ((480, 19780, 28640), (480, 19780, 28640))), + ('V6_PHG06', ((72, 12233, 12869), (72, 12233, 12869))), + ('ER_FRB08', ((103, 21514, 26470), 
(103, 21514, 26470))), + ('13b_OFP03', ((60, 21042, 21194), (71, 21040, 21216))), + ] assert img.shape[1] == len(expected_parcels) assert len(list(parcel_mapping.parcels)) == len(expected_parcels) @@ -330,8 +327,9 @@ def test_read_parcels(): for (name, expected_surfaces), parcel in zip(expected_parcels, parcel_mapping.parcels): assert parcel.name == name assert len(parcel.vertices) == 2 - for vertices, orientation, (length, first_element, last_element) in zip(parcel.vertices, ('LEFT', 'RIGHT'), - expected_surfaces): + for vertices, orientation, (length, first_element, last_element) in zip( + parcel.vertices, ('LEFT', 'RIGHT'), expected_surfaces + ): assert len(vertices) == length assert vertices[0] == first_element assert vertices[-1] == last_element @@ -355,19 +353,19 @@ def test_read_scalar(): print(expected_meta[0], scalar.metadata.data.keys()) for key, value in expected_meta: assert key in scalar.metadata.data.keys() - assert scalar.metadata[key][:len(value)] == value + assert scalar.metadata[key][: len(value)] == value - assert scalar.label_table is None, ".dscalar file should not define a label table" + assert scalar.label_table is None, '.dscalar file should not define a label table' @needs_nibabel_data('nitest-cifti2') def test_read_series(): img = ci.Cifti2Image.from_filename(DATA_FILE4) series_mapping = img.header.matrix.get_index_map(0) - assert series_mapping.series_start == 0. - assert series_mapping.series_step == 1. + assert series_mapping.series_start == 0.0 + assert series_mapping.series_step == 1.0 assert series_mapping.series_unit == 'SECOND' - assert series_mapping.series_exponent == 0. + assert series_mapping.series_exponent == 0.0 assert series_mapping.number_of_series_points == img.shape[0] @@ -376,25 +374,29 @@ def test_read_labels(): img = ci.Cifti2Image.from_filename(DATA_FILE5) label_mapping = img.header.matrix.get_index_map(0) - expected_names = ['Composite Parcellation-lh (FRB08_OFP03_retinotopic)', - 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', - 'MEDIAL WALL lh (fs_LR)'] + expected_names = [ + 'Composite Parcellation-lh (FRB08_OFP03_retinotopic)', + 'Brodmann lh (from colin.R via pals_R-to-fs_LR)', + 'MEDIAL WALL lh (fs_LR)', + ] assert img.shape[0] == len(expected_names) assert len(list(label_mapping.named_maps)) == len(expected_names) - some_expected_labels = {0: ('???', (0.667, 0.667, 0.667, 0.0)), - 1: ('MEDIAL.WALL', (0.075, 0.075, 0.075, 1.0)), - 2: ('BA2_FRB08', (0.467, 0.459, 0.055, 1.0)), - 3: ('BA1_FRB08', (0.475, 0.722, 0.859, 1.0)), - 4: ('BA3b_FRB08', (0.855, 0.902, 0.286, 1.0)), - 5: ('BA4p_FRB08', (0.902, 0.573, 0.122, 1.0)), - 89: ('36_B05', (0.467, 0.0, 0.129, 1.0)), - 90: ('35_B05', (0.467, 0.067, 0.067, 1.0)), - 91: ('28_B05', (0.467, 0.337, 0.271, 1.0)), - 92: ('29_B05', (0.267, 0.0, 0.529, 1.0)), - 93: ('26_B05', (0.757, 0.2, 0.227, 1.0)), - 94: ('33_B05', (0.239, 0.082, 0.373, 1.0)), - 95: ('13b_OFP03', (1.0, 1.0, 0.0, 1.0))} + some_expected_labels = { + 0: ('???', (0.667, 0.667, 0.667, 0.0)), + 1: ('MEDIAL.WALL', (0.075, 0.075, 0.075, 1.0)), + 2: ('BA2_FRB08', (0.467, 0.459, 0.055, 1.0)), + 3: ('BA1_FRB08', (0.475, 0.722, 0.859, 1.0)), + 4: ('BA3b_FRB08', (0.855, 0.902, 0.286, 1.0)), + 5: ('BA4p_FRB08', (0.902, 0.573, 0.122, 1.0)), + 89: ('36_B05', (0.467, 0.0, 0.129, 1.0)), + 90: ('35_B05', (0.467, 0.067, 0.067, 1.0)), + 91: ('28_B05', (0.467, 0.337, 0.271, 1.0)), + 92: ('29_B05', (0.267, 0.0, 0.529, 1.0)), + 93: ('26_B05', (0.757, 0.2, 0.227, 1.0)), + 94: ('33_B05', (0.239, 0.082, 0.373, 1.0)), + 95: ('13b_OFP03', (1.0, 1.0, 0.0, 
1.0)), + } for named_map, name in zip(label_mapping.named_maps, expected_names): assert named_map.map_name == name @@ -440,9 +442,9 @@ def test_pixdim_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 35) assert fhdr['pixdim'][1] == 2 assert message == self._pixdim_message + '; setting to abs of pixdim values' - + pytest.raises(*raiser) - + hdr = HC() hdr['pixdim'][1:4] = 0 # No error or warning fhdr, message, raiser = self.log_chk(hdr, 0) diff --git a/nibabel/cifti2/tests/test_name.py b/nibabel/cifti2/tests/test_name.py index 6b53d46523..789de00b58 100644 --- a/nibabel/cifti2/tests/test_name.py +++ b/nibabel/cifti2/tests/test_name.py @@ -1,11 +1,38 @@ from nibabel.cifti2 import cifti2_axes -equivalents = [('CIFTI_STRUCTURE_CORTEX_LEFT', ('CortexLeft', 'LeftCortex', 'left_cortex', 'Left Cortex', - 'Cortex_Left', 'cortex left', 'CORTEX_LEFT', 'LEFT CORTEX', - ('cortex', 'left'), ('CORTEX', 'Left'), ('LEFT', 'coRTEX'))), - ('CIFTI_STRUCTURE_CORTEX', ('Cortex', 'CortexBOTH', 'Cortex_both', 'both cortex', - 'BOTH_CORTEX', 'cortex', 'CORTEX', ('cortex', ), - ('COrtex', 'Both'), ('both', 'cortex')))] +equivalents = [ + ( + 'CIFTI_STRUCTURE_CORTEX_LEFT', + ( + 'CortexLeft', + 'LeftCortex', + 'left_cortex', + 'Left Cortex', + 'Cortex_Left', + 'cortex left', + 'CORTEX_LEFT', + 'LEFT CORTEX', + ('cortex', 'left'), + ('CORTEX', 'Left'), + ('LEFT', 'coRTEX'), + ), + ), + ( + 'CIFTI_STRUCTURE_CORTEX', + ( + 'Cortex', + 'CortexBOTH', + 'Cortex_both', + 'both cortex', + 'BOTH_CORTEX', + 'cortex', + 'CORTEX', + ('cortex',), + ('COrtex', 'Both'), + ('both', 'cortex'), + ), + ), +] def test_name_conversion(): @@ -16,4 +43,4 @@ def test_name_conversion(): for base_name, input_names in equivalents: assert base_name == func(base_name) for name in input_names: - assert base_name == func(name) \ No newline at end of file + assert base_name == func(name) diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index a49ba79d52..15c6c110b9 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -14,53 +14,63 @@ import pytest from ...testing import ( - clear_and_catch_warnings, error_warnings, suppress_warnings, assert_array_equal) + clear_and_catch_warnings, + error_warnings, + suppress_warnings, + assert_array_equal, +) -affine = [[-1.5, 0, 0, 90], - [0, 1.5, 0, -85], - [0, 0, 1.5, -71], - [0, 0, 0, 1.]] +affine = [[-1.5, 0, 0, 90], [0, 1.5, 0, -85], [0, 0, 1.5, -71], [0, 0, 0, 1.0]] dimensions = (120, 83, 78) number_of_vertices = 30000 -brain_models = [('CIFTI_STRUCTURE_THALAMUS_LEFT', [[60, 60, 60], - [61, 59, 60], - [61, 60, 59], - [80, 90, 92]]), - ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), - ('CIFTI_STRUCTURE_CORTEX_RIGHT', [207]) - ] +brain_models = [ + ('CIFTI_STRUCTURE_THALAMUS_LEFT', [[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]]), + ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), + ('CIFTI_STRUCTURE_CORTEX_RIGHT', [207]), +] def create_geometry_map(applies_to_matrix_dimension): voxels = ci.Cifti2VoxelIndicesIJK(brain_models[0][1]) - left_thalamus = ci.Cifti2BrainModel(index_offset=0, index_count=4, - model_type='CIFTI_MODEL_TYPE_VOXELS', - brain_structure=brain_models[0][0], - voxel_indices_ijk=voxels) + left_thalamus = ci.Cifti2BrainModel( + index_offset=0, + index_count=4, + model_type='CIFTI_MODEL_TYPE_VOXELS', + brain_structure=brain_models[0][0], + voxel_indices_ijk=voxels, + ) vertices = ci.Cifti2VertexIndices(np.array(brain_models[1][1])) - left_cortex = 
ci.Cifti2BrainModel(index_offset=4, index_count=5, - model_type='CIFTI_MODEL_TYPE_SURFACE', - brain_structure=brain_models[1][0], - vertex_indices=vertices) + left_cortex = ci.Cifti2BrainModel( + index_offset=4, + index_count=5, + model_type='CIFTI_MODEL_TYPE_SURFACE', + brain_structure=brain_models[1][0], + vertex_indices=vertices, + ) left_cortex.surface_number_of_vertices = number_of_vertices vertices = ci.Cifti2VertexIndices(np.array(brain_models[2][1])) - right_cortex = ci.Cifti2BrainModel(index_offset=9, index_count=1, - model_type='CIFTI_MODEL_TYPE_SURFACE', - brain_structure=brain_models[2][0], - vertex_indices=vertices) + right_cortex = ci.Cifti2BrainModel( + index_offset=9, + index_count=1, + model_type='CIFTI_MODEL_TYPE_SURFACE', + brain_structure=brain_models[2][0], + vertex_indices=vertices, + ) right_cortex.surface_number_of_vertices = number_of_vertices - volume = ci.Cifti2Volume(dimensions, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, - affine)) - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_BRAIN_MODELS', - maps=[left_thalamus, left_cortex, right_cortex, volume]) + volume = ci.Cifti2Volume( + dimensions, ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine) + ) + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, + 'CIFTI_INDEX_TYPE_BRAIN_MODELS', + maps=[left_thalamus, left_cortex, right_cortex, volume], + ) def check_geometry_map(mapping): @@ -96,25 +106,25 @@ def check_geometry_map(mapping): assert (mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == affine).all() -parcels = [('volume_parcel', ([[60, 60, 60], - [61, 59, 60], - [61, 60, 59], - [80, 90, 92]], )), - ('surface_parcel', (('CIFTI_STRUCTURE_CORTEX_LEFT', - [0, 1000, 1301, 19972, 27312]), - ('CIFTI_STRUCTURE_CORTEX_RIGHT', - [0, 100, 381]))), - ('mixed_parcel', ([[71, 81, 39], - [53, 21, 91]], - ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999]))), - ('single_element', ([[71, 81, 39]], - ('CIFTI_STRUCTURE_CORTEX_LEFT', [40]))), - ] +parcels = [ + ('volume_parcel', ([[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]],)), + ( + 'surface_parcel', + ( + ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), + ('CIFTI_STRUCTURE_CORTEX_RIGHT', [0, 100, 381]), + ), + ), + ( + 'mixed_parcel', + ([[71, 81, 39], [53, 21, 91]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999])), + ), + ('single_element', ([[71, 81, 39]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [40]))), +] def create_parcel_map(applies_to_matrix_dimension): - mapping = ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_PARCELS') + mapping = ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, 'CIFTI_INDEX_TYPE_PARCELS') for name, elements in parcels: surfaces = [] volume = None @@ -125,10 +135,15 @@ def create_parcel_map(applies_to_matrix_dimension): volume = ci.Cifti2VoxelIndicesIJK(element) mapping.append(ci.Cifti2Parcel(name, volume, surfaces)) - mapping.extend([ci.Cifti2Surface(f'CIFTI_STRUCTURE_CORTEX_{orientation}', - number_of_vertices) for orientation in ['LEFT', 'RIGHT']]) - mapping.volume = ci.Cifti2Volume(dimensions, - ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine)) + mapping.extend( + [ + ci.Cifti2Surface(f'CIFTI_STRUCTURE_CORTEX_{orientation}', number_of_vertices) + for orientation in ['LEFT', 'RIGHT'] + ] + ) + mapping.volume = ci.Cifti2Volume( + dimensions, ci.Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ(-3, affine) + ) return mapping @@ -155,16 +170,14 @@ def check_parcel_map(mapping): assert 
(mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix == affine).all() -scalars = [('first_name', {'meta_key': 'some_metadata'}), - ('another name', {})] +scalars = [('first_name', {'meta_key': 'some_metadata'}), ('another name', {})] def create_scalar_map(applies_to_matrix_dimension): - maps = [ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta)) - for name, meta in scalars] - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_SCALARS', - maps=maps) + maps = [ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta)) for name, meta in scalars] + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, 'CIFTI_INDEX_TYPE_SCALARS', maps=maps + ) def check_scalar_map(mapping): @@ -179,11 +192,14 @@ def check_scalar_map(mapping): assert named_map.metadata == expected[1] -labels = [('first_name', {'meta_key': 'some_metadata'}, - {0: ('label0', (0.1, 0.3, 0.2, 0.5)), - 1: ('new_label', (0.5, 0.3, 0.1, 0.4))}), - ('another name', {}, {0: ('???', (0, 0, 0, 0)), - 1: ('great region', (0.4, 0.1, 0.23, 0.15))})] +labels = [ + ( + 'first_name', + {'meta_key': 'some_metadata'}, + {0: ('label0', (0.1, 0.3, 0.2, 0.5)), 1: ('new_label', (0.5, 0.3, 0.1, 0.4))}, + ), + ('another name', {}, {0: ('???', (0, 0, 0, 0)), 1: ('great region', (0.4, 0.1, 0.23, 0.15))}), +] def create_label_map(applies_to_matrix_dimension): @@ -192,11 +208,10 @@ def create_label_map(applies_to_matrix_dimension): label_table = ci.Cifti2LabelTable() for key, (tag, rgba) in label.items(): label_table[key] = ci.Cifti2Label(key, tag, *rgba) - maps.append(ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta), - label_table)) - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_LABELS', - maps=maps) + maps.append(ci.Cifti2NamedMap(name, ci.Cifti2MetaData(meta), label_table)) + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, 'CIFTI_INDEX_TYPE_LABELS', maps=maps + ) def check_label_map(mapping): @@ -212,11 +227,15 @@ def check_label_map(mapping): def create_series_map(applies_to_matrix_dimension): - return ci.Cifti2MatrixIndicesMap(applies_to_matrix_dimension, - 'CIFTI_INDEX_TYPE_SERIES', - number_of_series_points=13, - series_exponent=-3, series_start=18.2, - series_step=10.5, series_unit='SECOND') + return ci.Cifti2MatrixIndicesMap( + applies_to_matrix_dimension, + 'CIFTI_INDEX_TYPE_SERIES', + number_of_series_points=13, + series_exponent=-3, + series_start=18.2, + series_step=10.5, + series_unit='SECOND', + ) def check_series_map(mapping): @@ -229,8 +248,8 @@ def check_series_map(mapping): def test_dtseries(): - series_map = create_series_map((0, )) - geometry_map = create_geometry_map((1, )) + series_map = create_series_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((series_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -250,8 +269,8 @@ def test_dtseries(): def test_dscalar(): - scalar_map = create_scalar_map((0, )) - geometry_map = create_geometry_map((1, )) + scalar_map = create_scalar_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((scalar_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -271,8 +290,8 @@ def test_dscalar(): def test_dlabel(): - label_map = create_label_map((0, )) - geometry_map = create_geometry_map((1, )) + label_map = create_label_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((label_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -312,8 +331,8 @@ def test_dconn(): def 
test_ptseries(): - series_map = create_series_map((0, )) - parcel_map = create_parcel_map((1, )) + series_map = create_series_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((series_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -333,8 +352,8 @@ def test_ptseries(): def test_pscalar(): - scalar_map = create_scalar_map((0, )) - parcel_map = create_parcel_map((1, )) + scalar_map = create_scalar_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((scalar_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -354,8 +373,8 @@ def test_pscalar(): def test_pdconn(): - geometry_map = create_geometry_map((0, )) - parcel_map = create_parcel_map((1, )) + geometry_map = create_geometry_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((geometry_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -375,8 +394,8 @@ def test_pdconn(): def test_dpconn(): - parcel_map = create_parcel_map((0, )) - geometry_map = create_geometry_map((1, )) + parcel_map = create_parcel_map((0,)) + geometry_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((parcel_map, geometry_map)) hdr = ci.Cifti2Header(matrix) @@ -396,8 +415,8 @@ def test_dpconn(): def test_plabel(): - label_map = create_label_map((0, )) - parcel_map = create_parcel_map((1, )) + label_map = create_label_map((0,)) + parcel_map = create_parcel_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((label_map, parcel_map)) hdr = ci.Cifti2Header(matrix) @@ -437,15 +456,14 @@ def test_pconn(): def test_pconnseries(): parcel_map = create_parcel_map((0, 1)) - series_map = create_series_map((2, )) + series_map = create_series_map((2,)) matrix = ci.Cifti2Matrix() matrix.extend((parcel_map, series_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 13) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' - 'PARCELLATED_SERIES') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SERIES') with InTemporaryDirectory(): ci.save(img, 'test.pconnseries.nii') @@ -461,15 +479,14 @@ def test_pconnseries(): def test_pconnscalar(): parcel_map = create_parcel_map((0, 1)) - scalar_map = create_scalar_map((2, )) + scalar_map = create_scalar_map((2,)) matrix = ci.Cifti2Matrix() matrix.extend((parcel_map, scalar_map)) hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 2) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' - 'PARCELLATED_SCALAR') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SCALAR') with InTemporaryDirectory(): ci.save(img, 'test.pconnscalar.nii') @@ -485,8 +502,8 @@ def test_pconnscalar(): def test_wrong_shape(): - scalar_map = create_scalar_map((0, )) - brain_model_map = create_geometry_map((1, )) + scalar_map = create_scalar_map((0,)) + brain_model_map = create_geometry_map((1,)) matrix = ci.Cifti2Matrix() matrix.extend((scalar_map, brain_model_map)) @@ -506,7 +523,6 @@ def test_wrong_shape(): ci.Cifti2Image(data, hdr) with suppress_warnings(): img = ci.Cifti2Image(data, hdr) - + with pytest.raises(ValueError): img.to_file_map() - diff --git a/nibabel/cmdline/conform.py b/nibabel/cmdline/conform.py index cfa86b6951..52c80b5263 100644 --- a/nibabel/cmdline/conform.py +++ b/nibabel/cmdline/conform.py @@ -22,19 +22,25 @@ def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) - 
p.add_argument("infile", - help="Neuroimaging volume to conform.") - p.add_argument("outfile", - help="Name of output file.") - p.add_argument("--out-shape", nargs=3, default=(256, 256, 256), type=int, - help="Shape of the conformed output.") - p.add_argument("--voxel-size", nargs=3, default=(1, 1, 1), type=int, - help="Voxel size in millimeters of the conformed output.") - p.add_argument("--orientation", default="RAS", - help="Orientation of the conformed output.") - p.add_argument("-f", "--force", action="store_true", - help="Overwrite existing output files.") - p.add_argument("-V", "--version", action="version", version=f"{p.prog} {__version__}") + p.add_argument('infile', help='Neuroimaging volume to conform.') + p.add_argument('outfile', help='Name of output file.') + p.add_argument( + '--out-shape', + nargs=3, + default=(256, 256, 256), + type=int, + help='Shape of the conformed output.', + ) + p.add_argument( + '--voxel-size', + nargs=3, + default=(1, 1, 1), + type=int, + help='Voxel size in millimeters of the conformed output.', + ) + p.add_argument('--orientation', default='RAS', help='Orientation of the conformed output.') + p.add_argument('-f', '--force', action='store_true', help='Overwrite existing output files.') + p.add_argument('-V', '--version', action='version', version=f'{p.prog} {__version__}') return p @@ -46,7 +52,7 @@ def main(args=None): from_img = load(opts.infile) if not opts.force and Path(opts.outfile).exists(): - raise FileExistsError(f"Output file exists: {opts.outfile}") + raise FileExistsError(f'Output file exists: {opts.outfile}') out_img = conform( from_img=from_img, @@ -54,6 +60,7 @@ def main(args=None): voxel_size=opts.voxel_size, order=3, cval=0.0, - orientation=opts.orientation) + orientation=opts.orientation, + ) save(out_img, opts.outfile) diff --git a/nibabel/cmdline/convert.py b/nibabel/cmdline/convert.py index 8f1042c71d..ce80d8c709 100644 --- a/nibabel/cmdline/convert.py +++ b/nibabel/cmdline/convert.py @@ -21,20 +21,26 @@ def _get_parser(): """Return command-line argument parser.""" p = argparse.ArgumentParser(description=__doc__) - p.add_argument("infile", - help="Neuroimaging volume to convert") - p.add_argument("outfile", - help="Name of output file") - p.add_argument("--out-dtype", action="store", - help="On-disk data type; valid argument to numpy.dtype()") - p.add_argument("--image-type", action="store", - help="Name of NiBabel image class to create, e.g. Nifti1Image. " - "If specified, will be used prior to setting dtype. If unspecified, " - "a new image like `infile` will be created and converted to a type " - "matching the extension of `outfile`.") - p.add_argument("-f", "--force", action="store_true", - help="Overwrite output file if it exists, and ignore warnings if possible") - p.add_argument("-V", "--version", action="version", version=f"{p.prog} {nib.__version__}") + p.add_argument('infile', help='Neuroimaging volume to convert') + p.add_argument('outfile', help='Name of output file') + p.add_argument( + '--out-dtype', action='store', help='On-disk data type; valid argument to numpy.dtype()' + ) + p.add_argument( + '--image-type', + action='store', + help='Name of NiBabel image class to create, e.g. Nifti1Image. ' + 'If specified, will be used prior to setting dtype. 
If unspecified, '
+        'a new image like `infile` will be created and converted to a type '
+        'matching the extension of `outfile`.',
+    )
+    p.add_argument(
+        '-f',
+        '--force',
+        action='store_true',
+        help='Overwrite output file if it exists, and ignore warnings if possible',
+    )
+    p.add_argument('-V', '--version', action='version', version=f'{p.prog} {nib.__version__}')

     return p

@@ -46,7 +52,7 @@ def main(args=None):
     orig = nib.load(opts.infile)

     if not opts.force and Path(opts.outfile).exists():
-        raise FileExistsError(f"Output file exists: {opts.outfile}")
+        raise FileExistsError(f'Output file exists: {opts.outfile}')

     if opts.image_type:
         klass = getattr(nib, opts.image_type)
@@ -59,7 +65,7 @@ def main(args=None):
             out_img.set_data_dtype(opts.out_dtype)
         except Exception as e:
             if opts.force:
-                warnings.warn(f"Ignoring error: {e!r}")
+                warnings.warn(f'Ignoring error: {e!r}')
             else:
                 raise

diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py
index 33532cf8e7..efba4809c7 100644
--- a/nibabel/cmdline/dicomfs.py
+++ b/nibabel/cmdline/dicomfs.py
@@ -20,11 +20,13 @@

 class dummy_fuse:
     """Dummy fuse "module" so that nose does not blow during doctests"""
+
     Fuse = object


 try:
     import fuse
+
     uid = os.getuid()
     gid = os.getgid()
 except ImportError:
@@ -43,7 +45,6 @@ class dummy_fuse:


 class FileHandle:
-
     def __init__(self, fno):
         self.fno = fno
         self.keep_cache = False
@@ -54,11 +55,9 @@ def __str__(self):


 class DICOMFS(fuse.Fuse):
-
     def __init__(self, *args, **kwargs):
         if fuse is dummy_fuse:
-            raise RuntimeError(
-                "fuse module is not available, install it to use DICOMFS")
+            raise RuntimeError('fuse module is not available, install it to use DICOMFS')
         self.followlinks = kwargs.pop('followlinks', False)
         self.dicom_path = kwargs.pop('dicom_path', None)
         fuse.Fuse.__init__(self, *args, **kwargs)
@@ -91,9 +90,11 @@ def get_paths(self):
             series_info += 'bits allocated: %d\n' % series.bits_allocated
             series_info += 'bits stored: %d\n' % series.bits_stored
             series_info += 'storage instances: %d\n' % len(series.storage_instances)
-            d[series.number] = {'INFO': series_info.encode('ascii', 'replace'),
-                                f'{series.number}.nii': (series.nifti_size, series.as_nifti),
-                                f'{series.number}.png': (series.png_size, series.as_png)}
+            d[series.number] = {
+                'INFO': series_info.encode('ascii', 'replace'),
+                f'{series.number}.nii': (series.nifti_size, series.as_nifti),
+                f'{series.number}.png': (series.png_size, series.as_png),
+            }
         pd[study_datetime] = d
         return paths

@@ -103,7 +104,7 @@ def match_path(self, path):
             logger.debug('return root')
             return wd
         for part in path.lstrip('/').split('/'):
-            logger.debug(f"path:{path} part:{part}")
+            logger.debug(f'path:{path} part:{part}')
             if part not in wd:
                 return None
             wd = wd[part]
@@ -180,7 +181,7 @@ def read(self, path, size, offset, fh):
         logger.debug(size)
         logger.debug(offset)
         logger.debug(fh)
-        return self.fhs[fh.fno][offset:offset + size]
+        return self.fhs[fh.fno][offset : offset + size]

     def release(self, path, flags, fh):
         logger.debug('release')
@@ -192,21 +193,37 @@ def release(self, path, flags, fh):
 def get_opt_parser():
     # use module docstring for help output
     p = OptionParser(
-        usage="{} [OPTIONS] <DIROFDICOM> <MOUNTPOINT>".format(
-            os.path.basename(sys.argv[0])),
-        version="%prog " + nib.__version__)
-
-    p.add_options([
-        Option("-v", "--verbose", action="count",
-               dest="verbose", default=0,
-               help="make noise. 
Could be specified multiple times"),
-    ])
-
-    p.add_options([
-        Option("-L", "--follow-links", action="store_true",
-               dest="followlinks", default=False,
-               help="Follow symbolic links in DICOM directory"),
-    ])
+        usage='{} [OPTIONS] <DIROFDICOM> <MOUNTPOINT>'.format(
+            os.path.basename(sys.argv[0])
+        ),
+        version='%prog ' + nib.__version__,
+    )
+
+    p.add_options(
+        [
+            Option(
+                '-v',
+                '--verbose',
+                action='count',
+                dest='verbose',
+                default=0,
+                help='make noise. Could be specified multiple times',
+            ),
+        ]
+    )
+
+    p.add_options(
+        [
+            Option(
+                '-L',
+                '--follow-links',
+                action='store_true',
+                dest='followlinks',
+                default=False,
+                help='Follow symbolic links in DICOM directory',
+            ),
+        ]
+    )

     return p

@@ -219,13 +236,11 @@ def main(args=None):
     logger.setLevel(opts.verbose > 1 and logging.DEBUG or logging.INFO)

     if len(files) != 2:
-        sys.stderr.write(f"Please provide two arguments:\n{parser.usage}\n")
+        sys.stderr.write(f'Please provide two arguments:\n{parser.usage}\n')
         sys.exit(1)

     fs = DICOMFS(
-        dash_s_do='setsingle',
-        followlinks=opts.followlinks,
-        dicom_path=files[0].decode(encoding)
+        dash_s_do='setsingle', followlinks=opts.followlinks, dicom_path=files[0].decode(encoding)
     )
     fs.parse(['-f', '-s', files[1]])
     try:
diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py
index b48033eb45..5ec5f425ee 100755
--- a/nibabel/cmdline/diff.py
+++ b/nibabel/cmdline/diff.py
@@ -32,40 +32,56 @@ def get_opt_parser():
     # use module docstring for help output
     p = OptionParser(
-        usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__,
-        version="%prog " + nib.__version__)
-
-    p.add_options([
-        Option("-v", "--verbose", action="count",
-               dest="verbose", default=0,
-               help="Make more noise. Could be specified multiple times"),
-
-        Option("-H", "--header-fields",
-               dest="header_fields", default='all',
-               help="Header fields (comma separated) to be printed as well"
-                    " (if present)"),
-
-        Option("--ma", "--data-max-abs-diff",
-               dest="data_max_abs_diff",
-               type=float,
-               default=0.0,
-               help="Maximal absolute difference in data between files"
-                    " to tolerate."),
-
-        Option("--mr", "--data-max-rel-diff",
-               dest="data_max_rel_diff",
-               type=float,
-               default=0.0,
-               help="Maximal relative difference in data between files to"
-                    " tolerate. If --data-max-abs-diff is also specified,"
-                    " only the data points with absolute difference greater"
-                    " than that value would be considered for relative"
-                    " difference check."),
-        Option("--dt", "--datatype",
-               dest="dtype",
-               default=np.float64,
-               help="Enter a numpy datatype such as 'float32'.")
-    ])
+        usage=f'{sys.argv[0]} [OPTIONS] [FILE ...]\n\n' + __doc__,
+        version='%prog ' + nib.__version__,
+    )
+
+    p.add_options(
+        [
+            Option(
+                '-v',
+                '--verbose',
+                action='count',
+                dest='verbose',
+                default=0,
+                help='Make more noise. Could be specified multiple times',
+            ),
+            Option(
+                '-H',
+                '--header-fields',
+                dest='header_fields',
+                default='all',
+                help='Header fields (comma separated) to be printed as well' ' (if present)',
+            ),
+            Option(
+                '--ma',
+                '--data-max-abs-diff',
+                dest='data_max_abs_diff',
+                type=float,
+                default=0.0,
+                help='Maximal absolute difference in data between files' ' to tolerate.',
+            ),
+            Option(
+                '--mr',
+                '--data-max-rel-diff',
+                dest='data_max_rel_diff',
+                type=float,
+                default=0.0,
+                help='Maximal relative difference in data between files to'
+                ' tolerate. 
If --data-max-abs-diff is also specified,' + ' only the data points with absolute difference greater' + ' than that value would be considered for relative' + ' difference check.', + ), + Option( + '--dt', + '--datatype', + dest='dtype', + default=np.float64, + help="Enter a numpy datatype such as 'float32'.", + ), + ] + ) return p @@ -94,7 +110,7 @@ def are_values_different(*values): except TypeError as exc: str_exc = str(exc) # Not implemented in numpy 1.7.1 - if "not supported" in str_exc or "not implemented" in str_exc: + if 'not supported' in str_exc or 'not implemented' in str_exc: value0_nans = None else: raise @@ -104,8 +120,7 @@ def are_values_different(*values): return True elif isinstance(value0, np.ndarray): # use .dtype.type to provide endianness agnostic comparison - if value0.dtype.type != value.dtype.type or \ - value0.shape != value.shape: + if value0.dtype.type != value.dtype.type or value0.shape != value.shape: return True # there might be nans and they need special treatment if value0_nans is not None: @@ -159,15 +174,15 @@ def get_headers_diff(file_headers, names=None): def get_data_hash_diff(files, dtype=np.float64): """Get difference between md5 values of data - Parameters - ---------- - files: list of actual files + Parameters + ---------- + files: list of actual files - Returns - ------- - list - np.array: md5 values of respective files - """ + Returns + ------- + list + np.array: md5 values of respective files + """ md5sums = [ hashlib.md5(np.ascontiguousarray(nib.load(f).get_fdata(dtype=dtype))).hexdigest() @@ -209,14 +224,13 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): """ # we are doomed to keep them in RAM now - data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) - for f in files] + data = [f if isinstance(f, np.ndarray) else nib.load(f).get_fdata(dtype=dtype) for f in files] diffs = OrderedDict() for i, d1 in enumerate(data[:-1]): # populate empty entries for non-compared diffs1 = [None] * (i + 1) - for j, d2 in enumerate(data[i + 1:], i + 1): + for j, d2 in enumerate(data[i + 1 :], i + 1): if d1.shape == d2.shape: abs_diff = np.abs(d1 - d2) @@ -251,7 +265,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): diffs1.append(None) else: - diffs1.append({'CMP': "incompat"}) + diffs1.append({'CMP': 'incompat'}) if any(diffs1): @@ -263,28 +277,28 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): def display_diff(files, diff): """Format header differences into a nice string - Parameters - ---------- - files: list of files that were compared so we can print their names - diff: dict of different valued header fields + Parameters + ---------- + files: list of files that were compared so we can print their names + diff: dict of different valued header fields - Returns - ------- - str - string-formatted table of differences + Returns + ------- + str + string-formatted table of differences """ - output = "" - field_width = "{:<15}" - filename_width = "{:<53}" - value_width = "{:<55}" + output = '' + field_width = '{:<15}' + filename_width = '{:<53}' + value_width = '{:<55}' - output += "These files are different.\n" + output += 'These files are different.\n' output += field_width.format('Field/File') for i, f in enumerate(files, 1): - output += "%d:%s" % (i, filename_width.format(os.path.basename(f))) + output += '%d:%s' % (i, filename_width.format(os.path.basename(f))) - output += "\n" + output += '\n' for key, value in diff.items(): output += field_width.format(key) @@ -305,14 +319,15 
@@ def display_diff(files, diff): item_str = re.sub('[\x00]', '?', item_str) output += value_width.format(item_str) - output += "\n" + output += '\n' return output -def diff(files, header_fields='all', data_max_abs_diff=None, - data_max_rel_diff=None, dtype=np.float64): - assert len(files) >= 2, "Please enter at least two files" +def diff( + files, header_fields='all', data_max_abs_diff=None, data_max_rel_diff=None, dtype=np.float64 +): + assert len(files) >= 2, 'Please enter at least two files' file_headers = [nib.load(f).header for f in files] @@ -330,10 +345,9 @@ def diff(files, header_fields='all', data_max_abs_diff=None, if data_md5_diffs: # provide details, possibly triggering the ignore of the difference # in data - data_diffs = get_data_diff(files, - max_abs=data_max_abs_diff, - max_rel=data_max_rel_diff, - dtype=dtype) + data_diffs = get_data_diff( + files, max_abs=data_max_abs_diff, max_rel=data_max_rel_diff, dtype=dtype + ) if data_diffs: diff['DATA(md5)'] = data_md5_diffs diff.update(data_diffs) @@ -359,12 +373,12 @@ def main(args=None, out=None): header_fields=opts.header_fields, data_max_abs_diff=opts.data_max_abs_diff, data_max_rel_diff=opts.data_max_rel_diff, - dtype=opts.dtype + dtype=opts.dtype, ) if files_diff: out.write(display_diff(files, files_diff)) raise SystemExit(1) else: - out.write("These files are identical.\n") + out.write('These files are identical.\n') raise SystemExit(0) diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 2995ff58c5..1bb9396bb3 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -20,8 +20,7 @@ import nibabel.cmdline.utils from nibabel.cmdline.utils import _err, verbose, table2string, ap, safe_get -__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' \ - 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' 'and NiBabel contributors' __license__ = 'MIT' @@ -31,58 +30,88 @@ def get_opt_parser(): # use module docstring for help output p = OptionParser( - usage=f"{sys.argv[0]} [OPTIONS] [FILE ...]\n\n" + __doc__, - version="%prog " + nib.__version__) - - p.add_options([ - Option("-v", "--verbose", action="count", - dest="verbose", default=0, - help="Make more noise. Could be specified multiple times"), - - Option("-H", "--header-fields", - dest="header_fields", default='', - help="Header fields (comma separated) to be printed as well (if present)"), - - Option("-s", "--stats", - action="store_true", dest='stats', default=False, - help="Output basic data statistics"), - - Option("-c", "--counts", - action="store_true", dest='counts', default=False, - help="Output counts - number of entries for each numeric value " - "(useful for int ROI maps)"), - - Option("--all-counts", - action="store_true", dest='all_counts', default=False, - help="Output all counts, even if number of unique values > %d" % MAX_UNIQUE), - - Option("-z", "--zeros", - action="store_true", dest='stats_zeros', default=False, - help="Include zeros into output basic data statistics (--stats, --counts)"), - ]) + usage=f'{sys.argv[0]} [OPTIONS] [FILE ...]\n\n' + __doc__, + version='%prog ' + nib.__version__, + ) + + p.add_options( + [ + Option( + '-v', + '--verbose', + action='count', + dest='verbose', + default=0, + help='Make more noise. 
Could be specified multiple times', + ), + Option( + '-H', + '--header-fields', + dest='header_fields', + default='', + help='Header fields (comma separated) to be printed as well (if present)', + ), + Option( + '-s', + '--stats', + action='store_true', + dest='stats', + default=False, + help='Output basic data statistics', + ), + Option( + '-c', + '--counts', + action='store_true', + dest='counts', + default=False, + help='Output counts - number of entries for each numeric value ' + '(useful for int ROI maps)', + ), + Option( + '--all-counts', + action='store_true', + dest='all_counts', + default=False, + help='Output all counts, even if number of unique values > %d' % MAX_UNIQUE, + ), + Option( + '-z', + '--zeros', + action='store_true', + dest='stats_zeros', + default=False, + help='Include zeros into output basic data statistics (--stats, --counts)', + ), + ] + ) return p def proc_file(f, opts): - verbose(1, f"Loading {f}") + verbose(1, f'Loading {f}') - row = [f"@l{f}"] + row = [f'@l{f}'] try: vol = nib.load(f) h = vol.header except Exception as e: row += ['failed'] - verbose(2, f"Failed to gather information -- {e}") + verbose(2, f'Failed to gather information -- {e}') return row - row += [str(safe_get(h, 'data_dtype')), - f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]", - f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}"] + row += [ + str(safe_get(h, 'data_dtype')), + f"@l[{ap(safe_get(h, 'data_shape'), '%3g')}]", + f"@l{ap(safe_get(h, 'zooms'), '%.2f', 'x')}", + ] # Slope - if hasattr(h, 'has_data_slope') and \ - (h.has_data_slope or h.has_data_intercept) and \ - not h.get_slope_inter() in [(1.0, 0.0), (None, None)]: + if ( + hasattr(h, 'has_data_slope') + and (h.has_data_slope or h.has_data_intercept) + and not h.get_slope_inter() in [(1.0, 0.0), (None, None)] + ): row += ['@l*%.3g+%.3g' % h.get_slope_inter()] else: row += [''] @@ -110,13 +139,16 @@ def proc_file(f, opts): row += [_err()] try: - if (hasattr(h, 'get_qform') and hasattr(h, 'get_sform') and - (h.get_qform() != h.get_sform()).any()): + if ( + hasattr(h, 'get_qform') + and hasattr(h, 'get_sform') + and (h.get_qform() != h.get_sform()).any() + ): row += ['sform'] else: row += [''] except Exception as e: - verbose(2, f"Failed to obtain qform or sform -- {e}") + verbose(2, f'Failed to obtain qform or sform -- {e}') if isinstance(h, nib.AnalyzeHeader): row += [''] else: @@ -134,19 +166,19 @@ def proc_file(f, opts): d = d.reshape(-1) if opts.stats: # just # of elements - row += ["@l[%d]" % np.prod(d.shape)] + row += ['@l[%d]' % np.prod(d.shape)] # stats row += [f'@l[{np.min(d):.2g}, {np.max(d):.2g}]' if len(d) else '-'] if opts.counts: items, inv = np.unique(d, return_inverse=True) if len(items) > 1000 and not opts.all_counts: - counts = _err("%d uniques. Use --all-counts" % len(items)) + counts = _err('%d uniques. Use --all-counts' % len(items)) else: freq = np.bincount(inv) - counts = " ".join("%g:%d" % (i, f) for i, f in zip(items, freq)) - row += ["@l" + counts] + counts = ' '.join('%g:%d' % (i, f) for i, f in zip(items, freq)) + row += ['@l' + counts] except OSError as e: - verbose(2, f"Failed to obtain stats/counts -- {e}") + verbose(2, f'Failed to obtain stats/counts -- {e}') row += [_err()] return row diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 51867da065..64f02694ee 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -7,7 +7,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
-""" Print nifti diagnostics for header files """
+"""Print nifti diagnostics for header files"""

 import sys
 from optparse import OptionParser
@@ -15,16 +15,15 @@
 import nibabel as nib

 __author__ = 'Matthew Brett'
-__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' \
-                'and NiBabel contributors'
+__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' 'and NiBabel contributors'
 __license__ = 'MIT'


 def main(args=None):
-    """ Go go team """
+    """Go go team"""
     parser = OptionParser(
-        usage=f"{sys.argv[0]} [FILE ...]\n\n" + __doc__,
-        version="%prog " + nib.__version__)
+        usage=f'{sys.argv[0]} [FILE ...]\n\n' + __doc__, version='%prog ' + nib.__version__
+    )

     (opts, files) = parser.parse_args(args=args)

     for fname in files:
@@ -32,7 +31,7 @@ def main(args=None):
         hdr = fobj.read(nib.nifti1.header_dtype.itemsize)
         result = nib.Nifti1Header.diagnose_binaryblock(hdr)
         if len(result):
-            print(f'Picky header check output for "{fname}\"\n')
+            print(f'Picky header check output for "{fname}"\n')
             print(result + '\n')
         else:
             print(f'Header for "{fname}" is clean')
diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py
index 0f868bd06b..f0d5b207f7 100644
--- a/nibabel/cmdline/parrec2nii.py
+++ b/nibabel/cmdline/parrec2nii.py
@@ -14,123 +14,227 @@
 import nibabel.nifti1 as nifti1
 from nibabel.filename_parser import splitext_addext
 from nibabel.volumeutils import fname_ext_ul_case
-from nibabel.orientations import (io_orientation, inv_ornt_aff,
-                                  apply_orientation)
+from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation
 from nibabel.affines import apply_affine, from_matvec, to_matvec


 def get_opt_parser():
     # use module docstring for help output
     p = OptionParser(
-        usage=f"{sys.argv[0]} [OPTIONS] <PAR files>\n\n" + __doc__,
-        version="%prog " + nibabel.__version__)
+        usage=f'{sys.argv[0]} [OPTIONS] <PAR files>\n\n' + __doc__,
+        version='%prog ' + nibabel.__version__,
+    )

     p.add_option(
-        Option("-v", "--verbose", action="store_true", dest="verbose",
-               default=False,
-               help="""Make some noise."""))
+        Option(
+            '-v',
+            '--verbose',
+            action='store_true',
+            dest='verbose',
+            default=False,
+            help="""Make some noise.""",
+        )
+    )
     p.add_option(
-        Option("-o", "--output-dir", action="store", type="string",
-               dest="outdir", default=None,
-               help=one_line("""Destination directory for NIfTI files.
-                             Default: current directory.""")))
+        Option(
+            '-o',
+            '--output-dir',
+            action='store',
+            type='string',
+            dest='outdir',
+            default=None,
+            help=one_line(
+                """Destination directory for NIfTI files.
+                Default: current directory."""
+            ),
+        )
+    )
     p.add_option(
-        Option("-c", "--compressed", action="store_true",
-               dest="compressed", default=False,
-               help="Whether to write compressed NIfTI files or not."))
+        Option(
+            '-c',
+            '--compressed',
+            action='store_true',
+            dest='compressed',
+            default=False,
+            help='Whether to write compressed NIfTI files or not.',
+        )
+    )
     p.add_option(
-        Option("-p", "--permit-truncated", action="store_true",
-               dest="permit_truncated", default=False,
-               help=one_line(
-                   """Permit conversion of truncated recordings. 
Support for this is experimental, and results *must* be checked - afterward for validity."""))) + afterward for validity.""" + ), + ) + ) p.add_option( - Option("-b", "--bvs", action="store_true", dest="bvs", default=False, - help=one_line( - """Output bvals/bvecs files in addition to NIFTI - image."""))) + Option( + '-b', + '--bvs', + action='store_true', + dest='bvs', + default=False, + help=one_line( + """Output bvals/bvecs files in addition to NIFTI + image.""" + ), + ) + ) p.add_option( - Option("-d", "--dwell-time", action="store_true", default=False, - dest="dwell_time", - help=one_line( - """Calculate the scan dwell time. If supplied, the magnetic + Option( + '-d', + '--dwell-time', + action='store_true', + default=False, + dest='dwell_time', + help=one_line( + """Calculate the scan dwell time. If supplied, the magnetic field strength should also be supplied using --field-strength (default 3). The field strength must be supplied because it is not encoded in the PAR/REC - format."""))) + format.""" + ), + ) + ) p.add_option( - Option("--field-strength", action="store", type="float", - dest="field_strength", - help=one_line( - """The magnetic field strength of the recording, only needed + Option( + '--field-strength', + action='store', + type='float', + dest='field_strength', + help=one_line( + """The magnetic field strength of the recording, only needed for --dwell-time. The field strength must be supplied - because it is not encoded in the PAR/REC format."""))) + because it is not encoded in the PAR/REC format.""" + ), + ) + ) p.add_option( - Option("-i", "--volume-info", action="store_true", dest="vol_info", - default=False, - help=one_line( - """Export .PAR volume labels corresponding to the fourth + Option( + '-i', + '--volume-info', + action='store_true', + dest='vol_info', + default=False, + help=one_line( + """Export .PAR volume labels corresponding to the fourth dimension of the data. The dimension info will be stored in CSV format with the first row containing dimension labels and the subsequent rows (one per volume), the corresponding indices. Only labels that vary along the 4th dimension are exported (e.g. for a single volume structural scan there are no dynamic labels and no output file will be created). - """))) + """ + ), + ) + ) p.add_option( - Option("--origin", action="store", dest="origin", default="scanner", - help=one_line( - """Reference point of the q-form transformation of the NIfTI + Option( + '--origin', + action='store', + dest='origin', + default='scanner', + help=one_line( + """Reference point of the q-form transformation of the NIfTI image. If 'scanner' the (0,0,0) coordinates will refer to the scanner's iso center. If 'fov', this coordinate will be the center of the recorded volume (field of view). Default: - 'scanner'."""))) + 'scanner'.""" + ), + ) + ) p.add_option( - Option("--minmax", action="store", nargs=2, dest="minmax", - help=one_line( - """Minimum and maximum settings to be stored in the NIfTI + Option( + '--minmax', + action='store', + nargs=2, + dest='minmax', + help=one_line( + """Minimum and maximum settings to be stored in the NIfTI header. If any of them is set to 'parse', the scaled data is scanned for the actual minimum and maximum. To bypass this potentially slow and memory intensive step (the data has to be scaled and fully loaded into memory), fixed values can be provided as space-separated pair, e.g. '5.4 120.4'. It is possible to set a fixed minimum as scan for the actual - maximum (and vice versa). 
Default: 'parse parse'."""))) + maximum (and vice versa). Default: 'parse parse'.""" + ), + ) + ) p.set_defaults(minmax=('parse', 'parse')) p.add_option( - Option("--store-header", action="store_true", dest="store_header", - default=False, - help=one_line( - """If set, all information from the PAR header is stored in - an extension of the NIfTI file header. Default: off"""))) + Option( + '--store-header', + action='store_true', + dest='store_header', + default=False, + help=one_line( + """If set, all information from the PAR header is stored in + an extension of the NIfTI file header. Default: off""" + ), + ) + ) p.add_option( - Option("--scaling", action="store", dest="scaling", default='dv', - help=one_line( - """Choose data scaling setting. The PAR header defines two + Option( + '--scaling', + action='store', + dest='scaling', + default='dv', + help=one_line( + """Choose data scaling setting. The PAR header defines two different data scaling settings: 'dv' (values displayed on console) and 'fp' (floating point values). Either one can be chosen, or scaling can be disabled completely ('off'). Note that neither method will actually scale the data, but just store the corresponding settings in the NIfTI header, unless non-uniform scaling is used, in which case the data is - stored in the file in scaled form. Default: 'dv'"""))) + stored in the file in scaled form. Default: 'dv'""" + ), + ) + ) p.add_option( - Option('--keep-trace', action="store_true", dest='keep_trace', - default=False, - help=one_line("""Do not discard the diagnostic Philips DTI - trace volume, if it exists in the data."""))) + Option( + '--keep-trace', + action='store_true', + dest='keep_trace', + default=False, + help=one_line( + """Do not discard the diagnostic Philips DTI + trace volume, if it exists in the data.""" + ), + ) + ) p.add_option( - Option("--overwrite", action="store_true", dest="overwrite", - default=False, - help=one_line("""Overwrite file if it exists. Default: - False"""))) + Option( + '--overwrite', + action='store_true', + dest='overwrite', + default=False, + help=one_line( + """Overwrite file if it exists. Default: + False""" + ), + ) + ) p.add_option( - Option("--strict-sort", action="store_true", dest="strict_sort", - default=False, - help=one_line("""Use additional keys in determining the order + Option( + '--strict-sort', + action='store_true', + dest='strict_sort', + default=False, + help=one_line( + """Use additional keys in determining the order to sort the slices within the .REC file. 
This may be necessary for more complicated scans with multiple echos,
-                   cardiac phases, ASL label states, etc.""")))
+                cardiac phases, ASL label states, etc."""
+            ),
+        )
+    )
     return p
 
 
@@ -163,10 +267,12 @@ def proc_file(infile, opts):
     # load the PAR header and data
     scaling = 'dv' if opts.scaling == 'off' else opts.scaling
     infile = fname_ext_ul_case(infile)
-    pr_img = pr.load(infile,
-                     permit_truncated=opts.permit_truncated,
-                     scaling=scaling,
-                     strict_sort=opts.strict_sort)
+    pr_img = pr.load(
+        infile,
+        permit_truncated=opts.permit_truncated,
+        scaling=scaling,
+        strict_sort=opts.strict_sort,
+    )
     pr_hdr = pr_img.header
     affine = pr_hdr.get_affine(origin=opts.origin)
     slope, intercept = pr_hdr.get_data_scaling(scaling)
@@ -174,8 +280,8 @@
     verbose(f'Using data scaling "{opts.scaling}"')
     # get original scaling, and decide if we scale in-place or not
     if opts.scaling == 'off':
-        slope = np.array([1.])
-        intercept = np.array([0.])
+        slope = np.array([1.0])
+        intercept = np.array([0.0])
         in_data = pr_img.dataobj.get_unscaled()
         out_dtype = pr_hdr.get_data_dtype()
     elif not np.any(np.diff(slope)) and not np.any(np.diff(intercept)):
@@ -186,15 +292,13 @@ def proc_file(infile, opts):
         out_dtype = pr_hdr.get_data_dtype()
     else:
         # Multi scalefactor case
-        slope = np.array([1.])
-        intercept = np.array([0.])
+        slope = np.array([1.0])
+        intercept = np.array([0.0])
         in_data = np.array(pr_img.dataobj)
         out_dtype = np.float64
     # Reorient data block to LAS+ if necessary
     ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
-    if np.all(ornt == [[0, 1],
-                       [1, 1],
-                       [2, 1]]):  # already in LAS+
+    if np.all(ornt == [[0, 1], [1, 1], [2, 1]]):  # already in LAS+
         t_aff = np.eye(4)
     else:  # Not in LAS+
         t_aff = inv_ornt_aff(ornt, pr_img.shape)
@@ -249,8 +353,10 @@ def proc_file(infile, opts):
     if bvals is None and bvecs is None:
         verbose('No DTI volumes detected, bvals and bvecs not written')
     elif bvecs is None:
-        verbose('DTI volumes detected, but no diffusion direction info was'
-                'found. Writing .bvals file only.')
+        verbose(
+            'DTI volumes detected, but no diffusion direction info was '
+            'found. Writing .bvals file only.'
+        )
         with open(basefilename + '.bvals', 'w') as fid:
             # np.savetxt could do this, but it's just a loop anyway
             for val in bvals:
@@ -288,14 +394,15 @@ def proc_file(infile, opts):
     if opts.dwell_time:
         try:
             dwell_time = calculate_dwell_time(
-                pr_hdr.get_water_fat_shift(),
-                pr_hdr.get_echo_train_length(),
-                opts.field_strength)
+                pr_hdr.get_water_fat_shift(), pr_hdr.get_echo_train_length(), opts.field_strength
+            )
         except MRIError:
             verbose('No EPI factors, dwell time not written')
         else:
-            verbose(f'Writing dwell time ({dwell_time!r} sec) '
-                    f'calculated assuming {opts.field_strength}T magnet')
+            verbose(
+                f'Writing dwell time ({dwell_time!r} sec) '
+                f'calculated assuming {opts.field_strength}T magnet'
+            )
             with open(basefilename + '.dwell_time', 'w') as fid:
                 fid.write(f'{dwell_time!r}\n')
     # done
@@ -322,7 +429,6 @@ def main():
             errs.append(f'{infile}: {e}')
 
     if len(errs):
-        error('Caught %i exceptions. 
Dump follows:\n\n %s' % (len(errs), '\n'.join(errs)), 1) else: verbose('Done') diff --git a/nibabel/cmdline/roi.py b/nibabel/cmdline/roi.py index 0631ecc0d1..690bb0b646 100644 --- a/nibabel/cmdline/roi.py +++ b/nibabel/cmdline/roi.py @@ -6,7 +6,7 @@ def lossless_slice(img, slicers): if not nb.imageclasses.spatial_axes_first(img): - raise ValueError("Cannot slice an image that is not known to have spatial axes first") + raise ValueError('Cannot slice an image that is not known to have spatial axes first') scaling = hasattr(img.header, 'set_slope_inter') @@ -21,41 +21,44 @@ def lossless_slice(img, slicers): def parse_slice(crop, allow_step=True): if crop is None: return slice(None) - start, stop, *extra = [int(val) if val else None for val in crop.split(":")] + start, stop, *extra = [int(val) if val else None for val in crop.split(':')] if len(extra) > 1: - raise ValueError(f"Cannot parse specification: {crop}") + raise ValueError(f'Cannot parse specification: {crop}') if not allow_step and extra and extra[0] not in (1, None): - raise ValueError(f"Step entry not permitted: {crop}") + raise ValueError(f'Step entry not permitted: {crop}') step = extra[0] if extra else None if step not in (1, -1, None): - raise ValueError(f"Downsampling is not supported: {crop}") + raise ValueError(f'Downsampling is not supported: {crop}') return slice(start, stop, step) def sanitize(args): # Argparse likes to treat "-1:..." as a flag - return [f' {arg}' if arg[0] == '-' and ":" in arg else arg - for arg in args] + return [f' {arg}' if arg[0] == '-' and ':' in arg else arg for arg in args] def main(args=None): if args is None: args = sys.argv[1:] parser = argparse.ArgumentParser( - description="Crop images to a region of interest", - epilog="If a start or stop value is omitted, the start or end of the axis is assumed.") + description='Crop images to a region of interest', + epilog='If a start or stop value is omitted, the start or end of the axis is assumed.', + ) parser.add_argument('--version', action='version', version=nb.__version__) - parser.add_argument("-i", metavar="I1:I2[:-1]", - help="Start/stop [flip] along first axis (0-indexed)") - parser.add_argument("-j", metavar="J1:J2[:-1]", - help="Start/stop [flip] along second axis (0-indexed)") - parser.add_argument("-k", metavar="K1:K2[:-1]", - help="Start/stop [flip] along third axis (0-indexed)") - parser.add_argument("-t", metavar="T1:T2", help="Start/stop along fourth axis (0-indexed)") - parser.add_argument("in_file", help="Image file to crop") - parser.add_argument("out_file", help="Output file name") + parser.add_argument( + '-i', metavar='I1:I2[:-1]', help='Start/stop [flip] along first axis (0-indexed)' + ) + parser.add_argument( + '-j', metavar='J1:J2[:-1]', help='Start/stop [flip] along second axis (0-indexed)' + ) + parser.add_argument( + '-k', metavar='K1:K2[:-1]', help='Start/stop [flip] along third axis (0-indexed)' + ) + parser.add_argument('-t', metavar='T1:T2', help='Start/stop along fourth axis (0-indexed)') + parser.add_argument('in_file', help='Image file to crop') + parser.add_argument('out_file', help='Output file name') opts = parser.parse_args(args=sanitize(args)) @@ -65,7 +68,7 @@ def main(args=None): kslice = parse_slice(opts.k) tslice = parse_slice(opts.t, allow_step=False) except ValueError as err: - print(f"Could not parse input arguments. Reason follows.\n{err}") + print(f'Could not parse input arguments. 
Reason follows.\n{err}')
         return 1
 
     kwargs = {}
@@ -73,16 +76,16 @@ def main(args=None):
         kwargs['mmap'] = False
     img = nb.load(opts.in_file, **kwargs)
 
-    slicers = (islice, jslice, kslice, tslice)[:img.ndim]
+    slicers = (islice, jslice, kslice, tslice)[: img.ndim]
     expected_shape = nb.fileslice.predict_shape(slicers, img.shape)
     if any(dim == 0 for dim in expected_shape):
-        print(f"Cannot take zero-length slices. Predicted shape {expected_shape}.")
+        print(f'Cannot take zero-length slices. Predicted shape {expected_shape}.')
         return 1
 
     try:
         sliced_img = lossless_slice(img, slicers)
     except Exception:
-        print("Could not slice image. Full traceback follows.")
+        print('Could not slice image. Full traceback follows.')
         raise
     nb.save(sliced_img, opts.out_file)
     return 0
diff --git a/nibabel/cmdline/stats.py b/nibabel/cmdline/stats.py
index 91b9f7c104..5c5d58f93c 100644
--- a/nibabel/cmdline/stats.py
+++ b/nibabel/cmdline/stats.py
@@ -19,12 +19,21 @@
 
 def _get_parser():
     """Return command-line argument parser."""
     p = argparse.ArgumentParser(description=__doc__)
-    p.add_argument("infile",
-                   help="Neuroimaging volume to compute statistics on.")
-    p.add_argument("-V", "--Volume", action="store_true", required=False,
-                   help="Compute mask volume of a given mask image.")
-    p.add_argument("--units", default="mm3", required=False,
-                   choices=("mm3", "vox"), help="Preferred output units")
+    p.add_argument('infile', help='Neuroimaging volume to compute statistics on.')
+    p.add_argument(
+        '-V',
+        '--Volume',
+        action='store_true',
+        required=False,
+        help='Compute mask volume of a given mask image.',
+    )
+    p.add_argument(
+        '--units',
+        default='mm3',
+        required=False,
+        choices=('mm3', 'vox'),
+        help='Preferred output units',
+    )
     return p
diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py
index 3c25ea3266..f50801c714 100644
--- a/nibabel/cmdline/tck2trk.py
+++ b/nibabel/cmdline/tck2trk.py
@@ -11,14 +11,15 @@
 
 def parse_args():
-    DESCRIPTION = "Convert tractograms (TCK -> TRK)."
+    DESCRIPTION = 'Convert tractograms (TCK -> TRK).'
     parser = argparse.ArgumentParser(description=DESCRIPTION)
-    parser.add_argument("anatomy",
-                        help="reference anatomical image (.nii|.nii.gz.")
-    parser.add_argument("tractograms", metavar="tractogram", nargs="+",
-                        help="list of tractograms (.tck).")
-    parser.add_argument("-f", "--force", action="store_true",
-                        help="overwrite existing output files.")
+    parser.add_argument('anatomy', help='reference anatomical image (.nii|.nii.gz).')
+    parser.add_argument(
+        'tractograms', metavar='tractogram', nargs='+', help='list of tractograms (.tck).'
+    )
+    parser.add_argument(
+        '-f', '--force', action='store_true', help='overwrite existing output files.'
+ ) args = parser.parse_args() return args, parser @@ -30,7 +31,7 @@ def main(): try: nii = nib.load(args.anatomy) except Exception: - parser.error("Expecting anatomical image as first argument.") + parser.error('Expecting anatomical image as first argument.') for tractogram in args.tractograms: tractogram_format = nib.streamlines.detect_format(tractogram) @@ -49,7 +50,7 @@ def main(): header[Field.VOXEL_TO_RASMM] = nii.affine.copy() header[Field.VOXEL_SIZES] = nii.header.get_zooms()[:3] header[Field.DIMENSIONS] = nii.shape[:3] - header[Field.VOXEL_ORDER] = "".join(aff2axcodes(nii.affine)) + header[Field.VOXEL_ORDER] = ''.join(aff2axcodes(nii.affine)) tck = nib.streamlines.load(tractogram) nib.streamlines.save(tck.tractogram, output_filename, header=header) diff --git a/nibabel/cmdline/tests/test_conform.py b/nibabel/cmdline/tests/test_conform.py index 0f64f5953b..8e203b68f9 100644 --- a/nibabel/cmdline/tests/test_conform.py +++ b/nibabel/cmdline/tests/test_conform.py @@ -23,8 +23,8 @@ @needs_scipy def test_default(tmpdir): - infile = test_data(fname="anatomical.nii") - outfile = tmpdir / "output.nii.gz" + infile = test_data(fname='anatomical.nii') + outfile = tmpdir / 'output.nii.gz' main([str(infile), str(outfile)]) assert outfile.isfile() c = nib.load(outfile) @@ -35,19 +35,21 @@ def test_default(tmpdir): with pytest.raises(FileExistsError): main([str(infile), str(outfile)]) - main([str(infile), str(outfile), "--force"]) + main([str(infile), str(outfile), '--force']) assert outfile.isfile() @needs_scipy def test_nondefault(tmpdir): - infile = test_data(fname="anatomical.nii") - outfile = tmpdir / "output.nii.gz" + infile = test_data(fname='anatomical.nii') + outfile = tmpdir / 'output.nii.gz' out_shape = (100, 100, 150) voxel_size = (1, 2, 4) - orientation = "LAS" - args = (f"{infile} {outfile} --out-shape {' '.join(map(str, out_shape))} " - f"--voxel-size {' '.join(map(str, voxel_size))} --orientation {orientation}") + orientation = 'LAS' + args = ( + f"{infile} {outfile} --out-shape {' '.join(map(str, out_shape))} " + f"--voxel-size {' '.join(map(str, voxel_size))} --orientation {orientation}" + ) main(args.split()) assert outfile.isfile() c = nib.load(outfile) diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 487bfb7401..00f00602af 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -71,10 +71,13 @@ def test_convert_dtype(tmp_path, data_dtype): assert converted.get_data_dtype() == expected_dtype -@pytest.mark.parametrize('ext,img_class', [ - ('mgh', nib.MGHImage), - ('img', nib.Nifti1Pair), -]) +@pytest.mark.parametrize( + 'ext,img_class', + [ + ('mgh', nib.MGHImage), + ('img', nib.Nifti1Pair), + ], +) def test_convert_by_extension(tmp_path, ext, img_class): infile = test_data(fname='anatomical.nii') outfile = tmp_path / f'output.{ext}' @@ -91,11 +94,14 @@ def test_convert_by_extension(tmp_path, ext, img_class): assert converted.__class__ == img_class -@pytest.mark.parametrize('ext,img_class', [ - ('mgh', nib.MGHImage), - ('img', nib.Nifti1Pair), - ('nii', nib.Nifti2Image), -]) +@pytest.mark.parametrize( + 'ext,img_class', + [ + ('mgh', nib.MGHImage), + ('img', nib.Nifti1Pair), + ('nii', nib.Nifti2Image), + ], +) def test_convert_imgtype(tmp_path, ext, img_class): infile = test_data(fname='anatomical.nii') outfile = tmp_path / f'output.{ext}' @@ -122,7 +128,7 @@ def test_convert_nifti_int_fail(tmp_path): with pytest.raises(ValueError): convert.main([str(infile), str(outfile), '--out-dtype', 
'int']) assert not outfile.exists() - + with pytest.warns(UserWarning): convert.main([str(infile), str(outfile), '--out-dtype', 'int', '--force']) assert outfile.is_file() @@ -135,13 +141,16 @@ def test_convert_nifti_int_fail(tmp_path): assert converted.get_data_dtype() == orig.get_data_dtype() -@pytest.mark.parametrize('orig_dtype,alias,expected_dtype', [ - ('int64', 'mask', 'uint8'), - ('int64', 'compat', 'int32'), - ('int64', 'smallest', 'uint8'), - ('float64', 'mask', 'uint8'), - ('float64', 'compat', 'float32'), -]) +@pytest.mark.parametrize( + 'orig_dtype,alias,expected_dtype', + [ + ('int64', 'mask', 'uint8'), + ('int64', 'compat', 'int32'), + ('int64', 'smallest', 'uint8'), + ('float64', 'mask', 'uint8'), + ('float64', 'compat', 'float32'), + ], +) def test_convert_aliases(tmp_path, orig_dtype, alias, expected_dtype): orig_fname = tmp_path / 'orig.nii' out_fname = tmp_path / 'out.nii' diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py index c41679d84d..2100f3f478 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/cmdline/tests/test_parrec2nii.py @@ -1,4 +1,4 @@ -""" Tests for the parrec2nii exe code +"""Tests for the parrec2nii exe code """ from os.path import join, isfile, basename @@ -9,23 +9,29 @@ from nibabel.cmdline import parrec2nii from unittest.mock import Mock, MagicMock, patch -from numpy.testing import (assert_almost_equal, assert_array_equal) +from numpy.testing import assert_almost_equal, assert_array_equal from nibabel.tests.test_parrec import EG_PAR, VARY_PAR from nibabel.tmpdirs import InTemporaryDirectory AN_OLD_AFFINE = numpy.array( - [[-3.64994708, 0., 1.83564171, 123.66276611], - [0., -3.75, 0., 115.617], - [0.86045705, 0., 7.78655376, -27.91161211], - [0., 0., 0., 1.]]) + [ + [-3.64994708, 0.0, 1.83564171, 123.66276611], + [0.0, -3.75, 0.0, 115.617], + [0.86045705, 0.0, 7.78655376, -27.91161211], + [0.0, 0.0, 0.0, 1.0], + ] +) PAR_AFFINE = numpy.array( -[[ -3.64994708, 0. , 1.83564171, 107.63076611], - [ 0. , 3.75, 0. , -118.125 ], - [ 0.86045705, 0. , 7.78655376, -58.25061211], - [ 0. , 0. , 0. , 1. ]]) + [ + [-3.64994708, 0.0, 1.83564171, 107.63076611], + [0.0, 3.75, 0.0, -118.125], + [0.86045705, 0.0, 7.78655376, -58.25061211], + [0.0, 0.0, 0.0, 1.0], + ] +) @patch('nibabel.cmdline.parrec2nii.verbose') @@ -36,7 +42,7 @@ def test_parrec2nii_sets_qform_sform_code1(*args): # Check that set_sform(), set_qform() are called on the new header. 
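    # An aside on the LAS+ convention mocked just below: io_orientation() maps an
    # affine onto (output axis, flip) pairs, and parrec2nii negates the first
    # spatial row before the check, so that the identity result
    # [[0, 1], [1, 1], [2, 1]] means "already LAS+, no reorientation needed".
    # A minimal sketch with a made-up affine (illustrative only, not part of
    # this test, where io_orientation is patched out):
    #
    #     import numpy as np
    #     from nibabel.orientations import io_orientation
    #
    #     affine = np.diag([-2.0, 2.0, 2.0, 1.0])   # a plainly LAS+ affine
    #     ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
    #     assert np.all(ornt == [[0, 1], [1, 1], [2, 1]])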
parrec2nii.verbose.switch = False - parrec2nii.io_orientation.return_value = [[0, 1],[1, 1],[2, 1]] # LAS+ + parrec2nii.io_orientation.return_value = [[0, 1], [1, 1], [2, 1]] # LAS+ nimg = Mock() nhdr = MagicMock() diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index 4c640e9136..6a1229f72e 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -11,50 +11,50 @@ def test_parse_slice(): assert parse_slice(None) == slice(None) - assert parse_slice("1:5") == slice(1, 5) - assert parse_slice("1:") == slice(1, None) - assert parse_slice(":5") == slice(None, 5) - assert parse_slice(":-1") == slice(None, -1) - assert parse_slice("-5:-1") == slice(-5, -1) - assert parse_slice("1:5:") == slice(1, 5, None) - assert parse_slice("1::") == slice(1, None, None) - assert parse_slice(":5:") == slice(None, 5, None) - assert parse_slice(":-1:") == slice(None, -1, None) - assert parse_slice("-5:-1:") == slice(-5, -1, None) - assert parse_slice("1:5:1") == slice(1, 5, 1) - assert parse_slice("1::1") == slice(1, None, 1) - assert parse_slice(":5:1") == slice(None, 5, 1) - assert parse_slice(":-1:1") == slice(None, -1, 1) - assert parse_slice("-5:-1:1") == slice(-5, -1, 1) - assert parse_slice("5:1:-1") == slice(5, 1, -1) - assert parse_slice(":1:-1") == slice(None, 1, -1) - assert parse_slice("5::-1") == slice(5, None, -1) - assert parse_slice("-1::-1") == slice(-1, None, -1) - assert parse_slice("-1:-5:-1") == slice(-1, -5, -1) + assert parse_slice('1:5') == slice(1, 5) + assert parse_slice('1:') == slice(1, None) + assert parse_slice(':5') == slice(None, 5) + assert parse_slice(':-1') == slice(None, -1) + assert parse_slice('-5:-1') == slice(-5, -1) + assert parse_slice('1:5:') == slice(1, 5, None) + assert parse_slice('1::') == slice(1, None, None) + assert parse_slice(':5:') == slice(None, 5, None) + assert parse_slice(':-1:') == slice(None, -1, None) + assert parse_slice('-5:-1:') == slice(-5, -1, None) + assert parse_slice('1:5:1') == slice(1, 5, 1) + assert parse_slice('1::1') == slice(1, None, 1) + assert parse_slice(':5:1') == slice(None, 5, 1) + assert parse_slice(':-1:1') == slice(None, -1, 1) + assert parse_slice('-5:-1:1') == slice(-5, -1, 1) + assert parse_slice('5:1:-1') == slice(5, 1, -1) + assert parse_slice(':1:-1') == slice(None, 1, -1) + assert parse_slice('5::-1') == slice(5, None, -1) + assert parse_slice('-1::-1') == slice(-1, None, -1) + assert parse_slice('-1:-5:-1') == slice(-1, -5, -1) # Max of start:stop:step with pytest.raises(ValueError): - parse_slice("1:2:3:4") + parse_slice('1:2:3:4') # Integers only with pytest.raises(ValueError): - parse_slice("abc:2:3") + parse_slice('abc:2:3') with pytest.raises(ValueError): - parse_slice("1.2:2:3") + parse_slice('1.2:2:3') # Unit steps only with pytest.raises(ValueError): - parse_slice("1:5:2") + parse_slice('1:5:2') def test_parse_slice_disallow_step(): # Permit steps of 1 - assert parse_slice("1:5", False) == slice(1, 5) - assert parse_slice("1:5:", False) == slice(1, 5) - assert parse_slice("1:5:1", False) == slice(1, 5, 1) + assert parse_slice('1:5', False) == slice(1, 5) + assert parse_slice('1:5:', False) == slice(1, 5) + assert parse_slice('1:5:1', False) == slice(1, 5, 1) # Disable other steps with pytest.raises(ValueError): - parse_slice("1:5:-1", False) + parse_slice('1:5:-1', False) with pytest.raises(ValueError): - parse_slice("1:5:-2", False) + parse_slice('1:5:-2', False) def test_lossless_slice_unknown_axes(): @@ -66,7 +66,7 @@ def 
test_lossless_slice_unknown_axes(): def test_lossless_slice_scaling(tmp_path): fname = tmp_path / 'image.nii' img = nb.Nifti1Image(np.random.uniform(-20000, 20000, (5, 5, 5, 5)), affine=np.eye(4)) - img.header.set_data_dtype("int16") + img.header.set_data_dtype('int16') img.to_filename(fname) img1 = nb.load(fname) sliced_fname = tmp_path / 'sliced.nii' @@ -81,8 +81,9 @@ def test_lossless_slice_scaling(tmp_path): def test_lossless_slice_noscaling(tmp_path): fname = tmp_path / 'image.mgh' - img = nb.MGHImage(np.random.uniform(-20000, 20000, (5, 5, 5, 5)).astype("float32"), - affine=np.eye(4)) + img = nb.MGHImage( + np.random.uniform(-20000, 20000, (5, 5, 5, 5)).astype('float32'), affine=np.eye(4) + ) img.to_filename(fname) img1 = nb.load(fname) sliced_fname = tmp_path / 'sliced.mgh' @@ -95,7 +96,7 @@ def test_lossless_slice_noscaling(tmp_path): assert img1.dataobj.inter == img2.dataobj.inter -@pytest.mark.parametrize("inplace", (True, False)) +@pytest.mark.parametrize('inplace', (True, False)) def test_nib_roi(tmp_path, inplace): in_file = os.path.join(data_path, 'functional.nii') out_file = str(tmp_path / 'sliced.nii') @@ -117,11 +118,14 @@ def test_nib_roi(tmp_path, inplace): assert np.allclose(in_sliced.affine, out_img.affine) -@pytest.mark.parametrize("args, errmsg", ( - (("-i", "1:1"), "Cannot take zero-length slice"), - (("-j", "1::2"), "Downsampling is not supported"), - (("-t", "5::-1"), "Step entry not permitted"), -)) +@pytest.mark.parametrize( + 'args, errmsg', + ( + (('-i', '1:1'), 'Cannot take zero-length slice'), + (('-j', '1::2'), 'Downsampling is not supported'), + (('-t', '5::-1'), 'Step entry not permitted'), + ), +) def test_nib_roi_bad_slices(capsys, args, errmsg): in_file = os.path.join(data_path, 'functional.nii') @@ -133,20 +137,20 @@ def test_nib_roi_bad_slices(capsys, args, errmsg): def test_entrypoint(capsys): # Check that we handle missing args as expected - with mock.patch("sys.argv", ["nib-roi", "--help"]): + with mock.patch('sys.argv', ['nib-roi', '--help']): try: retval = main() except SystemExit: pass else: - assert False, "argparse exits on --help. If changing to another parser, update test." + assert False, 'argparse exits on --help. If changing to another parser, update test.' captured = capsys.readouterr() - assert captured.out.startswith("usage: nib-roi") + assert captured.out.startswith('usage: nib-roi') def test_nib_roi_unknown_axes(capsys): in_file = os.path.join(data_path, 'minc1_4d.mnc') with pytest.raises(ValueError): - main([in_file, os.devnull, "-i", ":"]) + main([in_file, os.devnull, '-i', ':']) captured = capsys.readouterr() - assert "Could not slice image." in captured.out + assert 'Could not slice image.' 
in captured.out diff --git a/nibabel/cmdline/tests/test_stats.py b/nibabel/cmdline/tests/test_stats.py index 1ceac90231..ced289cebb 100644 --- a/nibabel/cmdline/tests/test_stats.py +++ b/nibabel/cmdline/tests/test_stats.py @@ -22,15 +22,15 @@ def test_volume(tmpdir, capsys): mask_data[5:15, 5:15, 5:15] = 1 img = Nifti1Image(mask_data, np.eye(4)) - infile = tmpdir / "input.nii" + infile = tmpdir / 'input.nii' save(img, infile) - args = (f"{infile} --Volume") + args = f'{infile} --Volume' main(args.split()) vol_mm3 = capsys.readouterr() - args = (f"{infile} --Volume --units vox") + args = f'{infile} --Volume --units vox' main(args.split()) vol_vox = capsys.readouterr() assert float(vol_mm3[0]) == 1000.0 - assert int(vol_vox[0]) == 1000 \ No newline at end of file + assert int(vol_vox[0]) == 1000 diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 460f0d40d6..58cab3ba42 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test scripts +"""Test scripts Test running scripts """ @@ -11,25 +11,32 @@ import numpy as np from nibabel.cmdline.utils import * from nibabel.cmdline.diff import * -from os.path import (join as pjoin) +from os.path import join as pjoin from nibabel.testing import data_path from collections import OrderedDict from io import StringIO def test_table2string(): - assert table2string([["A", "B", "C", "D"], ["E", "F", "G", "H"]]) == "A B C D\nE F G H\n" - assert table2string([["Let's", "Make", "Tests", "And"], ["Have", "Lots", "Of", "Fun"], - ["With", "Python", "Guys", "!"]]) == "Let's Make Tests And\n Have Lots Of Fun"+ \ - "\n With Python Guys !\n" + assert table2string([['A', 'B', 'C', 'D'], ['E', 'F', 'G', 'H']]) == 'A B C D\nE F G H\n' + assert ( + table2string( + [ + ["Let's", 'Make', 'Tests', 'And'], + ['Have', 'Lots', 'Of', 'Fun'], + ['With', 'Python', 'Guys', '!'], + ] + ) + == "Let's Make Tests And\n Have Lots Of Fun" + '\n With Python Guys !\n' + ) def test_ap(): - assert ap([1, 2], "%2d") == " 1, 2" - assert ap([1, 2], "%3d") == " 1, 2" - assert ap([1, 2], "%-2d") == "1 , 2 " - assert ap([1, 2], "%d", "+") == "1+2" - assert ap([1, 2, 3], "%d", "-") == "1-2-3" + assert ap([1, 2], '%2d') == ' 1, 2' + assert ap([1, 2], '%3d') == ' 1, 2' + assert ap([1, 2], '%-2d') == '1 , 2 ' + assert ap([1, 2], '%d', '+') == '1+2' + assert ap([1, 2, 3], '%d', '-') == '1-2-3' def test_safe_get(): @@ -43,76 +50,174 @@ def get_test(self): test = TestObject() test.test = 2 - assert safe_get(test, "test") == 2 - assert safe_get(test, "failtest") == "-" + assert safe_get(test, 'test') == 2 + assert safe_get(test, 'failtest') == '-' def test_get_headers_diff(): - fnames = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] + fnames = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) - expected_difference = OrderedDict([ - ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), - ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), - ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), - np.array([ 4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), 
np.array(16).astype(dtype="uint8")]), - ("pixdim", [np.array([ 1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( - [ -1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, - 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), - ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), - ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), - ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), - ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), - np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), - ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("quatern_b", [np.array(0.0).astype(dtype="float32"), - np.array(-1.9451068140294884e-26).astype(dtype="float32")]), - ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), - ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), - ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), - ("qoffset_y", [np.array(0.0).astype(dtype="float32"), np.array(-35.72294235229492).astype(dtype="float32")]), - ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), - ("srow_x", [np.array([ 1., 0., 0., 0.]).astype(dtype="float32"), - np.array([ -2.00000000e+00, 6.71471565e-19, 9.08102451e-18, - 1.17855103e+02]).astype(dtype="float32")]), - ("srow_y", [np.array([ 0., 3., 0., 0.]).astype(dtype="float32"), - np.array([ -6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype(dtype="float32")]), - ("srow_z", [np.array([ 0., 0., 2., 0.]).astype(dtype="float32"), - np.array([ 8.25548089e-18, 3.23207617e-01, 2.17108178e+00, - -7.24879837e+00]).astype(dtype="float32")])]) + expected_difference = OrderedDict( + [ + ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), + ( + 'dim_info', + [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + ), + ( + 'dim', + [ + np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + ), + ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), + ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), + ( + 'pixdim', + [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( + [ + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + ), + ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), + ( + 'xyzt_units', + [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + ), + ( + 'cal_max', + [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + ), + ( + 'descrip', + [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + ), + ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + 
('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + ( + 'quatern_b', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + ), + ( + 'quatern_c', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + ), + ( + 'quatern_d', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.0810687392950058).astype(dtype='float32'), + ], + ), + ( + 'qoffset_x', + [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + ), + ( + 'qoffset_y', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + ), + ( + 'qoffset_z', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + ), + ( + 'srow_x', + [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] + ).astype(dtype='float32'), + ], + ), + ( + 'srow_y', + [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] + ).astype(dtype='float32'), + ], + ), + ( + 'srow_z', + [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array( + [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] + ).astype(dtype='float32'), + ], + ), + ] + ) np.testing.assert_equal(actual_difference, expected_difference) def test_display_diff(): - bogus_names = ["hellokitty.nii.gz", "privettovarish.nii.gz"] - - dict_values = OrderedDict([ - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]) - ]) - - expected_output = "These files are different.\n" + "Field/File 1:hellokitty.nii.gz" \ - " " \ - "2:privettovarish.nii.gz \n" \ - "datatype " \ - "2 " \ - "4 \n" \ - "bitpix " \ - "8 16" \ - " " \ - "\n" + bogus_names = ['hellokitty.nii.gz', 'privettovarish.nii.gz'] + + dict_values = OrderedDict( + [ + ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), + ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), + ] + ) + + expected_output = ( + 'These files are different.\n' + 'Field/File 1:hellokitty.nii.gz' + ' ' + '2:privettovarish.nii.gz \n' + 'datatype ' + '2 ' + '4 \n' + 'bitpix ' + '8 16' + ' ' + '\n' + ) assert display_diff(bogus_names, dict_values) == expected_output def test_get_data_diff(): # testing for identical files specifically as md5 may vary by computer - test_names = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'standard.nii.gz')] + test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] assert get_data_hash_diff(test_names) == [] # testing the maximum relative and absolute differences' different use cases @@ -123,27 +228,43 @@ def test_get_data_diff(): test_array_5 = np.arange(64).reshape(8, 8) # same shape, 2 files - assert get_data_diff([test_array, test_array_2]) == \ - OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])]) + assert get_data_diff([test_array, test_array_2]) == OrderedDict( + [('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])] + ) # same shape, 3 files - assert get_data_diff([test_array, test_array_2, test_array_3]) == \ - OrderedDict([('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)]), - 
OrderedDict([('abs', 2), ('rel', 2.0)])]), - ('DATA(diff 2:)', [None, None, - OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])])]) + assert get_data_diff([test_array, test_array_2, test_array_3]) == OrderedDict( + [ + ( + 'DATA(diff 1:)', + [ + None, + OrderedDict([('abs', 1), ('rel', 2.0)]), + OrderedDict([('abs', 2), ('rel', 2.0)]), + ], + ), + ( + 'DATA(diff 2:)', + [None, None, OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])], + ), + ] + ) # same shape, 2 files, modified maximum abs/rel assert get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2) == OrderedDict() # different shape, 2 files - assert get_data_diff([test_array_2, test_array_4]) == \ - OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}])]) + assert get_data_diff([test_array_2, test_array_4]) == OrderedDict( + [('DATA(diff 1:)', [None, {'CMP': 'incompat'}])] + ) # different shape, 3 files - assert get_data_diff([test_array_4, test_array_5, test_array_2]) == \ - OrderedDict([('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), - ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}])]) + assert get_data_diff([test_array_4, test_array_5, test_array_2]) == OrderedDict( + [ + ('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), + ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}]), + ] + ) test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) assert type(test_return['DATA(diff 1:)'][1]['abs']) is np.float32 @@ -157,42 +278,139 @@ def test_get_data_diff(): def test_main(): - test_names = [pjoin(data_path, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] - expected_difference = OrderedDict([ - ("regular", [np.asarray("".encode("utf-8")), np.asarray("r".encode("utf-8"))]), - ("dim_info", [np.asarray(0).astype(dtype="uint8"), np.asarray(57).astype(dtype="uint8")]), - ("dim", [np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype="int16"), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype="int16")]), - ("datatype", [np.array(2).astype(dtype="uint8"), np.array(4).astype(dtype="uint8")]), - ("bitpix", [np.array(8).astype(dtype="uint8"), np.array(16).astype(dtype="uint8")]), - ("pixdim", [np.array([1., 1., 3., 2., 1., 1., 1., 1.]).astype(dtype="float32"), np.array( - [-1.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.19999909e+00, 2.00000000e+03, 1.00000000e+00, - 1.00000000e+00, 1.00000000e+00]).astype(dtype="float32")]), - ("slice_end", [np.array(0).astype(dtype="uint8"), np.array(23).astype(dtype="uint8")]), - ("xyzt_units", [np.array(0).astype(dtype="uint8"), np.array(10).astype(dtype="uint8")]), - ("cal_max", [np.array(0.0).astype(dtype="float32"), np.asarray(1162.0).astype(dtype="float32")]), - ("descrip", [np.array("".encode("utf-8")).astype(dtype="S80"), - np.array("FSL3.3\x00 v2.25 NIfTI-1 Single file format".encode("utf-8")).astype(dtype="S80")]), - ("qform_code", [np.array(0).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("sform_code", [np.array(2).astype(dtype="int16"), np.array(1).astype(dtype="int16")]), - ("quatern_b", [np.array(0.0).astype(dtype="float32"), - np.array(-1.9451068140294884e-26).astype(dtype="float32")]), - ("quatern_c", [np.array(0.0).astype(dtype="float32"), np.array(-0.9967085123062134).astype(dtype="float32")]), - ("quatern_d", [np.array(0.0).astype(dtype="float32"), np.array(-0.0810687392950058).astype(dtype="float32")]), - ("qoffset_x", [np.array(0.0).astype(dtype="float32"), np.array(117.8551025390625).astype(dtype="float32")]), - ("qoffset_y", [np.array(0.0).astype(dtype="float32"), 
np.array(-35.72294235229492).astype(dtype="float32")]), - ("qoffset_z", [np.array(0.0).astype(dtype="float32"), np.array(-7.248798370361328).astype(dtype="float32")]), - ("srow_x", [np.array([1., 0., 0., 0.]).astype(dtype="float32"), - np.array([-2.00000000e+00, 6.71471565e-19, 9.08102451e-18, - 1.17855103e+02]).astype(dtype="float32")]), - ("srow_y", [np.array([0., 3., 0., 0.]).astype(dtype="float32"), - np.array([-6.71471565e-19, 1.97371149e+00, -3.55528235e-01, -3.57229424e+01]).astype( - dtype="float32")]), - ("srow_z", [np.array([0., 0., 2., 0.]).astype(dtype="float32"), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e+00, - -7.24879837e+00]).astype(dtype="float32")]), - ('DATA(md5)', ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'])]) + test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] + expected_difference = OrderedDict( + [ + ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), + ( + 'dim_info', + [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + ), + ( + 'dim', + [ + np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + ), + ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), + ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), + ( + 'pixdim', + [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( + [ + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + ), + ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), + ( + 'xyzt_units', + [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + ), + ( + 'cal_max', + [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + ), + ( + 'descrip', + [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + ), + ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + ('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), + ( + 'quatern_b', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + ), + ( + 'quatern_c', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + ), + ( + 'quatern_d', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.0810687392950058).astype(dtype='float32'), + ], + ), + ( + 'qoffset_x', + [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + ), + ( + 'qoffset_y', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + ), + ( + 'qoffset_z', + [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + ), + ( + 'srow_x', + [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] + ).astype(dtype='float32'), + ], + ), + ( + 'srow_y', + [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array( + [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] + ).astype(dtype='float32'), + 
], + ), + ( + 'srow_z', + [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array( + [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] + ).astype(dtype='float32'), + ], + ), + ( + 'DATA(md5)', + ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], + ), + ] + ) with pytest.raises(SystemExit): np.testing.assert_equal(main(test_names, StringIO()), expected_difference) @@ -200,4 +418,4 @@ def test_main(): test_names_2 = [pjoin(data_path, f) for f in ('standard.nii.gz', 'standard.nii.gz')] with pytest.raises(SystemExit): - assert main(test_names_2, StringIO()) == "These files are identical." + assert main(test_names_2, StringIO()) == 'These files are identical.' diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index bddb58c7b1..cc364af06d 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -9,12 +9,14 @@ def parse_args(): - DESCRIPTION = "Convert tractograms (TRK -> TCK)." + DESCRIPTION = 'Convert tractograms (TRK -> TCK).' parser = argparse.ArgumentParser(description=DESCRIPTION) - parser.add_argument("tractograms", metavar="tractogram", nargs="+", - help="list of tractograms (.trk).") - parser.add_argument("-f", "--force", action="store_true", - help="overwrite existing output files.") + parser.add_argument( + 'tractograms', metavar='tractogram', nargs='+', help='list of tractograms (.trk).' + ) + parser.add_argument( + '-f', '--force', action='store_true', help='overwrite existing output files.' + ) args = parser.parse_args() return args, parser diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py index e6aa0a2fb5..41b10d6b31 100644 --- a/nibabel/cmdline/utils.py +++ b/nibabel/cmdline/utils.py @@ -29,8 +29,7 @@ def _err(msg=None): def verbose(thing, msg): - """Print `s` if `thing` is less than the `verbose_level` - """ + """Print `s` if `thing` is less than the `verbose_level`""" # TODO: consider using nibabel's logger if thing <= verbose_level: print(' ' * thing + msg) @@ -56,9 +55,7 @@ def table2string(table, out=None): out = StringIO() # equalize number of elements in each row - nelements_max = \ - len(table) and \ - max(len(x) for x in table) + nelements_max = len(table) and max(len(x) for x in table) for i, table_ in enumerate(table): table[i] += [''] * (nelements_max - len(table_)) @@ -67,11 +64,10 @@ def table2string(table, out=None): atable = np.asarray(table) # eat whole entry while computing width for @w (for wide) markup_strip = re.compile('^@([lrc]|w.*)') - col_width = [max([len(markup_strip.sub('', x)) - for x in column]) for column in atable.T] - string = "" + col_width = [max([len(markup_strip.sub('', x)) for x in column]) for column in atable.T] + string = '' for i, table_ in enumerate(table): - string_ = "" + string_ = '' for j, item in enumerate(table_): item = str(item) if item.startswith('@'): @@ -94,8 +90,7 @@ def table2string(table, out=None): else: raise RuntimeError(f'Should not get here with align={align}') - string_ += "%%%ds%%s%%%ds " \ - % (nspacesl, nspacesr) % ('', item, '') + string_ += '%%%ds%%s%%%ds ' % (nspacesl, nspacesr) % ('', item, '') string += string_.rstrip() + '\n' out.write(string) @@ -114,11 +109,10 @@ def ap(helplist, format_, sep=', '): def safe_get(obj, name): - """A getattr which would return '-' if getattr fails - """ + """A getattr which would return '-' if getattr fails""" try: f = getattr(obj, 'get_' + name) return f() except Exception as e: - verbose(2, f"get_{name}() failed -- {e}") + verbose(2, f'get_{name}() failed -- {e}') return 
'-' diff --git a/nibabel/data.py b/nibabel/data.py index f3773d3241..b29476a2d2 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -2,7 +2,6 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """ Utilities to find files from NIPY data packages - """ import os from os.path import join as pjoin @@ -14,8 +13,9 @@ from .environment import get_nipy_user_dir, get_nipy_system_dir -DEFAULT_INSTALL_HINT = ('If you have the package, have you set the ' - 'path to the package correctly?') +DEFAULT_INSTALL_HINT = ( + 'If you have the package, have you set the ' 'path to the package correctly?' +) class DataError(Exception): @@ -23,19 +23,20 @@ class DataError(Exception): class BomberError(DataError, AttributeError): - """ Error when trying to access Bomber instance + """Error when trying to access Bomber instance Should be instance of AttributeError to allow Python 3 inspect to do various ``hasattr`` checks without raising an error """ + pass class Datasource: - """ Simple class to add base path to relative path """ + """Simple class to add base path to relative path""" def __init__(self, base_path): - """ Initialize datasource + """Initialize datasource Parameters ---------- @@ -53,7 +54,7 @@ def __init__(self, base_path): self.base_path = base_path def get_filename(self, *path_parts): - """ Prepend base path to `*path_parts` + """Prepend base path to `*path_parts` We make no check whether the returned path exists. @@ -71,36 +72,34 @@ def get_filename(self, *path_parts): return pjoin(self.base_path, *path_parts) def list_files(self, relative=True): - """ Recursively list the files in the data source directory. + """Recursively list the files in the data source directory. - Parameters - ---------- - relative: bool, optional - If True, path returned are relative to the base path of - the data source. + Parameters + ---------- + relative: bool, optional + If True, path returned are relative to the base path of + the data source. - Returns - ------- - file_list: list of strings - List of the paths of all the files in the data source. + Returns + ------- + file_list: list of strings + List of the paths of all the files in the data source. """ out_list = list() for base, dirs, files in os.walk(self.base_path): if relative: - base = base[len(self.base_path) + 1:] + base = base[len(self.base_path) + 1 :] for filename in files: out_list.append(pjoin(base, filename)) return out_list class VersionedDatasource(Datasource): - """ Datasource with version information in config file - - """ + """Datasource with version information in config file""" def __init__(self, base_path, config_filename=None): - """ Initialize versioned datasource + """Initialize versioned datasource We assume that there is a configuration file with version information in datasource directory tree. 
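As a quick illustration of the Datasource API reformatted above, here is a hedged
sketch; the base path is hypothetical, and get_filename makes no check that the
returned path exists:

    from nibabel.data import Datasource

    ds = Datasource('/opt/nipy-data')              # assumed data directory
    print(ds.get_filename('templates', 'T1.nii'))  # /opt/nipy-data/templates/T1.nii
    print(ds.list_files(relative=True))            # file paths relative to the base path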
@@ -136,12 +135,11 @@ def __init__(self, base_path, config_filename=None): version_parts = self.version.split('.') self.major_version = int(version_parts[0]) self.minor_version = int(version_parts[1]) - self.version_no = float('%d.%d' % (self.major_version, - self.minor_version)) + self.version_no = float('%d.%d' % (self.major_version, self.minor_version)) def _cfg_value(fname, section='DATA', value='path'): - """ Utility function to fetch value from config file """ + """Utility function to fetch value from config file""" configp = configparser.ConfigParser() readfiles = configp.read(fname) if not readfiles: @@ -153,7 +151,7 @@ def _cfg_value(fname, section='DATA', value='path'): def get_data_path(): - """ Return specified or guessed locations of NIPY data files + """Return specified or guessed locations of NIPY data files The algorithm is to return paths, extracted from strings, where strings are found in the following order: @@ -217,7 +215,7 @@ def get_data_path(): def find_data_dir(root_dirs, *names): - """ Find relative path given path prefixes to search + """Find relative path given path prefixes to search We raise a DataError if we can't find the relative path @@ -240,12 +238,14 @@ def find_data_dir(root_dirs, *names): pth = pjoin(path, ds_relative) if os.path.isdir(pth): return pth - raise DataError(f'Could not find datasource "{ds_relative}" in ' - f'data path "{os.path.pathsep.join(root_dirs)}"') + raise DataError( + f'Could not find datasource "{ds_relative}" in ' + f'data path "{os.path.pathsep.join(root_dirs)}"' + ) def make_datasource(pkg_def, **kwargs): - """ Return datasource defined by `pkg_def` as found in `data_path` + """Return datasource defined by `pkg_def` as found in `data_path` `data_path` is the only allowed keyword argument. @@ -290,8 +290,7 @@ def make_datasource(pkg_def, **kwargs): try: pth = find_data_dir(data_path, *names) except DataError as e: - pth = [pjoin(this_data_path, *names) - for this_data_path in data_path] + pth = [pjoin(this_data_path, *names) for this_data_path in data_path] pkg_hint = pkg_def.get('install hint', DEFAULT_INSTALL_HINT) msg = f'{e}; Is it possible you have not installed a data package?' if 'name' in pkg_def: @@ -303,21 +302,22 @@ def make_datasource(pkg_def, **kwargs): class Bomber: - """ Class to raise an informative error when used """ + """Class to raise an informative error when used""" def __init__(self, name, msg): self.name = name self.msg = msg def __getattr__(self, attr_name): - """ Raise informative error accessing not-found attributes """ + """Raise informative error accessing not-found attributes""" raise BomberError( f'Trying to access attribute "{attr_name}" of ' - f'non-existent data "{self.name}"\n\n{self.msg}\n') + f'non-existent data "{self.name}"\n\n{self.msg}\n' + ) def datasource_or_bomber(pkg_def, **options): - """ Return a viable datasource or a Bomber + """Return a viable datasource or a Bomber This is to allow module level creation of datasource objects. 
We create the objects, so that, if the data exist, and are the correct @@ -355,5 +355,5 @@ def datasource_or_bomber(pkg_def, **options): pkg_name = pkg_def['name'] else: pkg_name = 'data at ' + unix_relpath - msg = f"{pkg_name} is version {ds.version} but we need version >= {version}\n\n{pkg_hint}" + msg = f'{pkg_name} is version {ds.version} but we need version >= {version}\n\n{pkg_hint}' return Bomber(sys_relpath, DataError(msg)) diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index f1c6b663c0..f8df06157b 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -1,4 +1,4 @@ -""" File-based images that have data arrays +"""File-based images that have data arrays The class:`DataObjImage` class defines an image that extends the :class:`FileBasedImage` by adding an array-like object, named ``dataobj``. @@ -15,10 +15,10 @@ class DataobjImage(FileBasedImage): - """ Template class for images that have dataobj data stores""" + """Template class for images that have dataobj data stores""" def __init__(self, dataobj, header=None, extra=None, file_map=None): - """ Initialize dataobj image + """Initialize dataobj image The datobj image is a combination of (dataobj, header), with optional metadata in `extra`, and filename / file-like objects contained in the @@ -38,8 +38,7 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): file_map : mapping, optional mapping giving file information for this image format """ - super(DataobjImage, self).__init__(header=header, extra=extra, - file_map=file_map) + super(DataobjImage, self).__init__(header=header, extra=extra, file_map=file_map) self._dataobj = dataobj self._fdata_cache = None self._data_cache = None @@ -48,13 +47,16 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): def dataobj(self): return self._dataobj - @deprecate_with_version('get_data() is deprecated in favor of get_fdata(),' - ' which has a more predictable return type. To ' - 'obtain get_data() behavior going forward, use ' - 'numpy.asanyarray(img.dataobj).', - '3.0', '5.0') + @deprecate_with_version( + 'get_data() is deprecated in favor of get_fdata(),' + ' which has a more predictable return type. To ' + 'obtain get_data() behavior going forward, use ' + 'numpy.asanyarray(img.dataobj).', + '3.0', + '5.0', + ) def get_data(self, caching='fill'): - """ Return image data from image with any necessary scaling applied + """Return image data from image with any necessary scaling applied .. WARNING:: @@ -203,7 +205,7 @@ def get_data(self, caching='fill'): return data def get_fdata(self, caching='fill', dtype=np.float64): - """ Return floating point image data with necessary scaling applied + """Return floating point image data with necessary scaling applied The image ``dataobj`` property can be an array proxy or an array. An array proxy is an object that knows how to load the image data from @@ -352,17 +354,19 @@ def get_fdata(self, caching='fill', dtype=np.float64): @property def in_memory(self): - """ True when any array data is in memory cache + """True when any array data is in memory cache There are separate caches for `get_data` reads and `get_fdata` reads. This property is True if either of those caches are set. 
""" - return (isinstance(self._dataobj, np.ndarray) or - self._fdata_cache is not None or - self._data_cache is not None) + return ( + isinstance(self._dataobj, np.ndarray) + or self._fdata_cache is not None + or self._data_cache is not None + ) def uncache(self): - """ Delete any cached read of data from proxied data + """Delete any cached read of data from proxied data Remember there are two types of images: @@ -399,7 +403,7 @@ def ndim(self): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -461,7 +465,6 @@ def from_filename(klass, filename, *, mmap=True, keep_file_open=None): if mmap not in (True, False, 'c', 'r'): raise ValueError("mmap should be one of {True, False, 'c', 'r'}") file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, mmap=mmap, - keep_file_open=keep_file_open) + return klass.from_file_map(file_map, mmap=mmap, keep_file_open=keep_file_open) load = from_filename diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 576d18b5ce..900c0fcf4d 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -1,4 +1,4 @@ -""" Module to help with deprecating objects and classes +"""Module to help with deprecating objects and classes """ import warnings @@ -8,7 +8,7 @@ class ModuleProxy: - """ Proxy for module that may not yet have been imported + """Proxy for module that may not yet have been imported Parameters ---------- @@ -36,11 +36,11 @@ def __getattr__(self, key): return getattr(mod, key) def __repr__(self): - return f"" + return f'' class FutureWarningMixin: - """ Insert FutureWarning for object creation + """Insert FutureWarning for object creation Examples -------- @@ -55,17 +55,16 @@ class FutureWarningMixin: ... warns[0].message.args[0] "Please, don't use this class" """ + warn_message = 'This class will be removed in future versions' def __init__(self, *args, **kwargs): - warnings.warn(self.warn_message, - FutureWarning, - stacklevel=2) + warnings.warn(self.warn_message, FutureWarning, stacklevel=2) super(FutureWarningMixin, self).__init__(*args, **kwargs) class VisibleDeprecationWarning(UserWarning): - """ Deprecation warning that will be shown by default + """Deprecation warning that will be shown by default Python >= 2.7 does not show standard DeprecationWarnings by default: @@ -73,6 +72,7 @@ class VisibleDeprecationWarning(UserWarning): Use this class for cases where we do want to show deprecations by default. """ + pass diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 031a05e601..7b4ef5221f 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -1,4 +1,4 @@ -""" Class for recording and reporting deprecations +"""Class for recording and reporting deprecations """ import functools @@ -29,16 +29,17 @@ class ExpiredDeprecationError(RuntimeError): - """ Error for expired deprecation + """Error for expired deprecation Error raised when a called function or method has passed out of its deprecation period. 
""" + pass def _ensure_cr(text): - """ Remove trailing whitespace and add carriage return + """Remove trailing whitespace and add carriage return Ensures that `text` always ends with a carriage return """ @@ -46,7 +47,7 @@ def _ensure_cr(text): def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): - """ Add deprecation message `dep_doc` to docstring in `old_doc` + """Add deprecation message `dep_doc` to docstring in `old_doc` Parameters ---------- @@ -79,12 +80,13 @@ def _add_dep_doc(old_doc, dep_doc, setup='', cleanup=''): setup_lines = [indent + L for L in setup.splitlines()] dep_lines = [indent + L for L in [''] + dep_doc.splitlines() + ['']] cleanup_lines = [indent + L for L in cleanup.splitlines()] - return '\n'.join(new_lines + dep_lines + setup_lines + - old_lines[next_line:] + cleanup_lines + ['']) + return '\n'.join( + new_lines + dep_lines + setup_lines + old_lines[next_line:] + cleanup_lines + [''] + ) class Deprecator: - """ Class to make decorator marking function or method as deprecated + """Class to make decorator marking function or method as deprecated The decorated function / method will: @@ -109,16 +111,18 @@ class Deprecator: given argument of ``until`` in the ``__call__`` method (see below). """ - def __init__(self, - version_comparator, - warn_class=DeprecationWarning, - error_class=ExpiredDeprecationError): + def __init__( + self, + version_comparator, + warn_class=DeprecationWarning, + error_class=ExpiredDeprecationError, + ): self.version_comparator = version_comparator self.warn_class = warn_class self.error_class = error_class def is_bad_version(self, version_str): - """ Return True if `version_str` is too high + """Return True if `version_str` is too high Tests `version_str` with ``self.version_comparator`` @@ -135,9 +139,8 @@ def is_bad_version(self, version_str): """ return self.version_comparator(version_str) == -1 - def __call__(self, message, since='', until='', - warn_class=None, error_class=None): - """ Return decorator function function for deprecation warning / error + def __call__(self, message, since='', until='', warn_class=None, error_class=None): + """Return decorator function function for deprecation warning / error Parameters ---------- @@ -169,12 +172,13 @@ def __call__(self, message, since='', until='', if since: messages.append('* deprecated from version: ' + since) if until: - messages.append(f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} " - f"{error_class} as of version: {until}") + messages.append( + f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} " + f'{error_class} as of version: {until}' + ) message = '\n'.join(messages) def deprecator(func): - @functools.wraps(func) def deprecated_func(*args, **kwargs): if until and self.is_bad_version(until): diff --git a/nibabel/dft.py b/nibabel/dft.py index 51b6424a84..fd944a2556 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -7,7 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -""" DICOM filesystem tools +"""DICOM filesystem tools """ @@ -27,26 +27,26 @@ from .nifti1 import Nifti1Header from nibabel.optpkg import optional_package -pydicom = optional_package("pydicom")[0] +pydicom = optional_package('pydicom')[0] logger = logging.getLogger('nibabel.dft') class DFTError(Exception): - "base class for DFT exceptions" + """base class for DFT exceptions""" class CachingError(DFTError): - "error while caching" + """error while caching""" class VolumeError(DFTError): - "unsupported 
volume parameter" + """unsupported volume parameter""" class InstanceStackError(DFTError): - "bad series of instance numbers" + """bad series of instance numbers""" def __init__(self, series, i, si): self.series = series @@ -59,7 +59,6 @@ def __str__(self): class _Study: - def __init__(self, d): self.uid = d['uid'] self.date = d['date'] @@ -76,7 +75,7 @@ def __getattribute__(self, name): if name == 'series' and val is None: val = [] with DB.readonly_cursor() as c: - c.execute("SELECT * FROM series WHERE study = ?", (self.uid, )) + c.execute('SELECT * FROM series WHERE study = ?', (self.uid,)) cols = [el[0] for el in c.description] for row in c: d = dict(zip(cols, row)) @@ -91,7 +90,6 @@ def patient_name_or_uid(self): class _Series: - def __init__(self, d): self.uid = d['uid'] self.study = d['study'] @@ -112,7 +110,7 @@ def __getattribute__(self, name): FROM storage_instance WHERE series = ? ORDER BY instance_number""" - c.execute(query, (self.uid, )) + c.execute(query, (self.uid,)) cols = [el[0] for el in c.description] for row in c: d = dict(zip(cols, row)) @@ -122,6 +120,7 @@ def __getattribute__(self, name): def as_png(self, index=None, scale_to_slice=True): import PIL.Image + # For compatibility with older versions of PIL that did not # have `frombytes`: if hasattr(PIL.Image, 'frombytes'): @@ -160,8 +159,9 @@ def as_nifti(self): raise VolumeError('unsupported bits allocated') if self.bits_stored != 12: raise VolumeError('unsupported bits stored') - data = numpy.ndarray((len(self.storage_instances), self.rows, - self.columns), dtype=numpy.int16) + data = numpy.ndarray( + (len(self.storage_instances), self.rows, self.columns), dtype=numpy.int16 + ) for (i, si) in enumerate(self.storage_instances): if i + 1 != si.instance_number: raise InstanceStackError(self, i, si) @@ -192,10 +192,12 @@ def as_nifti(self): cosk = pos_n - pos_1 cosk = cosk / numpy.linalg.norm(cosk) - m = ((pdi * cosi[0], pdj * cosj[0], pdk * cosk[0], pos_1[0]), - (pdi * cosi[1], pdj * cosj[1], pdk * cosk[1], pos_1[1]), - (pdi * cosi[2], pdj * cosj[2], pdk * cosk[2], pos_1[2]), - (0, 0, 0, 1)) + m = ( + (pdi * cosi[0], pdj * cosj[0], pdk * cosk[0], pos_1[0]), + (pdi * cosi[1], pdj * cosj[1], pdk * cosk[1], pos_1[1]), + (pdi * cosi[2], pdj * cosj[2], pdk * cosk[2], pos_1[2]), + (0, 0, 0, 1), + ) # Values are python Decimals in pydicom 0.9.7 m = numpy.array(m, dtype=float) @@ -205,8 +207,7 @@ def as_nifti(self): hdr.set_qform(m, 1) hdr.set_xyzt_units(2, 8) hdr.set_data_dtype(numpy.int16) - hdr.set_data_shape((self.columns, self.rows, - len(self.storage_instances))) + hdr.set_data_shape((self.columns, self.rows, len(self.storage_instances))) s = BytesIO() hdr.write_to(s) @@ -218,7 +219,6 @@ def nifti_size(self): class _StorageInstance: - def __init__(self, d): self.uid = d['uid'] self.instance_number = d['instance_number'] @@ -233,7 +233,7 @@ def __getattribute__(self, name): FROM file WHERE storage_instance = ? 
ORDER BY directory, name""" - c.execute(query, (self.uid, )) + c.execute(query, (self.uid,)) val = ['%s/%s' % tuple(row) for row in c] self.files = val return val @@ -262,25 +262,24 @@ def update_cache(base_dir, followlinks=False): os.stat(d) mtimes[d] = os.stat(d).st_mtime with DB.readwrite_cursor() as c: - c.execute("SELECT path, mtime FROM directory") + c.execute('SELECT path, mtime FROM directory') db_mtimes = dict(c) - c.execute("SELECT uid FROM study") + c.execute('SELECT uid FROM study') studies = [row[0] for row in c] - c.execute("SELECT uid FROM series") + c.execute('SELECT uid FROM series') series = [row[0] for row in c] - c.execute("SELECT uid FROM storage_instance") + c.execute('SELECT uid FROM storage_instance') storage_instances = [row[0] for row in c] for dir in sorted(mtimes.keys()): if dir in db_mtimes and mtimes[dir] <= db_mtimes[dir]: continue logger.debug(f'updating {dir}') - _update_dir(c, dir, files_by_dir[dir], studies, series, - storage_instances) + _update_dir(c, dir, files_by_dir[dir], studies, series, storage_instances) if dir in db_mtimes: - query = "UPDATE directory SET mtime = ? WHERE path = ?" + query = 'UPDATE directory SET mtime = ? WHERE path = ?' c.execute(query, (mtimes[dir], dir)) else: - query = "INSERT INTO directory (path, mtime) VALUES (?, ?)" + query = 'INSERT INTO directory (path, mtime) VALUES (?, ?)' c.execute(query, (dir, mtimes[dir])) @@ -289,7 +288,7 @@ def get_studies(base_dir=None, followlinks=False): update_cache(base_dir, followlinks) if base_dir is None: with DB.readonly_cursor() as c: - c.execute("SELECT * FROM study") + c.execute('SELECT * FROM study') studies = [] cols = [el[0] for el in c.description] for row in c: @@ -306,12 +305,12 @@ def get_studies(base_dir=None, followlinks=False): with DB.readonly_cursor() as c: study_uids = {} for dir in _get_subdirs(base_dir, followlinks=followlinks): - c.execute(query, (dir, )) + c.execute(query, (dir,)) for row in c: study_uids[row[0]] = None studies = [] for uid in study_uids: - c.execute("SELECT * FROM study WHERE uid = ?", (uid, )) + c.execute('SELECT * FROM study WHERE uid = ?', (uid,)) cols = [el[0] for el in c.description] d = dict(zip(cols, c.fetchone())) studies.append(_Study(d)) @@ -320,21 +319,19 @@ def get_studies(base_dir=None, followlinks=False): def _update_dir(c, dir, files, studies, series, storage_instances): logger.debug(f'Updating directory {dir}') - c.execute("SELECT name, mtime FROM file WHERE directory = ?", (dir, )) + c.execute('SELECT name, mtime FROM file WHERE directory = ?', (dir,)) db_mtimes = dict(c) for fname in db_mtimes: if fname not in files: logger.debug(f' remove {fname}') - c.execute("DELETE FROM file WHERE directory = ? AND name = ?", - (dir, fname)) + c.execute('DELETE FROM file WHERE directory = ? 
AND name = ?', (dir, fname))
     for fname in files:
         mtime = os.lstat(f'{dir}/{fname}').st_mtime
         if fname in db_mtimes and mtime <= db_mtimes[fname]:
             logger.debug(f'    okay {fname}')
         else:
             logger.debug(f'    update {fname}')
-            si_uid = _update_file(c, dir, fname, studies, series,
-                                  storage_instances)
+            si_uid = _update_file(c, dir, fname, studies, series, storage_instances)
             if fname not in db_mtimes:
                 query = """INSERT INTO file (directory,
                                              name,
@@ -371,14 +368,16 @@ def _update_file(c, path, fname, studies, series, storage_instances):
                                           patient_birth_date,
                                           patient_sex)
                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
-            params = (str(do.StudyInstanceUID),
-                      do.StudyDate,
-                      do.StudyTime,
-                      study_comments,
-                      str(do.PatientName),
-                      do.PatientID,
-                      do.PatientBirthDate,
-                      do.PatientSex)
+            params = (
+                str(do.StudyInstanceUID),
+                do.StudyDate,
+                do.StudyTime,
+                study_comments,
+                str(do.PatientName),
+                do.PatientID,
+                do.PatientBirthDate,
+                do.PatientSex,
+            )
             c.execute(query, params)
             studies.append(str(do.StudyInstanceUID))
         if str(do.SeriesInstanceUID) not in series:
@@ -391,21 +390,22 @@ def _update_file(c, path, fname, studies, series, storage_instances):
                                          bits_allocated,
                                          bits_stored)
                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
-            params = (str(do.SeriesInstanceUID),
-                      str(do.StudyInstanceUID),
-                      do.SeriesNumber,
-                      do.SeriesDescription,
-                      do.Rows,
-                      do.Columns,
-                      do.BitsAllocated,
-                      do.BitsStored)
+            params = (
+                str(do.SeriesInstanceUID),
+                str(do.StudyInstanceUID),
+                do.SeriesNumber,
+                do.SeriesDescription,
+                do.Rows,
+                do.Columns,
+                do.BitsAllocated,
+                do.BitsStored,
+            )
             c.execute(query, params)
             series.append(str(do.SeriesInstanceUID))
         if str(do.SOPInstanceUID) not in storage_instances:
             query = """INSERT INTO storage_instance (uid, instance_number, series)
                        VALUES (?, ?, ?)"""
-            params = (str(do.SOPInstanceUID), do.InstanceNumber,
-                      str(do.SeriesInstanceUID))
+            params = (str(do.SOPInstanceUID), do.InstanceNumber, str(do.SeriesInstanceUID))
             c.execute(query, params)
             storage_instances.append(str(do.SOPInstanceUID))
     except AttributeError as data:
@@ -416,11 +416,11 @@ def _update_file(c, path, fname, studies, series, storage_instances):
 
 def clear_cache():
     with DB.readwrite_cursor() as c:
-        c.execute("DELETE FROM file")
-        c.execute("DELETE FROM directory")
-        c.execute("DELETE FROM storage_instance")
-        c.execute("DELETE FROM series")
-        c.execute("DELETE FROM study")
+        c.execute('DELETE FROM file')
+        c.execute('DELETE FROM directory')
+        c.execute('DELETE FROM storage_instance')
+        c.execute('DELETE FROM series')
+        c.execute('DELETE FROM study')
 
 
 CREATE_QUERIES = (
@@ -449,7 +449,8 @@ def clear_cache():
                  name TEXT NOT NULL,
                  mtime INTEGER NOT NULL,
                  storage_instance TEXT DEFAULT NULL REFERENCES storage_instance,
-                 PRIMARY KEY (directory, name))""")
+                 PRIMARY KEY (directory, name))""",
+)
 
 
 class _DB:
@@ -473,7 +474,7 @@ def _init_db(self):
         if self.verbose:
             logger.info('db filename: ' + self.fname)
 
-        self._session = sqlite3.connect(self.fname, isolation_level="EXCLUSIVE")
+        self._session = sqlite3.connect(self.fname, isolation_level='EXCLUSIVE')
         with self.readwrite_cursor() as c:
             c.execute("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'")
             if c.fetchone()[0] == 0:
@@ -482,7 +483,7 @@ def _init_db(self):
                 c.execute(q)
 
     def __repr__(self):
-        return f"<DB: {self.fname}>"
+        return f'<DB: {self.fname}>'
 
     @contextlib.contextmanager
     def readonly_cursor(self):
diff --git a/nibabel/ecat.py b/nibabel/ecat.py
index 54f600f147..f72a81d5a4 100644
--- a/nibabel/ecat.py
+++ b/nibabel/ecat.py
@@ -6,7 +6,7 @@
 # copyright and license terms.
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read ECAT format images +"""Read ECAT format images An ECAT format image consists of: @@ -48,8 +48,7 @@ import numpy as np -from .volumeutils import (native_code, swapped_code, make_dt_codes, - array_from_file) +from .volumeutils import native_code, swapped_code, make_dt_codes, array_from_file from .spatialimages import SpatialImage from .arraywriters import make_array_writer from .wrapstruct import WrapStruct @@ -117,7 +116,7 @@ ('well_counter_corr_factor', np.float32), ('data_units', '32S'), ('septa_state', np.uint16), - ('fill', '12S') + ('fill', '12S'), ] hdr_dtype = np.dtype(main_header_dtd) @@ -183,7 +182,8 @@ ('recon_type', np.uint16), ('recon_views', np.uint16), ('fill', '174S'), - ('fill2', '96S')] + ('fill2', '96S'), +] subhdr_dtype = np.dtype(subheader_dtd) # Ecat Data Types @@ -199,7 +199,8 @@ (4, 'ECAT7_VAXR4', np.float32), (5, 'ECAT7_IEEER4', np.float32), (6, 'ECAT7_SUNI2', np.int16), - (7, 'ECAT7_SUNI4', np.int32)) + (7, 'ECAT7_SUNI4', np.int32), +) data_type_codes = make_dt_codes(_dtdefs) @@ -219,7 +220,8 @@ (11, 'ECAT7_3DSCAN'), (12, 'ECAT7_3DSCAN8'), (13, 'ECAT7_3DNORM'), - (14, 'ECAT7_3DSCANFIT')) + (14, 'ECAT7_3DSCANFIT'), +) file_type_codes = dict(ft_defs) patient_orient_defs = ( # code, description @@ -231,7 +233,8 @@ (5, 'ECAT7_Head_First_Decubitus_Right'), (6, 'ECAT7_Feet_First_Decubitus_Left'), (7, 'ECAT7_Head_First_Decubitus_Left'), - (8, 'ECAT7_Unknown_Orientation')) + (8, 'ECAT7_Unknown_Orientation'), +) patient_orient_codes = dict(patient_orient_defs) # Indexes from the patient_orient_defs structure defined above for the @@ -255,14 +258,12 @@ class EcatHeader(WrapStruct): This just reads the main Ecat Header, it does not load the data or read the mlist or any sub headers """ + template_dtype = hdr_dtype _ft_codes = file_type_codes _patient_orient_codes = patient_orient_codes - def __init__(self, - binaryblock=None, - endianness=None, - check=True): + def __init__(self, binaryblock=None, endianness=None, check=True): """Initialize Ecat header from bytes object Parameters @@ -281,8 +282,7 @@ def __init__(self, @classmethod def guessed_endian(klass, hdr): - """Guess endian from MAGIC NUMBER value of header data - """ + """Guess endian from MAGIC NUMBER value of header data""" if not hdr['sw_version'] == 74: return swapped_code else: @@ -290,8 +290,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - """ Return header data for empty header with given endianness - """ + """Return header data for empty header with given endianness""" hdr_data = super(EcatHeader, klass).default_structarr(endianness) hdr_data['magic_number'] = 'MATRIX72' hdr_data['sw_version'] = 74 @@ -301,11 +300,11 @@ def default_structarr(klass, endianness=None): return hdr_data def get_data_dtype(self): - """ Get numpy dtype for data from header""" - raise NotImplementedError("dtype is only valid from subheaders") + """Get numpy dtype for data from header""" + raise NotImplementedError('dtype is only valid from subheaders') def get_patient_orient(self): - """ gets orientation of patient based on code stored + """gets orientation of patient based on code stored in header, not always reliable """ code = self._structarr['patient_orientation'].item() @@ -314,7 +313,7 @@ def get_patient_orient(self): return self._patient_orient_codes[code] def get_filetype(self): - """ Type of ECAT Matrix File from code stored in header""" + """Type of ECAT Matrix File from code stored in 
header""" code = self._structarr['file_type'].item() if code not in self._ft_codes: raise KeyError('Ecat Filetype CODE %d not recognized' % code) @@ -322,12 +321,12 @@ def get_filetype(self): @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ + """Return sequence of check functions for this class""" return () def read_mlist(fileobj, endianness): - """ read (nframes, 4) matrix list array from `fileobj` + """read (nframes, 4) matrix list array from `fileobj` Parameters ---------- @@ -387,7 +386,7 @@ def read_mlist(fileobj, endianness): mlist = [] return mlist # Use all but first housekeeping row - mlists.append(rows[1:n_rows + 1]) + mlists.append(rows[1 : n_rows + 1]) mlist_index += n_rows if mlist_block_no <= 2: # should block_no in (1, 2) be an error? break @@ -424,9 +423,13 @@ def get_frame_order(mlist): valid_order = np.argsort(ids) if not all(valid_order == sorted(valid_order)): # raise UserWarning if Frames stored out of order - warnings.warn_explicit(f'Frames stored out of order; true order = {valid_order}\n' - 'frames will be accessed in order STORED, NOT true order', - UserWarning, 'ecat', 0) + warnings.warn_explicit( + f'Frames stored out of order; true order = {valid_order}\n' + 'frames will be accessed in order STORED, NOT true order', + UserWarning, + 'ecat', + 0, + ) id_dict = {} for i in range(n_valid): id_dict[i] = [valid_order[i], ids[valid_order[i]]] @@ -434,7 +437,7 @@ def get_frame_order(mlist): def get_series_framenumbers(mlist): - """ Returns framenumber of data as it was collected, + """Returns framenumber of data as it was collected, as part of a series; not just the order of how it was stored in this or across other files @@ -475,7 +478,7 @@ def get_series_framenumbers(mlist): def read_subheaders(fileobj, mlist, endianness): - """ Retrieve all subheaders and return list of subheader recarrays + """Retrieve all subheaders and return list of subheader recarrays Parameters ---------- @@ -535,7 +538,7 @@ def __init__(self, hdr, mlist, fileobj): self.subheaders = read_subheaders(fileobj, mlist, hdr.endianness) def get_shape(self, frame=0): - """ returns shape of given frame""" + """returns shape of given frame""" subhdr = self.subheaders[frame] x = subhdr['x_dimension'].item() y = subhdr['y_dimension'].item() @@ -574,8 +577,7 @@ def get_frame_affine(self, frame=0): # get translations from center of image origin_offset = (np.array(dims) - 1) / 2.0 aff = np.diag(zooms) - aff[:3, -1] = -origin_offset * zooms[:-1] + np.array([x_off, y_off, - z_off]) + aff[:3, -1] = -origin_offset * zooms[:-1] + np.array([x_off, y_off, z_off]) return aff def get_zooms(self, frame=0): @@ -659,7 +661,7 @@ def data_from_fileobj(self, frame=0, orientation=None): class EcatImageArrayProxy: - """ Ecat implementation of array proxy protocol + """Ecat implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. @@ -685,7 +687,7 @@ def is_proxy(self): return True def __array__(self, dtype=None): - """ Read of data from file + """Read of data from file This reads ALL FRAMES into one array, can be memory expensive. 
@@ -706,15 +708,13 @@ def __array__(self, dtype=None): data = np.empty(self.shape) frame_mapping = get_frame_order(self._subheader._mlist) for i in sorted(frame_mapping): - data[:, :, :, i] = self._subheader.data_from_fileobj( - frame_mapping[i][0]) + data[:, :, :, i] = self._subheader.data_from_fileobj(frame_mapping[i][0]) if dtype is not None: data = data.astype(dtype, copy=False) return data def __getitem__(self, sliceobj): - """ Return slice `sliceobj` from ECAT data, optimizing if possible - """ + """Return slice `sliceobj` from ECAT data, optimizing if possible""" sliceobj = canonical_slicers(sliceobj, self.shape) # Indices into sliceobj referring to image axes ax_inds = [i for i, obj in enumerate(sliceobj) if obj is not None] @@ -724,7 +724,7 @@ def __getitem__(self, sliceobj): slice3 = sliceobj[ax_inds[3]] # We will load volume by volume. Make slicer into volume by dropping # index over the volume axis - in_slicer = sliceobj[:ax_inds[3]] + sliceobj[ax_inds[3] + 1:] + in_slicer = sliceobj[: ax_inds[3]] + sliceobj[ax_inds[3] + 1 :] # int index for 4th axis, load one slice if isinstance(slice3, Integral): data = self._subheader.data_from_fileobj(frame_mapping[slice3][0]) @@ -738,16 +738,15 @@ def __getitem__(self, sliceobj): in2out_ind = slice2outax(len(self.shape), sliceobj)[3] # Iterate over specified 4th axis indices for i in list(range(self.shape[3]))[slice3]: - data = self._subheader.data_from_fileobj( - frame_mapping[i][0]) + data = self._subheader.data_from_fileobj(frame_mapping[i][0]) out_slicer[in2out_ind] = i out_data[tuple(out_slicer)] = data[in_slicer] return out_data class EcatImage(SpatialImage): - """ Class returns a list of Ecat images, with one image(hdr/data) per frame - """ + """Class returns a list of Ecat images, with one image(hdr/data) per frame""" + _header = EcatHeader header_class = _header valid_exts = ('.v',) @@ -756,10 +755,8 @@ class EcatImage(SpatialImage): ImageArrayProxy = EcatImageArrayProxy - def __init__(self, dataobj, affine, header, - subheader, mlist, - extra=None, file_map=None): - """ Initialize Image + def __init__(self, dataobj, affine, header, subheader, mlist, extra=None, file_map=None): + """Initialize Image The image is a combination of (array, affine matrix, header, subheader, mlist) @@ -824,8 +821,9 @@ def __init__(self, dataobj, affine, header, @property def affine(self): if not self._subheader._check_affines(): - warnings.warn('Affines different across frames, loading affine ' - 'from FIRST frame', UserWarning) + warnings.warn( + 'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning + ) return self._affine def get_frame_affine(self, frame): @@ -854,8 +852,7 @@ def shape(self): return (x, y, z, nframes) def get_mlist(self): - """ get access to the mlist - """ + """get access to the mlist""" return self._mlist def get_subheaders(self): @@ -864,7 +861,7 @@ def get_subheaders(self): @staticmethod def _get_fileholders(file_map): - """ returns files specific to header and image of the image + """returns files specific to header and image of the image for ecat .v this is the same image file Returns @@ -887,7 +884,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # LOAD MLIST mlist = np.zeros((header['num_frames'], 4), dtype=np.int32) mlist_data = read_mlist(hdr_fid, hdr_copy.endianness) - mlist[:len(mlist_data)] = mlist_data + mlist[: len(mlist_data)] = mlist_data # LOAD SUBHEADERS subheaders = klass._subheader(hdr_copy, mlist, hdr_fid) # LOAD DATA @@ -895,11 +892,11 @@ def 
from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): data = klass.ImageArrayProxy(subheaders) # Get affine if not subheaders._check_affines(): - warnings.warn('Affines different across frames, loading affine ' - 'from FIRST frame', UserWarning) + warnings.warn( + 'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning + ) aff = subheaders.get_frame_affine() - img = klass(data, aff, header, subheaders, mlist, - extra=None, file_map=file_map) + img = klass(data, aff, header, subheaders, mlist, extra=None, file_map=file_map) return img def _get_empty_dir(self): @@ -925,11 +922,10 @@ def _write_data(self, data, stream, pos, dtype=None, endianness=None): endianness = native_code stream.seek(pos) - make_array_writer(data.newbyteorder(endianness), - dtype).to_fileobj(stream) + make_array_writer(data.newbyteorder(endianness), dtype).to_fileobj(stream) def to_file_map(self, file_map=None): - """ Write ECAT7 image to `file_map` or contained ``self.file_map`` + """Write ECAT7 image to `file_map` or contained ``self.file_map`` The format consist of: @@ -1014,8 +1010,7 @@ def to_file_map(self, file_map=None): @classmethod def from_image(klass, img): - raise NotImplementedError("Ecat images can only be generated " - "from file objects") + raise NotImplementedError('Ecat images can only be generated ' 'from file objects') @classmethod def load(klass, filespec): diff --git a/nibabel/environment.py b/nibabel/environment.py index 768b4de34b..6f331eed5a 100644 --- a/nibabel/environment.py +++ b/nibabel/environment.py @@ -66,7 +66,7 @@ def get_nipy_user_dir(): def get_nipy_system_dir(): - r""" Get systemwide NIPY configuration file directory + r"""Get systemwide NIPY configuration file directory On posix systems this will be ``/etc/nipy``. On Windows, the directory is less useful, but by default it will be diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index 11a10bbe2b..bb75b54b1e 100644 --- a/nibabel/eulerangles.py +++ b/nibabel/eulerangles.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Module implementing Euler angle rotations and their conversions +"""Module implementing Euler angle rotations and their conversions See: @@ -94,7 +94,7 @@ def euler2mat(z=0, y=0, x=0): - """ Return matrix for rotations around z, y and x axes + """Return matrix for rotations around z, y and x axes Uses the z, then y, then x convention above @@ -170,28 +170,22 @@ def euler2mat(z=0, y=0, x=0): if z: cosz = math.cos(z) sinz = math.sin(z) - Ms.append(np.array([[cosz, -sinz, 0], - [sinz, cosz, 0], - [0, 0, 1]])) + Ms.append(np.array([[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]])) if y: cosy = math.cos(y) siny = math.sin(y) - Ms.append(np.array([[cosy, 0, siny], - [0, 1, 0], - [-siny, 0, cosy]])) + Ms.append(np.array([[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]])) if x: cosx = math.cos(x) sinx = math.sin(x) - Ms.append(np.array([[1, 0, 0], - [0, cosx, -sinx], - [0, sinx, cosx]])) + Ms.append(np.array([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]])) if Ms: return reduce(np.dot, Ms[::-1]) return np.eye(3) def mat2euler(M, cy_thresh=None): - """ Discover Euler angle vector from 3x3 matrix + """Discover Euler angle vector from 3x3 matrix Uses the conventions above. 
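
Not part of the patch: these conversions all share the z, then y, then x convention, so they round-trip. A quick sanity-check sketch:

    import numpy as np
    from nibabel.eulerangles import euler2mat, mat2euler

    M = euler2mat(z=0.3, y=-0.2, x=0.1)  # compose rotations: z, then y, then x
    z, y, x = mat2euler(M)               # recover the angles from the matrix
    assert np.allclose([z, y, x], [0.3, -0.2, 0.1])
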
@@ -264,7 +258,7 @@ def mat2euler(M, cy_thresh=None): def euler2quat(z=0, y=0, x=0): - """ Return quaternion corresponding to these Euler angles + """Return quaternion corresponding to these Euler angles Uses the z, then y, then x convention above @@ -304,14 +298,18 @@ def euler2quat(z=0, y=0, x=0): sy = math.sin(y) cx = math.cos(x) sx = math.sin(x) - return np.array([cx * cy * cz - sx * sy * sz, - cx * sy * sz + cy * cz * sx, - cx * cz * sy - sx * cy * sz, - cx * cy * sz + sx * cz * sy]) + return np.array( + [ + cx * cy * cz - sx * sy * sz, + cx * sy * sz + cy * cz * sx, + cx * cz * sy - sx * cy * sz, + cx * cy * sz + sx * cz * sy, + ] + ) def quat2euler(q): - """ Return Euler angles corresponding to quaternion `q` + """Return Euler angles corresponding to quaternion `q` Parameters ---------- @@ -336,11 +334,12 @@ def quat2euler(q): """ # delayed import to avoid cyclic dependencies from . import quaternions as nq + return mat2euler(nq.quat2mat(q)) def euler2angle_axis(z=0, y=0, x=0): - """ Return angle, axis corresponding to these Euler angles + """Return angle, axis corresponding to these Euler angles Uses the z, then y, then x convention above @@ -370,11 +369,12 @@ def euler2angle_axis(z=0, y=0, x=0): """ # delayed import to avoid cyclic dependencies from . import quaternions as nq + return nq.quat2angle_axis(euler2quat(z, y, x)) def angle_axis2euler(theta, vector, is_normalized=False): - """ Convert angle, axis pair to Euler angles + """Convert angle, axis pair to Euler angles Parameters ---------- @@ -408,5 +408,6 @@ def angle_axis2euler(theta, vector, is_normalized=False): """ # delayed import to avoid cyclic dependencies from . import quaternions as nq + M = nq.angle_axis2mat(theta, vector, is_normalized) return mat2euler(M) diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index 17ac3e8180..f74c7b56eb 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -6,14 +6,13 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Common interface for any image format--volume or surface, binary or xml.""" +"""Common interface for any image format--volume or surface, binary or xml.""" import io from copy import deepcopy from urllib import request from .fileholders import FileHolder -from .filename_parser import (types_filenames, TypesFilenamesError, - splitext_addext) +from .filename_parser import types_filenames, TypesFilenamesError, splitext_addext from .openers import ImageOpener @@ -22,7 +21,7 @@ class ImageFileError(Exception): class FileBasedHeader: - """ Template class to implement header protocol """ + """Template class to implement header protocol""" @classmethod def from_header(klass, header=None): @@ -34,8 +33,9 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - raise NotImplementedError("Header class requires a conversion " - f"from {klass} to {type(header)}") + raise NotImplementedError( + 'Header class requires a conversion ' f'from {klass} to {type(header)}' + ) @classmethod def from_fileobj(klass, fileobj): @@ -51,7 +51,7 @@ def __ne__(self, other): return not self == other def copy(self): - """ Copy object to independent representation + """Copy object to independent representation The copy should not be affected by any changes to the original object. @@ -162,6 +162,7 @@ class FileBasedImage: data. The ``file_map`` contents should therefore be such, that this will work. 
""" + header_class = FileBasedHeader _meta_sniff_len = 0 files_types = (('image', None),) @@ -172,7 +173,7 @@ class FileBasedImage: rw = True # Used in test code def __init__(self, header=None, extra=None, file_map=None): - """ Initialize image + """Initialize image The image is a combination of (header), with optional metadata in `extra`, and filename / file-like objects @@ -202,12 +203,11 @@ def header(self): return self._header def __getitem__(self, key): - """ No slicing or dictionary interface for images - """ - raise TypeError("Cannot slice image objects.") + """No slicing or dictionary interface for images""" + raise TypeError('Cannot slice image objects.') def get_filename(self): - """ Fetch the image filename + """Fetch the image filename Parameters ---------- @@ -228,7 +228,7 @@ def get_filename(self): return self.file_map[characteristic_type].filename def set_filename(self, filename): - """ Sets the files in the object from a given filename + """Sets the files in the object from a given filename The different image formats may check whether the filename has an extension characteristic of the format, and raise an error if @@ -255,7 +255,7 @@ def from_file_map(klass, file_map): @classmethod def filespec_to_file_map(klass, filespec): - """ Make `file_map` for this class from filename `filespec` + """Make `file_map` for this class from filename `filespec` Class method @@ -279,18 +279,17 @@ def filespec_to_file_map(klass, filespec): """ try: filenames = types_filenames( - filespec, klass.files_types, - trailing_suffixes=klass._compressed_suffixes) + filespec, klass.files_types, trailing_suffixes=klass._compressed_suffixes + ) except TypesFilenamesError: - raise ImageFileError( - f'Filespec "{filespec}" does not look right for class {klass}') + raise ImageFileError(f'Filespec "{filespec}" does not look right for class {klass}') file_map = {} for key, fname in filenames.items(): file_map[key] = FileHolder(filename=fname) return file_map def to_filename(self, filename, **kwargs): - r""" Write image to files implied by filename string + r"""Write image to files implied by filename string Parameters ---------- @@ -313,7 +312,7 @@ def to_file_map(self, file_map=None, **kwargs): @classmethod def make_file_map(klass, mapping=None): - """ Class method to make files holder for this image type + """Class method to make files holder for this image type Parameters ---------- @@ -346,7 +345,7 @@ def make_file_map(klass, mapping=None): @classmethod def instance_to_filename(klass, img, filename): - """ Save `img` in our own format, to name implied by `filename` + """Save `img` in our own format, to name implied by `filename` This is a class method @@ -362,7 +361,7 @@ def instance_to_filename(klass, img, filename): @classmethod def from_image(klass, img): - """ Class method to create new instance of own class from `img` + """Class method to create new instance of own class from `img` Parameters ---------- @@ -378,7 +377,7 @@ def from_image(klass, img): @classmethod def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): - """ Sniff metadata for image represented by `filename` + """Sniff metadata for image represented by `filename` Parameters ---------- @@ -402,13 +401,11 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): metadata file, whichever is the shorter. `fname` is the name of the sniffed file. 
""" - froot, ext, trailing = splitext_addext(filename, - klass._compressed_suffixes) + froot, ext, trailing = splitext_addext(filename, klass._compressed_suffixes) # Determine the metadata location t_fnames = types_filenames( - filename, - klass.files_types, - trailing_suffixes=klass._compressed_suffixes) + filename, klass.files_types, trailing_suffixes=klass._compressed_suffixes + ) meta_fname = t_fnames.get('header', filename) # Do not re-sniff if it would be from the same file @@ -425,7 +422,7 @@ def _sniff_meta_for(klass, filename, sniff_nbytes, sniff=None): @classmethod def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): - """ Return True if `filename` may be image matching this class + """Return True if `filename` may be image matching this class Parameters ---------- @@ -458,8 +455,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): Read bytes content from found metadata. May be None if the file does not appear to have useful metadata. """ - froot, ext, trailing = splitext_addext(filename, - klass._compressed_suffixes) + froot, ext, trailing = splitext_addext(filename, klass._compressed_suffixes) if ext.lower() not in klass.valid_exts: return False, sniff if not hasattr(klass.header_class, 'may_contain_header'): @@ -468,9 +464,7 @@ def path_maybe_image(klass, filename, sniff=None, sniff_max=1024): # Force re-sniff on too-short sniff if sniff is not None and len(sniff[0]) < klass._meta_sniff_len: sniff = None - sniff = klass._sniff_meta_for(filename, - max(klass._meta_sniff_len, sniff_max), - sniff) + sniff = klass._sniff_meta_for(filename, max(klass._meta_sniff_len, sniff_max), sniff) if sniff is None or len(sniff[0]) < klass._meta_sniff_len: return False, sniff return klass.header_class.may_contain_header(sniff[0]), sniff @@ -532,9 +526,7 @@ class SerializableImage(FileBasedImage): def _filemap_from_iobase(klass, io_obj: io.IOBase): """For single-file image types, make a file map with the correct key""" if len(klass.files_types) > 1: - raise NotImplementedError( - "(de)serialization is undefined for multi-file images" - ) + raise NotImplementedError('(de)serialization is undefined for multi-file images') return klass.make_file_map({klass.files_types[0][0]: io_obj}) @classmethod @@ -566,7 +558,7 @@ def to_stream(self, io_obj: io.IOBase, **kwargs): @classmethod def from_bytes(klass, bytestring: bytes): - """ Construct image from a byte string + """Construct image from a byte string Class method @@ -578,7 +570,7 @@ def from_bytes(klass, bytestring: bytes): return klass.from_stream(io.BytesIO(bytestring)) def to_bytes(self, **kwargs) -> bytes: - r""" Return a ``bytes`` object with the contents of the file that would + r"""Return a ``bytes`` object with the contents of the file that would be written if the image were saved. Parameters diff --git a/nibabel/fileholders.py b/nibabel/fileholders.py index f7dc9629fd..f2ec992da5 100644 --- a/nibabel/fileholders.py +++ b/nibabel/fileholders.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Fileholder class """ +"""Fileholder class""" from copy import copy @@ -18,14 +18,10 @@ class FileHolderError(Exception): class FileHolder: - """ class to contain filename, fileobj and file position - """ + """class to contain filename, fileobj and file position""" - def __init__(self, - filename=None, - fileobj=None, - pos=0): - """ Initialize FileHolder instance + def __init__(self, filename=None, fileobj=None, pos=0): + """Initialize FileHolder instance Parameters ---------- @@ -43,7 +39,7 @@ def __init__(self, self.pos = pos def get_prepare_fileobj(self, *args, **kwargs): - """ Return fileobj if present, or return fileobj from filename + """Return fileobj if present, or return fileobj from filename Set position to that given in self.pos @@ -75,7 +71,7 @@ def get_prepare_fileobj(self, *args, **kwargs): return obj def same_file_as(self, other): - """ Test if `self` refers to same files / fileobj as `other` + """Test if `self` refers to same files / fileobj as `other` Parameters ---------- @@ -88,18 +84,16 @@ def same_file_as(self, other): True if `other` has the same filename (or both have None) and the same fileobj (or both have None """ - return ((self.filename == other.filename) and - (self.fileobj == other.fileobj)) + return (self.filename == other.filename) and (self.fileobj == other.fileobj) @property def file_like(self): - """ Return ``self.fileobj`` if not None, otherwise ``self.filename`` - """ + """Return ``self.fileobj`` if not None, otherwise ``self.filename``""" return self.fileobj if self.fileobj is not None else self.filename def copy_file_map(file_map): - r""" Copy mapping of fileholders given by `file_map` + r"""Copy mapping of fileholders given by `file_map` Parameters ---------- diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py index e254019883..42e89fa721 100644 --- a/nibabel/filename_parser.py +++ b/nibabel/filename_parser.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Create filename pairs, triplets etc, with expected extensions """ +"""Create filename pairs, triplets etc, with expected extensions""" import os import pathlib @@ -39,18 +39,21 @@ def _stringify_path(filepath_or_buffer): Copied from: https://github.com/pandas-dev/pandas/blob/325dd686de1589c17731cf93b649ed5ccb5a99b4/pandas/io/common.py#L131-L160 """ - if hasattr(filepath_or_buffer, "__fspath__"): + if hasattr(filepath_or_buffer, '__fspath__'): return filepath_or_buffer.__fspath__() elif isinstance(filepath_or_buffer, pathlib.Path): return str(filepath_or_buffer) return filepath_or_buffer -def types_filenames(template_fname, types_exts, - trailing_suffixes=('.gz', '.bz2'), - enforce_extensions=True, - match_case=False): - """ Return filenames with standard extensions from template name +def types_filenames( + template_fname, + types_exts, + trailing_suffixes=('.gz', '.bz2'), + enforce_extensions=True, + match_case=False, +): + """Return filenames with standard extensions from template name The typical case is returning image and header filenames for an Analyze image, that expects an 'image' file type with extension ``.img``, @@ -111,13 +114,12 @@ def types_filenames(template_fname, types_exts, """ template_fname = _stringify_path(template_fname) if not isinstance(template_fname, str): - raise TypesFilenamesError('Need file name as input ' - 'to set_filenames') + raise TypesFilenamesError('Need file name as input ' 'to set_filenames') if template_fname.endswith('.'): template_fname = template_fname[:-1] - filename, found_ext, ignored, guessed_name = \ - parse_filename(template_fname, types_exts, trailing_suffixes, - match_case) + filename, found_ext, ignored, guessed_name = parse_filename( + template_fname, types_exts, trailing_suffixes, match_case + ) # Flag cases where we just set the input name directly direct_set_name = None if enforce_extensions: @@ -128,13 +130,13 @@ def types_filenames(template_fname, types_exts, # an extension, but the wrong one raise TypesFilenamesError( f'File extension "{found_ext}" was not in ' - f'expected list: {[e for t, e in types_exts]}') + f'expected list: {[e for t, e in types_exts]}' + ) elif ignored: # there was no extension, but an ignored suffix # This is a special case like 'test.gz' (where .gz # is ignored). It's confusing to change # this to test.img.gz, or test.gz.img, so error - raise TypesFilenamesError( - f'Confusing ignored suffix {ignored} without extension') + raise TypesFilenamesError(f'Confusing ignored suffix {ignored} without extension') # if we've got to here, we have a guessed name and a found # extension. else: # not enforcing extensions. If there's an extension, we set the @@ -170,10 +172,7 @@ def types_filenames(template_fname, types_exts, return tfns -def parse_filename(filename, - types_exts, - trailing_suffixes, - match_case=False): +def parse_filename(filename, types_exts, trailing_suffixes, match_case=False): """Split filename into fileroot, extension, trailing suffix; guess type. Parameters @@ -252,10 +251,8 @@ def _iendswith(whole, end): return whole.lower().endswith(end.lower()) -def splitext_addext(filename, - addexts=('.gz', '.bz2', '.zst'), - match_case=False): - """ Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` +def splitext_addext(filename, addexts=('.gz', '.bz2', '.zst'), match_case=False): + """Split ``/pth/fname.ext.gz`` into ``/pth/fname, .ext, .gz`` where ``.gz`` may be any of passed `addext` trailing suffixes. 
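
Not part of the patch: for orientation, the two helpers reformatted above behave roughly as follows (file names here are hypothetical):

    from nibabel.filename_parser import types_filenames, splitext_addext

    # Analyze-style image/header pair; the trailing .gz suffix is carried over
    types_exts = (('image', '.img'), ('header', '.hdr'))
    types_filenames('/data/subj1.img.gz', types_exts)
    # -> {'image': '/data/subj1.img.gz', 'header': '/data/subj1.hdr.gz'}

    splitext_addext('/data/subj1.nii.gz')
    # -> ('/data/subj1', '.nii', '.gz')
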
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index cc850132b8..8df199d0d2 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -1,4 +1,4 @@ -""" Utilities for getting array slices out of file-like objects +"""Utilities for getting array slices out of file-like objects """ import operator @@ -13,7 +13,7 @@ # Threshold for memory gap above which we always skip, to save memory # This value came from trying various values and looking at the timing with # ``bench_fileslice`` -SKIP_THRESH = 2 ** 8 +SKIP_THRESH = 2**8 class _NullLock: @@ -25,6 +25,7 @@ class _NullLock: It is used by the ``read_segments`` function in the event that a ``Lock`` is not provided by the caller. """ + def __enter__(self): pass @@ -33,7 +34,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def is_fancy(sliceobj): - """ Returns True if sliceobj is attempting fancy indexing + """Returns True if sliceobj is attempting fancy indexing Parameters ---------- @@ -61,7 +62,7 @@ def is_fancy(sliceobj): def canonical_slicers(sliceobj, shape, check_inds=True): - """ Return canonical version of `sliceobj` for array shape `shape` + """Return canonical version of `sliceobj` for array shape `shape` `sliceobj` is a slicer for an array ``A`` implied by `shape`. @@ -94,7 +95,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): if not isinstance(sliceobj, tuple): sliceobj = (sliceobj,) if is_fancy(sliceobj): - raise ValueError("Cannot handle fancy indexing") + raise ValueError('Cannot handle fancy indexing') can_slicers = [] n_dim = len(shape) n_real = 0 @@ -103,10 +104,9 @@ def canonical_slicers(sliceobj, shape, check_inds=True): can_slicers.append(None) continue if slicer == Ellipsis: - remaining = sliceobj[i + 1:] + remaining = sliceobj[i + 1 :] if Ellipsis in remaining: - raise ValueError("More than one Ellipsis in slicing " - "expression") + raise ValueError('More than one Ellipsis in slicing ' 'expression') real_remaining = [r for r in remaining if r is not None] n_ellided = n_dim - n_real - len(real_remaining) can_slicers.extend((slice(None),) * n_ellided) @@ -120,8 +120,11 @@ def canonical_slicers(sliceobj, shape, check_inds=True): except TypeError: # should be slice object if slicer != slice(None): # Could this be full slice? 
- if slicer.stop == dim_len and slicer.start in (None, 0) and \ - slicer.step in (None, 1): + if ( + slicer.stop == dim_len + and slicer.start in (None, 0) + and slicer.step in (None, 1) + ): slicer = slice(None) else: if slicer < 0: @@ -136,7 +139,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True): def slice2outax(ndim, sliceobj): - """ Matching output axes for input array ndim `ndim` and slice `sliceobj` + """Matching output axes for input array ndim `ndim` and slice `sliceobj` Parameters ---------- @@ -166,7 +169,7 @@ def slice2outax(ndim, sliceobj): def slice2len(slicer, in_len): - """ Output length after slicing original length `in_len` with `slicer` + """Output length after slicing original length `in_len` with `slicer` Parameters ---------- slicer : slice object @@ -188,8 +191,7 @@ def slice2len(slicer, in_len): def _full_slicer_len(full_slicer): - """ Return length of slicer processed by ``fill_slicer`` - """ + """Return length of slicer processed by ``fill_slicer``""" start, stop, step = full_slicer.start, full_slicer.stop, full_slicer.step if stop is None: # case of negative step stop = -1 @@ -200,7 +202,7 @@ def _full_slicer_len(full_slicer): def fill_slicer(slicer, in_len): - """ Return slice object with Nones filled out to match `in_len` + """Return slice object with Nones filled out to match `in_len` Also fixes too large stop / start values according to slice() slicing rules. @@ -245,7 +247,7 @@ def fill_slicer(slicer, in_len): def predict_shape(sliceobj, in_shape): - """ Predict shape of array from slicing array shape `shape` with `sliceobj` + """Predict shape of array from slicing array shape `shape` with `sliceobj` Parameters ---------- @@ -278,7 +280,7 @@ def predict_shape(sliceobj, in_shape): def _positive_slice(slicer): - """ Return full slice `slicer` enforcing positive step size + """Return full slice `slicer` enforcing positive step size `slicer` assumed full in the sense of :func:`fill_slicer` """ @@ -294,11 +296,8 @@ def _positive_slice(slicer): return slice(end, start + 1, -step) -def threshold_heuristic(slicer, - dim_len, - stride, - skip_thresh=SKIP_THRESH): - """ Whether to force full axis read or contiguous read of stepped slice +def threshold_heuristic(slicer, dim_len, stride, skip_thresh=SKIP_THRESH): + """Whether to force full axis read or contiguous read of stepped slice Allows :func:`fileslice` to sometimes read memory that it will throw away in order to get maximum speed. 
In other words, trade memory for fewer disk @@ -350,9 +349,8 @@ def threshold_heuristic(slicer, return 'full' if gap_size <= skip_thresh else 'contiguous' -def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, - heuristic=threshold_heuristic): - """ Return maybe modified slice and post-slice slicing for `slicer` +def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, heuristic=threshold_heuristic): + """Return maybe modified slice and post-slice slicing for `slicer` Parameters ---------- @@ -428,7 +426,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, if action not in ('full', 'contiguous', None): raise ValueError(f'Unexpected return {action} from heuristic') if is_int and action == 'contiguous': - raise ValueError("int index cannot be contiguous") + raise ValueError('int index cannot be contiguous') # If this is the slowest changing dimension, never upgrade None or # contiguous beyond contiguous (we've already covered the already-full # case) @@ -442,8 +440,7 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, if step not in (-1, 1): if step < 0: slicer = _positive_slice(slicer) - return (slice(slicer.start, slicer.stop, 1), - slice(None, None, step)) + return (slice(slicer.start, slicer.stop, 1), slice(None, None, step)) # We only need to be positive if is_int: return slicer, 'dropped' @@ -452,9 +449,8 @@ def optimize_slicer(slicer, dim_len, all_full, is_slowest, stride, return _positive_slice(slicer), slice(None, None, -1) -def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, - heuristic=threshold_heuristic): - """ Return parameters for slicing array with `sliceobj` given memory layout +def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, heuristic=threshold_heuristic): + """Return parameters for slicing array with `sliceobj` given memory layout Calculate the best combination of skips / (read + discard) to use for reading the data from disk / memory, then generate corresponding @@ -495,7 +491,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, `segments` and reshaping via `read_shape`. Slices are in terms of `read_shape`. 
If empty, no new slicing to apply """ - if order not in "CF": + if order not in 'CF': raise ValueError("order should be one of 'CF'") sliceobj = canonical_slicers(sliceobj, in_shape) # order fastest changing first (record reordering) @@ -505,8 +501,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, # Analyze sliceobj for new read_slicers and fixup post_slicers # read_slicers are the virtual slices; we don't slice with these, but use # the slice definitions to read the relevant memory from disk - read_slicers, post_slicers = optimize_read_slicers( - sliceobj, in_shape, itemsize, heuristic) + read_slicers, post_slicers = optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic) # work out segments corresponding to read_slicers segments = slicers2segments(read_slicers, in_shape, offset, itemsize) # Make post_slicers empty if it is the slicing identity operation @@ -521,7 +516,7 @@ def calc_slicedefs(sliceobj, in_shape, itemsize, offset, order, def optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic): - """ Calculates slices to read from disk, and apply after reading + """Calculates slices to read from disk, and apply after reading Parameters ---------- @@ -569,7 +564,8 @@ def optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic): is_last = real_no == len(in_shape) # make modified sliceobj (to_read, post_slice) read_slicer, post_slicer = optimize_slicer( - slicer, dim_len, all_full, is_last, stride, heuristic) + slicer, dim_len, all_full, is_last, stride, heuristic + ) read_slicers.append(read_slicer) all_full = all_full and read_slicer == slice(None) if not isinstance(read_slicer, Integral): @@ -579,7 +575,7 @@ def optimize_read_slicers(sliceobj, in_shape, itemsize, heuristic): def slicers2segments(read_slicers, in_shape, offset, itemsize): - """ Get segments from `read_slicers` given `in_shape` and memory steps + """Get segments from `read_slicers` given `in_shape` and memory steps Parameters ---------- @@ -627,9 +623,7 @@ def slicers2segments(read_slicers, in_shape, offset, itemsize): else: # slice object segments = all_segments all_segments = [] - for i in range(read_slicer.start, - read_slicer.stop, - read_slicer.step): + for i in range(read_slicer.start, read_slicer.stop, read_slicer.step): for s in segments: all_segments.append([s[0] + stride * i, s[1]]) all_full = all_full and is_full @@ -638,7 +632,7 @@ def slicers2segments(read_slicers, in_shape, offset, itemsize): def read_segments(fileobj, segments, n_bytes, lock=None): - """ Read `n_bytes` byte data implied by `segments` from `fileobj` + """Read `n_bytes` byte data implied by `segments` from `fileobj` Parameters ---------- @@ -670,7 +664,7 @@ def read_segments(fileobj, segments, n_bytes, lock=None): if len(segments) == 0: if n_bytes != 0: - raise ValueError("No segments, but non-zero n_bytes") + raise ValueError('No segments, but non-zero n_bytes') return b'' if len(segments) == 1: offset, length = segments[0] @@ -678,7 +672,7 @@ def read_segments(fileobj, segments, n_bytes, lock=None): fileobj.seek(offset) bytes = fileobj.read(length) if len(bytes) != n_bytes: - raise ValueError("Whoops, not enough data in file") + raise ValueError('Whoops, not enough data in file') return bytes # More than one segment bytes = mmap(-1, n_bytes) @@ -687,13 +681,12 @@ def read_segments(fileobj, segments, n_bytes, lock=None): fileobj.seek(offset) bytes.write(fileobj.read(length)) if bytes.tell() != n_bytes: - raise ValueError("Oh dear, n_bytes does not look right") + raise ValueError('Oh dear, n_bytes does 
not look right') return bytes -def _simple_fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', - heuristic=None): - """ Read all data from `fileobj` into array, then slice with `sliceobj` +def _simple_fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', heuristic=None): + """Read all data from `fileobj` into array, then slice with `sliceobj` The simplest possible thing; read all the data into the full array, then slice the full array. @@ -728,9 +721,10 @@ def _simple_fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', return new_arr[sliceobj] -def fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', - heuristic=threshold_heuristic, lock=None): - """ Slice array in `fileobj` using `sliceobj` slicer and array definitions +def fileslice( + fileobj, sliceobj, shape, dtype, offset=0, order='C', heuristic=threshold_heuristic, lock=None +): + """Slice array in `fileobj` using `sliceobj` slicer and array definitions `fileobj` contains the contiguous binary data for an array ``A`` of shape, dtype, memory layout `shape`, `dtype`, `order`, with the binary data @@ -781,19 +775,18 @@ def fileslice(fileobj, sliceobj, shape, dtype, offset=0, order='C', Array in `fileobj` as sliced with `sliceobj` """ if is_fancy(sliceobj): - raise ValueError("Cannot handle fancy indexing") + raise ValueError('Cannot handle fancy indexing') dtype = np.dtype(dtype) itemsize = int(dtype.itemsize) - segments, sliced_shape, post_slicers = calc_slicedefs( - sliceobj, shape, itemsize, offset, order) + segments, sliced_shape, post_slicers = calc_slicedefs(sliceobj, shape, itemsize, offset, order) n_bytes = reduce(operator.mul, sliced_shape, 1) * itemsize arr_data = read_segments(fileobj, segments, n_bytes, lock) sliced = np.ndarray(sliced_shape, dtype, buffer=arr_data, order=order) return sliced[post_slicers] -def strided_scalar(shape, scalar=0.): - """ Return array shape `shape` where all entries point to value `scalar` +def strided_scalar(shape, scalar=0.0): + """Return array shape `shape` where all entries point to value `scalar` Parameters ---------- diff --git a/nibabel/fileutils.py b/nibabel/fileutils.py index c518cdd921..da44fe51a9 100644 --- a/nibabel/fileutils.py +++ b/nibabel/fileutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utilities for reading and writing to binary file formats +"""Utilities for reading and writing to binary file formats """ diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index a588fb06e5..83c12f8682 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -1,6 +1,13 @@ """Reading functions for freesurfer files """ -from .io import read_geometry, read_morph_data, write_morph_data, \ - read_annot, read_label, write_geometry, write_annot +from .io import ( + read_geometry, + read_morph_data, + write_morph_data, + read_annot, + read_label, + write_geometry, + write_annot, +) from .mghformat import load, save, MGHImage diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 77f7fe892a..36013c3af2 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,4 +1,4 @@ -""" Read / write FreeSurfer geometry, morphometry, label, annotation formats +"""Read / write FreeSurfer geometry, morphometry, label, annotation formats """ import warnings @@ -10,7 +10,7 @@ from ..openers import Opener -_ANNOT_DT = ">i4" +_ANNOT_DT = '>i4' """Data type for Freesurfer `.annot` files. 
Used by :func:`read_annot` and :func:`write_annot`. All data (apart from @@ -31,7 +31,7 @@ def _fread3(fobj): n : int A 3 byte int """ - b1, b2, b3 = np.fromfile(fobj, ">u1", 3) + b1, b2, b3 = np.fromfile(fobj, '>u1', 3) return (b1 << 16) + (b2 << 8) + b3 @@ -48,8 +48,7 @@ def _fread3_many(fobj, n): out : 1D array An array of 3 byte int """ - b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, - 3).astype(int).T + b1, b2, b3 = np.fromfile(fobj, '>u1', 3 * n).reshape(-1, 3).astype(int).T return (b1 << 16) + (b2 << 8) + b3 @@ -60,12 +59,11 @@ def _read_volume_info(fobj): if not np.array_equal(head, [20]): # Read two bytes more head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)]) if not np.array_equal(head, [2, 0, 20]): - warnings.warn("Unknown extension code.") + warnings.warn('Unknown extension code.') return volume_info volume_info['head'] = head - for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', - 'zras', 'cras']: + for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', 'zras', 'cras']: pair = fobj.readline().decode('utf-8').split('=') if pair[0].strip() != key or len(pair) != 2: raise OSError('Error parsing volume info.') @@ -142,12 +140,12 @@ def read_geometry(filepath, read_metadata=False, read_stamp=False): TRIANGLE_MAGIC = 16777214 QUAD_MAGIC = 16777215 NEW_QUAD_MAGIC = 16777213 - with open(filepath, "rb") as fobj: + with open(filepath, 'rb') as fobj: magic = _fread3(fobj) if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file nvert = _fread3(fobj) nquad = _fread3(fobj) - (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.) + (fmt, div) = ('>i2', 100.0) if magic == QUAD_MAGIC else ('>f4', 1.0) coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div coords = coords.reshape(-1, 3) quads = _fread3_many(fobj, nquad * 4) @@ -172,15 +170,15 @@ def read_geometry(filepath, read_metadata=False, read_stamp=False): elif magic == TRIANGLE_MAGIC: # Triangle file create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8') fobj.readline() - vnum = np.fromfile(fobj, ">i4", 1)[0] - fnum = np.fromfile(fobj, ">i4", 1)[0] - coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3) - faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3) + vnum = np.fromfile(fobj, '>i4', 1)[0] + fnum = np.fromfile(fobj, '>i4', 1)[0] + coords = np.fromfile(fobj, '>f4', vnum * 3).reshape(vnum, 3) + faces = np.fromfile(fobj, '>i4', fnum * 3).reshape(fnum, 3) if read_metadata: volume_info = _read_volume_info(fobj) else: - raise ValueError("File does not appear to be a Freesurfer surface") + raise ValueError('File does not appear to be a Freesurfer surface') coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits @@ -195,8 +193,7 @@ def read_geometry(filepath, read_metadata=False, read_stamp=False): return ret -def write_geometry(filepath, coords, faces, create_stamp=None, - volume_info=None): +def write_geometry(filepath, coords, faces, create_stamp=None, volume_info=None): """Write a triangular format Freesurfer surface mesh. 
Parameters @@ -228,11 +225,11 @@ def write_geometry(filepath, coords, faces, create_stamp=None, magic_bytes = np.array([255, 255, 254], dtype=np.uint8) if create_stamp is None: - create_stamp = f"created by {getpass.getuser()} on {time.ctime()}" + create_stamp = f'created by {getpass.getuser()} on {time.ctime()}' with open(filepath, 'wb') as fobj: magic_bytes.tofile(fobj) - fobj.write((f"{create_stamp}\n\n").encode('utf-8')) + fobj.write((f'{create_stamp}\n\n').encode('utf-8')) np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj) @@ -263,15 +260,15 @@ def read_morph_data(filepath): curv : numpy array Vector representation of surface morpometry values """ - with open(filepath, "rb") as fobj: + with open(filepath, 'rb') as fobj: magic = _fread3(fobj) if magic == 16777215: - vnum = np.fromfile(fobj, ">i4", 3)[0] - curv = np.fromfile(fobj, ">f4", vnum) + vnum = np.fromfile(fobj, '>i4', 3)[0] + curv = np.fromfile(fobj, '>f4', vnum) else: vnum = magic _fread3(fobj) - curv = np.fromfile(fobj, ">i2", vnum) / 100 + curv = np.fromfile(fobj, '>i2', vnum) / 100 return curv @@ -302,13 +299,13 @@ def write_morph_data(file_like, values, fnum=0): vector = np.asarray(values) vnum = np.prod(vector.shape) if vector.shape not in ((vnum,), (vnum, 1), (1, vnum), (vnum, 1, 1)): - raise ValueError("Invalid shape: argument values must be a vector") + raise ValueError('Invalid shape: argument values must be a vector') i4info = np.iinfo('i4') if vnum > i4info.max: - raise ValueError("Too many values for morphometry file") + raise ValueError('Too many values for morphometry file') if not i4info.min <= fnum <= i4info.max: - raise ValueError(f"Argument fnum must be between {i4info.min} and {i4info.max}") + raise ValueError(f'Argument fnum must be between {i4info.min} and {i4info.max}') with Opener(file_like, 'wb') as fobj: fobj.write(magic_bytes) @@ -356,7 +353,7 @@ def read_annot(filepath, orig_ids=False): names : list of bytes The names of the labels. The length of the list is n_labels. """ - with open(filepath, "rb") as fobj: + with open(filepath, 'rb') as fobj: dt = _ANNOT_DT # number of vertices @@ -431,7 +428,7 @@ def _read_annot_ctab_old_format(fobj, n_entries): for i in range(n_entries): # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] + name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] names.append(name) # read RGBT for this entry ctab[i, :4] = np.fromfile(fobj, dt, 4) @@ -475,7 +472,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): ctab = np.zeros((max_index, 5), dt) # orig_tab string length + string length = np.fromfile(fobj, dt, 1)[0] - np.fromfile(fobj, "|S%d" % length, 1)[0] # Orig table path + np.fromfile(fobj, '|S%d' % length, 1)[0] # Orig table path # number of LUT entries present in the file entries_to_read = np.fromfile(fobj, dt, 1)[0] names = list() @@ -484,7 +481,7 @@ def _read_annot_ctab_new_format(fobj, ctab_version): idx = np.fromfile(fobj, dt, 1)[0] # structure name length + string name_length = np.fromfile(fobj, dt, 1)[0] - name = np.fromfile(fobj, "|S%d" % name_length, 1)[0] + name = np.fromfile(fobj, '|S%d' % name_length, 1)[0] names.append(name) # RGBT ctab[idx, :4] = np.fromfile(fobj, dt, 4) @@ -519,7 +516,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): (n_labels, 4) or (n_labels, 5) - if the latter, the final column is ignored. 
""" - with open(filepath, "wb") as fobj: + with open(filepath, 'wb') as fobj: dt = _ANNOT_DT vnum = len(labels) @@ -545,8 +542,7 @@ def write_string(s): clut_labels[np.where(labels == -1)] = 0 # vno, label - data = np.vstack((np.array(range(vnum)), - clut_labels)).T.astype(dt) + data = np.vstack((np.array(range(vnum)), clut_labels)).T.astype(dt) data.tofile(fobj) # tag @@ -598,8 +594,7 @@ def read_label(filepath, read_scalars=False): def _serialize_volume_info(volume_info): """Helper for serializing the volume info.""" - keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', - 'zras', 'cras'] + keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras', 'zras', 'cras'] diff = set(volume_info.keys()).difference(keys) if len(diff) > 0: raise ValueError(f'Invalid volume info: {diff.pop()}.') @@ -607,9 +602,11 @@ def _serialize_volume_info(volume_info): strings = list() for key in keys: if key == 'head': - if not (np.array_equal(volume_info[key], [20]) or np.array_equal( - volume_info[key], [2, 0, 20])): - warnings.warn("Unknown extension code.") + if not ( + np.array_equal(volume_info[key], [20]) + or np.array_equal(volume_info[key], [2, 0, 20]) + ): + warnings.warn('Unknown extension code.') strings.append(np.array(volume_info[key], dtype='>i4').tobytes()) elif key in ('valid', 'filename'): val = volume_info[key] @@ -620,5 +617,6 @@ def _serialize_volume_info(volume_info): else: val = volume_info[key] strings.append( - f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8')) + f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8') + ) return b''.join(strings) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 9d2cdb905b..45881ba313 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Header and image reading / writing functions for MGH image format +"""Header and image reading / writing functions for MGH image format Author: Krish Subramaniam """ @@ -14,8 +14,7 @@ import numpy as np from ..affines import voxel_sizes, from_matvec -from ..volumeutils import (array_to_file, array_from_file, endian_codes, - Recoder) +from ..volumeutils import array_to_file, array_from_file, endian_codes, Recoder from ..filebasedimages import SerializableImage from ..filename_parser import _stringify_path from ..spatialimages import HeaderDataError, SpatialImage @@ -55,20 +54,62 @@ # caveat: Note that it's ambiguous to get the code given the bytespervoxel # caveat 2: Note that the bytespervox you get is in str ( not an int) _dtdefs = ( # code, conversion function, dtype, bytes per voxel - (0, 'uint8', '>u1', '1', 'MRI_UCHAR', np.uint8, np.dtype(np.uint8), - np.dtype(np.uint8).newbyteorder('>')), - (4, 'int16', '>i2', '2', 'MRI_SHORT', np.int16, np.dtype(np.int16), - np.dtype(np.int16).newbyteorder('>')), - (1, 'int32', '>i4', '4', 'MRI_INT', np.int32, np.dtype(np.int32), - np.dtype(np.int32).newbyteorder('>')), - (3, 'float', '>f4', '4', 'MRI_FLOAT', np.float32, np.dtype(np.float32), - np.dtype(np.float32).newbyteorder('>'))) + ( + 0, + 'uint8', + '>u1', + '1', + 'MRI_UCHAR', + np.uint8, + np.dtype(np.uint8), + np.dtype(np.uint8).newbyteorder('>'), + ), + ( + 4, + 'int16', + '>i2', + '2', + 'MRI_SHORT', + np.int16, + np.dtype(np.int16), + np.dtype(np.int16).newbyteorder('>'), + ), + ( + 1, + 'int32', + '>i4', + '4', + 'MRI_INT', + np.int32, + np.dtype(np.int32), + np.dtype(np.int32).newbyteorder('>'), + ), + ( + 3, + 'float', + '>f4', + '4', + 'MRI_FLOAT', + np.float32, + np.dtype(np.float32), + np.dtype(np.float32).newbyteorder('>'), + ), +) # make full code alias bank, including dtype column -data_type_codes = Recoder(_dtdefs, fields=('code', 'label', 'dtype', - 'bytespervox', 'mritype', - 'np_dtype1', 'np_dtype2', - 'numpy_dtype')) +data_type_codes = Recoder( + _dtdefs, + fields=( + 'code', + 'label', + 'dtype', + 'bytespervox', + 'mritype', + 'np_dtype1', + 'np_dtype2', + 'numpy_dtype', + ), +) class MGHError(Exception): @@ -80,21 +121,20 @@ class MGHError(Exception): class MGHHeader(LabeledWrapStruct): - """ Class for MGH format header + """Class for MGH format header The header also consists of the footer data which MGH places after the data chunk. 
""" + # Copies of module-level definitions template_dtype = hf_dtype _hdrdtype = header_dtype _ftrdtype = footer_dtype _data_type_codes = data_type_codes - def __init__(self, - binaryblock=None, - check=True): - """ Initialize header from binary data block + def __init__(self, binaryblock=None, check=True): + """Initialize header from binary data block Parameters ---------- @@ -111,11 +151,8 @@ def __init__(self, # Right zero-pad or truncate binaryblock to appropriate size # Footer is optional and may contain variable-length text fields, # so limit to fixed fields - binaryblock = (binaryblock[:full_size] + - b'\x00' * (full_size - len(binaryblock))) - super(MGHHeader, self).__init__(binaryblock=binaryblock, - endianness='big', - check=False) + binaryblock = binaryblock[:full_size] + b'\x00' * (full_size - len(binaryblock)) + super(MGHHeader, self).__init__(binaryblock=binaryblock, endianness='big', check=False) if not self._structarr['goodRASFlag']: self._set_affine_default() if check: @@ -137,8 +174,7 @@ def _get_checks(klass): @classmethod def from_header(klass, header=None, check=True): - """ Class method to create MGH header from another MGH header - """ + """Class method to create MGH header from another MGH header""" # own type, return copy if type(header) == klass: obj = header.copy() @@ -159,19 +195,19 @@ def from_fileobj(klass, fileobj, check=True): # dimensions from the header, skip over and then read the footer # information hdr_str = fileobj.read(klass._hdrdtype.itemsize) - hdr_str_to_np = np.ndarray(shape=(), dtype=klass._hdrdtype, - buffer=hdr_str) + hdr_str_to_np = np.ndarray(shape=(), dtype=klass._hdrdtype, buffer=hdr_str) if not np.all(hdr_str_to_np['dims']): raise MGHError('Dimensions of the data should be non-zero') tp = int(hdr_str_to_np['type']) - fileobj.seek(DATA_OFFSET + - int(klass._data_type_codes.bytespervox[tp]) * - np.prod(hdr_str_to_np['dims'])) + fileobj.seek( + DATA_OFFSET + + int(klass._data_type_codes.bytespervox[tp]) * np.prod(hdr_str_to_np['dims']) + ) ftr_str = fileobj.read(klass._ftrdtype.itemsize) return klass(hdr_str + ftr_str, check=check) def get_affine(self): - """ Get the affine transform from the header information. + """Get the affine transform from the header information. MGH format doesn't store the transform directly. Instead it's gleaned from the zooms ( delta ), direction cosines ( Mdc ), RAS centers ( @@ -186,29 +222,27 @@ def get_affine(self): get_best_affine = get_affine def get_vox2ras(self): - """return the get_affine() - """ + """return the get_affine()""" return self.get_affine() def get_vox2ras_tkr(self): - """ Get the vox2ras-tkr transform. See "Torig" here: - https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems + """Get the vox2ras-tkr transform. 
See "Torig" here: + https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems """ ds = self._structarr['delta'] ns = self._structarr['dims'][:3] * ds / 2.0 - v2rtkr = np.array([[-ds[0], 0, 0, ns[0]], - [0, 0, ds[2], -ns[2]], - [0, -ds[1], 0, ns[1]], - [0, 0, 0, 1]], dtype=np.float32) + v2rtkr = np.array( + [[-ds[0], 0, 0, ns[0]], [0, 0, ds[2], -ns[2]], [0, -ds[1], 0, ns[1]], [0, 0, 0, 1]], + dtype=np.float32, + ) return v2rtkr def get_ras2vox(self): - """return the inverse get_affine() - """ + """return the inverse get_affine()""" return np.linalg.inv(self.get_affine()) def get_data_dtype(self): - """ Get numpy dtype for MGH data + """Get numpy dtype for MGH data For examples see ``set_data_dtype`` """ @@ -217,8 +251,7 @@ def get_data_dtype(self): return dtype def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code or dtype or type - """ + """Set numpy dtype for data from code or dtype or type""" try: code = self._data_type_codes[datatype] except KeyError: @@ -226,7 +259,7 @@ def set_data_dtype(self, datatype): self._structarr['type'] = code def _ndims(self): - """ Get dimensionality of data + """Get dimensionality of data MGH does not encode dimensionality explicitly, so an image where the fourth dimension is 1 is treated as three-dimensional. @@ -238,7 +271,7 @@ def _ndims(self): return 3 + (self._structarr['dims'][3] > 1) def get_zooms(self): - """ Get zooms from header + """Get zooms from header Returns the spacing of voxels in the x, y, and z dimensions. For four-dimensional files, a fourth zoom is included, equal to the @@ -259,7 +292,7 @@ def get_zooms(self): return tuple(self._structarr['delta']) + tzoom def set_zooms(self, zooms): - """ Set zooms into header fields + """Set zooms into header fields Sets the spacing of voxels in the x, y, and z dimensions. For four-dimensional files, a temporal zoom (repetition time, or TR, in @@ -277,8 +310,9 @@ def set_zooms(self, zooms): if len(zooms) > ndims: raise HeaderDataError('Expecting %d zoom values' % ndims) if np.any(zooms[:3] <= 0): - raise HeaderDataError('Spatial (first three) zooms must be positive; got ' - f'{tuple(zooms[:3])}') + raise HeaderDataError( + 'Spatial (first three) zooms must be positive; got ' f'{tuple(zooms[:3])}' + ) hdr['delta'] = zooms[:3] if len(zooms) == 4: if zooms[3] < 0: @@ -286,8 +320,7 @@ def set_zooms(self, zooms): hdr['tr'] = zooms[3] def get_data_shape(self): - """ Get shape of data - """ + """Get shape of data""" shape = tuple(self._structarr['dims']) # If last dimension (nframes) is 1, remove it because # we want to maintain 3D and it's redundant @@ -296,7 +329,7 @@ def get_data_shape(self): return shape def set_data_shape(self, shape): - """ Set shape of data + """Set shape of data Parameters ---------- @@ -305,34 +338,30 @@ def set_data_shape(self, shape): """ shape = tuple(shape) if len(shape) > 4: - raise ValueError("Shape may be at most 4 dimensional") + raise ValueError('Shape may be at most 4 dimensional') self._structarr['dims'] = shape + (1,) * (4 - len(shape)) self._structarr['delta'] = 1 def get_data_bytespervox(self): - """ Get the number of bytes per voxel of the data - """ - return int(self._data_type_codes.bytespervox[ - int(self._structarr['type'])]) + """Get the number of bytes per voxel of the data""" + return int(self._data_type_codes.bytespervox[int(self._structarr['type'])]) def get_data_size(self): - """ Get the number of bytes the data chunk occupies. 
- """ + """Get the number of bytes the data chunk occupies.""" return self.get_data_bytespervox() * np.prod(self._structarr['dims']) def get_data_offset(self): - """ Return offset into data file to read data - """ + """Return offset into data file to read data""" return DATA_OFFSET def get_footer_offset(self): - """ Return offset where the footer resides. - Occurs immediately after the data chunk. + """Return offset where the footer resides. + Occurs immediately after the data chunk. """ return self.get_data_offset() + self.get_data_size() def data_from_fileobj(self, fileobj): - """ Read data array from `fileobj` + """Read data array from `fileobj` Parameters ---------- @@ -350,25 +379,23 @@ def data_from_fileobj(self, fileobj): return array_from_file(shape, dtype, fileobj, offset) def get_slope_inter(self): - """ MGH format does not do scaling? - """ + """MGH format does not do scaling?""" return None, None @classmethod def guessed_endian(klass, mapping): - """ MGHHeader data must be big-endian """ + """MGHHeader data must be big-endian""" return '>' @classmethod def default_structarr(klass, endianness=None): - """ Return header data for empty header + """Return header data for empty header Ignores byte order; always big endian """ if endianness is not None and endian_codes[endianness] != '>': raise ValueError('MGHHeader must always be big endian') - structarr = super(MGHHeader, - klass).default_structarr(endianness=endianness) + structarr = super(MGHHeader, klass).default_structarr(endianness=endianness) structarr['version'] = 1 structarr['dims'] = 1 structarr['type'] = 3 @@ -378,15 +405,14 @@ def default_structarr(klass, endianness=None): return structarr def _set_affine_default(self): - """ If goodRASFlag is 0, set the default affine - """ + """If goodRASFlag is 0, set the default affine""" self._structarr['goodRASFlag'] = 1 self._structarr['delta'] = 1 self._structarr['Mdc'] = [[-1, 0, 0], [0, 0, 1], [0, -1, 0]] self._structarr['Pxyz_c'] = 0 def writehdr_to(self, fileobj): - """ Write header to fileobj + """Write header to fileobj Write starts at the beginning. @@ -399,14 +425,13 @@ def writehdr_to(self, fileobj): ------- None """ - hdr_nofooter = np.ndarray((), dtype=self._hdrdtype, - buffer=self.binaryblock) + hdr_nofooter = np.ndarray((), dtype=self._hdrdtype, buffer=self.binaryblock) # goto the very beginning of the file-like obj fileobj.seek(0) fileobj.write(hdr_nofooter.tobytes()) def writeftr_to(self, fileobj): - """ Write footer to fileobj + """Write footer to fileobj Footer data is located after the data chunk. So move there and write. @@ -420,17 +445,18 @@ def writeftr_to(self, fileobj): None """ ftr_loc_in_hdr = len(self.binaryblock) - self._ftrdtype.itemsize - ftr_nd = np.ndarray((), dtype=self._ftrdtype, - buffer=self.binaryblock, offset=ftr_loc_in_hdr) + ftr_nd = np.ndarray( + (), dtype=self._ftrdtype, buffer=self.binaryblock, offset=ftr_loc_in_hdr + ) fileobj.seek(self.get_footer_offset()) fileobj.write(ftr_nd.tobytes()) def copy(self): - """ Return copy of structure """ + """Return copy of structure""" return self.__class__(self.binaryblock, check=False) def as_byteswapped(self, endianness=None): - """ Return new object with given ``endianness`` + """Return new object with given ``endianness`` If big endian, returns a copy of the object. Otherwise raises ValueError. 
@@ -447,8 +473,7 @@ def as_byteswapped(self, endianness=None): """ if endianness is None or endian_codes[endianness] != '>': - raise ValueError('Cannot byteswap MGHHeader - ' - 'must always be big endian') + raise ValueError('Cannot byteswap MGHHeader - ' 'must always be big endian') return self.copy() @classmethod @@ -458,13 +483,12 @@ def diagnose_binaryblock(klass, binaryblock, endianness=None): wstr = klass(binaryblock, check=False) battrun = BatteryRunner(klass._get_checks()) reports = battrun.check_only(wstr) - return '\n'.join([report.message - for report in reports if report.message]) + return '\n'.join([report.message for report in reports if report.message]) class MGHImage(SpatialImage, SerializableImage): - """ Class for MGH format image - """ + """Class for MGH format image""" + header_class = MGHHeader valid_exts = ('.mgh', '.mgz') # Register that .mgz extension signals gzip compression @@ -477,13 +501,13 @@ class MGHImage(SpatialImage, SerializableImage): ImageArrayProxy = ArrayProxy - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None): + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): shape = dataobj.shape if len(shape) < 3: dataobj = reshape_dataobj(dataobj, shape + (1,) * (3 - len(shape))) - super(MGHImage, self).__init__(dataobj, affine, header=header, - extra=extra, file_map=file_map) + super(MGHImage, self).__init__( + dataobj, affine, header=header, extra=extra, file_map=file_map + ) @classmethod def filespec_to_file_map(klass, filespec): @@ -495,7 +519,7 @@ def filespec_to_file_map(klass, filespec): @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -532,13 +556,14 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): affine = header.get_affine() hdr_copy = header.copy() # Pass original image fileobj / filename to array proxy - data = klass.ImageArrayProxy(img_fh.file_like, hdr_copy, mmap=mmap, - keep_file_open=keep_file_open) + data = klass.ImageArrayProxy( + img_fh.file_like, hdr_copy, mmap=mmap, keep_file_open=keep_file_open + ) img = klass(data, affine, header, file_map=file_map) return img def to_file_map(self, file_map=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -559,7 +584,7 @@ def to_file_map(self, file_map=None): self.file_map = file_map def _write_data(self, mghfile, data, header): - """ Utility routine to write image + """Utility routine to write image Parameters ---------- @@ -573,14 +598,13 @@ def _write_data(self, mghfile, data, header): """ shape = header.get_data_shape() if data.shape != shape: - raise HeaderDataError('Data should be shape (%s)' % - ', '.join(str(s) for s in shape)) + raise HeaderDataError('Data should be shape (%s)' % ', '.join(str(s) for s in shape)) offset = header.get_data_offset() out_dtype = header.get_data_dtype() array_to_file(data, mghfile, out_dtype, offset) def _affine2header(self): - """ Unconditionally set affine into the header """ + """Unconditionally set affine into the header""" hdr = self._header shape = np.array(self._dataobj.shape[:3]) diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 177688c216..3c47f82031 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -13,8 
+13,15 @@ import numpy as np from numpy.testing import assert_allclose, assert_array_equal -from .. import (read_geometry, read_morph_data, read_annot, read_label, - write_geometry, write_morph_data, write_annot) +from .. import ( + read_geometry, + read_morph_data, + read_annot, + read_label, + write_geometry, + write_morph_data, + write_annot, +) from ..io import _pack_rgb from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data @@ -26,7 +33,7 @@ have_freesurfer = False if 'SUBJECTS_DIR' in os.environ: # May have Freesurfer installed with data - data_path = pjoin(os.environ["SUBJECTS_DIR"], DATA_SDIR) + data_path = pjoin(os.environ['SUBJECTS_DIR'], DATA_SDIR) have_freesurfer = isdir(data_path) else: # May have nibabel test data submodule checked out @@ -35,8 +42,10 @@ data_path = pjoin(nib_data, 'nitest-freesurfer', DATA_SDIR) have_freesurfer = isdir(data_path) -freesurfer_test = unittest.skipUnless(have_freesurfer, - f'cannot find freesurfer {DATA_SDIR} directory') +freesurfer_test = unittest.skipUnless( + have_freesurfer, f'cannot find freesurfer {DATA_SDIR} directory' +) + def _hash_file_content(fname): hasher = hashlib.md5() @@ -49,14 +58,15 @@ def _hash_file_content(fname): @freesurfer_test def test_geometry(): """Test IO of .surf""" - surf_path = pjoin(data_path, "surf", "lh.inflated") + surf_path = pjoin(data_path, 'surf', 'lh.inflated') coords, faces = read_geometry(surf_path) assert 0 == faces.min() assert coords.shape[0] == faces.max() + 1 - surf_path = pjoin(data_path, "surf", "lh.sphere") + surf_path = pjoin(data_path, 'surf', 'lh.sphere') coords, faces, volume_info, create_stamp = read_geometry( - surf_path, read_metadata=True, read_stamp=True) + surf_path, read_metadata=True, read_stamp=True + ) assert 0 == faces.min() assert coords.shape[0] == faces.max() + 1 @@ -68,20 +78,18 @@ def test_geometry(): # with respect to read_geometry() with InTemporaryDirectory(): surf_path = 'test' - create_stamp = f"created by {getpass.getuser()} on {time.ctime()}" - volume_info['cras'] = [1., 2., 3.] 
+ create_stamp = f'created by {getpass.getuser()} on {time.ctime()}' + volume_info['cras'] = [1.0, 2.0, 3.0] write_geometry(surf_path, coords, faces, create_stamp, volume_info) - coords2, faces2, volume_info2 = \ - read_geometry(surf_path, read_metadata=True) + coords2, faces2, volume_info2 = read_geometry(surf_path, read_metadata=True) for key in ('xras', 'yras', 'zras', 'cras'): - assert_allclose(volume_info2[key], volume_info[key], - rtol=1e-7, atol=1e-30) + assert_allclose(volume_info2[key], volume_info[key], rtol=1e-7, atol=1e-30) assert np.array_equal(volume_info2['cras'], volume_info['cras']) with open(surf_path, 'rb') as fobj: - np.fromfile(fobj, ">u1", 3) + np.fromfile(fobj, '>u1', 3) read_create_stamp = fobj.readline().decode().rstrip('\n') # now write an incomplete file @@ -92,7 +100,7 @@ def test_geometry(): assert any('extension code' in str(ww.message) for ww in w) volume_info['head'] = [1, 2] - with pytest.warns(UserWarning, match="Unknown extension"): + with pytest.warns(UserWarning, match='Unknown extension'): write_geometry(surf_path, coords, faces, create_stamp, volume_info) volume_info['a'] = 0 @@ -115,8 +123,9 @@ def test_geometry(): @needs_nibabel_data('nitest-freesurfer') def test_quad_geometry(): """Test IO of freesurfer quad files.""" - new_quad = pjoin(get_nibabel_data(), 'nitest-freesurfer', 'subjects', - 'bert', 'surf', 'lh.inflated.nofix') + new_quad = pjoin( + get_nibabel_data(), 'nitest-freesurfer', 'subjects', 'bert', 'surf', 'lh.inflated.nofix' + ) coords, faces = read_geometry(new_quad) assert 0 == faces.min() assert coords.shape[0] == (faces.max() + 1) @@ -124,14 +133,14 @@ def test_quad_geometry(): new_path = 'test' write_geometry(new_path, coords, faces) coords2, faces2 = read_geometry(new_path) - assert np.array_equal(coords,coords2) + assert np.array_equal(coords, coords2) assert np.array_equal(faces, faces2) @freesurfer_test def test_morph_data(): """Test IO of morphometry data file (eg. curvature).""" - curv_path = pjoin(data_path, "surf", "lh.curv") + curv_path = pjoin(data_path, 'surf', 'lh.curv') curv = read_morph_data(curv_path) assert -1.0 < curv.min() < 0 assert 0 < curv.max() < 1.0 @@ -159,21 +168,22 @@ def test_write_morph_data(): # Windows 32-bit overflows Python int if np.dtype(int) != np.dtype(np.int32): with pytest.raises(ValueError): - write_morph_data('test.curv', strided_scalar((big_num,))) + write_morph_data('test.curv', strided_scalar((big_num,))) for shape in bad_shapes: with pytest.raises(ValueError): write_morph_data('test.curv', values.reshape(shape)) + @freesurfer_test def test_annot(): """Test IO of .annot against freesurfer example data.""" annots = ['aparc', 'aparc.a2005s'] for a in annots: - annot_path = pjoin(data_path, "label", f"lh.{a}.annot") + annot_path = pjoin(data_path, 'label', f'lh.{a}.annot') hash_ = _hash_file_content(annot_path) labels, ctab, names = read_annot(annot_path) - assert labels.shape == (163842, ) + assert labels.shape == (163842,) assert ctab.shape == (len(names), 5) labels_orig = None @@ -186,8 +196,10 @@ def test_annot(): elif hash_ == 'd4f5b7cbc2ed363ac6fcf89e19353504': assert np.sum(labels_orig == 1639705) == 13327 else: - raise RuntimeError("Unknown freesurfer file. Please report " - "the problem to the maintainer of nibabel.") + raise RuntimeError( + 'Unknown freesurfer file. Please report ' + 'the problem to the maintainer of nibabel.' 
+ ) # Test equivalence of freesurfer- and nibabel-generated annot files # with respect to read_annot() @@ -217,8 +229,7 @@ def test_read_write_annot(): # that at least one of each label value is present. Label # values are in the range (0, nlabels-1) - they are used # as indices into the lookup table (generated below). - labels = list(range(nlabels)) + \ - list(np.random.randint(0, nlabels, nvertices - nlabels)) + labels = list(range(nlabels)) + list(np.random.randint(0, nlabels, nvertices - nlabels)) labels = np.array(labels, dtype=np.int32) np.random.shuffle(labels) # Generate some random colours for the LUT @@ -229,9 +240,7 @@ def test_read_write_annot(): # for the annotation value. rgbal[0, 3] = 255 # Generate the annotation values for each LUT entry - rgbal[:, 4] = (rgbal[:, 0] + - rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16)) + rgbal[:, 4] = rgbal[:, 0] + rgbal[:, 1] * (2**8) + rgbal[:, 2] * (2**16) annot_path = 'c.annot' with InTemporaryDirectory(): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) @@ -243,12 +252,11 @@ def test_read_write_annot(): def test_write_annot_fill_ctab(): - """Test the `fill_ctab` parameter to :func:`.write_annot`. """ + """Test the `fill_ctab` parameter to :func:`.write_annot`.""" nvertices = 10 nlabels = 3 names = [f'label {l}' for l in range(1, nlabels + 1)] - labels = list(range(nlabels)) + \ - list(np.random.randint(0, nlabels, nvertices - nlabels)) + labels = list(range(nlabels)) + list(np.random.randint(0, nlabels, nvertices - nlabels)) labels = np.array(labels, dtype=np.int32) np.random.shuffle(labels) rgba = np.array(np.random.randint(0, 255, (nlabels, 4)), dtype=np.int32) @@ -265,8 +273,9 @@ def test_write_annot_fill_ctab(): # values back. badannot = (10 * np.arange(nlabels, dtype=np.int32)).reshape(-1, 1) rgbal = np.hstack((rgba, badannot)) - with pytest.warns(UserWarning, - match=f'Annotation values in {annot_path} will be incorrect'): + with pytest.warns( + UserWarning, match=f'Annotation values in {annot_path} will be incorrect' + ): write_annot(annot_path, labels, rgbal, names, fill_ctab=False) labels2, rgbal2, names2 = read_annot(annot_path, orig_ids=True) names2 = [n.decode('ascii') for n in names2] @@ -276,13 +285,12 @@ def test_write_annot_fill_ctab(): # make sure a warning is *not* emitted if fill_ctab is False, but the # annotation values are correct. 
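The `rgbal[:, 4]` expressions reflowed in the test hunks above pack an RGB triple into a single .annot annotation value; a standalone round trip with an arbitrary colour:

    r, g, b = 220, 20, 60                     # arbitrary RGB colour
    annot = r + g * 2**8 + b * 2**16          # pack, as in the tests above
    assert annot == 3937500
    # unpack again with plain shifts and masks
    assert (annot & 0xFF, (annot >> 8) & 0xFF, (annot >> 16) & 0xFF) == (r, g, b)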
rgbal = np.hstack((rgba, np.zeros((nlabels, 1), dtype=np.int32))) - rgbal[:, 4] = (rgbal[:, 0] + - rgbal[:, 1] * (2 ** 8) + - rgbal[:, 2] * (2 ** 16)) + rgbal[:, 4] = rgbal[:, 0] + rgbal[:, 1] * (2**8) + rgbal[:, 2] * (2**16) with clear_and_catch_warnings() as w: write_annot(annot_path, labels, rgbal, names, fill_ctab=False) - assert all(f'Annotation values in {annot_path} will be incorrect' != str(ww.message) - for ww in w) + assert all( + f'Annotation values in {annot_path} will be incorrect' != str(ww.message) for ww in w + ) labels2, rgbal2, names2 = read_annot(annot_path) names2 = [n.decode('ascii') for n in names2] assert np.all(np.isclose(rgbal2[:, :4], rgba)) @@ -292,6 +300,7 @@ def test_write_annot_fill_ctab(): def test_read_annot_old_format(): """Test reading an old-style .annot file.""" + def gen_old_annot_file(fpath, nverts, labels, rgba, names): dt = '>i' vdata = np.zeros((nverts, 2), dtype=dt) @@ -316,12 +325,14 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): fbytes += rgba[i, :].astype(dt).tobytes() with open(fpath, 'wb') as f: f.write(fbytes) + with InTemporaryDirectory(): nverts = 10 nlabels = 3 names = [f'Label {l}' for l in range(nlabels)] - labels = np.concatenate(( - np.arange(nlabels), np.random.randint(0, nlabels, nverts - nlabels))) + labels = np.concatenate( + (np.arange(nlabels), np.random.randint(0, nlabels, nverts - nlabels)) + ) np.random.shuffle(labels) rgba = np.random.randint(0, 255, (nlabels, 4)) # write an old .annot file @@ -337,7 +348,7 @@ def gen_old_annot_file(fpath, nverts, labels, rgba, names): @freesurfer_test def test_label(): """Test IO of .label""" - label_path = pjoin(data_path, "label", "lh.cortex.label") + label_path = pjoin(data_path, 'label', 'lh.cortex.label') label = read_label(label_path) # XXX : test more assert label.min() >= 0 diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 4c812087c2..29f1687c29 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -36,13 +36,14 @@ MGZ_FNAME = os.path.join(data_path, 'test.mgz') # sample voxel to ras matrix (mri_info --vox2ras) -v2r = np.array([[1, 2, 3, -13], [2, 3, 1, -11.5], - [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32) +v2r = np.array( + [[1, 2, 3, -13], [2, 3, 1, -11.5], [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32 +) # sample voxel to ras - tkr matrix (mri_info --vox2ras-tkr) -v2rtkr = np.array([[-1.0, 0.0, 0.0, 1.5], - [0.0, 0.0, 1.0, -2.5], - [0.0, -1.0, 0.0, 2.0], - [0.0, 0.0, 0.0, 1.0]], dtype=np.float32) +v2rtkr = np.array( + [[-1.0, 0.0, 0.0, 1.5], [0.0, 0.0, 1.0, -2.5], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 0.0, 1.0]], + dtype=np.float32, +) BIG_CODES = ('>', 'big', 'BIG', 'b', 'be', 'B', 'BE') LITTLE_CODES = ('<', 'little', 'l', 'le', 'L', 'LE') @@ -55,7 +56,6 @@ LITTLE_CODES += ('swapped', 's', 'S', '!') - def test_read_mgh(): # test.mgz was generated by the following command # mri_volsynth --dim 3 4 5 2 --vol test.mgz @@ -150,11 +150,7 @@ def test_set_zooms(): assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 2]) h.set_zooms([1, 1, 1, 3]) assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 3]) - for zooms in ((-1, 1, 1, 1), - (1, -1, 1, 1), - (1, 1, -1, 1), - (1, 1, 1, -1), - (1, 1, 1, 1, 5)): + for zooms in ((-1, 1, 1, 1), (1, -1, 1, 1), (1, 1, -1, 1), (1, 1, 1, -1), (1, 1, 1, 1, 5)): with pytest.raises(HeaderDataError): h.set_zooms(zooms) # smoke test for tr=0 @@ -162,7 +158,7 @@ def test_set_zooms(): def bad_dtype_mgh(): - """ This function raises 
an MGHError exception because + """This function raises an MGHError exception because uint16 is not a valid MGH datatype. """ # try to write an unsigned short and make sure it @@ -209,11 +205,15 @@ def test_header_updating(): mgz = load(MGZ_FNAME) hdr = mgz.header # Test against mri_info output - exp_aff = np.loadtxt(io.BytesIO(b""" + exp_aff = np.loadtxt( + io.BytesIO( + b""" 1.0000 2.0000 3.0000 -13.0000 2.0000 3.0000 1.0000 -11.5000 3.0000 1.0000 2.0000 -11.5000 - 0.0000 0.0000 0.0000 1.0000""")) + 0.0000 0.0000 0.0000 1.0000""" + ) + ) assert_almost_equal(mgz.affine, exp_aff, 6) assert_almost_equal(hdr.get_affine(), exp_aff, 6) # Test that initial wonky header elements have not changed @@ -224,7 +224,7 @@ def test_header_updating(): mgz2 = _mgh_rt(mgz, img_fobj) hdr2 = mgz2.header assert_almost_equal(hdr2.get_affine(), exp_aff, 6) - assert_array_equal(hdr2['delta'],1) + assert_array_equal(hdr2['delta'], 1) # Change affine, change underlying header info exp_aff_d = exp_aff.copy() exp_aff_d[0, -1] = -14 @@ -233,14 +233,14 @@ def test_header_updating(): mgz2.update_header() assert_almost_equal(hdr2.get_affine(), exp_aff_d, 6) RZS = exp_aff_d[:3, :3] - assert_almost_equal(hdr2['delta'], np.sqrt(np.sum(RZS ** 2, axis=0))) + assert_almost_equal(hdr2['delta'], np.sqrt(np.sum(RZS**2, axis=0))) assert_almost_equal(hdr2['Mdc'].T, RZS / hdr2['delta']) def test_cosine_order(): # Test we are interpreting the cosine order right data = np.arange(60).reshape((3, 4, 5)).astype(np.int32) - aff = np.diag([2., 3, 4, 1]) + aff = np.diag([2.0, 3, 4, 1]) aff[0] = [2, 1, 0, 10] img = MGHImage(data, aff) assert_almost_equal(img.affine, aff, 6) @@ -248,7 +248,7 @@ def test_cosine_order(): img2 = _mgh_rt(img, img_fobj) hdr2 = img2.header RZS = aff[:3, :3] - zooms = np.sqrt(np.sum(RZS ** 2, axis=0)) + zooms = np.sqrt(np.sum(RZS**2, axis=0)) assert_almost_equal(hdr2['Mdc'].T, RZS / zooms) assert_almost_equal(hdr2['delta'], zooms) @@ -259,7 +259,7 @@ def test_eq(): hdr2 = MGHHeader() assert hdr == hdr2 hdr.set_data_shape((2, 3, 4)) - assert(hdr != hdr2) + assert hdr != hdr2 hdr2.set_data_shape((2, 3, 4)) assert hdr == hdr2 @@ -286,7 +286,7 @@ def test_mgh_load_fileobj(): bio = io.BytesIO(contents) fm = MGHImage.make_file_map(mapping=dict(image=bio)) img2 = MGHImage.from_file_map(fm) - assert(img2.dataobj.file_like is bio) + assert img2.dataobj.file_like is bio assert_array_equal(img.get_fdata(), img2.get_fdata()) @@ -340,8 +340,8 @@ def test_mghheader_default_structarr(): class TestMGHImage(tsi.TestSpatialImage, tsi.MmapImageMixin): - """ Apply general image tests to MGHImage - """ + """Apply general image tests to MGHImage""" + image_class = MGHImage can_save = True @@ -419,7 +419,7 @@ def test_bytes(self): # Short binaryblocks give errors (here set through init) # Long binaryblocks are truncated with pytest.raises(WrapStructError): - self.header_class(bb[:self.header_class._hdrdtype.itemsize - 1]) + self.header_class(bb[: self.header_class._hdrdtype.itemsize - 1]) # Checking set to true by default, and prevents nonsense being # set into the header. 
@@ -440,7 +440,7 @@ def test_as_byteswapped(self): # same code just returns a copy for endianness in BIG_CODES: hdr2 = hdr.as_byteswapped(endianness) - assert(hdr2 is not hdr) + assert hdr2 is not hdr assert hdr2 == hdr # Different code raises error diff --git a/nibabel/funcs.py b/nibabel/funcs.py index e5db0477b0..02b9e3ecd7 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -1,4 +1,3 @@ - # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## @@ -7,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Processor functions for images """ +"""Processor functions for images""" import numpy as np from .orientations import io_orientation, OrientationError @@ -15,7 +14,7 @@ def squeeze_image(img): - """ Return image, remove axes length 1 at end of image shape + """Return image, remove axes length 1 at end of image shape For example, an image may have shape (10,20,30,1,1). In this case squeeze will result in an image with shape (10,20,30). See doctests @@ -80,14 +79,11 @@ def squeeze_image(img): return klass.from_image(img) shape = shape[:slen] data = np.asanyarray(img.dataobj).reshape(shape) - return klass(data, - img.affine, - img.header, - img.extra) + return klass(data, img.affine, img.header, img.extra) def concat_images(images, check_affines=True, axis=None): - r""" Concatenate images in list to single image, along specified dimension + r"""Concatenate images in list to single image, along specified dimension Parameters ---------- @@ -108,11 +104,10 @@ def concat_images(images, check_affines=True, axis=None): New image resulting from concatenating `images` across last dimension """ - images = [load(img) if not hasattr(img, 'get_data') - else img for img in images] + images = [load(img) if not hasattr(img, 'get_data') else img for img in images] n_imgs = len(images) if n_imgs == 0: - raise ValueError("Cannot concatenate an empty list of images.") + raise ValueError('Cannot concatenate an empty list of images.') img0 = images[0] affine = img0.affine header = img0.header @@ -121,7 +116,7 @@ def concat_images(images, check_affines=True, axis=None): n_dim = len(shape0) if axis is None: # collect images in output array for efficiency - out_shape = (n_imgs, ) + shape0 + out_shape = (n_imgs,) + shape0 out_data = np.empty(out_shape) else: # collect images in list for use with np.concatenate @@ -135,8 +130,10 @@ def concat_images(images, check_affines=True, axis=None): if len(img.shape) != n_dim: raise ValueError(f'Image {i} has {len(img.shape)} dimensions, image 0 has {n_dim}') if not np.all(np.array(img.shape)[idx_mask] == masked_shape): - raise ValueError(f'shape {img.shape} for image {i} not compatible with ' - f'first image shape {shape0} with axis == {axis}') + raise ValueError( + f'shape {img.shape} for image {i} not compatible with ' + f'first image shape {shape0} with axis == {axis}' + ) if check_affines and not np.all(img.affine == affine): raise ValueError(f'Affine for image {i} does not match affine for first image') # Do not fill cache in image if it is empty @@ -151,7 +148,7 @@ def concat_images(images, check_affines=True, axis=None): def four_to_three(img): - """ Create 3D images from 4D image by slicing over last axis + """Create 3D images from 4D image by slicing over last axis Parameters ---------- @@ -180,7 +177,7 @@ def four_to_three(img): def 
as_closest_canonical(img, enforce_diag=False): - """ Return `img` with data reordered to be closest to canonical + """Return `img` with data reordered to be closest to canonical Canonical order is the ordering of the output axes. @@ -212,6 +209,6 @@ def as_closest_canonical(img, enforce_diag=False): def _aff_is_diag(aff): - """ Utility function returning True if affine is nearly diagonal """ + """Utility function returning True if affine is nearly diagonal""" rzs_aff = aff[:3, :3] return np.allclose(rzs_aff, np.diag(np.diag(rzs_aff))) diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index 54bfbd0ffa..2faaf5ab57 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -17,5 +17,12 @@ gifti """ -from .gifti import (GiftiMetaData, GiftiNVPairs, GiftiLabelTable, GiftiLabel, - GiftiCoordSystem, GiftiDataArray, GiftiImage) +from .gifti import ( + GiftiMetaData, + GiftiNVPairs, + GiftiLabelTable, + GiftiLabel, + GiftiCoordSystem, + GiftiDataArray, + GiftiImage, +) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 31df1d813e..8f5efa8ad8 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Classes defining Gifti objects +"""Classes defining Gifti objects The Gifti specification was (at time of writing) available as a PDF download from http://www.nitrc.org/projects/gifti/ @@ -21,19 +21,16 @@ from ..filebasedimages import SerializableImage from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..caret import CaretMetaData -from .util import (array_index_order_codes, gifti_encoding_codes, - gifti_endian_codes, KIND2FMT) +from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes, KIND2FMT from ..deprecated import deprecate_with_version class _GiftiMDList(list): """List view of GiftiMetaData object that will translate most operations""" + def __init__(self, metadata): self._md = metadata - super().__init__( - GiftiNVPairs._private_init(k, v, metadata) - for k, v in metadata.items() - ) + super().__init__(GiftiNVPairs._private_init(k, v, metadata) for k, v in metadata.items()) def append(self, nvpair): self._md[nvpair.name] = nvpair.value @@ -63,12 +60,11 @@ def remove(self, nvpair): class GiftiMetaData(CaretMetaData): - """ A sequence of GiftiNVPairs containing metadata for a gifti data array - """ + """A sequence of GiftiNVPairs containing metadata for a gifti data array""" @staticmethod def _sanitize(args, kwargs): - """ Sanitize and warn on deprecated arguments + """Sanitize and warn on deprecated arguments Accept nvpair positional/keyword argument that is a single ``GiftiNVPairs`` object. @@ -94,31 +90,34 @@ def _sanitize(args, kwargs): # Positional arg dep_init |= not kwargs and len(args) == 1 and isinstance(args[0], GiftiNVPairs) # Keyword arg - dep_init |= not args and list(kwargs) == ["nvpair"] + dep_init |= not args and list(kwargs) == ['nvpair'] if not dep_init: return args, kwargs warnings.warn( - "GiftiMetaData now has a dict-like interface. " - "See ``pydoc dict`` for initialization options. " - "Passing ``GiftiNVPairs()`` or using the ``nvpair`` " - "keyword will fail or behave unexpectedly in NiBabel 6.0.", - FutureWarning, stacklevel=3) - pair = args[0] if args else kwargs.get("nvpair") + 'GiftiMetaData now has a dict-like interface. ' + 'See ``pydoc dict`` for initialization options. 
' + 'Passing ``GiftiNVPairs()`` or using the ``nvpair`` ' + 'keyword will fail or behave unexpectedly in NiBabel 6.0.', + FutureWarning, + stacklevel=3, + ) + pair = args[0] if args else kwargs.get('nvpair') return (), {pair.name: pair.value} @property @deprecate_with_version( - 'The data attribute is deprecated. Use GiftiMetaData object ' - 'directly as a dict.', - '4.0', '6.0') + 'The data attribute is deprecated. Use GiftiMetaData object ' 'directly as a dict.', + '4.0', + '6.0', + ) def data(self): return _GiftiMDList(self) @classmethod @deprecate_with_version( - 'from_dict class method deprecated. Use GiftiMetaData directly.', - '4.0', '6.0') + 'from_dict class method deprecated. Use GiftiMetaData directly.', '4.0', '6.0' + ) def from_dict(klass, data_dict): return klass(data_dict) @@ -126,9 +125,11 @@ def from_dict(klass, data_dict): @deprecate_with_version( 'metadata property deprecated. Use GiftiMetaData object ' 'as dict or pass to dict() for a standard dictionary.', - '4.0', '6.0') + '4.0', + '6.0', + ) def metadata(self): - """ Returns metadata as dictionary """ + """Returns metadata as dictionary""" return dict(self) def print_summary(self): @@ -136,17 +137,19 @@ def print_summary(self): class GiftiNVPairs: - """ Gifti name / value pairs + """Gifti name / value pairs Attributes ---------- name : str value : str """ + @deprecate_with_version( - 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' - 'as a dict, instead.', - '4.0', '6.0') + 'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' 'as a dict, instead.', + '4.0', + '6.0', + ) def __init__(self, name='', value=''): self._name = name self._value = value @@ -188,7 +191,7 @@ def value(self, val): class GiftiLabelTable(xml.XmlSerializable): - """ Gifti label table: a sequence of key, label pairs + """Gifti label table: a sequence of key, label pairs From the gifti spec dated 2011-01-14: The label table is used by DataArrays whose values are an key into the @@ -200,7 +203,7 @@ def __init__(self): self.labels = [] def __repr__(self): - return f"" + return f'' def get_labels_as_dict(self): self.labels_as_dict = {} @@ -224,7 +227,7 @@ def print_summary(self): class GiftiLabel(xml.XmlSerializable): - """ Gifti label: association of integer key with optional RGBA values + """Gifti label: association of integer key with optional RGBA values Quotes are from the gifti spec dated 2011-01-14. @@ -261,20 +264,18 @@ def __init__(self, key=0, red=None, green=None, blue=None, alpha=None): self.alpha = alpha def __repr__(self): - chars = 255 * np.array( - [self.red or 0, self.green or 0, self.blue or 0, self.alpha or 0] - ) + chars = 255 * np.array([self.red or 0, self.green or 0, self.blue or 0, self.alpha or 0]) r, g, b, a = chars.astype('u1') return f'' @property def rgba(self): - """ Returns RGBA as tuple """ + """Returns RGBA as tuple""" return (self.red, self.green, self.blue, self.alpha) @rgba.setter def rgba(self, rgba): - """ Set RGBA via sequence + """Set RGBA via sequence Parameters ---------- @@ -296,7 +297,7 @@ def _arr2txt(arr, elem_fmt): class GiftiCoordSystem(xml.XmlSerializable): - """ Gifti coordinate system transform matrix + """Gifti coordinate system transform matrix Quotes are from the gifti spec dated 2011-01-14. 
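The deprecation warnings added above steer callers away from `GiftiNVPairs` lists and toward plain mapping operations on `GiftiMetaData`; a sketch of the replacement usage (the key and value here are illustrative, not from the patch):

    from nibabel.gifti import GiftiMetaData

    md = GiftiMetaData({'AnatomicalStructurePrimary': 'CortexLeft'})
    md['Name'] = 'my_surface'   # ordinary dict-style access
    print(dict(md))             # both pairs come back as a standard dictionary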
@@ -345,7 +346,7 @@ def __init__(self, dataspace=0, xformspace=0, xform=None): def __repr__(self): src = xform_codes.label[self.dataspace] dst = xform_codes.label[self.xformspace] - return f"" + return f'' def _to_xml_element(self): coord_xform = xml.Element('CoordinateSystemTransformMatrix') @@ -365,9 +366,9 @@ def print_summary(self): def _data_tag_element(dataarray, encoding, dtype, ordering): - """ Creates data tag with given `encoding`, returns as XML element - """ + """Creates data tag with given `encoding`, returns as XML element""" import zlib + order = array_index_order_codes.npcode[ordering] enclabel = gifti_encoding_codes.label[encoding] if enclabel == 'ASCII': @@ -378,7 +379,7 @@ def _data_tag_element(dataarray, encoding, dtype, ordering): out = zlib.compress(out) da = base64.b64encode(out).decode() elif enclabel == 'External': - raise NotImplementedError("In what format are the external files?") + raise NotImplementedError('In what format are the external files?') else: da = '' @@ -388,7 +389,7 @@ def _data_tag_element(dataarray, encoding, dtype, ordering): class GiftiDataArray(xml.XmlSerializable): - """ Container for Gifti numerical data array and associated metadata + """Container for Gifti numerical data array and associated metadata Quotes are from the gifti spec dated 2011-01-14. @@ -437,17 +438,19 @@ class GiftiDataArray(xml.XmlSerializable): Position in bytes within `ext_fname` at which to start reading data. """ - def __init__(self, - data=None, - intent='NIFTI_INTENT_NONE', - datatype=None, - encoding="GIFTI_ENCODING_B64GZ", - endian=sys.byteorder, - coordsys=None, - ordering="C", - meta=None, - ext_fname='', - ext_offset=0): + def __init__( + self, + data=None, + intent='NIFTI_INTENT_NONE', + datatype=None, + encoding='GIFTI_ENCODING_B64GZ', + endian=sys.byteorder, + coordsys=None, + ordering='C', + meta=None, + ext_fname='', + ext_offset=0, + ): """ Returns a shell object that cannot be saved. 
""" @@ -460,15 +463,19 @@ def __init__(self, self.endian = gifti_endian_codes.code[endian] self.coordsys = coordsys or GiftiCoordSystem() self.ind_ord = array_index_order_codes.code[ordering] - self.meta = (GiftiMetaData() if meta is None else - meta if isinstance(meta, GiftiMetaData) else - GiftiMetaData(meta)) + self.meta = ( + GiftiMetaData() + if meta is None + else meta + if isinstance(meta, GiftiMetaData) + else GiftiMetaData(meta) + ) self.ext_fname = ext_fname self.ext_offset = ext_offset self.dims = [] if self.data is None else list(self.data.shape) def __repr__(self): - return f"" + return f'' @property def num_dim(self): @@ -479,15 +486,19 @@ def _to_xml_element(self): self.endian = gifti_endian_codes.code[sys.byteorder] # All attribute values must be strings - data_array = xml.Element('DataArray', attrib={ - 'Intent': intent_codes.niistring[self.intent], - 'DataType': data_type_codes.niistring[self.datatype], - 'ArrayIndexingOrder': array_index_order_codes.label[self.ind_ord], - 'Dimensionality': str(self.num_dim), - 'Encoding': gifti_encoding_codes.specs[self.encoding], - 'Endian': gifti_endian_codes.specs[self.endian], - 'ExternalFileName': self.ext_fname, - 'ExternalFileOffset': str(self.ext_offset)}) + data_array = xml.Element( + 'DataArray', + attrib={ + 'Intent': intent_codes.niistring[self.intent], + 'DataType': data_type_codes.niistring[self.datatype], + 'ArrayIndexingOrder': array_index_order_codes.label[self.ind_ord], + 'Dimensionality': str(self.num_dim), + 'Encoding': gifti_encoding_codes.specs[self.encoding], + 'Endian': gifti_endian_codes.specs[self.endian], + 'ExternalFileName': self.ext_fname, + 'ExternalFileOffset': str(self.ext_offset), + }, + ) for di, dn in enumerate(self.dims): data_array.attrib['Dim%d' % di] = str(dn) @@ -497,18 +508,20 @@ def _to_xml_element(self): data_array.append(self.coordsys._to_xml_element()) # write data array depending on the encoding data_array.append( - _data_tag_element(self.data, - gifti_encoding_codes.specs[self.encoding], - data_type_codes.dtype[self.datatype], - self.ind_ord)) + _data_tag_element( + self.data, + gifti_encoding_codes.specs[self.encoding], + data_type_codes.dtype[self.datatype], + self.ind_ord, + ) + ) return data_array def print_summary(self): print('Intent: ', intent_codes.niistring[self.intent]) print('DataType: ', data_type_codes.niistring[self.datatype]) - print('ArrayIndexingOrder: ', - array_index_order_codes.label[self.ind_ord]) + print('ArrayIndexingOrder: ', array_index_order_codes.label[self.ind_ord]) print('Dimensionality: ', self.num_dim) print('Dimensions: ', self.dims) print('Encoding: ', gifti_encoding_codes.specs[self.encoding]) @@ -522,12 +535,12 @@ def print_summary(self): @property def metadata(self): - """ Returns metadata as dictionary """ + """Returns metadata as dictionary""" return dict(self.meta) class GiftiImage(xml.XmlSerializable, SerializableImage): - """ GIFTI image object + """GIFTI image object The Gifti spec suggests using the following suffixes to your filename when saving each specific type of data: @@ -555,6 +568,7 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): The Gifti file is stored in endian convention of the current machine. """ + valid_exts = ('.gii',) files_types = (('image', '.gii'),) _compressed_suffixes = ('.gz', '.bz2') @@ -564,10 +578,17 @@ class GiftiImage(xml.XmlSerializable, SerializableImage): # the class has been defined, at the end of the class definition. 
parser = None - def __init__(self, header=None, extra=None, file_map=None, meta=None, - labeltable=None, darrays=None, version="1.0"): - super(GiftiImage, self).__init__(header=header, extra=extra, - file_map=file_map) + def __init__( + self, + header=None, + extra=None, + file_map=None, + meta=None, + labeltable=None, + darrays=None, + version='1.0', + ): + super(GiftiImage, self).__init__(header=header, extra=extra, file_map=file_map) if darrays is None: darrays = [] if meta is None: @@ -591,14 +612,14 @@ def labeltable(self): @labeltable.setter def labeltable(self, labeltable): - """ Set the labeltable for this GiftiImage + """Set the labeltable for this GiftiImage Parameters ---------- labeltable : :class:`GiftiLabelTable` instance """ if not isinstance(labeltable, GiftiLabelTable): - raise TypeError("Not a valid GiftiLabelTable instance") + raise TypeError('Not a valid GiftiLabelTable instance') self._labeltable = labeltable @property @@ -607,41 +628,40 @@ def meta(self): @meta.setter def meta(self, meta): - """ Set the metadata for this GiftiImage + """Set the metadata for this GiftiImage Parameters ---------- meta : :class:`GiftiMetaData` instance """ if not isinstance(meta, GiftiMetaData): - raise TypeError("Not a valid GiftiMetaData instance") + raise TypeError('Not a valid GiftiMetaData instance') self._meta = meta def add_gifti_data_array(self, dataarr): - """ Adds a data array to the GiftiImage + """Adds a data array to the GiftiImage Parameters ---------- dataarr : :class:`GiftiDataArray` instance """ if not isinstance(dataarr, GiftiDataArray): - raise TypeError("Not a valid GiftiDataArray instance") + raise TypeError('Not a valid GiftiDataArray instance') self.darrays.append(dataarr) def remove_gifti_data_array(self, ith): - """ Removes the ith data array element from the GiftiImage """ + """Removes the ith data array element from the GiftiImage""" self.darrays.pop(ith) def remove_gifti_data_array_by_intent(self, intent): - """ Removes all the data arrays with the given intent type """ + """Removes all the data arrays with the given intent type""" intent2remove = intent_codes.code[intent] for dele in self.darrays: if dele.intent == intent2remove: self.darrays.remove(dele) def get_arrays_from_intent(self, intent): - """ Return list of GiftiDataArray elements matching given intent - """ + """Return list of GiftiDataArray elements matching given intent""" it = intent_codes.code[intent] return [x for x in self.darrays if x.intent == it] @@ -800,9 +820,9 @@ def print_summary(self): print('----end----') def _to_xml_element(self): - GIFTI = xml.Element('GIFTI', attrib={ - 'Version': self.version, - 'NumberOfDataArrays': str(self.numDA)}) + GIFTI = xml.Element( + 'GIFTI', attrib={'Version': self.version, 'NumberOfDataArrays': str(self.numDA)} + ) if self.meta is not None: GIFTI.append(self.meta._to_xml_element()) if self.labeltable is not None: @@ -812,16 +832,18 @@ def _to_xml_element(self): return GIFTI def to_xml(self, enc='utf-8'): - """ Return XML corresponding to image content """ + """Return XML corresponding to image content""" return b""" -""" + xml.XmlSerializable.to_xml(self, enc) +""" + xml.XmlSerializable.to_xml( + self, enc + ) # Avoid the indirection of going through to_file_map to_bytes = to_xml def to_file_map(self, file_map=None): - """ Save the current image to the specified file_map + """Save the current image to the specified file_map Parameters ---------- @@ -880,4 +902,5 @@ def from_filename(klass, filename, buffer_size=35000000, mmap=True): # Now GiftiImage is 
defined, we can import the parser module and set the parser from .parse_gifti_fast import GiftiImageParser + GiftiImage.parser = GiftiImageParser diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index ed55fd97ea..5de4c2e22c 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -17,17 +17,21 @@ import numpy as np -from .gifti import (GiftiMetaData, GiftiImage, GiftiLabel, - GiftiLabelTable, GiftiDataArray, - GiftiCoordSystem) -from .util import (array_index_order_codes, gifti_encoding_codes, - gifti_endian_codes) +from .gifti import ( + GiftiMetaData, + GiftiImage, + GiftiLabel, + GiftiLabelTable, + GiftiDataArray, + GiftiCoordSystem, +) +from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..xmlutils import XmlParser class GiftiParseError(ExpatError): - """ Gifti-specific parsing error """ + """Gifti-specific parsing error""" def read_data_block(darray, fname, data, mmap): @@ -60,8 +64,7 @@ def read_data_block(darray, fname, data, mmap): ``numpy.ndarray`` or ``numpy.memmap`` containing the parsed data """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " - "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") if mmap is True: mmap = 'c' enclabel = gifti_encoding_codes.label[darray.encoding] @@ -81,8 +84,9 @@ def read_data_block(darray, fname, data, mmap): # attributes if enclabel == 'External': if fname is None: - raise GiftiParseError('ExternalFileBinary is not supported ' - 'when loading from in-memory XML') + raise GiftiParseError( + 'ExternalFileBinary is not supported ' 'when loading from in-memory XML' + ) ext_fname = op.join(op.dirname(fname), darray.ext_fname) if not op.exists(ext_fname): raise GiftiParseError('Cannot locate external file ' + ext_fname) @@ -90,11 +94,13 @@ def read_data_block(darray, fname, data, mmap): newarr = None if mmap: try: - newarr = np.memmap(ext_fname, - dtype=dtype, - mode=mmap, - offset=darray.ext_offset, - shape=tuple(darray.dims)) + newarr = np.memmap( + ext_fname, + dtype=dtype, + mode=mmap, + offset=darray.ext_offset, + shape=tuple(darray.dims), + ) # If the memmap fails, we ignore the error and load the data into # memory below except (AttributeError, TypeError, ValueError): @@ -128,13 +134,11 @@ def read_data_block(darray, fname, data, mmap): sh = tuple(darray.dims) if len(newarr.shape) != len(sh): - newarr = newarr.reshape( - sh, order=array_index_order_codes.npcode[darray.ind_ord]) + newarr = newarr.reshape(sh, order=array_index_order_codes.npcode[darray.ind_ord]) # check if we need to byteswap required_byteorder = gifti_endian_codes.byteorder[darray.endian] - if (required_byteorder in ('big', 'little') and - required_byteorder != sys.byteorder): + if required_byteorder in ('big', 'little') and required_byteorder != sys.byteorder: newarr = newarr.byteswap() return newarr @@ -145,12 +149,10 @@ def _str2int(in_str): class GiftiImageParser(XmlParser): - - def __init__(self, encoding=None, buffer_size=35000000, verbose=0, - mmap=True): - super(GiftiImageParser, self).__init__(encoding=encoding, - buffer_size=buffer_size, - verbose=verbose) + def __init__(self, encoding=None, buffer_size=35000000, verbose=0, mmap=True): + super(GiftiImageParser, self).__init__( + encoding=encoding, buffer_size=buffer_size, verbose=verbose + ) # output self.img = None @@ -220,45 +222,44 @@ def 
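The `np.memmap` call reformatted in `read_data_block` above is how external GIFTI data files are mapped; a self-contained sketch with a made-up file name, dtype, and shape standing in for the DataArray's ExternalFileName attributes:

    import numpy as np

    fname = 'darray.bin'                       # hypothetical external data file
    np.arange(12, dtype='<f4').tofile(fname)   # stand-in for the external binary
    arr = np.memmap(fname, dtype='<f4', mode='c',   # 'c' = copy-on-write, the default above
                    offset=0, shape=(4, 3))
    print(arr.shape)                           # (4, 3)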
StartElementHandler(self, name, attrs): elif name == 'Label': self.label = GiftiLabel() - if "Index" in attrs: - self.label.key = int(attrs["Index"]) - if "Key" in attrs: - self.label.key = int(attrs["Key"]) - if "Red" in attrs: - self.label.red = float(attrs["Red"]) - if "Green" in attrs: - self.label.green = float(attrs["Green"]) - if "Blue" in attrs: - self.label.blue = float(attrs["Blue"]) - if "Alpha" in attrs: - self.label.alpha = float(attrs["Alpha"]) + if 'Index' in attrs: + self.label.key = int(attrs['Index']) + if 'Key' in attrs: + self.label.key = int(attrs['Key']) + if 'Red' in attrs: + self.label.red = float(attrs['Red']) + if 'Green' in attrs: + self.label.green = float(attrs['Green']) + if 'Blue' in attrs: + self.label.blue = float(attrs['Blue']) + if 'Alpha' in attrs: + self.label.alpha = float(attrs['Alpha']) self.write_to = 'Label' elif name == 'DataArray': self.da = GiftiDataArray() - if "Intent" in attrs: - self.da.intent = intent_codes.code[attrs["Intent"]] - if "DataType" in attrs: - self.da.datatype = data_type_codes.code[attrs["DataType"]] - if "ArrayIndexingOrder" in attrs: - self.da.ind_ord = array_index_order_codes.code[ - attrs["ArrayIndexingOrder"]] - num_dim = int(attrs.get("Dimensionality", 0)) + if 'Intent' in attrs: + self.da.intent = intent_codes.code[attrs['Intent']] + if 'DataType' in attrs: + self.da.datatype = data_type_codes.code[attrs['DataType']] + if 'ArrayIndexingOrder' in attrs: + self.da.ind_ord = array_index_order_codes.code[attrs['ArrayIndexingOrder']] + num_dim = int(attrs.get('Dimensionality', 0)) for i in range(num_dim): - di = f"Dim{i}" + di = f'Dim{i}' if di in attrs: self.da.dims.append(int(attrs[di])) # dimensionality has to correspond to the number of DimX given # TODO (bcipolli): don't assert; raise parse warning, and recover. assert len(self.da.dims) == num_dim - if "Encoding" in attrs: - self.da.encoding = gifti_encoding_codes.code[attrs["Encoding"]] - if "Endian" in attrs: - self.da.endian = gifti_endian_codes.code[attrs["Endian"]] - if "ExternalFileName" in attrs: - self.da.ext_fname = attrs["ExternalFileName"] - if "ExternalFileOffset" in attrs: - self.da.ext_offset = _str2int(attrs["ExternalFileOffset"]) + if 'Encoding' in attrs: + self.da.encoding = gifti_encoding_codes.code[attrs['Encoding']] + if 'Endian' in attrs: + self.da.endian = gifti_endian_codes.code[attrs['Endian']] + if 'ExternalFileName' in attrs: + self.da.ext_fname = attrs['ExternalFileName'] + if 'ExternalFileOffset' in attrs: + self.da.ext_offset = _str2int(attrs['ExternalFileOffset']) self.img.darrays.append(self.da) self.fsm_state.append('DataArray') @@ -292,9 +293,10 @@ def EndElementHandler(self, name): if name == 'GIFTI': if hasattr(self, 'expected_numDA') and self.expected_numDA != self.img.numDA: - warnings.warn("Actual # of data arrays does not match " - "# expected: %d != %d." % (self.expected_numDA, - self.img.numDA)) + warnings.warn( + 'Actual # of data arrays does not match ' + '# expected: %d != %d.' 
% (self.expected_numDA, self.img.numDA) + ) # remove last element of the list self.fsm_state.pop() # assert len(self.fsm_state) == 0 @@ -333,8 +335,7 @@ def EndElementHandler(self, name): self.fsm_state.pop() self.coordsys = None - elif name in ['DataSpace', 'TransformedSpace', 'MatrixData', - 'Name', 'Value', 'Data']: + elif name in ['DataSpace', 'TransformedSpace', 'MatrixData', 'Name', 'Value', 'Data']: self.write_to = None elif name == 'Label': @@ -343,7 +344,7 @@ def EndElementHandler(self, name): self.write_to = None def CharacterDataHandler(self, data): - """ Collect character data chunks pending collation + """Collect character data chunks pending collation The parser breaks the data up into chunks of size depending on the buffer_size of the parser. A large bit of character data, with @@ -356,7 +357,7 @@ def CharacterDataHandler(self, data): self._char_blocks.append(data) def flush_chardata(self): - """ Collate and process collected character data""" + """Collate and process collected character data""" # Nothing to do for empty elements, except for Data elements which # are within a DataArray with an external file if self.write_to != 'Data' and self._char_blocks is None: @@ -395,8 +396,7 @@ def flush_chardata(self): c.close() elif self.write_to == 'Data': - self.da.data = read_data_block(self.da, self.fname, data, - self.mmap) + self.da.data = read_data_block(self.da, self.fname, data, self.mmap) # update the endianness according to the # current machine setting self.endian = gifti_endian_codes.code[sys.byteorder] diff --git a/nibabel/gifti/tests/test_1.py b/nibabel/gifti/tests/test_1.py index a464ee49ef..0e19e59c43 100644 --- a/nibabel/gifti/tests/test_1.py +++ b/nibabel/gifti/tests/test_1.py @@ -1,4 +1,4 @@ -""" Testing loading of gifti file +"""Testing loading of gifti file The file is ``test_1`` because we are testing a bug where, if we try to load a file before instantiating some Gifti objects, loading fails with an diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 1fa4eb8917..73ae9ed95d 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -1,4 +1,4 @@ -""" Testing gifti objects +"""Testing gifti objects """ import warnings import sys @@ -9,17 +9,29 @@ from nibabel.tmpdirs import InTemporaryDirectory from ... import load -from .. import (GiftiImage, GiftiDataArray, GiftiLabel, - GiftiLabelTable, GiftiMetaData, GiftiNVPairs, - GiftiCoordSystem) +from .. import ( + GiftiImage, + GiftiDataArray, + GiftiLabel, + GiftiLabelTable, + GiftiMetaData, + GiftiNVPairs, + GiftiCoordSystem, +) from ...nifti1 import data_type_codes from ...fileholders import FileHolder from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest from ...testing import test_data -from .test_parse_gifti_fast import (DATA_FILE1, DATA_FILE2, DATA_FILE3, - DATA_FILE4, DATA_FILE5, DATA_FILE6) +from .test_parse_gifti_fast import ( + DATA_FILE1, + DATA_FILE2, + DATA_FILE3, + DATA_FILE4, + DATA_FILE5, + DATA_FILE6, +) import itertools @@ -51,6 +63,7 @@ def test_agg_data(): assert surf_gii_img.agg_data(('pointset', 'triangle')) == (point_data, triangle_data) assert surf_gii_img.agg_data(('triangle', 'pointset')) == (triangle_data, point_data) + def test_gifti_image(): # Check that we're not modifying the default empty list in the default # arguments. 
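
A note on the external-file branch of ``read_data_block`` reformatted earlier in this patch: the logic tries ``numpy.memmap`` first and quietly falls back to reading the array into memory. A stripped-down sketch of that pattern, assuming a raw array on disk — the helper name and arguments are illustrative, not nibabel API:

    import numpy as np

    def load_external(fname, dtype, offset, shape):
        # Try to memory-map the external data file (copy-on-write, as
        # read_data_block does with mmap='c'); fall back to np.fromfile
        # when mapping is unavailable for this file or platform.
        try:
            return np.memmap(fname, dtype=dtype, mode='c', offset=offset, shape=shape)
        except (AttributeError, TypeError, ValueError):
            with open(fname, 'rb') as fobj:
                fobj.seek(offset)
                count = int(np.prod(shape))
                return np.fromfile(fobj, dtype=dtype, count=count).reshape(shape)
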
@@ -104,11 +117,13 @@ def test_gifti_image_bad_inputs(): # Try to set to non-table def assign_labeltable(val): img.labeltable = val + pytest.raises(TypeError, assign_labeltable, 'not-a-table') # Try to set to non-table def assign_metadata(val): img.meta = val + pytest.raises(TypeError, assign_metadata, 'not-a-meta') @@ -172,7 +187,7 @@ def test_dataarray_init(): assert gda(ordering='ColumnMajorOrder').ind_ord == 2 pytest.raises(KeyError, gda, ordering='not an ordering') # metadata - meta_dict=dict(one=1, two=2) + meta_dict = dict(one=1, two=2) assert gda(meta=GiftiMetaData(meta_dict)).meta == meta_dict assert gda(meta=meta_dict).meta == meta_dict assert gda(meta=None).meta == {} @@ -307,6 +322,7 @@ def test_gifti_label_rgba(): def assign_rgba(gl, val): gl.rgba = val + gl3 = GiftiLabel(**kwargs) pytest.raises(ValueError, assign_rgba, gl3, rgba[:2]) pytest.raises(ValueError, assign_rgba, gl3, rgba.tolist() + rgba.tolist()) @@ -318,14 +334,14 @@ def assign_rgba(gl, val): def test_print_summary(): - for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, - DATA_FILE5, DATA_FILE6]: + for fil in [DATA_FILE1, DATA_FILE2, DATA_FILE3, DATA_FILE4, DATA_FILE5, DATA_FILE6]: gimg = load(fil) gimg.print_summary() def test_gifti_coord(): from ..gifti import GiftiCoordSystem + gcs = GiftiCoordSystem() assert gcs.xform is not None @@ -339,7 +355,7 @@ def test_gifti_round_trip(): # From section 14.4 in GIFTI Surface Data Format Version 1.0 # (with some adaptations) - test_data = b''' + test_data = b""" -''' +""" exp_verts = np.zeros((4, 3)) exp_verts[0, 0] = 10.5 exp_verts[1, 1] = 20.5 exp_verts[2, 2] = 30.5 - exp_faces = np.asarray([[0, 1, 2], [1, 2, 3], [0, 1, 3], [0, 2, 3]], - dtype=np.int32) + exp_faces = np.asarray([[0, 1, 2], [1, 2, 3], [0, 1, 3], [0, 2, 3]], dtype=np.int32) def _check_gifti(gio): vertices = gio.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0].data @@ -449,13 +464,13 @@ def test_data_array_round_trip(): def test_darray_dtype_coercion_failures(): dtypes = (np.uint8, np.int32, np.int64, np.float32, np.float64) encodings = ('ASCII', 'B64BIN', 'B64GZ') - for data_dtype, darray_dtype, encoding in itertools.product(dtypes, - dtypes, - encodings): - da = GiftiDataArray(np.arange(10).astype(data_dtype), - encoding=encoding, - intent='NIFTI_INTENT_NODE_INDEX', - datatype=darray_dtype) + for data_dtype, darray_dtype, encoding in itertools.product(dtypes, dtypes, encodings): + da = GiftiDataArray( + np.arange(10).astype(data_dtype), + encoding=encoding, + intent='NIFTI_INTENT_NODE_INDEX', + datatype=darray_dtype, + ) gii = GiftiImage(darrays=[da]) gii_copy = GiftiImage.from_bytes(gii.to_bytes()) da_copy = gii_copy.darrays[0] diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index d376611581..d1f61d3c22 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -46,80 +46,113 @@ numDA = [2, 1, 1, 1, 2, 1, 2] DATA_FILE1_darr1 = np.array( - [[-16.07201, -66.187515, 21.266994], - [-16.705893, -66.054337, 21.232786], - [-17.614349, -65.401642, 21.071466]]) + [ + [-16.07201, -66.187515, 21.266994], + [-16.705893, -66.054337, 21.232786], + [-17.614349, -65.401642, 21.071466], + ] +) DATA_FILE1_darr2 = np.array([0, 1, 2]) -DATA_FILE2_darr1 = np.array([[0.43635699], - [0.270017], - [0.133239], - [0.35054299], - [0.26538199], - [0.32122701], - [0.23495001], - [0.26671499], - [0.306851], - [0.36302799]], dtype=np.float32) +DATA_FILE2_darr1 = np.array( + [ + [0.43635699], + [0.270017], + 
[0.133239], + [0.35054299], + [0.26538199], + [0.32122701], + [0.23495001], + [0.26671499], + [0.306851], + [0.36302799], + ], + dtype=np.float32, +) DATA_FILE3_darr1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]) -DATA_FILE4_darr1 = np.array([[-0.57811606], - [-0.53871965], - [-0.44602534], - [-0.56532663], - [-0.51392376], - [-0.43225467], - [-0.54646534], - [-0.48011276], - [-0.45624232], - [-0.31101292]], dtype=np.float32) - -DATA_FILE5_darr1 = np.array([[155.17539978, 135.58103943, 98.30715179], - [140.33973694, 190.0491333, 73.24776459], - [157.3598938, 196.97969055, 83.65809631], - [171.46174622, 137.43661499, 78.4709549], - [148.54592896, 97.06752777, 65.96373749], - [123.45701599, 111.46841431, 66.3571167], - [135.30892944, 202.28720093, 36.38148499], - [178.28155518, 162.59469604, 37.75128937], - [178.11087036, 115.28820038, 57.17986679], - [142.81582642, 82.82115173, 31.02205276]], dtype=np.float32) - -DATA_FILE5_darr2 = np.array([[6402, 17923, 25602], - [14085, 25602, 17923], - [25602, 14085, 4483], - [17923, 1602, 14085], - [4483, 25603, 25602], - [25604, 25602, 25603], - [25602, 25604, 6402], - [25603, 3525, 25604], - [1123, 17922, 12168], - [25604, 12168, 17922]], dtype=np.int32) +DATA_FILE4_darr1 = np.array( + [ + [-0.57811606], + [-0.53871965], + [-0.44602534], + [-0.56532663], + [-0.51392376], + [-0.43225467], + [-0.54646534], + [-0.48011276], + [-0.45624232], + [-0.31101292], + ], + dtype=np.float32, +) + +DATA_FILE5_darr1 = np.array( + [ + [155.17539978, 135.58103943, 98.30715179], + [140.33973694, 190.0491333, 73.24776459], + [157.3598938, 196.97969055, 83.65809631], + [171.46174622, 137.43661499, 78.4709549], + [148.54592896, 97.06752777, 65.96373749], + [123.45701599, 111.46841431, 66.3571167], + [135.30892944, 202.28720093, 36.38148499], + [178.28155518, 162.59469604, 37.75128937], + [178.11087036, 115.28820038, 57.17986679], + [142.81582642, 82.82115173, 31.02205276], + ], + dtype=np.float32, +) + +DATA_FILE5_darr2 = np.array( + [ + [6402, 17923, 25602], + [14085, 25602, 17923], + [25602, 14085, 4483], + [17923, 1602, 14085], + [4483, 25603, 25602], + [25604, 25602, 25603], + [25602, 25604, 6402], + [25603, 3525, 25604], + [1123, 17922, 12168], + [25604, 12168, 17922], + ], + dtype=np.int32, +) DATA_FILE6_darr1 = np.array([9182740, 9182740, 9182740], dtype=np.float32) -DATA_FILE7_darr1 = np.array([[-1., -1., -1.], - [-1., -1., 1.], - [-1., 1., -1.], - [-1., 1., 1.], - [ 1., -1., -1.], - [ 1., -1., 1.], - [ 1., 1., -1.], - [ 1., 1., 1.]], dtype=np.float32) - -DATA_FILE7_darr2 = np.array([[0, 6, 4], - [0, 2, 6], - [1, 5, 3], - [3, 5, 7], - [0, 4, 1], - [1, 4, 5], - [2, 7, 6], - [2, 3, 7], - [0, 1, 2], - [1, 3, 2], - [4, 7, 5], - [4, 6, 7]], dtype=np.int32) +DATA_FILE7_darr1 = np.array( + [ + [-1.0, -1.0, -1.0], + [-1.0, -1.0, 1.0], + [-1.0, 1.0, -1.0], + [-1.0, 1.0, 1.0], + [1.0, -1.0, -1.0], + [1.0, -1.0, 1.0], + [1.0, 1.0, -1.0], + [1.0, 1.0, 1.0], + ], + dtype=np.float32, +) + +DATA_FILE7_darr2 = np.array( + [ + [0, 6, 4], + [0, 2, 6], + [1, 5, 3], + [3, 5, 7], + [0, 4, 1], + [1, 4, 5], + [2, 7, 6], + [2, 3, 7], + [0, 1, 2], + [1, 3, 2], + [4, 7, 5], + [4, 6, 7], + ], + dtype=np.int32, +) def assert_default_types(loaded): @@ -132,8 +165,9 @@ def assert_default_types(loaded): continue with suppress_warnings(): loadedtype = type(getattr(loaded, attr)) - assert loadedtype == defaulttype, ( - f"Type mismatch for attribute: {attr} ({loadedtype} != {defaulttype})") + assert ( + loadedtype == defaulttype + ), f'Type mismatch for attribute: 
{attr} ({loadedtype} != {defaulttype})' def test_default_types(): @@ -197,10 +231,8 @@ def test_load_dataarray1(): assert 'AnatomicalStructureSecondary' in me me['AnatomicalStructurePrimary'] == 'CortexLeft' assert_array_almost_equal(img.darrays[0].coordsys.xform, np.eye(4, 4)) - assert xform_codes.niistring[ - img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' - assert xform_codes.niistring[img.darrays[ - 0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' + assert xform_codes.niistring[img.darrays[0].coordsys.dataspace] == 'NIFTI_XFORM_TALAIRACH' + assert xform_codes.niistring[img.darrays[0].coordsys.xformspace] == 'NIFTI_XFORM_TALAIRACH' def test_load_dataarray2(): @@ -276,8 +308,8 @@ def test_readwritedata(): save(img, 'test.gii') img2 = load('test.gii') assert img.numDA == img2.numDA - assert_array_almost_equal(img.darrays[0].data, - img2.darrays[0].data) + assert_array_almost_equal(img.darrays[0].data, img2.darrays[0].data) + def test_modify_darray(): for fname in (DATA_FILE1, DATA_FILE2, DATA_FILE5): @@ -302,13 +334,13 @@ def test_write_newmetadata(): def test_load_getbyintent(): img = load(DATA_FILE1) - da = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET") + da = img.get_arrays_from_intent('NIFTI_INTENT_POINTSET') assert len(da) == 1 - da = img.get_arrays_from_intent("NIFTI_INTENT_TRIANGLE") + da = img.get_arrays_from_intent('NIFTI_INTENT_TRIANGLE') assert len(da) == 1 - da = img.get_arrays_from_intent("NIFTI_INTENT_CORREL") + da = img.get_arrays_from_intent('NIFTI_INTENT_CORREL') assert len(da) == 0 assert da == [] diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py index 4071c97312..7659ee33cc 100644 --- a/nibabel/gifti/util.py +++ b/nibabel/gifti/util.py @@ -10,27 +10,28 @@ from ..volumeutils import Recoder # Translate dtype.kind char codes to XML text output strings -KIND2FMT = { - 'i': '%i', - 'u': '%i', - 'f': '%10.6f', - 'c': '%10.6f', - 'V': ''} +KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''} -array_index_order_codes = Recoder(((1, "RowMajorOrder", 'C'), - (2, "ColumnMajorOrder", 'F')), - fields=('code', 'label', 'npcode')) +array_index_order_codes = Recoder( + ((1, 'RowMajorOrder', 'C'), (2, 'ColumnMajorOrder', 'F')), fields=('code', 'label', 'npcode') +) gifti_encoding_codes = Recoder( - ((0, "undef", "GIFTI_ENCODING_UNDEF", "undef"), - (1, "ASCII", "GIFTI_ENCODING_ASCII", "ASCII"), - (2, "B64BIN", "GIFTI_ENCODING_B64BIN", "Base64Binary"), - (3, "B64GZ", "GIFTI_ENCODING_B64GZ", "GZipBase64Binary"), - (4, "External", "GIFTI_ENCODING_EXTBIN", "ExternalFileBinary")), - fields=('code', 'label', 'giistring', 'specs')) + ( + (0, 'undef', 'GIFTI_ENCODING_UNDEF', 'undef'), + (1, 'ASCII', 'GIFTI_ENCODING_ASCII', 'ASCII'), + (2, 'B64BIN', 'GIFTI_ENCODING_B64BIN', 'Base64Binary'), + (3, 'B64GZ', 'GIFTI_ENCODING_B64GZ', 'GZipBase64Binary'), + (4, 'External', 'GIFTI_ENCODING_EXTBIN', 'ExternalFileBinary'), + ), + fields=('code', 'label', 'giistring', 'specs'), +) gifti_endian_codes = Recoder( - ((0, "GIFTI_ENDIAN_UNDEF", "Undef", "undef"), - (1, "GIFTI_ENDIAN_BIG", "BigEndian", "big"), - (2, "GIFTI_ENDIAN_LITTLE", "LittleEndian", "little")), - fields=('code', 'giistring', 'specs', 'byteorder')) + ( + (0, 'GIFTI_ENDIAN_UNDEF', 'Undef', 'undef'), + (1, 'GIFTI_ENDIAN_BIG', 'BigEndian', 'big'), + (2, 'GIFTI_ENDIAN_LITTLE', 'LittleEndian', 'little'), + ), + fields=('code', 'giistring', 'specs', 'byteorder'), +) diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 6b26ac0c05..614692daac 100644 --- a/nibabel/imageclasses.py +++ 
b/nibabel/imageclasses.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Define supported image classes and names """ +"""Define supported image classes and names""" from .analyze import AnalyzeImage from .brikhead import AFNIImage @@ -22,22 +22,42 @@ from .spm2analyze import Spm2AnalyzeImage # Ordered by the load/save priority. -all_image_classes = [Nifti1Pair, Nifti1Image, Nifti2Pair, - Cifti2Image, Nifti2Image, # Cifti2 before Nifti2 - Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, - Minc1Image, Minc2Image, MGHImage, - PARRECImage, GiftiImage, AFNIImage] +all_image_classes = [ + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Cifti2Image, + Nifti2Image, # Cifti2 before Nifti2 + Spm2AnalyzeImage, + Spm99AnalyzeImage, + AnalyzeImage, + Minc1Image, + Minc2Image, + MGHImage, + PARRECImage, + GiftiImage, + AFNIImage, +] # Image classes known to require spatial axes to be first in index ordering. # When adding an image class, consider whether the new class should be listed # here. -KNOWN_SPATIAL_FIRST = (Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, - Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, - MGHImage, PARRECImage, AFNIImage) +KNOWN_SPATIAL_FIRST = ( + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Nifti2Image, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + AnalyzeImage, + MGHImage, + PARRECImage, + AFNIImage, +) def spatial_axes_first(img): - """ True if spatial image axes for `img` always precede other axes + """True if spatial image axes for `img` always precede other axes Parameters ---------- diff --git a/nibabel/imageglobals.py b/nibabel/imageglobals.py index 4cdeb7b1a3..81a1742809 100644 --- a/nibabel/imageglobals.py +++ b/nibabel/imageglobals.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Defaults for images and headers +"""Defaults for images and headers error_level is the problem level (see BatteryRunners) at which an error will be raised, by the batteryrunners ``log_raise`` method. Thus a level of 0 will @@ -32,8 +32,7 @@ class ErrorLevel: - """ Context manager to set log error level - """ + """Context manager to set log error level""" def __init__(self, level): self.level = level diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index 4520d7f612..f507365e93 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -33,7 +33,7 @@ def count_nonzero_voxels(img): def mask_volume(img): - """ Compute volume of mask image. + """Compute volume of mask image. Equivalent to "fslstats /path/file.nii -V" @@ -58,7 +58,7 @@ def mask_volume(img): 1000.0 """ if not spatial_axes_first(img): - raise ValueError("Cannot calculate voxel volume for image with unknown spatial axes") + raise ValueError('Cannot calculate voxel volume for image with unknown spatial axes') voxel_volume_mm3 = np.prod(img.header.get_zooms()[:3]) mask_volume_vx = count_nonzero_voxels(img) mask_volume_mm3 = mask_volume_vx * voxel_volume_mm3 diff --git a/nibabel/info.py b/nibabel/info.py index 38690246c3..bdd291728a 100644 --- a/nibabel/info.py +++ b/nibabel/info.py @@ -1,4 +1,4 @@ -""" Define distribution parameters for nibabel, including package version +"""Define distribution parameters for nibabel, including package version The long description parameter is used to fill settings in setup.py, the nibabel top-level docstring, and in building the docs. 
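
For reference, the ``mask_volume`` computation reformatted above is nothing more than the nonzero-voxel count scaled by the voxel volume taken from the header zooms. This sketch reproduces the docstring's 1000.0 result; the image here is constructed inline purely for illustration:

    import numpy as np
    import nibabel as nib

    # 10x10x10 ones at 1 mm isotropic resolution -> 1000 voxels * 1 mm^3
    img = nib.Nifti1Image(np.ones((10, 10, 10), dtype=np.uint8), np.eye(4))
    voxel_volume_mm3 = np.prod(img.header.get_zooms()[:3])
    mask_volume_mm3 = np.count_nonzero(np.asanyarray(img.dataobj)) * voxel_volume_mm3
    assert mask_volume_mm3 == 1000.0
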
@@ -16,7 +16,7 @@ _version_extra = '.dev0' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" -VERSION = f"{_version_major}.{_version_minor}.{_version_micro}{_version_extra}" +VERSION = f'{_version_major}.{_version_minor}.{_version_micro}{_version_extra}' # Note: this long_description is the canonical place to edit this text. diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 763bf20788..187644a8e1 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -7,7 +7,7 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # module imports -""" Utilities to load and save image objects """ +"""Utilities to load and save image objects""" import os import numpy as np @@ -43,29 +43,29 @@ def _signature_matches_extension(filename): """ signatures = { - ".gz": {"signature": b"\x1f\x8b", "format_name": "gzip"}, - ".bz2": {"signature": b"BZh", "format_name": "bzip2"}, - ".zst": {"signature": b"\x28\xb5\x2f\xfd", "format_name": "ztsd"}, + '.gz': {'signature': b'\x1f\x8b', 'format_name': 'gzip'}, + '.bz2': {'signature': b'BZh', 'format_name': 'bzip2'}, + '.zst': {'signature': b'\x28\xb5\x2f\xfd', 'format_name': 'ztsd'}, } filename = _stringify_path(filename) *_, ext = splitext_addext(filename) ext = ext.lower() if ext not in signatures: - return True, "" - expected_signature = signatures[ext]["signature"] + return True, '' + expected_signature = signatures[ext]['signature'] try: - with open(filename, "rb") as fh: + with open(filename, 'rb') as fh: sniff = fh.read(len(expected_signature)) except OSError: - return False, f"Could not read file: {filename}" + return False, f'Could not read file: {filename}' if sniff.startswith(expected_signature): - return True, "" - format_name = signatures[ext]["format_name"] - return False, f"File {filename} is not a {format_name} file" + return True, '' + format_name = signatures[ext]['format_name'] + return False, f'File {filename} is not a {format_name} file' def load(filename, **kwargs): - r""" Load file given filename, guessing at file type + r"""Load file given filename, guessing at file type Parameters ---------- @@ -105,7 +105,7 @@ def load(filename, **kwargs): @deprecate_with_version('guessed_image_type deprecated.', '3.2', '5.0') def guessed_image_type(filename): - """ Guess image type from file `filename` + """Guess image type from file `filename` Parameters ---------- @@ -127,7 +127,7 @@ def guessed_image_type(filename): def save(img, filename, **kwargs): - r""" Save an image to file adapting format to `filename` + r"""Save an image to file adapting format to `filename` Parameters ---------- @@ -173,8 +173,7 @@ def save(img, filename, **kwargs): elif type(img) == Nifti2Pair and lext == '.nii': klass = Nifti2Image else: # arbitrary conversion - valid_klasses = [klass for klass in all_image_classes - if ext in klass.valid_exts] + valid_klasses = [klass for klass in all_image_classes if ext in klass.valid_exts] if not valid_klasses: # if list is empty raise ImageFileError(f'Cannot work out file type of "{filename}"') @@ -197,12 +196,11 @@ def save(img, filename, **kwargs): converted.to_filename(filename, **kwargs) -@deprecate_with_version('read_img_data deprecated. ' - 'Please use ``img.dataobj.get_unscaled()`` instead.', - '3.2', - '5.0') +@deprecate_with_version( + 'read_img_data deprecated. 
' 'Please use ``img.dataobj.get_unscaled()`` instead.', '3.2', '5.0' +) def read_img_data(img, prefer='scaled'): - """ Read data from image associated with files + """Read data from image associated with files If you want unscaled data, please use ``img.dataobj.get_unscaled()`` instead. If you want scaled data, use ``img.get_fdata()`` (which will cache @@ -257,12 +255,11 @@ def read_img_data(img, prefer='scaled'): if not hasattr(hdr, 'raw_data_from_fileobj'): # We can only do scaled if prefer == 'unscaled': - raise ValueError("Can only do unscaled for Analyze types") + raise ValueError('Can only do unscaled for Analyze types') return np.array(img.dataobj) # Analyze types img_fh = img.file_map['image'] - img_file_like = (img_fh.filename if img_fh.fileobj is None - else img_fh.fileobj) + img_file_like = img_fh.filename if img_fh.fileobj is None else img_fh.fileobj if img_file_like is None: raise ImageFileError('No image file specified for this image') # Check the consumable values in the header diff --git a/nibabel/minc1.py b/nibabel/minc1.py index c0ae95bd7b..56b8747fb4 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read MINC1 format images """ +"""Read MINC1 format images""" from numbers import Integral @@ -29,18 +29,15 @@ # See # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#MINC_specific_convenience_functions -_default_dir_cos = { - 'xspace': [1, 0, 0], - 'yspace': [0, 1, 0], - 'zspace': [0, 0, 1]} +_default_dir_cos = {'xspace': [1, 0, 0], 'yspace': [0, 1, 0], 'zspace': [0, 0, 1]} class MincError(Exception): - """ Error when reading MINC files """ + """Error when reading MINC files""" class Minc1File: - """ Class to wrap MINC1 format opened netcdf object + """Class to wrap MINC1 format opened netcdf object Although it has some of the same methods as a ``Header``, we use this only when reading a MINC file, to pull out useful header @@ -54,15 +51,13 @@ def __init__(self, mincfile): # The code below will error with vector_dimensions. See: # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#An_Introduction_to_NetCDF # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#Image_dimensions - self._dims = [self._mincfile.variables[s] - for s in self._dim_names] + self._dims = [self._mincfile.variables[s] for s in self._dim_names] # We don't currently support irregular spacing # https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#MINC_specific_convenience_functions for dim in self._dims: if dim.spacing != b'regular__': raise ValueError('Irregular spacing not supported') - self._spatial_dims = [name for name in self._dim_names - if name.endswith('space')] + self._spatial_dims = [name for name in self._dim_names if name.endswith('space')] # the MINC standard appears to allow the following variables to # be undefined. 
# https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#Image_conversion_variables @@ -91,10 +86,9 @@ def get_data_shape(self): return self._image.data.shape def get_zooms(self): - """ Get real-world sizes of voxels """ + """Get real-world sizes of voxels""" # zooms must be positive; but steps in MINC can be negative - return tuple([abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 - for dim in self._dims]) + return tuple([abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 for dim in self._dims]) def get_affine(self): nspatial = len(self._spatial_dims) @@ -104,9 +98,11 @@ def get_affine(self): dim_names = list(self._dim_names) # for indexing in loop for i, name in enumerate(self._spatial_dims): dim = self._dims[dim_names.index(name)] - rot_mat[:, i] = (dim.direction_cosines - if hasattr(dim, 'direction_cosines') - else _default_dir_cos[name]) + rot_mat[:, i] = ( + dim.direction_cosines + if hasattr(dim, 'direction_cosines') + else _default_dir_cos[name] + ) steps[i] = dim.step if hasattr(dim, 'step') else 1.0 starts[i] = dim.start if hasattr(dim, 'start') else 0.0 origin = np.dot(rot_mat, starts) @@ -116,7 +112,7 @@ def get_affine(self): return aff def _get_valid_range(self): - """ Return valid range for image data + """Return valid range for image data The valid range can come from the image 'valid_range' or image 'valid_min' and 'valid_max', or, failing that, from the @@ -128,25 +124,23 @@ def _get_valid_range(self): valid_range = self._image.valid_range except AttributeError: try: - valid_range = [self._image.valid_min, - self._image.valid_max] + valid_range = [self._image.valid_min, self._image.valid_max] except AttributeError: valid_range = [info.min, info.max] if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' - 'data type range') + raise ValueError('Valid range outside input ' 'data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): - """ Get scalar value from NetCDF scalar """ + """Get scalar value from NetCDF scalar""" return var.getValue() def _get_array(self, var): - """ Get array from NetCDF array """ + """Get array from NetCDF array""" return var.data def _normalize(self, data, sliceobj=()): - """ Apply scaling to image data `data` already sliced with `sliceobj` + """Apply scaling to image data `data` already sliced with `sliceobj` https://en.wikibooks.org/wiki/MINC/Reference/MINC1-programmers-guide#Pixel_values_and_real_values @@ -177,8 +171,7 @@ def _normalize(self, data, sliceobj=()): mx_dims = self._get_dimensions(image_max) mn_dims = self._get_dimensions(image_min) if mx_dims != mn_dims: - raise MincError('"image-max" and "image-min" do not have the same' - 'dimensions') + raise MincError('"image-max" and "image-min" do not have the same' 'dimensions') nscales = len(mx_dims) if nscales > 2: raise MincError('More than two scaling dimensions') @@ -202,19 +195,20 @@ def _normalize(self, data, sliceobj=()): i_slicer = sliceobj[:nscales_ax] # Fill slicer to broadcast against sliced data; add length 1 axis # for each axis except int axes (which are dropped by slicing) - broad_part = tuple(None for s in sliceobj[ax_inds[nscales]:] - if not isinstance(s, Integral)) + broad_part = tuple( + None for s in sliceobj[ax_inds[nscales] :] if not isinstance(s, Integral) + ) i_slicer += broad_part imax = self._get_array(image_max)[i_slicer] imin = self._get_array(image_min)[i_slicer] slope = (imax - imin) / (dmax - dmin) - inter = (imin - dmin * slope) + inter = imin - dmin 
* slope out_data *= slope out_data += inter return out_data def get_scaled_data(self, sliceobj=()): - """ Return scaled data for slice definition `sliceobj` + """Return scaled data for slice definition `sliceobj` Parameters ---------- @@ -236,7 +230,7 @@ def get_scaled_data(self, sliceobj=()): class MincImageArrayProxy: - """ MINC implementation of array proxy protocol + """MINC implementation of array proxy protocol The array proxy allows us to freeze the passed fileobj and header such that it returns the expected data array. @@ -259,7 +253,7 @@ def is_proxy(self): return True def __array__(self, dtype=None): - """ Read data from file and apply scaling, casting to ``dtype`` + """Read data from file and apply scaling, casting to ``dtype`` If ``dtype`` is unspecified, the dtype is automatically determined. @@ -279,39 +273,39 @@ def __array__(self, dtype=None): return arr def __getitem__(self, sliceobj): - """ Read slice `sliceobj` of data from file """ + """Read slice `sliceobj` of data from file""" return self.minc_file.get_scaled_data(sliceobj) class MincHeader(SpatialHeader): - """ Class to contain header for MINC formats - """ + """Class to contain header for MINC formats""" + # We don't use the data layout - this just in case we do later data_layout = 'C' def data_to_fileobj(self, data, fileobj, rescale=True): - """ See Header class for an implementation we can't use """ + """See Header class for an implementation we can't use""" raise NotImplementedError def data_from_fileobj(self, fileobj): - """ See Header class for an implementation we can't use """ + """See Header class for an implementation we can't use""" raise NotImplementedError class Minc1Header(MincHeader): - @classmethod def may_contain_header(klass, binaryblock): return binaryblock[:4] == b'CDF\x01' class Minc1Image(SpatialImage): - """ Class for MINC1 format images + """Class for MINC1 format images The MINC1 image class uses the default header type, rather than a specific MINC header type - and reads the relevant information from the MINC file on load. """ + header_class = Minc1Header _meta_sniff_len = 4 valid_exts = ('.mnc',) diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 3dce425609..275a7799c8 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Preliminary MINC2 support +"""Preliminary MINC2 support Use with care; I haven't tested this against a wide range of MINC files. 
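
For anyone reading the ``_normalize`` hunks above cold: MINC stores raw integers whose valid range is mapped linearly onto per-slice real ranges given by the ``image-max`` and ``image-min`` variables. The arithmetic, with invented numbers:

    import numpy as np

    # Raw uint8 data whose valid range [0, 255] maps onto real values
    # [0.0, 10.0] for this slice (all values invented).
    dmin, dmax = 0.0, 255.0        # valid range of the on-disk type
    imin, imax = 0.0, 10.0         # real-world range for the slice
    raw = np.array([0, 51, 255], dtype=np.uint8)

    slope = (imax - imin) / (dmax - dmin)
    inter = imin - dmin * slope
    real = raw * slope + inter     # array([ 0.,  2., 10.])
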
@@ -31,8 +31,7 @@ class Hdf5Bunch: - """ Make object for accessing attributes of variable - """ + """Make object for accessing attributes of variable""" def __init__(self, var): for name, value in var.attrs.items(): @@ -40,7 +39,7 @@ def __init__(self, var): class Minc2File(Minc1File): - """ Class to wrap MINC2 format file + """Class to wrap MINC2 format file Although it has some of the same methods as a ``Header``, we use this only when reading a MINC2 file, to pull out useful header @@ -61,8 +60,7 @@ def __init__(self, mincfile): for dim in self._dims: if dim.spacing != b'regular__': raise ValueError('Irregular spacing not supported') - self._spatial_dims = [name for name in self._dim_names - if name.endswith('space')] + self._spatial_dims = [name for name in self._dim_names if name.endswith('space')] self._image_max = image['image-max'] self._image_min = image['image-min'] @@ -77,7 +75,7 @@ def _get_dimensions(self, var): # The dimension name list must contain only as many entries # as the variable has dimensions. This reduces errors when an # unnecessary dimorder attribute is left behind. - return dimorder.split(',')[:len(var.shape)] + return dimorder.split(',')[: len(var.shape)] def get_data_dtype(self): return self._image.dtype @@ -86,7 +84,7 @@ def get_data_shape(self): return self._image.shape def _get_valid_range(self): - """ Return valid range for image data + """Return valid range for image data The valid range can come from the image 'valid_range' or failing that, from the data type range @@ -99,20 +97,19 @@ def _get_valid_range(self): valid_range = [info.min, info.max] else: if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' - 'data type range') + raise ValueError('Valid range outside input ' 'data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): - """ Get scalar value from HDF5 scalar """ + """Get scalar value from HDF5 scalar""" return var[()] def _get_array(self, var): - """ Get array from HDF5 array """ + """Get array from HDF5 array""" return np.asanyarray(var) def get_scaled_data(self, sliceobj=()): - """ Return scaled data for slice definition `sliceobj` + """Return scaled data for slice definition `sliceobj` Parameters ---------- @@ -137,19 +134,19 @@ def get_scaled_data(self, sliceobj=()): class Minc2Header(MincHeader): - @classmethod def may_contain_header(klass, binaryblock): return binaryblock[:4] == b'\211HDF' class Minc2Image(Minc1Image): - """ Class for MINC2 images + """Class for MINC2 images The MINC2 image class uses the default header type, rather than a specific MINC header type - and reads the relevant information from the MINC file on load. 
""" + # MINC2 does not do compressed whole files _compressed_suffixes = () header_class = Minc2Header @@ -159,6 +156,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): # Import of h5py might take awhile for MPI-enabled builds # So we are importing it here "on demand" import h5py + holder = file_map['image'] if holder.filename is None: raise MincError('MINC2 needs filename for load') diff --git a/nibabel/mriutils.py b/nibabel/mriutils.py index b0f3f6a86f..d993d26a21 100644 --- a/nibabel/mriutils.py +++ b/nibabel/mriutils.py @@ -43,9 +43,11 @@ def calculate_dwell_time(water_fat_shift, echo_train_length, field_strength): if values are out of range """ if field_strength < 0: - raise MRIError("Field strength should be positive") + raise MRIError('Field strength should be positive') if echo_train_length <= 0: - raise MRIError("Echo train length should be >= 1") - return ((echo_train_length - 1) * water_fat_shift / - (GYROMAGNETIC_RATIO * PROTON_WATER_FAT_SHIFT * - field_strength * (echo_train_length + 1))) + raise MRIError('Echo train length should be >= 1') + return ( + (echo_train_length - 1) + * water_fat_shift + / (GYROMAGNETIC_RATIO * PROTON_WATER_FAT_SHIFT * field_strength * (echo_train_length + 1)) + ) diff --git a/nibabel/nicom/__init__.py b/nibabel/nicom/__init__.py index 240779a019..3a389db172 100644 --- a/nibabel/nicom/__init__.py +++ b/nibabel/nicom/__init__.py @@ -21,8 +21,11 @@ """ import warnings -warnings.warn('The DICOM readers are highly experimental, unstable,' - ' and only work for Siemens time-series at the moment\n' - 'Please use with caution. We would be grateful for your ' - 'help in improving them', - UserWarning, stacklevel=2) +warnings.warn( + 'The DICOM readers are highly experimental, unstable,' + ' and only work for Siemens time-series at the moment\n' + 'Please use with caution. We would be grateful for your ' + 'help in improving them', + UserWarning, + stacklevel=2, +) diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index 22aa3c88e6..10471e586a 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -10,15 +10,16 @@ ASCCONV_RE = re.compile( r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', - flags=re.M | re.S) + flags=re.M | re.S, +) class AscconvParseError(Exception): - """ Error parsing ascconv file """ + """Error parsing ascconv file""" class Atom: - """ Object to hold operation, object type and object identifier + """Object to hold operation, object type and object identifier An atom represents an element in an expression. For example:: @@ -55,11 +56,11 @@ def __init__(self, op, obj_type, obj_id): class NoValue: - """ Signals no value present """ + """Signals no value present""" def assign2atoms(assign_ast, default_class=int): - """ Parse single assignment ast from ascconv line into atoms + """Parse single assignment ast from ascconv line into atoms Parameters ---------- @@ -102,7 +103,7 @@ def assign2atoms(assign_ast, default_class=int): def _create_obj_in(atom, root): - """ Find / create object defined in `atom` in dict-like given by `root` + """Find / create object defined in `atom` in dict-like given by `root` Returns corresponding value if there is already a key matching `atom.obj_id` in `root`. 
@@ -122,7 +123,7 @@ def _create_obj_in(atom, root): def _create_subscript_in(atom, root): - """ Find / create and insert object defined by `atom` from list `root` + """Find / create and insert object defined by `atom` from list `root` The `atom` has an index, defined in ``atom.obj_id``. If `root` is long enough to contain this index, return the object at that index. Otherwise, @@ -142,7 +143,7 @@ def _create_subscript_in(atom, root): def obj_from_atoms(atoms, namespace): - """ Return object defined by list `atoms` in dict-like `namespace` + """Return object defined by list `atoms` in dict-like `namespace` Parameters ---------- @@ -167,8 +168,7 @@ def obj_from_atoms(atoms, namespace): else: root_obj = _create_subscript_in(el, root_obj) if not isinstance(root_obj, el.obj_type): - raise AscconvParseError( - f'Unexpected type for {el.obj_id} in {prev_root}') + raise AscconvParseError(f'Unexpected type for {el.obj_id} in {prev_root}') return prev_root, el.obj_id @@ -184,7 +184,7 @@ def _get_value(assign): def parse_ascconv(ascconv_str, str_delim='"'): - '''Parse the 'ASCCONV' format from `input_str`. + """Parse the 'ASCCONV' format from `input_str`. Parameters ---------- @@ -204,11 +204,11 @@ def parse_ascconv(ascconv_str, str_delim='"'): ------ AsconvParseError A line of the ASCCONV section could not be parsed. - ''' + """ attrs, content = ASCCONV_RE.match(ascconv_str).groups() attrs = OrderedDict((tuple(x.split('=')) for x in attrs.split())) # Normalize string start / end markers to something Python understands - content = content.replace(str_delim, '"""').replace("\\", "\\\\") + content = content.replace(str_delim, '"""').replace('\\', '\\\\') # Use Python's own parser to parse modified ASCCONV assignments tree = ast.parse(content) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 7e465ff19a..376dcb5b5a 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -1,5 +1,4 @@ -""" CSA header reader from SPM spec - +"""CSA header reader from SPM spec """ import numpy as np @@ -30,7 +29,7 @@ class CSAReadError(CSAError): def get_csa_header(dcm_data, csa_type='image'): - """ Get CSA header information from DICOM header + """Get CSA header information from DICOM header Return None if the header does not contain CSA information of the specified `csa_type` @@ -72,7 +71,7 @@ def get_csa_header(dcm_data, csa_type='image'): def read(csa_str): - """ Read CSA header from string `csa_str` + """Read CSA header from string `csa_str` Parameters ---------- @@ -99,20 +98,22 @@ def read(csa_str): csa_dict['type'] = hdr_type csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I') if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS: - raise CSAReadError('Number of tags `t` should be ' - '0 < t <= %d. Instead found %d tags.' - % (MAX_CSA_ITEMS, csa_dict['n_tags'])) + raise CSAReadError( + 'Number of tags `t` should be ' + '0 < t <= %d. Instead found %d tags.' 
% (MAX_CSA_ITEMS, csa_dict['n_tags']) + ) for tag_no in range(csa_dict['n_tags']): - name, vm, vr, syngodt, n_items, last3 = \ - up_str.unpack('64si4s3i') + name, vm, vr, syngodt, n_items, last3 = up_str.unpack('64si4s3i') vr = nt_str(vr) name = nt_str(name) - tag = {'n_items': n_items, - 'vm': vm, # value multiplicity - 'vr': vr, # value representation - 'syngodt': syngodt, - 'last3': last3, - 'tag_no': tag_no} + tag = { + 'n_items': n_items, + 'vm': vm, # value multiplicity + 'vr': vr, # value representation + 'syngodt': syngodt, + 'last3': last3, + 'tag_no': tag_no, + } if vm == 0: n_values = n_items else: @@ -137,8 +138,7 @@ def read(csa_str): else: # CSA2 item_len = x1 if (ptr + item_len) > csa_len: - raise CSAReadError('Item is too long, ' - 'aborting read') + raise CSAReadError('Item is too long, ' 'aborting read') if item_no >= n_values: assert item_len == 0 continue @@ -155,7 +155,7 @@ def read(csa_str): # go to 4 byte boundary plus4 = item_len % 4 if plus4 != 0: - up_str.ptr += (4 - plus4) + up_str.ptr += 4 - plus4 tag['items'] = items csa_dict['tags'][name] = tag return csa_dict @@ -184,7 +184,7 @@ def get_vector(csa_dict, tag_name, n): def is_mosaic(csa_dict): - """ Return True if the data is of Mosaic type + """Return True if the data is of Mosaic type Parameters ---------- @@ -243,7 +243,7 @@ def get_ice_dims(csa_dict): def nt_str(s): - """ Strip string to first null + """Strip string to first null Parameters ---------- diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 56d7d56946..3f5293dcc3 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -1,11 +1,10 @@ - from os.path import join as pjoin import glob import numpy as np from .. import Nifti1Image -from .dicomwrappers import (wrapper_from_data, wrapper_from_file) +from .dicomwrappers import wrapper_from_data, wrapper_from_file class DicomReadError(Exception): @@ -16,7 +15,7 @@ class DicomReadError(Exception): def mosaic_to_nii(dcm_data): - """ Get Nifti file from Siemens + """Get Nifti file from Siemens Parameters ---------- @@ -37,15 +36,11 @@ def mosaic_to_nii(dcm_data): def read_mosaic_dwi_dir(dicom_path, globber='*.dcm', dicom_kwargs=None): - return read_mosaic_dir(dicom_path, - globber, - check_is_dwi=True, - dicom_kwargs=dicom_kwargs) + return read_mosaic_dir(dicom_path, globber, check_is_dwi=True, dicom_kwargs=dicom_kwargs) -def read_mosaic_dir(dicom_path, - globber='*.dcm', check_is_dwi=False, dicom_kwargs=None): - """ Read all Siemens mosaic DICOMs in directory, return arrays, params +def read_mosaic_dir(dicom_path, globber='*.dcm', check_is_dwi=False, dicom_kwargs=None): + """Read all Siemens mosaic DICOMs in directory, return arrays, params Parameters ---------- @@ -98,7 +93,8 @@ def read_mosaic_dir(dicom_path, raise DicomReadError( f'Could not find diffusion information reading file "{fname}"; ' 'is it possible this is not a _raw_ diffusion directory? ' - 'Could it be a processed dataset like ADC etc?') + 'Could it be a processed dataset like ADC etc?' 
+ ) b = np.nan g = np.ones((3,)) + np.nan else: @@ -107,14 +103,11 @@ def read_mosaic_dir(dicom_path, b_values.append(b) gradients.append(g) affine = np.dot(DPCS_TO_TAL, dcm_w.affine) - return (np.concatenate(arrays, -1), - affine, - np.array(b_values), - np.array(gradients)) + return (np.concatenate(arrays, -1), affine, np.array(b_values), np.array(gradients)) def slices_to_series(wrappers): - """ Sort sequence of slice wrappers into series + """Sort sequence of slice wrappers into series This follows the SPM model fairly closely @@ -169,17 +162,17 @@ def _instance_sorter(s): def _third_pass(wrappers): - """ What we do when there are not unique zs in a slice set """ + """What we do when there are not unique zs in a slice set""" inos = [s.instance_number for s in wrappers] - msg_fmt = ('Plausibly matching slices, but where some have ' - 'the same apparent slice location, and %s; ' - '- slices are probably unsortable') + msg_fmt = ( + 'Plausibly matching slices, but where some have ' + 'the same apparent slice location, and %s; ' + '- slices are probably unsortable' + ) if None in inos: - raise DicomReadError(msg_fmt % 'some or all slices with ' - 'missing InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with ' 'missing InstanceNumber') if len(set(inos)) < len(inos): - raise DicomReadError(msg_fmt % 'some or all slices with ' - 'the same InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with ' 'the same InstanceNumber') # sort by instance number wrappers.sort(key=_instance_sorter) # start loop, in which we start a new volume, each time we see a z diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 9f180a86a3..3c7268dbe0 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -1,4 +1,4 @@ -""" Classes to wrap DICOM objects and files +"""Classes to wrap DICOM objects and files The wrappers encapsulate the capabilities of the different DICOM formats. @@ -23,7 +23,7 @@ from ..openers import ImageOpener from ..onetime import auto_attr as one_time -pydicom = optional_package("pydicom")[0] +pydicom = optional_package('pydicom')[0] class WrapperError(Exception): @@ -35,7 +35,7 @@ class WrapperPrecisionError(WrapperError): def wrapper_from_file(file_like, *args, **kwargs): - r""" Create DICOM wrapper from `file_like` object + r"""Create DICOM wrapper from `file_like` object Parameters ---------- @@ -59,7 +59,7 @@ def wrapper_from_file(file_like, *args, **kwargs): def wrapper_from_data(dcm_data): - """ Create DICOM wrapper from DICOM data object + """Create DICOM wrapper from DICOM data object Parameters ---------- @@ -82,9 +82,11 @@ def wrapper_from_data(dcm_data): try: csa = csar.get_csa_header(dcm_data) except csar.CSAReadError as e: - warnings.warn('Error while attempting to read CSA header: ' + - str(e.args) + - '\n Ignoring Siemens private (CSA) header info.') + warnings.warn( + 'Error while attempting to read CSA header: ' + + str(e.args) + + '\n Ignoring Siemens private (CSA) header info.' 
+ ) csa = None if csa is None: return Wrapper(dcm_data) @@ -96,7 +98,7 @@ def wrapper_from_data(dcm_data): class Wrapper: - """ Class to wrap general DICOM files + """Class to wrap general DICOM files Methods: @@ -119,6 +121,7 @@ class Wrapper: * slice_indicator : float * series_signature : tuple """ + is_csa = False is_mosaic = False is_multiframe = False @@ -128,7 +131,7 @@ class Wrapper: b_vector = None def __init__(self, dcm_data): - """ Initialize wrapper + """Initialize wrapper Parameters ---------- @@ -141,8 +144,7 @@ def __init__(self, dcm_data): @one_time def image_shape(self): - """ The array shape as it will be returned by ``get_data()`` - """ + """The array shape as it will be returned by ``get_data()``""" shape = (self.get('Rows'), self.get('Columns')) if None in shape: return None @@ -150,7 +152,7 @@ def image_shape(self): @one_time def image_orient_patient(self): - """ Note that this is _not_ LR flipped """ + """Note that this is _not_ LR flipped""" iop = self.get('ImageOrientationPatient') if iop is None: return None @@ -168,7 +170,7 @@ def slice_normal(self): @one_time def rotation_matrix(self): - """ Return rotation matrix between array indices and mm + """Return rotation matrix between array indices and mm Note that we swap the two columns of the 'ImageOrientPatient' when we create the rotation matrix. This is takes into account @@ -190,14 +192,12 @@ def rotation_matrix(self): # motivated in ``doc/source/notebooks/ata_error.ipynb``, and from # discussion at https://github.com/nipy/nibabel/pull/156 if not np.allclose(np.eye(3), np.dot(R, R.T), atol=5e-5): - raise WrapperPrecisionError('Rotation matrix not nearly ' - 'orthogonal') + raise WrapperPrecisionError('Rotation matrix not nearly ' 'orthogonal') return R @one_time def voxel_sizes(self): - """ voxel sizes for array as returned by ``get_data()`` - """ + """voxel sizes for array as returned by ``get_data()``""" # pix space gives (row_spacing, column_spacing). That is, the # mm you move when moving from one row to the next, and the mm # you move when moving from one column to the next @@ -216,7 +216,7 @@ def voxel_sizes(self): @one_time def image_position(self): - """ Return position of first voxel in data block + """Return position of first voxel in data block Parameters ---------- @@ -235,7 +235,7 @@ def image_position(self): @one_time def slice_indicator(self): - """ A number that is higher for higher slices in Z + """A number that is higher for higher slices in Z Comparing this number between two adjacent slices should give a difference equal to the voxel size in Z. @@ -250,12 +250,12 @@ def slice_indicator(self): @one_time def instance_number(self): - """ Just because we use this a lot for sorting """ + """Just because we use this a lot for sorting""" return self.get('InstanceNumber') @one_time def series_signature(self): - """ Signature for matching slices into series + """Signature for matching slices into series We use `signature` in ``self.is_same_series(other)``. 
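
Since several hunks here touch the affine-related attributes, a compact reminder of how they fit together: the wrapper's affine scales the rotation matrix columns by the voxel sizes and uses the first voxel's position as the translation. Schematically, with invented values standing in for ``rotation_matrix``, ``voxel_sizes`` and ``image_position`` (a sketch, not the wrapper's exact code):

    import numpy as np

    R = np.eye(3)                          # from ImageOrientationPatient
    vox = np.array([0.9, 0.9, 3.0])        # row/column/slice spacing (mm)
    ipp = np.array([-90.0, -120.0, 60.0])  # ImagePositionPatient

    aff = np.eye(4)
    aff[:3, :3] = R * vox   # scale each rotation column by its voxel size
    aff[:3, 3] = ipp        # position of the first voxel in patient mm
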
@@ -270,11 +270,13 @@ def series_signature(self): # dictionary with value, comparison func tuple signature = {} eq = operator.eq - for key in ('SeriesInstanceUID', - 'SeriesNumber', - 'ImageType', - 'SequenceName', - 'EchoNumbers'): + for key in ( + 'SeriesInstanceUID', + 'SeriesNumber', + 'ImageType', + 'SequenceName', + 'EchoNumbers', + ): signature[key] = (self.get(key), eq) signature['image_shape'] = (self.image_shape, eq) signature['iop'] = (self.image_orient_patient, none_or_close) @@ -282,18 +284,18 @@ def series_signature(self): return signature def __getitem__(self, key): - """ Return values from DICOM object""" + """Return values from DICOM object""" if key not in self.dcm_data: raise KeyError(f'"{key}" not in self.dcm_data') return self.dcm_data.get(key) def get(self, key, default=None): - """ Get values from underlying dicom data """ + """Get values from underlying dicom data""" return self.dcm_data.get(key, default) @property def affine(self): - """ Mapping between voxel and DICOM coordinate system + """Mapping between voxel and DICOM coordinate system (4, 4) affine matrix giving transformation between voxels in data array and mm in the DICOM patient coordinate system. @@ -315,14 +317,14 @@ def affine(self): return aff def get_pixel_array(self): - """ Return unscaled pixel array from DICOM """ + """Return unscaled pixel array from DICOM""" data = self.dcm_data.get('pixel_array') if data is None: raise WrapperError('Cannot find data in DICOM') return data def get_data(self): - """ Get scaled image data from DICOMs + """Get scaled image data from DICOMs We return the data as DICOM understands it, first dimension is rows, second dimension is columns @@ -336,7 +338,7 @@ def get_data(self): return self._scale_data(self.get_pixel_array()) def is_same_series(self, other): - """ Return True if `other` appears to be in same series + """Return True if `other` appears to be in same series Parameters ---------- @@ -365,8 +367,7 @@ def is_same_series(self, other): if not func(v1, v2): return False # values present in one or the other but not both - for keys, sig in ((my_keys - your_keys, my_sig), - (your_keys - my_keys, your_sig)): + for keys, sig in ((my_keys - your_keys, my_sig), (your_keys - my_keys, your_sig)): for key in keys: v1, func = sig[key] if not func(v1, None): @@ -393,8 +394,7 @@ def _apply_scale_offset(self, data, scale, offset): @one_time def b_value(self): - """ Return b value for diffusion or None if not available - """ + """Return b value for diffusion or None if not available""" q_vec = self.q_vector if q_vec is None: return None @@ -402,8 +402,7 @@ def b_value(self): @one_time def b_vector(self): - """ Return b vector for diffusion or None if not available - """ + """Return b vector for diffusion or None if not available""" q_vec = self.q_vector if q_vec is None: return None @@ -446,6 +445,7 @@ class MultiframeWrapper(Wrapper): series_signature(self) get_data(self) """ + is_multiframe = True def __init__(self, dcm_data): @@ -464,11 +464,11 @@ def __init__(self, dcm_data): try: self.frames[0] except TypeError: - raise WrapperError("PerFrameFunctionalGroupsSequence is empty.") + raise WrapperError('PerFrameFunctionalGroupsSequence is empty.') try: self.shared = dcm_data.get('SharedFunctionalGroupsSequence')[0] except TypeError: - raise WrapperError("SharedFunctionalGroupsSequence is empty.") + raise WrapperError('SharedFunctionalGroupsSequence is empty.') self._shape = None @one_time @@ -501,7 +501,7 @@ def image_shape(self): """ rows, cols = self.get('Rows'), 
self.get('Columns') if None in (rows, cols): - raise WrapperError("Rows and/or Columns are empty.") + raise WrapperError('Rows and/or Columns are empty.') # Check number of frames first_frame = self.frames[0] @@ -512,35 +512,34 @@ def image_shape(self): # DWI image may include derived isotropic, ADC or trace volume try: self.frames = pydicom.Sequence( - frame for frame in self.frames if - frame.MRDiffusionSequence[0].DiffusionDirectionality - != 'ISOTROPIC' - ) + frame + for frame in self.frames + if frame.MRDiffusionSequence[0].DiffusionDirectionality != 'ISOTROPIC' + ) except IndexError: # Sequence tag is found but missing items! - raise WrapperError("Diffusion file missing information") + raise WrapperError('Diffusion file missing information') except AttributeError: # DiffusionDirectionality tag is not required pass else: if n_frames != len(self.frames): - warnings.warn("Derived images found and removed") + warnings.warn('Derived images found and removed') n_frames = len(self.frames) has_derived = True assert len(self.frames) == n_frames frame_indices = np.array( - [frame.FrameContentSequence[0].DimensionIndexValues - for frame in self.frames]) + [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] + ) # Check that there is only one multiframe stack index - stack_ids = set(frame.FrameContentSequence[0].StackID - for frame in self.frames) + stack_ids = set(frame.FrameContentSequence[0].StackID for frame in self.frames) if len(stack_ids) > 1: - raise WrapperError("File contains more than one StackID. " - "Cannot handle multi-stack files") + raise WrapperError( + 'File contains more than one StackID. ' 'Cannot handle multi-stack files' + ) # Determine if one of the dimension indices refers to the stack id - dim_seq = [dim.DimensionIndexPointer - for dim in self.get('DimensionIndexSequence')] + dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] stackid_tag = pydicom.datadict.tag_for_keyword('StackID') # remove the stack id axis if present if stackid_tag in dim_seq: @@ -549,10 +548,11 @@ def image_shape(self): dim_seq.pop(stackid_dim_idx) if has_derived: # derived volume is included - derived_tag = pydicom.datadict.tag_for_keyword("DiffusionBValue") + derived_tag = pydicom.datadict.tag_for_keyword('DiffusionBValue') if derived_tag not in dim_seq: - raise WrapperError("Missing information, cannot remove indices " - "with confidence.") + raise WrapperError( + 'Missing information, cannot remove indices ' 'with confidence.' 
+ ) derived_dim_idx = dim_seq.index(derived_tag) frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1) # account for the 2 additional dimensions (row and column) not included @@ -567,8 +567,7 @@ def image_shape(self): shape = (rows, cols) + tuple(ns_unique) n_vols = np.prod(shape[3:]) if n_frames != n_vols * shape[2]: - raise WrapperError("Calculated shape does not match number of " - "frames.") + raise WrapperError('Calculated shape does not match number of ' 'frames.') return tuple(shape) @one_time @@ -582,8 +581,7 @@ def image_orient_patient(self): try: iop = self.frames[0].PlaneOrientationSequence[0].ImageOrientationPatient except AttributeError: - raise WrapperError("Not enough information for " - "image_orient_patient") + raise WrapperError('Not enough information for ' 'image_orient_patient') if iop is None: return None iop = np.array(list(map(float, iop))) @@ -591,14 +589,14 @@ def image_orient_patient(self): @one_time def voxel_sizes(self): - """ Get i, j, k voxel sizes """ + """Get i, j, k voxel sizes""" try: pix_measures = self.shared.PixelMeasuresSequence[0] except AttributeError: try: pix_measures = self.frames[0].PixelMeasuresSequence[0] except AttributeError: - raise WrapperError("Not enough data for pixel spacing") + raise WrapperError('Not enough data for pixel spacing') pix_space = pix_measures.PixelSpacing try: zs = pix_measures.SliceThickness @@ -626,9 +624,7 @@ def image_position(self): def series_signature(self): signature = {} eq = operator.eq - for key in ('SeriesInstanceUID', - 'SeriesNumber', - 'ImageType'): + for key in ('SeriesInstanceUID', 'SeriesNumber', 'ImageType'): signature[key] = (self.get(key), eq) signature['image_shape'] = (self.image_shape, eq) signature['iop'] = (self.image_orient_patient, none_or_close) @@ -649,8 +645,7 @@ def get_data(self): return self._scale_data(data) def _scale_data(self, data): - pix_trans = getattr( - self.frames[0], 'PixelValueTransformationSequence', None) + pix_trans = getattr(self.frames[0], 'PixelValueTransformationSequence', None) if pix_trans is None: return super(MultiframeWrapper, self)._scale_data(data) scale = float(pix_trans[0].RescaleSlope) @@ -659,7 +654,7 @@ def _scale_data(self, data): class SiemensWrapper(Wrapper): - """ Wrapper for Siemens format DICOMs + """Wrapper for Siemens format DICOMs Adds attributes: @@ -667,10 +662,11 @@ class SiemensWrapper(Wrapper): * b_matrix : (3,3) array * q_vector : (3,) array """ + is_csa = True def __init__(self, dcm_data, csa_header=None): - """ Initialize Siemens wrapper + """Initialize Siemens wrapper The Siemens-specific information is in the `csa_header`, either passed in here, or read from the input `dcm_data`. 
@@ -723,7 +719,7 @@ def slice_normal(self): @one_time def series_signature(self): - """ Add ICE dims from CSA header to signature """ + """Add ICE dims from CSA header to signature""" signature = super(SiemensWrapper, self).series_signature ice = csar.get_ice_dims(self.csa_header) if ice is not None: @@ -733,7 +729,7 @@ def series_signature(self): @one_time def b_matrix(self): - """ Get DWI B matrix referring to voxel space + """Get DWI B matrix referring to voxel space Parameters ---------- @@ -770,7 +766,7 @@ def b_matrix(self): @one_time def q_vector(self): - """ Get DWI q vector referring to voxel space + """Get DWI q vector referring to voxel space Parameters ---------- @@ -791,7 +787,7 @@ def q_vector(self): class MosaicWrapper(SiemensWrapper): - """ Class for Siemens mosaic format data + """Class for Siemens mosaic format data Mosaic format is a way of storing a 3D image in a 2D slice - and it's as simple as you'd imagine it would be - just storing the slices @@ -806,10 +802,11 @@ class MosaicWrapper(SiemensWrapper): * n_mosaic : int * mosaic_size : int """ + is_mosaic = True def __init__(self, dcm_data, csa_header=None, n_mosaic=None): - """ Initialize Siemens Mosaic wrapper + """Initialize Siemens Mosaic wrapper The Siemens-specific information is in the `csa_header`, either passed in here, or read from the input `dcm_data`. @@ -834,28 +831,28 @@ def __init__(self, dcm_data, csa_header=None, n_mosaic=None): except KeyError: pass if n_mosaic is None or n_mosaic == 0: - raise WrapperError('No valid mosaic number in CSA ' - 'header; is this really ' - 'Siemens mosiac data?') + raise WrapperError( + 'No valid mosaic number in CSA ' + 'header; is this really ' + 'Siemens mosiac data?' + ) self.n_mosaic = n_mosaic self.mosaic_size = int(np.ceil(np.sqrt(n_mosaic))) @one_time def image_shape(self): - """ Return image shape as returned by ``get_data()`` """ + """Return image shape as returned by ``get_data()``""" # reshape pixel slice array back from mosaic rows = self.get('Rows') cols = self.get('Columns') if None in (rows, cols): return None mosaic_size = self.mosaic_size - return (int(rows / mosaic_size), - int(cols / mosaic_size), - self.n_mosaic) + return (int(rows / mosaic_size), int(cols / mosaic_size), self.n_mosaic) @one_time def image_position(self): - """ Return position of first voxel in data block + """Return position of first voxel in data block Adjusts Siemens mosaic position vector for bug in mosaic format position. See ``dicom_mosaic`` in doc/theory for details. 
@@ -891,7 +888,7 @@ def image_position(self): return ipp + np.dot(Q, vox_trans_fixes[:, None]).ravel() def get_data(self): - """ Get scaled image data from DICOMs + """Get scaled image data from DICOMs Resorts data block from mosaic to 3D @@ -925,10 +922,9 @@ def get_data(self): raise WrapperError('No valid information for image shape') n_slice_rows, n_slice_cols, n_mosaic = shape n_slab_rows = self.mosaic_size - n_blocks = n_slab_rows ** 2 + n_blocks = n_slab_rows**2 data = self.get_pixel_array() - v4 = data.reshape(n_slab_rows, n_slice_rows, - n_slab_rows, n_slice_cols) + v4 = data.reshape(n_slab_rows, n_slice_rows, n_slab_rows, n_slice_cols) # move the mosaic dims to the end v4 = v4.transpose((1, 3, 0, 2)) # pool mosaic-generated dims @@ -939,7 +935,7 @@ def get_data(self): def none_or_close(val1, val2, rtol=1e-5, atol=1e-6): - """ Match if `val1` and `val2` are both None, or are close + """Match if `val1` and `val2` are both None, or are close Parameters ---------- diff --git a/nibabel/nicom/dwiparams.py b/nibabel/nicom/dwiparams.py index 62b28cb7e3..cb0e501202 100644 --- a/nibabel/nicom/dwiparams.py +++ b/nibabel/nicom/dwiparams.py @@ -1,4 +1,4 @@ -""" Process diffusion imaging parameters +"""Process diffusion imaging parameters * ``q`` is a vector in Q space * ``b`` is a b value @@ -17,14 +17,13 @@ ``q_est`` is the closest q vector equivalent to the B matrix, then: B ~ (q_est . q_est.T) / norm(q_est) - """ import numpy as np import numpy.linalg as npl def B2q(B, tol=None): - """ Estimate q vector from input B matrix `B` + """Estimate q vector from input B matrix `B` We require that the input `B` is symmetric positive definite. @@ -68,7 +67,7 @@ def B2q(B, tol=None): def nearest_pos_semi_def(B): - """ Least squares positive semi-definite tensor estimation + """Least squares positive semi-definite tensor estimation Reference: Niethammer M, San Jose Estepar R, Bouix S, Shenton M, Westin CF. On diffusion tensor estimation. 
Conf Proc IEEE Eng Med @@ -106,7 +105,7 @@ def nearest_pos_semi_def(B): lam1a, lam2a, lam3a = vals scalers = np.zeros((3,)) if cardneg == 2: - b112 = np.max([0, lam1a + (lam2a + lam3a) / 3.]) + b112 = np.max([0, lam1a + (lam2a + lam3a) / 3.0]) scalers[0] = b112 elif cardneg == 1: lam1b = lam1a + 0.25 * lam3a @@ -115,10 +114,10 @@ def nearest_pos_semi_def(B): scalers[:2] = lam1b, lam2b else: # one of the lam1b, lam2b is < 0 if lam2b < 0: - b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.]) + b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.0]) scalers[0] = b111 if lam1b < 0: - b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.]) + b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.0]) scalers[1] = b221 # resort the scalers to match the original vecs scalers = scalers[np.argsort(inds)] @@ -126,7 +125,7 @@ def nearest_pos_semi_def(B): def q2bg(q_vector, tol=1e-5): - """ Return b value and q unit vector from q vector `q_vector` + """Return b value and q unit vector from q vector `q_vector` Parameters ---------- @@ -155,5 +154,5 @@ def q2bg(q_vector, tol=1e-5): q_vec = np.asarray(q_vector) norm = np.sqrt(np.sum(q_vec * q_vec)) if norm < tol: - return (0., np.zeros((3,))) + return (0.0, np.zeros((3,))) return norm, q_vec / norm diff --git a/nibabel/nicom/structreader.py b/nibabel/nicom/structreader.py index eb714804f1..086a463d2e 100644 --- a/nibabel/nicom/structreader.py +++ b/nibabel/nicom/structreader.py @@ -1,4 +1,4 @@ -""" Stream-like reader for packed data """ +"""Stream-like reader for packed data""" from struct import Struct @@ -6,7 +6,7 @@ class Unpacker: - """ Class to unpack values from buffer object + """Class to unpack values from buffer object The buffer object is usually a string. Caches compiled :mod:`struct` format strings so that repeated unpacking with the same format @@ -29,7 +29,7 @@ class Unpacker: """ def __init__(self, buf, ptr=0, endian=None): - """ Initialize unpacker + """Initialize unpacker Parameters ---------- @@ -50,7 +50,7 @@ def __init__(self, buf, ptr=0, endian=None): self._cache = {} def unpack(self, fmt): - """ Unpack values from contained buffer + """Unpack values from contained buffer Unpacks values from ``self.buf`` and updates ``self.ptr`` to the position after the read data. @@ -89,7 +89,7 @@ def unpack(self, fmt): return values def read(self, n_bytes=-1): - """ Return byte string of length `n_bytes` at current position + """Return byte string of length `n_bytes` at current position Returns sub-string from ``self.buf`` and updates ``self.ptr`` to the position after the read data. diff --git a/nibabel/nicom/tests/__init__.py b/nibabel/nicom/tests/__init__.py index 75f5dbc5ac..4a7ea3b284 100644 --- a/nibabel/nicom/tests/__init__.py +++ b/nibabel/nicom/tests/__init__.py @@ -1,6 +1,6 @@ import unittest from nibabel.optpkg import optional_package -pydicom, have_dicom, _ = optional_package("pydicom") +pydicom, have_dicom, _ = optional_package('pydicom') -dicom_test = unittest.skipUnless(have_dicom, "Could not import pydicom") +dicom_test = unittest.skipUnless(have_dicom, 'Could not import pydicom') diff --git a/nibabel/nicom/tests/data_pkgs.py b/nibabel/nicom/tests/data_pkgs.py index 2424666a72..e95478ef90 100644 --- a/nibabel/nicom/tests/data_pkgs.py +++ b/nibabel/nicom/tests/data_pkgs.py @@ -1,16 +1,10 @@ -""" Data packages for DICOM testing """ +"""Data packages for DICOM testing""" from ... 
import data as nibd -PUBLIC_PKG_DEF = dict( - relpath='nipy/dicom/public', - name='nipy-dicom-public', - version='0.1') +PUBLIC_PKG_DEF = dict(relpath='nipy/dicom/public', name='nipy-dicom-public', version='0.1') -PRIVATE_PKG_DEF = dict( - relpath='nipy/dicom/private', - name='nipy-dicom-private', - version='0.1') +PRIVATE_PKG_DEF = dict(relpath='nipy/dicom/private', name='nipy-dicom-private', version='0.1') PUBLIC_DS = nibd.datasource_or_bomber(PUBLIC_PKG_DEF) diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index a1efd7fa29..6415c2725e 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -1,4 +1,4 @@ -""" Testing Siemens "ASCCONV" parser +"""Testing Siemens "ASCCONV" parser """ from os.path import join as pjoin, dirname @@ -22,12 +22,17 @@ def test_ascconv_parse(): assert len(ascconv_dict) == 72 assert ascconv_dict['tProtocolName'] == 'CBU+AF8-DTI+AF8-64D+AF8-1A' assert ascconv_dict['ucScanRegionPosValid'] == 1 - assert_array_almost_equal(ascconv_dict['sProtConsistencyInfo']['flNominalB0'], - 2.89362) + assert_array_almost_equal(ascconv_dict['sProtConsistencyInfo']['flNominalB0'], 2.89362) assert ascconv_dict['sProtConsistencyInfo']['flGMax'] == 26 - assert (list(ascconv_dict['sSliceArray'].keys()) == - ['asSlice', 'anAsc', 'anPos', 'lSize', 'lConc', 'ucMode', - 'sTSat']) + assert list(ascconv_dict['sSliceArray'].keys()) == [ + 'asSlice', + 'anAsc', + 'anPos', + 'lSize', + 'lConc', + 'ucMode', + 'sTSat', + ] slice_arr = ascconv_dict['sSliceArray'] as_slice = slice_arr['asSlice'] assert_array_equal([e['dPhaseFOV'] for e in as_slice], 230) @@ -42,8 +47,7 @@ def test_ascconv_parse(): # This lower-level list does start indexing at 0 assert len(as_list) == 12 for i, el in enumerate(as_list): - assert (list(el.keys()) == - ['sCoilElementID', 'lElementSelected', 'lRxChannelConnected']) + assert list(el.keys()) == ['sCoilElementID', 'lElementSelected', 'lRxChannelConnected'] assert el['lElementSelected'] == 1 assert el['lRxChannelConnected'] == i + 1 # Test negative number @@ -51,11 +55,13 @@ def test_ascconv_parse(): def test_ascconv_w_attrs(): - in_str = ("### ASCCONV BEGIN object=MrProtDataImpl@MrProtocolData " - "version=41340006 " - "converter=%MEASCONST%/ConverterList/Prot_Converter.txt ###\n" - "test = \"hello\"\n" - "### ASCCONV END ###") + in_str = ( + '### ASCCONV BEGIN object=MrProtDataImpl@MrProtocolData ' + 'version=41340006 ' + 'converter=%MEASCONST%/ConverterList/Prot_Converter.txt ###\n' + 'test = "hello"\n' + '### ASCCONV END ###' + ) ascconv_dict, attrs = ascconv.parse_ascconv(in_str, '""') assert attrs['object'] == 'MrProtDataImpl@MrProtocolData' assert attrs['version'] == '41340006' diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 912e98fe18..1dfe348c4b 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,4 +1,4 @@ -""" Testing Siemens CSA header reader +"""Testing Siemens CSA header reader """ import sys from os.path import join as pjoin @@ -114,12 +114,9 @@ def test_csa_params(): def test_ice_dims(): - ex_dims0 = ['X', '1', '1', '1', '1', '1', '1', - '48', '1', '1', '1', '1', '201'] - ex_dims1 = ['X', '1', '1', '1', '2', '1', '1', - '48', '1', '1', '1', '1', '201'] - for csa_str, ex_dims in ((CSA2_B0, ex_dims0), - (CSA2_B1000, ex_dims1)): + ex_dims0 = ['X', '1', '1', '1', '1', '1', '1', '48', '1', '1', '1', '1', '201'] + ex_dims1 = ['X', '1', '1', '1', '2', '1', '1', '48', '1', '1', '1', '1', '201'] + 
for csa_str, ex_dims in ((CSA2_B0, ex_dims0), (CSA2_B1000, ex_dims1)): csa_info = csa.read(csa_str) assert csa.get_ice_dims(csa_info) == ex_dims assert csa.get_ice_dims({}) is None diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index b1ae9edae9..dba29b6503 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -1,5 +1,4 @@ -""" Testing reading DICOM files - +"""Testing reading DICOM files """ from os.path import join as pjoin @@ -13,7 +12,7 @@ import pytest from numpy.testing import assert_array_equal, assert_array_almost_equal -pydicom, _, setup_module = optional_package("pydicom") +pydicom, _, setup_module = optional_package('pydicom') def test_read_dwi(): @@ -24,8 +23,7 @@ def test_read_dwi(): def test_read_dwis(): - data, aff, bs, gs = didr.read_mosaic_dwi_dir(IO_DATA_PATH, - 'siemens_dwi_*.dcm.gz') + data, aff, bs, gs = didr.read_mosaic_dwi_dir(IO_DATA_PATH, 'siemens_dwi_*.dcm.gz') assert data.ndim == 4 assert_array_almost_equal(aff, EXPECTED_AFFINE) assert_array_almost_equal(bs, (0, EXPECTED_PARAMS[0])) @@ -41,10 +39,7 @@ def test_passing_kwds(): for func in (didr.read_mosaic_dwi_dir, didr.read_mosaic_dir): data, aff, bs, gs = func(IO_DATA_PATH, dwi_glob) # This should not raise an error - data2, aff2, bs2, gs2 = func( - IO_DATA_PATH, - dwi_glob, - dicom_kwargs=dict(force=True)) + data2, aff2, bs2, gs2 = func(IO_DATA_PATH, dwi_glob, dicom_kwargs=dict(force=True)) assert_array_equal(data, data2) # This should raise an error in pydicom.dicomio.read_file with pytest.raises(TypeError): @@ -59,9 +54,8 @@ def test_passing_kwds(): def test_slices_to_series(): - dicom_files = (pjoin(IO_DATA_PATH, "%d.dcm" % i) for i in range(2)) + dicom_files = (pjoin(IO_DATA_PATH, '%d.dcm' % i) for i in range(2)) wrappers = [didr.wrapper_from_file(f) for f in dicom_files] series = didr.slices_to_series(wrappers) assert len(series) == 1 assert len(series[0]) == 2 - diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index d65afc6d27..3dd1665c3f 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -1,4 +1,4 @@ -""" Testing DICOM wrappers +"""Testing DICOM wrappers """ from os.path import join as pjoin, dirname @@ -34,26 +34,25 @@ DATA_FILE_DEC_RSCL = pjoin(IO_DATA_PATH, 'decimal_rescale.dcm') DATA_FILE_4D = pjoin(IO_DATA_PATH, '4d_multiframe_test.dcm') DATA_FILE_EMPTY_ST = pjoin(IO_DATA_PATH, 'slicethickness_empty_string.dcm') -DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', - '4d_multiframe_with_derived.dcm') -DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', - 'siemens_ct_header_csa.dcm') +DATA_FILE_4D_DERIVED = pjoin(get_nibabel_data(), 'nitest-dicom', '4d_multiframe_with_derived.dcm') +DATA_FILE_CT = pjoin(get_nibabel_data(), 'nitest-dicom', 'siemens_ct_header_csa.dcm') # This affine from our converted image was shown to match our image spatially # with an image from SPM DICOM conversion. We checked the matching with SPM # check reg. We have flipped the first and second rows to allow for rows, cols # transpose in current return compared to original case. EXPECTED_AFFINE = np.array( # do this for philips? 
- [[-1.796875, 0, 0, 115], - [0, -1.79684984, -0.01570896, 135.028779], - [0, -0.00940843750, 2.99995887, -78.710481], - [0, 0, 0, 1]])[:, [1, 0, 2, 3]] + [ + [-1.796875, 0, 0, 115], + [0, -1.79684984, -0.01570896, 135.028779], + [0, -0.00940843750, 2.99995887, -78.710481], + [0, 0, 0, 1], + ] +)[:, [1, 0, 2, 3]] # from Guys and Matthew's SPM code, undoing SPM's Y flip, and swapping first two # values in vector, to account for data rows, cols difference. -EXPECTED_PARAMS = [992.05050247, (0.00507649, - 0.99997450, - -0.005023611)] +EXPECTED_PARAMS = [992.05050247, (0.00507649, 0.99997450, -0.005023611)] @dicom_test @@ -62,11 +61,14 @@ def test_wrappers(): # first with empty or minimal data multi_minimal = { 'PerFrameFunctionalGroupsSequence': [None], - 'SharedFunctionalGroupsSequence': [None]} - for maker, args in ((didw.Wrapper, ({},)), - (didw.SiemensWrapper, ({},)), - (didw.MosaicWrapper, ({}, None, 10)), - (didw.MultiframeWrapper, (multi_minimal,))): + 'SharedFunctionalGroupsSequence': [None], + } + for maker, args in ( + (didw.Wrapper, ({},)), + (didw.SiemensWrapper, ({},)), + (didw.MosaicWrapper, ({}, None, 10)), + (didw.MultiframeWrapper, (multi_minimal,)), + ): dw = maker(*args) assert dw.get('InstanceNumber') is None assert dw.get('AcquisitionNumber') is None @@ -83,11 +85,7 @@ def test_wrappers(): assert not dw.is_mosaic assert dw.b_matrix is None assert dw.q_vector is None - for maker in (didw.wrapper_from_data, - didw.Wrapper, - didw.SiemensWrapper, - didw.MosaicWrapper - ): + for maker in (didw.wrapper_from_data, didw.Wrapper, didw.SiemensWrapper, didw.MosaicWrapper): dw = maker(DATA) assert dw.get('InstanceNumber') == 2 assert dw.get('AcquisitionNumber') == 2 @@ -117,6 +115,7 @@ def test_get_from_wrapper(): class FakeData(dict): pass + d = FakeData() d.some_key = 'another bit of data' dw = didw.Wrapper(d) @@ -124,9 +123,9 @@ class FakeData(dict): # Check get defers to dcm_data get class FakeData2: - def get(self, key, default): return 1 + d = FakeData2() d.some_key = 'another bit of data' dw = didw.Wrapper(d) @@ -136,18 +135,14 @@ def get(self, key, default): @dicom_test def test_wrapper_from_data(): # test wrapper from data, wrapper from file - for dw in (didw.wrapper_from_data(DATA), - didw.wrapper_from_file(DATA_FILE)): + for dw in (didw.wrapper_from_data(DATA), didw.wrapper_from_file(DATA_FILE)): assert dw.get('InstanceNumber') == 2 assert dw.get('AcquisitionNumber') == 2 with pytest.raises(KeyError): dw['not an item'] assert dw.is_mosaic - assert_array_almost_equal( - np.dot(didr.DPCS_TO_TAL, dw.affine), - EXPECTED_AFFINE) - for dw in (didw.wrapper_from_data(DATA_PHILIPS), - didw.wrapper_from_file(DATA_FILE_PHILIPS)): + assert_array_almost_equal(np.dot(didr.DPCS_TO_TAL, dw.affine), EXPECTED_AFFINE) + for dw in (didw.wrapper_from_data(DATA_PHILIPS), didw.wrapper_from_file(DATA_FILE_PHILIPS)): assert dw.get('InstanceNumber') == 1 assert dw.get('AcquisitionNumber') == 3 with pytest.raises(KeyError): @@ -216,13 +211,13 @@ def test_q_vector_etc(): assert dw.b_vector is None for pos in range(3): q_vec = np.zeros((3,)) - q_vec[pos] = 10. + q_vec[pos] = 10.0 # Reset wrapped dicom to refresh one_time property dw = didw.Wrapper(DATA) dw.q_vector = q_vec assert_array_equal(dw.q_vector, q_vec) assert dw.b_value == 10 - assert_array_equal(dw.b_vector, q_vec / 10.) 
+ assert_array_equal(dw.b_vector, q_vec / 10.0) # Reset wrapped dicom to refresh one_time property dw = didw.Wrapper(DATA) dw.q_vector = np.array([0, 0, 1e-6]) @@ -269,6 +264,7 @@ def test_vol_matching(): class C: series_signature = {} + assert dw_empty.is_same_series(C()) # make the Philips wrapper, check it compares True against itself @@ -333,9 +329,7 @@ def test_rotation_matrix(): assert_array_equal(dw.rotation_matrix, np.eye(3)) d['ImageOrientationPatient'] = [1, 0, 0, 0, 1, 0] dw = didw.wrapper_from_data(d) - assert_array_equal(dw.rotation_matrix, [[0, 1, 0], - [1, 0, 0], - [0, 0, -1]]) + assert_array_equal(dw.rotation_matrix, [[0, 1, 0], [1, 0, 0], [0, 0, -1]]) @dicom_test @@ -354,7 +348,7 @@ def test_assert_parallel(): # Test that we get an AssertionError if the cross product and the CSA # slice normal are not parallel dw = didw.wrapper_from_file(DATA_FILE_SLC_NORM) - dw.image_orient_patient = np.c_[[1., 0., 0.], [0., 1., 0.]] + dw.image_orient_patient = np.c_[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]] with pytest.raises(AssertionError): dw.slice_normal @@ -368,7 +362,7 @@ def test_decimal_rescale(): def fake_frames(seq_name, field_name, value_seq): - """ Make fake frames for multiframe testing + """Make fake frames for multiframe testing Parameters ---------- @@ -385,8 +379,10 @@ def fake_frames(seq_name, field_name, value_seq): each element in list is obj.[0]. = value_seq[n] for n in range(N) """ + class Fake: pass + frames = [] for value in value_seq: fake_frame = Fake() @@ -398,7 +394,7 @@ class Fake: def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): - """ Make a fake dictionary of data that ``image_shape`` is dependent on. + """Make a fake dictionary of data that ``image_shape`` is dependent on. Parameters ---------- @@ -409,18 +405,22 @@ def fake_shape_dependents(div_seq, sid_seq=None, sid_dim=None): sid_dim : int the index of the column in 'div_seq' to use as 'sid_seq' """ + class DimIdxSeqElem: def __init__(self, dip=(0, 0), fgp=None): self.DimensionIndexPointer = dip if fgp is not None: self.FunctionalGroupPointer = fgp + class FrmContSeqElem: def __init__(self, div, sid): self.DimensionIndexValues = div self.StackID = sid + class PerFrmFuncGrpSeqElem: def __init__(self, div, sid): self.FrameContentSequence = [FrmContSeqElem(div, sid)] + # if no StackID values passed in then use the values at index 'sid_dim' in # the value for DimensionIndexValues for it if sid_seq is None: @@ -436,11 +436,12 @@ def __init__(self, div, sid): fcs_tag = pydicom.datadict.tag_for_keyword('FrameContentSequence') dim_idx_seq[sid_dim] = DimIdxSeqElem(sid_tag, fcs_tag) # create the PerFrameFunctionalGroupsSequence - frames = [PerFrmFuncGrpSeqElem(div, sid) - for div, sid in zip(div_seq, sid_seq)] - return {'NumberOfFrames' : num_of_frames, - 'DimensionIndexSequence' : dim_idx_seq, - 'PerFrameFunctionalGroupsSequence' : frames} + frames = [PerFrmFuncGrpSeqElem(div, sid) for div, sid in zip(div_seq, sid_seq)] + return { + 'NumberOfFrames': num_of_frames, + 'DimensionIndexSequence': dim_idx_seq, + 'PerFrameFunctionalGroupsSequence': frames, + } class TestMultiFrameWrapper(TestCase): @@ -448,7 +449,8 @@ class TestMultiFrameWrapper(TestCase): MINIMAL_MF = { # Minimal contents of dcm_data for this wrapper 'PerFrameFunctionalGroupsSequence': [None], - 'SharedFunctionalGroupsSequence': [None]} + 'SharedFunctionalGroupsSequence': [None], + } WRAPCLASS = didw.MultiframeWrapper @dicom_test @@ -485,13 +487,11 @@ def test_shape(self): with pytest.raises(didw.WrapperError): MFW(fake_mf).image_shape # Make 
some fake frame data for 4D when StackID index is 0 - div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), - (1, 1, 3), (1, 2, 3)) + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (1, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) # Check stack number matching for 4D when StackID index is 0 - div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), - (1, 1, 3), (2, 2, 3)) + div_seq = ((1, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2), (1, 1, 3), (2, 2, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=0)) with pytest.raises(didw.WrapperError): MFW(fake_mf).image_shape @@ -535,8 +535,7 @@ def test_shape(self): with pytest.raises(didw.WrapperError): MFW(fake_mf).image_shape # Make some fake frame data for 4D when StackID index is 1 - div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), - (1, 1, 3), (2, 1, 3)) + div_seq = ((1, 1, 1), (2, 1, 1), (1, 1, 2), (2, 1, 2), (1, 1, 3), (2, 1, 3)) fake_mf.update(fake_shape_dependents(div_seq, sid_dim=1)) assert MFW(fake_mf).image_shape == (32, 64, 2, 3) @@ -548,18 +547,16 @@ def test_iop(self): with pytest.raises(didw.WrapperError): dw.image_orient_patient # Make a fake frame - fake_frame = fake_frames('PlaneOrientationSequence', - 'ImageOrientationPatient', - [[0, 1, 0, 1, 0, 0]])[0] + fake_frame = fake_frames( + 'PlaneOrientationSequence', 'ImageOrientationPatient', [[0, 1, 0, 1, 0, 0]] + )[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] - assert_array_equal(MFW(fake_mf).image_orient_patient, - [[0, 1], [1, 0], [0, 0]]) + assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) fake_mf['SharedFunctionalGroupsSequence'] = [None] with pytest.raises(didw.WrapperError): MFW(fake_mf).image_orient_patient fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] - assert_array_equal(MFW(fake_mf).image_orient_patient, - [[0, 1], [1, 0], [0, 0]]) + assert_array_equal(MFW(fake_mf).image_orient_patient, [[0, 1], [1, 0], [0, 0]]) def test_voxel_sizes(self): # Test voxel size calculation @@ -569,9 +566,7 @@ def test_voxel_sizes(self): with pytest.raises(didw.WrapperError): dw.voxel_sizes # Make a fake frame - fake_frame = fake_frames('PixelMeasuresSequence', - 'PixelSpacing', - [[2.1, 3.2]])[0] + fake_frame = fake_frames('PixelMeasuresSequence', 'PixelSpacing', [[2.1, 3.2]])[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] # Still not enough, we lack information for slice distances with pytest.raises(didw.WrapperError): @@ -593,9 +588,9 @@ def test_voxel_sizes(self): fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 5.4]) # Decimals in any field are OK - fake_frame = fake_frames('PixelMeasuresSequence', - 'PixelSpacing', - [[Decimal('2.1'), Decimal('3.2')]])[0] + fake_frame = fake_frames( + 'PixelMeasuresSequence', 'PixelSpacing', [[Decimal('2.1'), Decimal('3.2')]] + )[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] fake_mf['SpacingBetweenSlices'] = Decimal('4.3') assert_array_equal(MFW(fake_mf).voxel_sizes, [2.1, 3.2, 4.3]) @@ -610,9 +605,9 @@ def test_image_position(self): with pytest.raises(didw.WrapperError): dw.image_position # Make a fake frame - fake_frame = fake_frames('PlanePositionSequence', - 'ImagePositionPatient', - [[-2.0, 3., 7]])[0] + fake_frame = fake_frames( + 'PlanePositionSequence', 'ImagePositionPatient', [[-2.0, 3.0, 7]] + )[0] fake_mf['SharedFunctionalGroupsSequence'] = [fake_frame] assert_array_equal(MFW(fake_mf).image_position, 
[-2, 3, 7]) fake_mf['SharedFunctionalGroupsSequence'] = [None] @@ -622,7 +617,8 @@ def test_image_position(self): assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) # Check lists of Decimals work fake_frame.PlanePositionSequence[0].ImagePositionPatient = [ - Decimal(str(v)) for v in [-2, 3, 7]] + Decimal(str(v)) for v in [-2, 3, 7] + ] assert_array_equal(MFW(fake_mf).image_position, [-2, 3, 7]) assert MFW(fake_mf).image_position.dtype == float @@ -656,14 +652,14 @@ def test_data_derived_shape(self): # Test 4D diffusion data with an additional trace volume included # Excludes the trace volume and generates the correct shape dw = didw.wrapper_from_file(DATA_FILE_4D_DERIVED) - with pytest.warns(UserWarning, match="Derived images found and removed"): + with pytest.warns(UserWarning, match='Derived images found and removed'): assert dw.image_shape == (96, 96, 60, 33) @dicom_test @needs_nibabel_data('nitest-dicom') def test_data_unreadable_private_headers(self): # Test CT image with unreadable CSA tags - with pytest.warns(UserWarning, match="Error while attempting to read CSA header"): + with pytest.warns(UserWarning, match='Error while attempting to read CSA header'): dw = didw.wrapper_from_file(DATA_FILE_CT) assert dw.image_shape == (512, 571) @@ -724,13 +720,13 @@ def test_data_fake(self): [1, 4, 1, 2], [1, 2, 1, 2], [1, 3, 1, 2], - [1, 1, 1, 2]] + [1, 1, 1, 2], + ] fake_mf.update(fake_shape_dependents(dim_idxs, sid_dim=0)) shape = (2, 3, 4, 2, 2) data = np.arange(np.prod(shape)).reshape(shape) sorted_data = data.reshape(shape[:2] + (-1,), order='F') - order = [11, 9, 10, 8, 3, 1, 2, 0, - 15, 13, 14, 12, 7, 5, 6, 4] + order = [11, 9, 10, 8, 3, 1, 2, 0, 15, 13, 14, 12, 7, 5, 6, 4] sorted_data = sorted_data[..., np.argsort(order)] fake_mf['pixel_array'] = np.rollaxis(sorted_data, 2) assert_array_equal(MFW(fake_mf).get_data(), data * 2.0 - 1) @@ -745,9 +741,7 @@ def test__scale_data(self): fake_mf['RescaleSlope'] = 2.0 fake_mf['RescaleIntercept'] = -1.0 assert_array_equal(data * 2 - 1, dw._scale_data(data)) - fake_frame = fake_frames('PixelValueTransformationSequence', - 'RescaleSlope', - [3.0])[0] + fake_frame = fake_frames('PixelValueTransformationSequence', 'RescaleSlope', [3.0])[0] fake_mf['PerFrameFunctionalGroupsSequence'] = [fake_frame] # Lacking RescaleIntercept -> Error dw = MFW(fake_mf) diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index d0d20e574a..8a869c01db 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -1,5 +1,4 @@ -""" Testing diffusion parameter processing - +"""Testing diffusion parameter processing """ import numpy as np @@ -8,7 +7,7 @@ import pytest -from numpy.testing import (assert_array_almost_equal, assert_equal as np_assert_equal) +from numpy.testing import assert_array_almost_equal, assert_equal as np_assert_equal def test_b2q(): @@ -31,7 +30,7 @@ def test_b2q(): # no error if we up the tolerance q = B2q(B, tol=1) # Less massive negativity, dropping tol - B = np.diag([-1e-14, 10., 1]) + B = np.diag([-1e-14, 10.0, 1]) with pytest.raises(ValueError): B2q(B) assert_array_almost_equal(B2q(B, tol=5e-13), [0, 10, 0]) @@ -46,8 +45,8 @@ def test_q2bg(): # Conversion of q vector to b value and unit vector for pos in range(3): q_vec = np.zeros((3,)) - q_vec[pos] = 10. 
-        np_assert_equal(q2bg(q_vec), (10, q_vec / 10.))
+        q_vec[pos] = 10.0
+        np_assert_equal(q2bg(q_vec), (10, q_vec / 10.0))
     # Also - check array-like
     q_vec = [0, 1e-6, 0]
     np_assert_equal(q2bg(q_vec), (0, 0))
diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py
index 6e58931559..c7815cd6fb 100644
--- a/nibabel/nicom/tests/test_structreader.py
+++ b/nibabel/nicom/tests/test_structreader.py
@@ -1,4 +1,4 @@
-""" Testing Siemens CSA header reader
+"""Testing Siemens CSA header reader
 """
 import sys
 import struct
@@ -8,8 +8,8 @@

 def test_unpacker():
     s = b'1234\x00\x01'
-    le_int, = struct.unpack('<h', b'\x00\x01')
-    be_int, = struct.unpack('>h', b'\x00\x01')
+    (le_int,) = struct.unpack('<h', b'\x00\x01')
+    (be_int,) = struct.unpack('>h', b'\x00\x01')
     if sys.byteorder == 'little':
         native_int = le_int
         swapped_int = be_int
diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py
index ddfe68075c..edd20f9973 100644
--- a/nibabel/nicom/tests/test_utils.py
+++ b/nibabel/nicom/tests/test_utils.py
@@ -1,4 +1,4 @@
-""" Testing nicom.utils module
+"""Testing nicom.utils module
 """
 import re

@@ -6,7 +6,7 @@
 from .test_dicomwrappers import DATA, DATA_PHILIPS
 from ..utils import find_private_section

-pydicom, _, setup_module = optional_package("pydicom")
+pydicom, _, setup_module = optional_package('pydicom')


 def test_find_private_section_real():
diff --git a/nibabel/nicom/utils.py b/nibabel/nicom/utils.py
index f1d5810775..48a010903a 100644
--- a/nibabel/nicom/utils.py
+++ b/nibabel/nicom/utils.py
@@ -1,11 +1,11 @@
-""" Utilities for working with DICOM datasets
+"""Utilities for working with DICOM datasets
 """

 from numpy.compat.py3k import asstr


 def find_private_section(dcm_data, group_no, creator):
-    """ Return start element in group `group_no` given creator name `creator`
+    """Return start element in group `group_no` given creator name `creator`

     Private attribute tags need to announce where they will go by putting a
     tag in the private group (here `group_no`) between elements 1 and 0xFF. The
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index a951522c8d..625fe6baa9 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -6,7 +6,7 @@
 # copyright and license terms.
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to NIfTI1 image format +"""Read / write access to NIfTI1 image format NIfTI1 format defined at http://nifti.nimh.nih.gov/nifti-1/ """ @@ -28,7 +28,7 @@ from .spm99analyze import SpmAnalyzeHeader from .casting import have_binary128 -pdcm, have_dicom, _ = optional_package("pydicom") +pdcm, have_dicom, _ = optional_package('pydicom') # nifti1 flat header definition for Analyze-like first 348 bytes # first number in comments indicates offset in file header in bytes @@ -75,7 +75,7 @@ ('srow_y', 'f4', (4,)), # 296; 2nd row affine transform ('srow_z', 'f4', (4,)), # 312; 3rd row affine transform ('intent_name', 'S16'), # 328; name or meaning of data - ('magic', 'S4') # 344; must be 'ni1\0' or 'n+1\0' + ('magic', 'S4'), # 344; must be 'ni1\0' or 'n+1\0' ] # Full header numpy dtype @@ -91,166 +91,191 @@ _complex256t = np.void _dtdefs = ( # code, label, dtype definition, niistring - (0, 'none', np.void, ""), - (1, 'binary', np.void, ""), - (2, 'uint8', np.uint8, "NIFTI_TYPE_UINT8"), - (4, 'int16', np.int16, "NIFTI_TYPE_INT16"), - (8, 'int32', np.int32, "NIFTI_TYPE_INT32"), - (16, 'float32', np.float32, "NIFTI_TYPE_FLOAT32"), - (32, 'complex64', np.complex64, "NIFTI_TYPE_COMPLEX64"), - (64, 'float64', np.float64, "NIFTI_TYPE_FLOAT64"), - (128, 'RGB', np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1')]), "NIFTI_TYPE_RGB24"), + (0, 'none', np.void, ''), + (1, 'binary', np.void, ''), + (2, 'uint8', np.uint8, 'NIFTI_TYPE_UINT8'), + (4, 'int16', np.int16, 'NIFTI_TYPE_INT16'), + (8, 'int32', np.int32, 'NIFTI_TYPE_INT32'), + (16, 'float32', np.float32, 'NIFTI_TYPE_FLOAT32'), + (32, 'complex64', np.complex64, 'NIFTI_TYPE_COMPLEX64'), + (64, 'float64', np.float64, 'NIFTI_TYPE_FLOAT64'), + (128, 'RGB', np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')]), 'NIFTI_TYPE_RGB24'), (255, 'all', np.void, ''), - (256, 'int8', np.int8, "NIFTI_TYPE_INT8"), - (512, 'uint16', np.uint16, "NIFTI_TYPE_UINT16"), - (768, 'uint32', np.uint32, "NIFTI_TYPE_UINT32"), - (1024, 'int64', np.int64, "NIFTI_TYPE_INT64"), - (1280, 'uint64', np.uint64, "NIFTI_TYPE_UINT64"), - (1536, 'float128', _float128t, "NIFTI_TYPE_FLOAT128"), - (1792, 'complex128', np.complex128, "NIFTI_TYPE_COMPLEX128"), - (2048, 'complex256', _complex256t, "NIFTI_TYPE_COMPLEX256"), - (2304, 'RGBA', np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1'), - ('A', 'u1')]), "NIFTI_TYPE_RGBA32"), + (256, 'int8', np.int8, 'NIFTI_TYPE_INT8'), + (512, 'uint16', np.uint16, 'NIFTI_TYPE_UINT16'), + (768, 'uint32', np.uint32, 'NIFTI_TYPE_UINT32'), + (1024, 'int64', np.int64, 'NIFTI_TYPE_INT64'), + (1280, 'uint64', np.uint64, 'NIFTI_TYPE_UINT64'), + (1536, 'float128', _float128t, 'NIFTI_TYPE_FLOAT128'), + (1792, 'complex128', np.complex128, 'NIFTI_TYPE_COMPLEX128'), + (2048, 'complex256', _complex256t, 'NIFTI_TYPE_COMPLEX256'), + ( + 2304, + 'RGBA', + np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1'), ('A', 'u1')]), + 'NIFTI_TYPE_RGBA32', + ), ) # Make full code alias bank, including dtype column data_type_codes = make_dt_codes(_dtdefs) # Transform (qform, sform) codes -xform_codes = Recoder(( # code, label, niistring - (0, 'unknown', "NIFTI_XFORM_UNKNOWN"), - (1, 'scanner', "NIFTI_XFORM_SCANNER_ANAT"), - (2, 'aligned', "NIFTI_XFORM_ALIGNED_ANAT"), - (3, 'talairach', "NIFTI_XFORM_TALAIRACH"), - (4, 'mni', "NIFTI_XFORM_MNI_152"), - (5, 'template', "NIFTI_XFORM_TEMPLATE_OTHER"), - ), fields=('code', 'label', 'niistring')) +xform_codes = Recoder( + ( # code, label, niistring + (0, 
'unknown', 'NIFTI_XFORM_UNKNOWN'), + (1, 'scanner', 'NIFTI_XFORM_SCANNER_ANAT'), + (2, 'aligned', 'NIFTI_XFORM_ALIGNED_ANAT'), + (3, 'talairach', 'NIFTI_XFORM_TALAIRACH'), + (4, 'mni', 'NIFTI_XFORM_MNI_152'), + (5, 'template', 'NIFTI_XFORM_TEMPLATE_OTHER'), + ), + fields=('code', 'label', 'niistring'), +) # unit codes -unit_codes = Recoder(( # code, label - (0, 'unknown'), - (1, 'meter'), - (2, 'mm'), - (3, 'micron'), - (8, 'sec'), - (16, 'msec'), - (24, 'usec'), - (32, 'hz'), - (40, 'ppm'), - (48, 'rads')), fields=('code', 'label')) - -slice_order_codes = Recoder(( # code, label - (0, 'unknown'), - (1, 'sequential increasing', 'seq inc'), - (2, 'sequential decreasing', 'seq dec'), - (3, 'alternating increasing', 'alt inc'), - (4, 'alternating decreasing', 'alt dec'), - (5, 'alternating increasing 2', 'alt inc 2'), - (6, 'alternating decreasing 2', 'alt dec 2')), fields=('code', 'label')) - -intent_codes = Recoder(( - # code, label, parameters description tuple - (0, 'none', (), "NIFTI_INTENT_NONE"), - (2, 'correlation', ('p1 = DOF',), "NIFTI_INTENT_CORREL"), - (3, 't test', ('p1 = DOF',), "NIFTI_INTENT_TTEST"), - (4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF'), - "NIFTI_INTENT_FTEST"), - (5, 'z score', (), "NIFTI_INTENT_ZSCORE"), - (6, 'chi2', ('p1 = DOF',), "NIFTI_INTENT_CHISQ"), - # two parameter beta distribution - (7, 'beta', - ('p1=a', 'p2=b'), - "NIFTI_INTENT_BETA"), - # Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1 - (8, 'binomial', - ('p1 = number of trials', 'p2 = probability per trial'), - "NIFTI_INTENT_BINOM"), - # 2 parameter gamma - # Density(x) proportional to # x^(p1-1) * exp(-p2*x) - (9, 'gamma', - ('p1 = shape, p2 = scale', 2), - "NIFTI_INTENT_GAMMA"), - (10, 'poisson', - ('p1 = mean',), - "NIFTI_INTENT_POISSON"), - (11, 'normal', - ('p1 = mean', 'p2 = standard deviation',), - "NIFTI_INTENT_NORMAL"), - (12, 'non central f test', - ('p1 = numerator DOF', - 'p2 = denominator DOF', - 'p3 = numerator noncentrality parameter',), - "NIFTI_INTENT_FTEST_NONC"), - (13, 'non central chi2', - ('p1 = DOF', 'p2 = noncentrality parameter',), - "NIFTI_INTENT_CHISQ_NONC"), - (14, 'logistic', - ('p1 = location', 'p2 = scale',), - "NIFTI_INTENT_LOGISTIC"), - (15, 'laplace', - ('p1 = location', 'p2 = scale'), - "NIFTI_INTENT_LAPLACE"), - (16, 'uniform', - ('p1 = lower end', 'p2 = upper end'), - "NIFTI_INTENT_UNIFORM"), - (17, 'non central t test', - ('p1 = DOF', 'p2 = noncentrality parameter'), - "NIFTI_INTENT_TTEST_NONC"), - (18, 'weibull', - ('p1 = location', 'p2 = scale, p3 = power'), - "NIFTI_INTENT_WEIBULL"), - # p1 = 1 = 'half normal' distribution - # p1 = 2 = Rayleigh distribution - # p1 = 3 = Maxwell-Boltzmann distribution. 
- (19, 'chi', ('p1 = DOF',), "NIFTI_INTENT_CHI"), - (20, 'inverse gaussian', - ('pi = mu', 'p2 = lambda'), - "NIFTI_INTENT_INVGAUSS"), - (21, 'extreme value 1', - ('p1 = location', 'p2 = scale'), - "NIFTI_INTENT_EXTVAL"), - (22, 'p value', (), "NIFTI_INTENT_PVAL"), - (23, 'log p value', (), "NIFTI_INTENT_LOGPVAL"), - (24, 'log10 p value', (), "NIFTI_INTENT_LOG10PVAL"), - (1001, 'estimate', (), "NIFTI_INTENT_ESTIMATE"), - (1002, 'label', (), "NIFTI_INTENT_LABEL"), - (1003, 'neuroname', (), "NIFTI_INTENT_NEURONAME"), - (1004, 'general matrix', - ('p1 = M', 'p2 = N'), - "NIFTI_INTENT_GENMATRIX"), - (1005, 'symmetric matrix', ('p1 = M',), "NIFTI_INTENT_SYMMATRIX"), - (1006, 'displacement vector', (), "NIFTI_INTENT_DISPVECT"), - (1007, 'vector', (), "NIFTI_INTENT_VECTOR"), - (1008, 'pointset', (), "NIFTI_INTENT_POINTSET"), - (1009, 'triangle', (), "NIFTI_INTENT_TRIANGLE"), - (1010, 'quaternion', (), "NIFTI_INTENT_QUATERNION"), - (1011, 'dimensionless', (), "NIFTI_INTENT_DIMLESS"), - (2001, 'time series', - (), - "NIFTI_INTENT_TIME_SERIES", - "NIFTI_INTENT_TIMESERIES"), # this mis-spell occurs in the wild - (2002, 'node index', (), "NIFTI_INTENT_NODE_INDEX"), - (2003, 'rgb vector', (), "NIFTI_INTENT_RGB_VECTOR"), - (2004, 'rgba vector', (), "NIFTI_INTENT_RGBA_VECTOR"), - (2005, 'shape', (), "NIFTI_INTENT_SHAPE"), - # FSL-specific intent codes - codes used by FNIRT - # ($FSLDIR/warpfns/fnirt_file_reader.h:104) - (2006, 'fnirt disp field', (), 'FSL_FNIRT_DISPLACEMENT_FIELD'), - (2007, 'fnirt cubic spline coef', (), 'FSL_CUBIC_SPLINE_COEFFICIENTS'), - (2008, 'fnirt dct coef', (), 'FSL_DCT_COEFFICIENTS'), - (2009, 'fnirt quad spline coef', (), 'FSL_QUADRATIC_SPLINE_COEFFICIENTS'), - # FSL-specific intent codes - codes used by TOPUP - # ($FSLDIR/topup/topup_file_io.h:104) - (2016, 'topup cubic spline coef ', (), - 'FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS'), - (2017, 'topup quad spline coef', (), - 'FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS'), - (2018, 'topup field', (), 'FSL_TOPUP_FIELD'), -), fields=('code', 'label', 'parameters', 'niistring')) +unit_codes = Recoder( + ( # code, label + (0, 'unknown'), + (1, 'meter'), + (2, 'mm'), + (3, 'micron'), + (8, 'sec'), + (16, 'msec'), + (24, 'usec'), + (32, 'hz'), + (40, 'ppm'), + (48, 'rads'), + ), + fields=('code', 'label'), +) + +slice_order_codes = Recoder( + ( # code, label + (0, 'unknown'), + (1, 'sequential increasing', 'seq inc'), + (2, 'sequential decreasing', 'seq dec'), + (3, 'alternating increasing', 'alt inc'), + (4, 'alternating decreasing', 'alt dec'), + (5, 'alternating increasing 2', 'alt inc 2'), + (6, 'alternating decreasing 2', 'alt dec 2'), + ), + fields=('code', 'label'), +) + +intent_codes = Recoder( + ( + # code, label, parameters description tuple + (0, 'none', (), 'NIFTI_INTENT_NONE'), + (2, 'correlation', ('p1 = DOF',), 'NIFTI_INTENT_CORREL'), + (3, 't test', ('p1 = DOF',), 'NIFTI_INTENT_TTEST'), + (4, 'f test', ('p1 = numerator DOF', 'p2 = denominator DOF'), 'NIFTI_INTENT_FTEST'), + (5, 'z score', (), 'NIFTI_INTENT_ZSCORE'), + (6, 'chi2', ('p1 = DOF',), 'NIFTI_INTENT_CHISQ'), + # two parameter beta distribution + (7, 'beta', ('p1=a', 'p2=b'), 'NIFTI_INTENT_BETA'), + # Prob(x) = (p1 choose x) * p2^x * (1-p2)^(p1-x), for x=0,1,...,p1 + ( + 8, + 'binomial', + ('p1 = number of trials', 'p2 = probability per trial'), + 'NIFTI_INTENT_BINOM', + ), + # 2 parameter gamma + # Density(x) proportional to # x^(p1-1) * exp(-p2*x) + (9, 'gamma', ('p1 = shape, p2 = scale', 2), 'NIFTI_INTENT_GAMMA'), + (10, 'poisson', ('p1 = mean',), 
'NIFTI_INTENT_POISSON'), + ( + 11, + 'normal', + ( + 'p1 = mean', + 'p2 = standard deviation', + ), + 'NIFTI_INTENT_NORMAL', + ), + ( + 12, + 'non central f test', + ( + 'p1 = numerator DOF', + 'p2 = denominator DOF', + 'p3 = numerator noncentrality parameter', + ), + 'NIFTI_INTENT_FTEST_NONC', + ), + ( + 13, + 'non central chi2', + ( + 'p1 = DOF', + 'p2 = noncentrality parameter', + ), + 'NIFTI_INTENT_CHISQ_NONC', + ), + ( + 14, + 'logistic', + ( + 'p1 = location', + 'p2 = scale', + ), + 'NIFTI_INTENT_LOGISTIC', + ), + (15, 'laplace', ('p1 = location', 'p2 = scale'), 'NIFTI_INTENT_LAPLACE'), + (16, 'uniform', ('p1 = lower end', 'p2 = upper end'), 'NIFTI_INTENT_UNIFORM'), + ( + 17, + 'non central t test', + ('p1 = DOF', 'p2 = noncentrality parameter'), + 'NIFTI_INTENT_TTEST_NONC', + ), + (18, 'weibull', ('p1 = location', 'p2 = scale, p3 = power'), 'NIFTI_INTENT_WEIBULL'), + # p1 = 1 = 'half normal' distribution + # p1 = 2 = Rayleigh distribution + # p1 = 3 = Maxwell-Boltzmann distribution. + (19, 'chi', ('p1 = DOF',), 'NIFTI_INTENT_CHI'), + (20, 'inverse gaussian', ('pi = mu', 'p2 = lambda'), 'NIFTI_INTENT_INVGAUSS'), + (21, 'extreme value 1', ('p1 = location', 'p2 = scale'), 'NIFTI_INTENT_EXTVAL'), + (22, 'p value', (), 'NIFTI_INTENT_PVAL'), + (23, 'log p value', (), 'NIFTI_INTENT_LOGPVAL'), + (24, 'log10 p value', (), 'NIFTI_INTENT_LOG10PVAL'), + (1001, 'estimate', (), 'NIFTI_INTENT_ESTIMATE'), + (1002, 'label', (), 'NIFTI_INTENT_LABEL'), + (1003, 'neuroname', (), 'NIFTI_INTENT_NEURONAME'), + (1004, 'general matrix', ('p1 = M', 'p2 = N'), 'NIFTI_INTENT_GENMATRIX'), + (1005, 'symmetric matrix', ('p1 = M',), 'NIFTI_INTENT_SYMMATRIX'), + (1006, 'displacement vector', (), 'NIFTI_INTENT_DISPVECT'), + (1007, 'vector', (), 'NIFTI_INTENT_VECTOR'), + (1008, 'pointset', (), 'NIFTI_INTENT_POINTSET'), + (1009, 'triangle', (), 'NIFTI_INTENT_TRIANGLE'), + (1010, 'quaternion', (), 'NIFTI_INTENT_QUATERNION'), + (1011, 'dimensionless', (), 'NIFTI_INTENT_DIMLESS'), + ( + 2001, + 'time series', + (), + 'NIFTI_INTENT_TIME_SERIES', + 'NIFTI_INTENT_TIMESERIES', + ), # this mis-spell occurs in the wild + (2002, 'node index', (), 'NIFTI_INTENT_NODE_INDEX'), + (2003, 'rgb vector', (), 'NIFTI_INTENT_RGB_VECTOR'), + (2004, 'rgba vector', (), 'NIFTI_INTENT_RGBA_VECTOR'), + (2005, 'shape', (), 'NIFTI_INTENT_SHAPE'), + # FSL-specific intent codes - codes used by FNIRT + # ($FSLDIR/warpfns/fnirt_file_reader.h:104) + (2006, 'fnirt disp field', (), 'FSL_FNIRT_DISPLACEMENT_FIELD'), + (2007, 'fnirt cubic spline coef', (), 'FSL_CUBIC_SPLINE_COEFFICIENTS'), + (2008, 'fnirt dct coef', (), 'FSL_DCT_COEFFICIENTS'), + (2009, 'fnirt quad spline coef', (), 'FSL_QUADRATIC_SPLINE_COEFFICIENTS'), + # FSL-specific intent codes - codes used by TOPUP + # ($FSLDIR/topup/topup_file_io.h:104) + (2016, 'topup cubic spline coef ', (), 'FSL_TOPUP_CUBIC_SPLINE_COEFFICIENTS'), + (2017, 'topup quad spline coef', (), 'FSL_TOPUP_QUADRATIC_SPLINE_COEFFICIENTS'), + (2018, 'topup field', (), 'FSL_TOPUP_FIELD'), + ), + fields=('code', 'label', 'parameters', 'niistring'), +) class Nifti1Extension: @@ -331,8 +356,7 @@ def get_content(self): return self._content def get_sizeondisk(self): - """Return the size of the extension in the NIfTI file. 
- """ + """Return the size of the extension in the NIfTI file.""" # need raw value size plus 8 bytes for esize and ecode size = len(self._mangle(self._content)) size += 8 @@ -358,7 +382,7 @@ def __ne__(self, other): return not self == other def write_to(self, fileobj, byteswap): - """ Write header extensions to fileobj + """Write header extensions to fileobj Write starts at fileobj current file position. @@ -397,6 +421,7 @@ class Nifti1DicomExtension(Nifti1Extension): and content is the raw bytestring loaded directly from the nifti file header. """ + def __init__(self, code, content, parent_hdr=None): """ Parameters @@ -434,15 +459,16 @@ def __init__(self, code, content, parent_hdr=None): elif isinstance(content, bytes): # Got a byte string - unmangle it self._raw_content = content self._is_implicit_VR = self._guess_implicit_VR() - ds = self._unmangle(content, self._is_implicit_VR, - self._is_little_endian) + ds = self._unmangle(content, self._is_implicit_VR, self._is_little_endian) self._content = ds elif content is None: # initialize a new dicom dataset self._is_implicit_VR = False self._content = pdcm.dataset.Dataset() else: - raise TypeError(f"content must be either a bytestring or a pydicom Dataset. " - f"Got {content.__class__}") + raise TypeError( + f'content must be either a bytestring or a pydicom Dataset. ' + f'Got {content.__class__}' + ) def _guess_implicit_VR(self): """Try to guess DICOM syntax by checking for valid VRs. @@ -461,9 +487,7 @@ def _guess_implicit_VR(self): def _unmangle(self, value, is_implicit_VR=False, is_little_endian=True): bio = BytesIO(value) - ds = pdcm.filereader.read_dataset(bio, - is_implicit_VR, - is_little_endian) + ds = pdcm.filereader.read_dataset(bio, is_implicit_VR, is_little_endian) return ds def _mangle(self, dataset): @@ -480,22 +504,24 @@ def _mangle(self, dataset): # see nifti1_io.h for a complete list of all known extensions and # references to their description or contacts of the respective # initiators -extension_codes = Recoder(( - (0, "ignore", Nifti1Extension), - (2, "dicom", Nifti1DicomExtension if have_dicom else Nifti1Extension), - (4, "afni", Nifti1Extension), - (6, "comment", Nifti1Extension), - (8, "xcede", Nifti1Extension), - (10, "jimdiminfo", Nifti1Extension), - (12, "workflow_fwds", Nifti1Extension), - (14, "freesurfer", Nifti1Extension), - (16, "pypickle", Nifti1Extension), -), fields=('code', 'label', 'handler')) +extension_codes = Recoder( + ( + (0, 'ignore', Nifti1Extension), + (2, 'dicom', Nifti1DicomExtension if have_dicom else Nifti1Extension), + (4, 'afni', Nifti1Extension), + (6, 'comment', Nifti1Extension), + (8, 'xcede', Nifti1Extension), + (10, 'jimdiminfo', Nifti1Extension), + (12, 'workflow_fwds', Nifti1Extension), + (14, 'freesurfer', Nifti1Extension), + (16, 'pypickle', Nifti1Extension), + ), + fields=('code', 'label', 'handler'), +) class Nifti1Extensions(list): - """Simple extension collection, implemented as a list-subclass. - """ + """Simple extension collection, implemented as a list-subclass.""" def count(self, ecode): """Returns the number of extensions matching a given *ecode*. @@ -517,15 +543,14 @@ def get_codes(self): return [e.get_code() for e in self] def get_sizeondisk(self): - """Return the size of the complete header extensions in the NIfTI file. 
- """ + """Return the size of the complete header extensions in the NIfTI file.""" return np.sum([e.get_sizeondisk() for e in self]) def __repr__(self): - return "Nifti1Extensions(%s)" % ', '.join(str(e) for e in self) + return 'Nifti1Extensions(%s)' % ', '.join(str(e) for e in self) def write_to(self, fileobj, byteswap): - """ Write header extensions to fileobj + """Write header extensions to fileobj Write starts at fileobj current file position. @@ -588,7 +613,8 @@ def from_fileobj(klass, fileobj, size, byteswap): warnings.warn( 'Extension size is not a multiple of 16 bytes; ' 'Assuming size is correct and hoping for the best', - UserWarning) + UserWarning, + ) # read extension itself; esize includes the 8 bytes already read evalue = fileobj.read(int(esize - 8)) if not len(evalue) == esize - 8: @@ -610,7 +636,7 @@ def from_fileobj(klass, fileobj, size, byteswap): class Nifti1Header(SpmAnalyzeHeader): - """ Class for NIfTI1 header + """Class for NIfTI1 header The NIfTI1 header has many more coded fields than the simpler Analyze variants. NIfTI1 headers also have extensions. @@ -622,16 +648,19 @@ class Nifti1Header(SpmAnalyzeHeader): This class handles the header-preceding-data case. """ + # Copies of module level definitions template_dtype = header_dtype _data_type_codes = data_type_codes # fields with recoders for their values - _field_recoders = {'datatype': data_type_codes, - 'qform_code': xform_codes, - 'sform_code': xform_codes, - 'intent_code': intent_codes, - 'slice_code': slice_order_codes} + _field_recoders = { + 'datatype': data_type_codes, + 'qform_code': xform_codes, + 'sform_code': xform_codes, + 'intent_code': intent_codes, + 'slice_code': slice_order_codes, + } # data scaling capabilities has_data_slope = True @@ -655,28 +684,17 @@ class Nifti1Header(SpmAnalyzeHeader): # Quaternion threshold near 0, based on float32 precision quaternion_threshold = -np.finfo(np.float32).eps * 3 - def __init__(self, - binaryblock=None, - endianness=None, - check=True, - extensions=()): - """ Initialize header from binary data block and extensions - """ - super(Nifti1Header, self).__init__(binaryblock, - endianness, - check) + def __init__(self, binaryblock=None, endianness=None, check=True, extensions=()): + """Initialize header from binary data block and extensions""" + super(Nifti1Header, self).__init__(binaryblock, endianness, check) self.extensions = self.exts_klass(extensions) def copy(self): - """ Return copy of header + """Return copy of header Take reference to extensions as well as copy of header contents """ - return self.__class__( - self.binaryblock, - self.endianness, - False, - self.extensions) + return self.__class__(self.binaryblock, self.endianness, False, self.extensions) @classmethod def from_fileobj(klass, fileobj, endianness=None, check=True): @@ -696,21 +714,20 @@ def from_fileobj(klass, fileobj, endianness=None, check=True): else: # otherwise read until the beginning of the data extsize = hdr._structarr['vox_offset'] - fileobj.tell() byteswap = endian_codes['native'] != hdr.endianness - hdr.extensions = klass.exts_klass.from_fileobj(fileobj, extsize, - byteswap) + hdr.extensions = klass.exts_klass.from_fileobj(fileobj, extsize, byteswap) return hdr def write_to(self, fileobj): # First check that vox offset is large enough; set if necessary if self.is_single: vox_offset = self._structarr['vox_offset'] - min_vox_offset = (self.single_vox_offset + - self.extensions.get_sizeondisk()) + min_vox_offset = self.single_vox_offset + self.extensions.get_sizeondisk() if vox_offset 
== 0: # vox offset unset; set as necessary self._structarr['vox_offset'] = min_vox_offset elif vox_offset < min_vox_offset: raise HeaderDataError( - f'vox offset set to {vox_offset}, but need at least {min_vox_offset}') + f'vox offset set to {vox_offset}, but need at least {min_vox_offset}' + ) super(Nifti1Header, self).write_to(fileobj) # Write extensions if len(self.extensions) == 0: @@ -724,7 +741,7 @@ def write_to(self, fileobj): self.extensions.write_to(fileobj, byteswap) def get_best_affine(self): - """ Select best of available transforms """ + """Select best of available transforms""" hdr = self._structarr if hdr['sform_code'] != 0: return self.get_sform() @@ -734,7 +751,7 @@ def get_best_affine(self): @classmethod def default_structarr(klass, endianness=None): - """ Create empty header binary block with given endianness """ + """Create empty header binary block with given endianness""" hdr_data = super(Nifti1Header, klass).default_structarr(endianness) if klass.is_single: hdr_data['magic'] = klass.single_magic @@ -744,7 +761,7 @@ def default_structarr(klass, endianness=None): @classmethod def from_header(klass, header=None, check=True): - """ Class method to create header from another header + """Class method to create header from another header Extend Analyze header copy by copying extensions from other Nifti types. @@ -768,7 +785,7 @@ def from_header(klass, header=None, check=True): return new_hdr def get_data_shape(self): - """ Get shape of data + """Get shape of data Examples -------- @@ -797,8 +814,9 @@ def get_data_shape(self): if shape[:3] == (-1, 1, 1): vec_len = int(self._structarr['glmin']) if vec_len == 0: - raise HeaderDataError('-1 in dim[1] but 0 in glmin; ' - 'inconsistent freesurfer type header?') + raise HeaderDataError( + '-1 in dim[1] but 0 in glmin; ' 'inconsistent freesurfer type header?' + ) return (vec_len, 1, 1) + shape[3:] # Apply freesurfer hack for ico7 surface elif shape[:3] == (27307, 1, 6): @@ -807,7 +825,7 @@ def get_data_shape(self): return shape def set_data_shape(self, shape): - """ Set shape of data # noqa + """Set shape of data # noqa If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -864,8 +882,11 @@ def set_data_shape(self, shape): if shape[:3] == (163842, 1, 1): shape = (27307, 1, 6) + shape[3:] # Apply freesurfer hack for large vectors - elif (len(shape) >= 3 and shape[1:3] == (1, 1) and - shape[0] > np.iinfo(hdr['dim'].dtype.base).max): + elif ( + len(shape) >= 3 + and shape[1:3] == (1, 1) + and shape[0] > np.iinfo(hdr['dim'].dtype.base).max + ): try: hdr['glmin'] = shape[0] except OverflowError: @@ -874,13 +895,16 @@ def set_data_shape(self, shape): overflow = hdr['glmin'] != shape[0] if overflow: raise HeaderDataError(f'shape[0] {shape[0]} does not fit in glmax datatype') - warnings.warn('Using large vector Freesurfer hack; header will ' - 'not be compatible with SPM or FSL', stacklevel=2) + warnings.warn( + 'Using large vector Freesurfer hack; header will ' + 'not be compatible with SPM or FSL', + stacklevel=2, + ) shape = (-1, 1, 1) + shape[3:] super(Nifti1Header, self).set_data_shape(shape) def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code or dtype or type + """Set numpy dtype for data from code or dtype or type Using :py:class:`int` or ``"int"`` is disallowed, as these types will be interpreted as ``np.int64``, which is almost never desired. 
@@ -921,13 +945,15 @@ def set_data_dtype(self, datatype): >>> hdr.get_data_dtype() == np.dtype('int64') True """ - if not isinstance(datatype, np.dtype) and datatype in (int, "int"): - raise ValueError(f"Invalid data type {datatype!r}. Specify a sized integer, " - "e.g., 'uint8' or numpy.int16.") + if not isinstance(datatype, np.dtype) and datatype in (int, 'int'): + raise ValueError( + f'Invalid data type {datatype!r}. Specify a sized integer, ' + "e.g., 'uint8' or numpy.int16." + ) super().set_data_dtype(datatype) def get_qform_quaternion(self): - """ Compute quaternion from b, c, d of quaternion + """Compute quaternion from b, c, d of quaternion Fills a value by assuming this is a unit quaternion """ @@ -937,7 +963,7 @@ def get_qform_quaternion(self): return fillpositive(bcd, self.quaternion_threshold) def get_qform(self, coded=False): - """ Return 4x4 affine matrix from qform parameters in header + """Return 4x4 affine matrix from qform parameters in header Parameters ---------- @@ -978,7 +1004,7 @@ def get_qform(self, coded=False): return out def set_qform(self, affine, code=None, strip_shears=True): - """ Set qform header values from 4x4 affine + """Set qform header values from 4x4 affine Parameters ---------- @@ -1069,8 +1095,7 @@ def set_qform(self, affine, code=None, strip_shears=True): P, S, Qs = npl.svd(R) PR = np.dot(P, Qs) if not strip_shears and not np.allclose(PR, R): - raise HeaderDataError("Shears in affine and `strip_shears` is " - "False") + raise HeaderDataError('Shears in affine and `strip_shears` is ' 'False') # Convert to quaternion quat = mat2quat(PR) # Set into header @@ -1080,7 +1105,7 @@ def set_qform(self, affine, code=None, strip_shears=True): hdr['quatern_b'], hdr['quatern_c'], hdr['quatern_d'] = quat[1:] def get_sform(self, coded=False): - """ Return 4x4 affine matrix from sform parameters in header + """Return 4x4 affine matrix from sform parameters in header Parameters ---------- @@ -1111,7 +1136,7 @@ def get_sform(self, coded=False): return out def set_sform(self, affine, code=None): - """ Set sform transform from 4x4 affine + """Set sform transform from 4x4 affine Parameters ---------- @@ -1173,7 +1198,7 @@ def set_sform(self, affine, code=None): hdr['srow_z'][:] = affine[2, :] def get_slope_inter(self): - """ Get data scaling (slope) and DC offset (intercept) from header data + """Get data scaling (slope) and DC offset (intercept) from header data Returns ------- @@ -1216,7 +1241,7 @@ def get_slope_inter(self): return slope, inter def set_slope_inter(self, slope, inter=None): - """ Set slope and / or intercept into header + """Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -1250,7 +1275,7 @@ def set_slope_inter(self, slope, inter=None): self._structarr['scl_inter'] = inter def get_dim_info(self): - """ Gets NIfTI MRI slice etc dimension information + """Gets NIfTI MRI slice etc dimension information Returns ------- @@ -1280,12 +1305,14 @@ def get_dim_info(self): freq = info & 3 phase = (info >> 2) & 3 slice = (info >> 4) & 3 - return (freq - 1 if freq else None, - phase - 1 if phase else None, - slice - 1 if slice else None) + return ( + freq - 1 if freq else None, + phase - 1 if phase else None, + slice - 1 if slice else None, + ) def set_dim_info(self, freq=None, phase=None, slice=None): - """ Sets nifti MRI slice etc dimension information + """Sets nifti MRI slice etc dimension information Parameters ---------- @@ -1332,7 +1359,7 
@@ def set_dim_info(self, freq=None, phase=None, slice=None): self._structarr['dim_info'] = info def get_intent(self, code_repr='label'): - """ Get intent code, parameters and name + """Get intent code, parameters and name Parameters ---------- @@ -1377,7 +1404,7 @@ def get_intent(self, code_repr='label'): return label, tuple(params), name def set_intent(self, code, params=(), name='', allow_unknown=False): - """ Set the intent code, parameters and name + """Set the intent code, parameters and name If parameters are not specified, assumed to be all zero. Each intent code has a set number of parameters associated. If you @@ -1444,12 +1471,12 @@ def set_intent(self, code, params=(), name='', allow_unknown=False): hdr['intent_code'] = icode hdr['intent_name'] = name all_params = [0] * 3 - all_params[:len(params)] = params[:] + all_params[: len(params)] = params[:] for i, param in enumerate(all_params): hdr['intent_p%d' % (i + 1)] = param def get_slice_duration(self): - """ Get slice duration + """Get slice duration Returns ------- @@ -1471,12 +1498,11 @@ def get_slice_duration(self): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' - 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') return float(self._structarr['slice_duration']) def set_slice_duration(self, duration): - """ Set slice duration + """Set slice duration Parameters ---------- @@ -1489,27 +1515,25 @@ def set_slice_duration(self, duration): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' - 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') self._structarr['slice_duration'] = duration def get_n_slices(self): - """ Return the number of slices - """ + """Return the number of slices""" _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension not set in header ' - 'dim_info') + raise HeaderDataError('Slice dimension not set in header ' 'dim_info') shape = self.get_data_shape() try: slice_len = shape[slice_dim] except IndexError: - raise HeaderDataError(f'Slice dimension index ({slice_dim}) ' - f'outside shape tuple ({shape})') + raise HeaderDataError( + f'Slice dimension index ({slice_dim}) ' f'outside shape tuple ({shape})' + ) return slice_len def get_slice_times(self): - """ Get slice times from slice timing information + """Get slice times from slice timing information Returns ------- @@ -1537,10 +1561,8 @@ def get_slice_times(self): duration = self.get_slice_duration() slabel = self.get_value_label('slice_code') if slabel == 'unknown': - raise HeaderDataError('Cannot get slice times when ' - 'Slice code is "unknown"') - slice_start, slice_end = (int(hdr['slice_start']), - int(hdr['slice_end'])) + raise HeaderDataError('Cannot get slice times when ' 'Slice code is "unknown"') + slice_start, slice_end = (int(hdr['slice_start']), int(hdr['slice_end'])) if slice_start < 0: raise HeaderDataError('slice_start should be >= 0') if slice_end == 0: @@ -1550,12 +1572,10 @@ def get_slice_times(self): raise HeaderDataError('slice_end should be > slice_start') st_order = self._slice_time_order(slabel, n_timed) times = st_order * duration - return ((None,) * slice_start + - tuple(times) + - (None,) * (slice_len - slice_end - 1)) + return (None,) * slice_start + tuple(times) + (None,) * (slice_len - slice_end - 1) def set_slice_times(self, 
slice_times): - """ Set slice times into *hdr* + """Set slice times into *hdr* Parameters ---------- @@ -1582,8 +1602,7 @@ def set_slice_times(self, slice_times): hdr = self._structarr slice_len = self.get_n_slices() if slice_len != len(slice_times): - raise HeaderDataError('Number of slice times does not ' - 'match number of slices') + raise HeaderDataError('Number of slice times does not ' 'match number of slices') # Extract Nones at beginning and end. Check for others for ind, time in enumerate(slice_times): if time is not None: @@ -1595,17 +1614,15 @@ def set_slice_times(self, slice_times): if time is not None: slice_end = slice_len - ind - 1 break - timed = slice_times[slice_start:slice_end + 1] + timed = slice_times[slice_start : slice_end + 1] for time in timed: if time is None: - raise HeaderDataError('Cannot have None in middle ' - 'of slice time vector') + raise HeaderDataError('Cannot have None in middle ' 'of slice time vector') # Find slice duration, check times are compatible with single # duration tdiffs = np.diff(np.sort(timed)) if not np.allclose(np.diff(tdiffs), 0): - raise HeaderDataError('Slice times not compatible with ' - 'single slice duration') + raise HeaderDataError('Slice times not compatible with ' 'single slice duration') duration = np.mean(tdiffs) # To slice time order st_order = np.round(np.array(timed) / duration) @@ -1617,9 +1634,7 @@ def set_slice_times(self, slice_times): matching_labels = [] for label in labels: - if np.all(st_order == self._slice_time_order( - label, - n_timed)): + if np.all(st_order == self._slice_time_order(label, n_timed)): matching_labels.append(label) if not matching_labels: @@ -1627,7 +1642,8 @@ def set_slice_times(self, slice_times): if len(matching_labels) > 1: warnings.warn( f"Multiple slice orders satisfy: {', '.join(matching_labels)}. 
" - "Choosing the first one") + 'Choosing the first one' + ) label = matching_labels[0] # Set values into header hdr['slice_start'] = slice_start @@ -1636,23 +1652,23 @@ def set_slice_times(self, slice_times): hdr['slice_code'] = slice_order_codes.code[label] def _slice_time_order(self, slabel, n_slices): - """ Supporting function to give time order of slices from label """ + """Supporting function to give time order of slices from label""" if slabel == 'sequential increasing': sp_ind_time_order = list(range(n_slices)) elif slabel == 'sequential decreasing': sp_ind_time_order = list(range(n_slices)[::-1]) elif slabel == 'alternating increasing': - sp_ind_time_order = (list(range(0, n_slices, 2)) + - list(range(1, n_slices, 2))) + sp_ind_time_order = list(range(0, n_slices, 2)) + list(range(1, n_slices, 2)) elif slabel == 'alternating decreasing': - sp_ind_time_order = (list(range(n_slices - 1, -1, -2)) + - list(range(n_slices - 2, -1, -2))) + sp_ind_time_order = list(range(n_slices - 1, -1, -2)) + list( + range(n_slices - 2, -1, -2) + ) elif slabel == 'alternating increasing 2': - sp_ind_time_order = (list(range(1, n_slices, 2)) + - list(range(0, n_slices, 2))) + sp_ind_time_order = list(range(1, n_slices, 2)) + list(range(0, n_slices, 2)) elif slabel == 'alternating decreasing 2': - sp_ind_time_order = (list(range(n_slices - 2, -1, -2)) + - list(range(n_slices - 1, -1, -2))) + sp_ind_time_order = list(range(n_slices - 2, -1, -2)) + list( + range(n_slices - 1, -1, -2) + ) else: raise HeaderDataError(f'We do not handle slice ordering "{slabel}"') return np.argsort(sp_ind_time_order) @@ -1660,8 +1676,7 @@ def _slice_time_order(self, slabel, n_slices): def get_xyzt_units(self): xyz_code = self.structarr['xyzt_units'] % 8 t_code = self.structarr['xyzt_units'] - xyz_code - return (unit_codes.label[xyz_code], - unit_codes.label[t_code]) + return (unit_codes.label[xyz_code], unit_codes.label[t_code]) def set_xyzt_units(self, xyz=None, t=None): if xyz is None: @@ -1675,7 +1690,7 @@ def set_xyzt_units(self, xyz=None, t=None): self.structarr['xyzt_units'] = xyz_code + t_code def _clean_after_mapping(self): - """ Set format-specific stuff after converting header from mapping + """Set format-specific stuff after converting header from mapping Clean up header after it has been initialized from an ``as_analyze_map`` method of another header type @@ -1683,8 +1698,7 @@ def _clean_after_mapping(self): See :meth:`nibabel.analyze.AnalyzeHeader._clean_after_mapping` for a more detailed description. """ - self._structarr['magic'] = (self.single_magic if self.is_single - else self.pair_magic) + self._structarr['magic'] = self.single_magic if self.is_single else self.pair_magic """ Checks only below here """ @@ -1692,15 +1706,17 @@ def _clean_after_mapping(self): def _get_checks(klass): # We need to return our own versions of - e.g. 
chk_datatype, to # pick up the Nifti datatypes from our class - return (klass._chk_sizeof_hdr, - klass._chk_datatype, - klass._chk_bitpix, - klass._chk_pixdims, - klass._chk_qfac, - klass._chk_magic, - klass._chk_offset, - klass._chk_qform_code, - klass._chk_sform_code) + return ( + klass._chk_sizeof_hdr, + klass._chk_datatype, + klass._chk_bitpix, + klass._chk_pixdims, + klass._chk_qfac, + klass._chk_magic, + klass._chk_offset, + klass._chk_qform_code, + klass._chk_sform_code, + ) @staticmethod def _chk_qfac(hdr, fix=False): @@ -1736,8 +1752,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = ('vox offset %d too low for ' - 'single file nifti1' % offset) + rep.problem_msg = 'vox offset %d too low for ' 'single file nifti1' % offset if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' @@ -1780,20 +1795,22 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) return hdr_struct['magic'] in (b'ni1', b'n+1') class Nifti1PairHeader(Nifti1Header): - """ Class for NIfTI1 pair header """ + """Class for NIfTI1 pair header""" + # Signal whether this is single (header + data) file is_single = False class Nifti1Pair(analyze.AnalyzeImage): - """ Class for NIfTI1 format image, header pair - """ + """Class for NIfTI1 format image, header pair""" + header_class = Nifti1PairHeader _meta_sniff_len = header_class.sizeof_hdr rw = True @@ -1802,8 +1819,7 @@ class Nifti1Pair(analyze.AnalyzeImage): # the data at serialization time _dtype_alias = None - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None, dtype=None): + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None): # Special carve-out for 64 bit integers # See GitHub issues # * https://github.com/nipy/nibabel/issues/1046 @@ -1811,24 +1827,24 @@ def __init__(self, dataobj, affine, header=None, # This only applies to NIfTI because the parent Analyze formats did # not support 64-bit integer data, so `set_data_dtype(int64)` would # already fail. - danger_dts = (np.dtype("int64"), np.dtype("uint64")) + danger_dts = (np.dtype('int64'), np.dtype('uint64')) if header is None and dtype is None and get_obj_dtype(dataobj) in danger_dts: - msg = (f"Image data has type {dataobj.dtype}, which may cause " - "incompatibilities with other tools. This will error in " - "NiBabel 5.0. This warning can be silenced " - f"by passing the dtype argument to {self.__class__.__name__}().") + msg = ( + f'Image data has type {dataobj.dtype}, which may cause ' + 'incompatibilities with other tools. This will error in ' + 'NiBabel 5.0. This warning can be silenced ' + f'by passing the dtype argument to {self.__class__.__name__}().' 
+ ) warnings.warn(msg, FutureWarning, stacklevel=2) - super(Nifti1Pair, self).__init__(dataobj, - affine, - header, - extra, - file_map, - dtype) + super(Nifti1Pair, self).__init__(dataobj, affine, header, extra, file_map, dtype) # Force set of s/q form when header is None unless affine is also None if header is None and affine is not None: self._affine2header() + # Copy docstring - __init__.__doc__ = analyze.AnalyzeImage.__init__.__doc__ + """ + __init__.__doc__ = ( + analyze.AnalyzeImage.__init__.__doc__ + + """ Notes ----- @@ -1841,9 +1857,10 @@ def __init__(self, dataobj, affine, header=None, :meth:`set_qform` methods can be used to update the codes after an image has been created - see those methods, and the :ref:`manual ` for more details. """ + ) def update_header(self): - """ Harmonize header with image data and affine + """Harmonize header with image data and affine See AnalyzeImage.update_header for more examples @@ -1863,7 +1880,7 @@ def update_header(self): hdr['magic'] = hdr.pair_magic def _affine2header(self): - """ Unconditionally set affine into the header """ + """Unconditionally set affine into the header""" hdr = self._header # Set affine into sform with default code hdr.set_sform(self._affine, code='aligned') @@ -1871,7 +1888,7 @@ def _affine2header(self): hdr.set_qform(self._affine, code='unknown') def get_qform(self, coded=False): - """ Return 4x4 affine matrix from qform parameters in header + """Return 4x4 affine matrix from qform parameters in header Parameters ---------- @@ -1897,7 +1914,7 @@ def get_qform(self, coded=False): return self._header.get_qform(coded) def set_qform(self, affine, code=None, strip_shears=True, **kwargs): - """ Set qform header values from 4x4 affine + """Set qform header values from 4x4 affine Parameters ---------- @@ -1958,7 +1975,7 @@ def set_qform(self, affine, code=None, strip_shears=True, **kwargs): self._affine[:] = self._header.get_best_affine() def get_sform(self, coded=False): - """ Return 4x4 affine matrix from sform parameters in header + """Return 4x4 affine matrix from sform parameters in header Parameters ---------- @@ -1984,7 +2001,7 @@ def get_sform(self, coded=False): return self._header.get_sform(coded) def set_sform(self, affine, code=None, **kwargs): - """ Set sform transform from 4x4 affine + """Set sform transform from 4x4 affine Parameters ---------- @@ -2047,7 +2064,7 @@ def set_sform(self, affine, code=None, **kwargs): self._affine[:] = self._header.get_best_affine() def set_data_dtype(self, datatype): - """ Set numpy dtype for data from code, dtype, type or alias + """Set numpy dtype for data from code, dtype, type or alias Using :py:class:`int` or ``"int"`` is disallowed, as these types will be interpreted as ``np.int64``, which is almost never desired. @@ -2147,7 +2164,7 @@ def set_data_dtype(self, datatype): super().set_data_dtype(datatype) def get_data_dtype(self, finalize=False): - """ Get numpy dtype for data + """Get numpy dtype for data If ``set_data_dtype()`` has been called with an alias and ``finalize`` is ``False``, return the alias. 
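[Editor's note] The int64 guard and the dtype handling above combine into a small user-facing workflow. A minimal sketch, assuming the nibabel version carrying this patch series: passing dtype= at construction silences the FutureWarning, and the 'smallest' alias defers the on-disk type choice until get_data_dtype(finalize=True) or serialization.

    import numpy as np
    import nibabel as nib

    data = np.arange(24, dtype=np.int64).reshape(2, 3, 4)
    # Explicit on-disk dtype avoids the int64 FutureWarning shown above
    img = nib.Nifti1Image(data, np.eye(4), dtype=np.int32)
    # 'smallest' is stored as an alias and only resolved on demand
    img.set_data_dtype('smallest')
    print(img.get_data_dtype(finalize=True))  # uint8 for values 0..23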
@@ -2163,22 +2180,24 @@ def get_data_dtype(self, finalize=False): datatype = None if self._dtype_alias == 'compat': datatype = _get_analyze_compat_dtype(self._dataobj) - descrip = "an Analyze-compatible dtype" + descrip = 'an Analyze-compatible dtype' elif self._dtype_alias == 'smallest': datatype = _get_smallest_dtype(self._dataobj) - descrip = "an integer type with fewer than 64 bits" + descrip = 'an integer type with fewer than 64 bits' else: - raise ValueError(f"Unknown dtype alias {self._dtype_alias}.") + raise ValueError(f'Unknown dtype alias {self._dtype_alias}.') if datatype is None: dt = get_obj_dtype(self._dataobj) - raise ValueError(f"Cannot automatically cast array (of type {dt}) to {descrip}." - " Please set_data_dtype() to an explicit data type.") + raise ValueError( + f'Cannot automatically cast array (of type {dt}) to {descrip}.' + ' Please set_data_dtype() to an explicit data type.' + ) self.set_data_dtype(datatype) # Clears the alias return super().get_data_dtype() def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Parameters ---------- @@ -2219,7 +2238,8 @@ def as_reoriented(self, ornt): # Also apply the transform to the dim_info fields new_dim = [ None if orig_dim is None else int(ornt[orig_dim, 0]) - for orig_dim in img.header.get_dim_info()] + for orig_dim in img.header.get_dim_info() + ] img.header.set_dim_info(*new_dim) @@ -2227,15 +2247,15 @@ def as_reoriented(self, ornt): class Nifti1Image(Nifti1Pair, SerializableImage): - """ Class for single file NIfTI1 format image - """ + """Class for single file NIfTI1 format image""" + header_class = Nifti1Header valid_exts = ('.nii',) files_types = (('image', '.nii'),) @staticmethod def _get_fileholders(file_map): - """ Return fileholder for header and image + """Return fileholder for header and image For single-file niftis, the fileholder for the header and the image will be the same @@ -2243,14 +2263,14 @@ def _get_fileholders(file_map): return file_map['image'], file_map['image'] def update_header(self): - """ Harmonize header with image data and affine """ + """Harmonize header with image data and affine""" super(Nifti1Image, self).update_header() hdr = self._header hdr['magic'] = hdr.single_magic def load(filename): - """ Load NIfTI1 single or pair from `filename` + """Load NIfTI1 single or pair from `filename` Parameters ---------- @@ -2277,7 +2297,7 @@ def load(filename): def save(img, filename): - """ Save NIfTI1 single or pair to `filename` + """Save NIfTI1 single or pair to `filename` Parameters ---------- @@ -2291,11 +2311,11 @@ def save(img, filename): def _get_smallest_dtype( - arr, - itypes=(np.uint8, np.int16, np.int32), - ftypes=(), - ): - """ Return the smallest "sensible" dtype that will hold the array data + arr, + itypes=(np.uint8, np.int16, np.int32), + ftypes=(), +): + """Return the smallest "sensible" dtype that will hold the array data The purpose of this function is to support automatic type selection for serialization, so "sensible" here means well-supported in the NIfTI-1 world. 
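[Editor's note] Since to_file_map() is where a pending alias is finalized, a short round trip shows the effect. A hedged sketch (the output path is illustrative, and int16 is already Analyze-compatible so 'compat' resolves to it unchanged):

    import numpy as np
    import nibabel as nib

    img = nib.Nifti1Image(np.zeros((2, 2, 2), dtype=np.int16), np.eye(4))
    img.set_data_dtype('compat')       # alias, resolved at write time
    nib.save(img, 'example.nii')       # nib.save() calls to_file_map()
    print(nib.load('example.nii').get_data_dtype())  # int16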
@@ -2351,7 +2371,7 @@ def _get_smallest_dtype( def _get_analyze_compat_dtype(arr): - """ Return an Analyze-compatible dtype that ``arr`` can be safely cast to + """Return an Analyze-compatible dtype that ``arr`` can be safely cast to Analyze-compatible types are returned without inspection: @@ -2424,5 +2444,5 @@ def _get_analyze_compat_dtype(arr): return np.dtype('float32') raise ValueError( - f"Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})" + f'Cannot find analyze-compatible dtype for array with dtype={dtype} (min={mn}, max={mx})' ) diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 10e789d076..9e8e597772 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to NIfTI2 image format +"""Read / write access to NIfTI2 image format Format described here: @@ -120,12 +120,13 @@ class Nifti2Header(Nifti1Header): - """ Class for NIfTI2 header + """Class for NIfTI2 header NIfTI2 is a slightly simplified variant of NIfTI1 which replaces 32-bit floats with 64-bit floats, and increases some integer widths to 32 or 64 bits. """ + template_dtype = header_dtype pair_vox_offset = 0 single_vox_offset = 544 @@ -141,7 +142,7 @@ class Nifti2Header(Nifti1Header): quaternion_threshold = -np.finfo(np.float64).eps * 3 def get_data_shape(self): - """ Get shape of data + """Get shape of data Examples -------- @@ -165,7 +166,7 @@ def get_data_shape(self): return AnalyzeHeader.get_data_shape(self) def set_data_shape(self, shape): - """ Set shape of data + """Set shape of data If ``ndims == len(shape)`` then we set zooms for dimensions higher than ``ndims`` to 1.0 @@ -184,7 +185,7 @@ def set_data_shape(self, shape): @classmethod def default_structarr(klass, endianness=None): - """ Create empty header binary block with given endianness """ + """Create empty header binary block with given endianness""" hdr_data = super(Nifti2Header, klass).default_structarr(endianness) hdr_data['eol_check'] = (13, 10, 26, 10) return hdr_data @@ -194,8 +195,7 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): # Add our own checks - return (super(Nifti2Header, klass)._get_checks() + - (klass._chk_eol_check,)) + return super(Nifti2Header, klass)._get_checks() + (klass._chk_eol_check,) @staticmethod def _chk_eol_check(hdr, fix=False): @@ -210,8 +210,9 @@ def _chk_eol_check(hdr, fix=False): rep.fix_msg = 'setting EOL check to 13, 10, 26, 10' return hdr, rep rep.problem_level = 40 - rep.problem_msg = ('EOL check not 0 or 13, 10, 26, 10; data may be ' - 'corrupted by EOL conversion') + rep.problem_msg = ( + 'EOL check not 0 or 13, 10, 26, 10; data may be ' 'corrupted by EOL conversion' + ) if fix: hdr['eol_check'] = (13, 10, 26, 10) rep.fix_msg = 'setting EOL check to 13, 10, 26, 10' @@ -222,34 +223,36 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) bs_hdr_struct = hdr_struct.byteswap() return 540 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr']) class Nifti2PairHeader(Nifti2Header): - """ Class for NIfTI2 pair header """ + """Class for NIfTI2 pair header""" + # Signal whether this is single (header + data) file is_single = False class 
Nifti2Pair(Nifti1Pair): - """ Class for NIfTI2 format image, header pair - """ + """Class for NIfTI2 format image, header pair""" + header_class = Nifti2PairHeader _meta_sniff_len = header_class.sizeof_hdr class Nifti2Image(Nifti1Image): - """ Class for single file NIfTI2 format image - """ + """Class for single file NIfTI2 format image""" + header_class = Nifti2Header _meta_sniff_len = header_class.sizeof_hdr def load(filename): - """ Load NIfTI2 single or pair image from `filename` + """Load NIfTI2 single or pair image from `filename` Parameters ---------- @@ -276,7 +279,7 @@ def load(filename): def save(img, filename): - """ Save NIfTI2 single or pair to `filename` + """Save NIfTI2 single or pair to `filename` Parameters ---------- diff --git a/nibabel/onetime.py b/nibabel/onetime.py index 6b8debc51b..8156b1a403 100644 --- a/nibabel/onetime.py +++ b/nibabel/onetime.py @@ -179,5 +179,7 @@ def auto_attr(func): # For backwards compatibility setattr_on_read = deprecate_with_version( - message="setattr_on_read has been renamed to auto_attr. Please use nibabel.onetime.auto_attr", - since="3.2", until="5.0")(auto_attr) + message='setattr_on_read has been renamed to auto_attr. Please use nibabel.onetime.auto_attr', + since='3.2', + until='5.0', +)(auto_attr) diff --git a/nibabel/openers.py b/nibabel/openers.py index b50da10c59..6338711cd7 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Context manager openers for various fileobject types +"""Context manager openers for various fileobject types """ from bz2 import BZ2File @@ -20,6 +20,7 @@ # is indexed_gzip present and modern? try: import indexed_gzip as igzip + version = igzip.__version__ HAVE_INDEXED_GZIP = True @@ -43,11 +44,12 @@ class DeterministicGzipFile(gzip.GzipFile): - """ Deterministic variant of GzipFile + """Deterministic variant of GzipFile This writer does not add filename information to the header, and defaults to a modification time (``mtime``) of 0 seconds. 
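[Editor's note] The determinism claim in this docstring is easy to exercise: with the filename field blanked and mtime pinned to 0, the same payload should compress to byte-identical output on every run. A small sketch:

    import io
    from nibabel.openers import DeterministicGzipFile

    def gz_bytes(payload):
        # Compress into memory so we can compare raw .gz bytes
        buf = io.BytesIO()
        with DeterministicGzipFile(fileobj=buf, mode='wb') as f:
            f.write(payload)
        return buf.getvalue()

    assert gz_bytes(b'hello') == gz_bytes(b'hello')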
""" + def __init__(self, filename=None, mode=None, compresslevel=9, fileobj=None, mtime=0): # These two guards are copied from # https://github.com/python/cpython/blob/6ab65c6/Lib/gzip.py#L171-L174 @@ -55,8 +57,9 @@ def __init__(self, filename=None, mode=None, compresslevel=9, fileobj=None, mtim mode += 'b' if fileobj is None: fileobj = self.myfileobj = open(filename, mode or 'rb') - return super().__init__(filename="", mode=mode, compresslevel=compresslevel, - fileobj=fileobj, mtime=mtime) + return super().__init__( + filename='', mode=mode, compresslevel=compresslevel, fileobj=fileobj, mtime=mtime + ) def _gzip_open(filename, mode='rb', compresslevel=9, mtime=0, keep_open=False): @@ -74,14 +77,13 @@ def _gzip_open(filename, mode='rb', compresslevel=9, mtime=0, keep_open=False): return gzip_file -def _zstd_open(filename, mode="r", *, level_or_option=None, zstd_dict=None): - pyzstd = optional_package("pyzstd")[0] - return pyzstd.ZstdFile(filename, mode, - level_or_option=level_or_option, zstd_dict=zstd_dict) +def _zstd_open(filename, mode='r', *, level_or_option=None, zstd_dict=None): + pyzstd = optional_package('pyzstd')[0] + return pyzstd.ZstdFile(filename, mode, level_or_option=level_or_option, zstd_dict=zstd_dict) class Opener: - r""" Class to accept, maybe open, and context-manage file-likes / filenames + r"""Class to accept, maybe open, and context-manage file-likes / filenames Provides context manager to close files that the constructor opened for you. @@ -107,15 +109,18 @@ class Opener: '.gz': gz_def, '.bz2': bz2_def, '.zst': zstd_def, - None: (open, ('mode', 'buffering')) # default + None: (open, ('mode', 'buffering')), # default } #: default compression level when writing gz and bz2 files default_compresslevel = 1 #: default option for zst files default_zst_compresslevel = 3 - default_level_or_option = {"rb": None, "r": None, - "wb": default_zst_compresslevel, - "w": default_zst_compresslevel} + default_level_or_option = { + 'rb': None, + 'r': None, + 'wb': default_zst_compresslevel, + 'w': default_zst_compresslevel, + } #: whether to ignore case looking for compression extensions compress_ext_icase = True @@ -165,8 +170,7 @@ def _get_opener_argnames(self, fileish): return self.compress_ext_map[None] def _is_fileobj(self, obj): - """ Is `obj` a file-like object? - """ + """Is `obj` a file-like object?""" return hasattr(obj, 'read') and hasattr(obj, 'write') @property @@ -175,7 +179,7 @@ def closed(self): @property def name(self): - """ Return ``self.fobj.name`` or self._name if not present + """Return ``self.fobj.name`` or self._name if not present self._name will be None if object was created with a fileobj, otherwise it will be the filename. @@ -211,8 +215,7 @@ def __iter__(self): return iter(self.fobj) def close_if_mine(self): - """ Close ``self.fobj`` iff we opened it in the constructor - """ + """Close ``self.fobj`` iff we opened it in the constructor""" if self.me_opened: self.close() @@ -224,7 +227,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class ImageOpener(Opener): - """ Opener-type class to collect extra compressed extensions + """Opener-type class to collect extra compressed extensions A trivial sub-class of opener to which image classes can add extra extensions with custom openers, such as compressed openers. @@ -241,5 +244,6 @@ class ImageOpener(Opener): that `function` accepts. These arguments must be any (unordered) subset of `mode`, `compresslevel`, and `buffering`. 
""" + # Add new extensions to this dictionary compress_ext_map = Opener.compress_ext_map.copy() diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 348cf1b995..090a73c366 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,4 +1,4 @@ -""" Routines to support optional packages """ +"""Routines to support optional packages""" from packaging.version import Version from .tripwire import TripWire @@ -14,7 +14,7 @@ def _check_pkg_version(pkg, min_version): def optional_package(name, trip_msg=None, min_version=None): - """ Return package-like thing and module setup for package `name` + """Return package-like thing and module setup for package `name` Parameters ---------- @@ -103,12 +103,14 @@ def optional_package(name, trip_msg=None, min_version=None): else: trip_msg = f'These functions need {name} version >= {min_version}' if trip_msg is None: - trip_msg = (f'We need package {name} for these functions, ' - f'but ``import {name}`` raised {exc}') + trip_msg = ( + f'We need package {name} for these functions, ' f'but ``import {name}`` raised {exc}' + ) pkg = TripWire(trip_msg) def setup_module(): import unittest + raise unittest.SkipTest(f'No {name} for these tests') return pkg, False, setup_module diff --git a/nibabel/orientations.py b/nibabel/orientations.py index fab106cab5..0adf19ca78 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utilities for calculating and applying affine orientations """ +"""Utilities for calculating and applying affine orientations""" import numpy as np @@ -20,7 +20,7 @@ class OrientationError(Exception): def io_orientation(affine, tol=None): - """ Orientation of input axes in terms of output axes for `affine` + """Orientation of input axes in terms of output axes for `affine` Valid for an affine transformation from ``p`` dimensions to ``q`` dimensions (``affine.shape == (q + 1, p + 1)``). @@ -66,7 +66,7 @@ def io_orientation(affine, tol=None): # Threshold the singular values to determine the rank. 
if tol is None: tol = S.max() * max(RS.shape) * np.finfo(S.dtype).eps - keep = (S > tol) + keep = S > tol R = np.dot(P[:, keep], Qs[keep]) # the matrix R is such that np.dot(R,R.T) is projection onto the # columns of P[:,keep] and np.dot(R.T,R) is projection onto the rows @@ -111,9 +111,9 @@ def ornt_transform(start_ornt, end_ornt): start_ornt = np.asarray(start_ornt) end_ornt = np.asarray(end_ornt) if start_ornt.shape != end_ornt.shape: - raise ValueError("The orientations must have the same shape") + raise ValueError('The orientations must have the same shape') if start_ornt.shape[1] != 2: - raise ValueError(f"Invalid shape for an orientation: {start_ornt.shape}") + raise ValueError(f'Invalid shape for an orientation: {start_ornt.shape}') result = np.empty_like(start_ornt) for end_in_idx, (end_out_idx, end_flip) in enumerate(end_ornt): for start_in_idx, (start_out_idx, start_flip) in enumerate(start_ornt): @@ -125,13 +125,12 @@ def ornt_transform(start_ornt, end_ornt): result[start_in_idx, :] = [end_in_idx, flip] break else: - raise ValueError("Unable to find out axis %d in start_ornt" % - end_out_idx) + raise ValueError('Unable to find out axis %d in start_ornt' % end_out_idx) return result def apply_orientation(arr, ornt): - """ Apply transformations implied by `ornt` to the first + """Apply transformations implied by `ornt` to the first n axes of the array `arr` Parameters @@ -155,12 +154,10 @@ def apply_orientation(arr, ornt): ornt = np.asarray(ornt) n = ornt.shape[0] if t_arr.ndim < n: - raise OrientationError('Data array has fewer dimensions than ' - 'orientation') + raise OrientationError('Data array has fewer dimensions than ' 'orientation') # no coordinates can be dropped for applying the orientations if np.any(np.isnan(ornt[:, 0])): - raise OrientationError('Cannot drop coordinates when ' - 'applying orientation to data') + raise OrientationError('Cannot drop coordinates when ' 'applying orientation to data') # apply ornt transformations for ax, flip in enumerate(ornt[:, 1]): if flip == -1: @@ -173,7 +170,7 @@ def apply_orientation(arr, ornt): def inv_ornt_aff(ornt, shape): - """ Affine transform reversing transforms implied in `ornt` + """Affine transform reversing transforms implied in `ornt` Imagine you have an array ``arr`` of shape `shape`, and you apply the transforms implied by `ornt` (more below), to get ``tarr``. @@ -211,7 +208,7 @@ def inv_ornt_aff(ornt, shape): """ ornt = np.asarray(ornt) if np.any(np.isnan(ornt)): - raise OrientationError("We cannot invert orientation transform") + raise OrientationError('We cannot invert orientation transform') p = ornt.shape[0] shape = np.array(shape)[:p] # ornt implies a flip, followed by a transpose. We need the affine @@ -228,12 +225,9 @@ def inv_ornt_aff(ornt, shape): return np.dot(undo_flip, undo_reorder) -@deprecate_with_version('flip_axis is deprecated. ' - 'Please use numpy.flip instead.', - '3.2', - '5.0') +@deprecate_with_version('flip_axis is deprecated. ' 'Please use numpy.flip instead.', '3.2', '5.0') def flip_axis(arr, axis=0): - """ Flip contents of `axis` in array `arr` + """Flip contents of `axis` in array `arr` Equivalent to ``np.flip(arr, axis)``. 
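[Editor's note] The helpers in these hunks compose into the usual "reorient to RAS" recipe, roughly what nibabel's as_closest_canonical does under the hood. A sketch:

    import numpy as np
    from nibabel.orientations import (apply_orientation, axcodes2ornt,
                                      inv_ornt_aff, io_orientation,
                                      ornt_transform)

    affine = np.diag([-2.0, 2.0, 2.0, 1.0])          # an LAS affine
    data = np.zeros((4, 5, 6))
    # Transform taking the current orientation to RAS
    ornt = ornt_transform(io_orientation(affine), axcodes2ornt('RAS'))
    data_ras = apply_orientation(data, ornt)         # flips the first axis here
    affine_ras = affine @ inv_ornt_aff(ornt, data.shape)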
@@ -252,7 +246,7 @@ def flip_axis(arr, axis=0): def ornt2axcodes(ornt, labels=None): - """ Convert orientation `ornt` to labels for axis directions + """Convert orientation `ornt` to labels for axis directions Parameters ---------- @@ -299,7 +293,7 @@ def ornt2axcodes(ornt, labels=None): def axcodes2ornt(axcodes, labels=None): - """ Convert axis codes `axcodes` to an orientation + """Convert axis codes `axcodes` to an orientation Parameters ---------- @@ -346,7 +340,7 @@ def axcodes2ornt(axcodes, labels=None): def aff2axcodes(aff, labels=None, tol=None): - """ axis direction codes for affine `aff` + """axis direction codes for affine `aff` Parameters ---------- diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 304c0c2cc0..c7d7a55617 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -139,27 +139,20 @@ from .openers import ImageOpener # PSL to RAS affine -PSL_TO_RAS = np.array([[0, 0, -1, 0], # L -> R - [-1, 0, 0, 0], # P -> A - [0, 1, 0, 0], # S -> S - [0, 0, 0, 1]]) +PSL_TO_RAS = np.array( + [[0, 0, -1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]] # L -> R # P -> A # S -> S +) # Acquisition (tra/sag/cor) to PSL axes # These come from looking at transverse, sagittal, coronal datasets where we # can see the LR, PA, SI orientation of the slice axes from the scanned object ACQ_TO_PSL = dict( - transverse=np.array([[0, 1, 0, 0], # P - [0, 0, 1, 0], # S - [1, 0, 0, 0], # L - [0, 0, 0, 1]]), + transverse=np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]), # P # S # L sagittal=np.diag([1, -1, -1, 1]), - coronal=np.array([[0, 0, 1, 0], # P - [0, -1, 0, 0], # S - [1, 0, 0, 0], # L - [0, 0, 0, 1]]) + coronal=np.array([[0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]), # P # S # L ) -DEG2RAD = np.pi / 180. +DEG2RAD = np.pi / 180.0 # General information dict definitions # assign props to PAR header entries @@ -218,46 +211,110 @@ image_def_dtds = {} image_def_dtds['V4'] = [ ('slice number', int), - ('echo number', int,), - ('dynamic scan number', int,), - ('cardiac phase number', int,), - ('image_type_mr', int,), - ('scanning sequence', int,), - ('index in REC file', int,), - ('image pixel size', int,), - ('scan percentage', int,), + ( + 'echo number', + int, + ), + ( + 'dynamic scan number', + int, + ), + ( + 'cardiac phase number', + int, + ), + ( + 'image_type_mr', + int, + ), + ( + 'scanning sequence', + int, + ), + ( + 'index in REC file', + int, + ), + ( + 'image pixel size', + int, + ), + ( + 'scan percentage', + int, + ), ('recon resolution', int, (2,)), ('rescale intercept', float), ('rescale slope', float), ('scale slope', float), # Window center, width recorded as integer but can be float - ('window center', float,), - ('window width', float,), + ( + 'window center', + float, + ), + ( + 'window width', + float, + ), ('image angulation', float, (3,)), ('image offcentre', float, (3,)), ('slice thickness', float), ('slice gap', float), - ('image_display_orientation', int,), - ('slice orientation', int,), - ('fmri_status_indication', int,), - ('image_type_ed_es', int,), + ( + 'image_display_orientation', + int, + ), + ( + 'slice orientation', + int, + ), + ( + 'fmri_status_indication', + int, + ), + ( + 'image_type_ed_es', + int, + ), ('pixel spacing', float, (2,)), ('echo_time', float), ('dyn_scan_begin_time', float), ('trigger_time', float), ('diffusion_b_factor', float), - ('number of averages', int,), + ( + 'number of averages', + int, + ), ('image_flip_angle', float), - ('cardiac frequency', int,), - ('minimum RR-interval', int,), - ('maximum 
RR-interval', int,), - ('TURBO factor', int,), - ('Inversion delay', float)] + ( + 'cardiac frequency', + int, + ), + ( + 'minimum RR-interval', + int, + ), + ( + 'maximum RR-interval', + int, + ), + ( + 'TURBO factor', + int, + ), + ('Inversion delay', float), +] # Extra image def fields for 4.1 compared to 4 image_def_dtds['V4.1'] = image_def_dtds['V4'] + [ - ('diffusion b value number', int,), # (imagekey!) - ('gradient orientation number', int,), # (imagekey!) + ( + 'diffusion b value number', + int, + ), # (imagekey!) + ( + 'gradient orientation number', + int, + ), # (imagekey!) ('contrast type', 'S30'), # XXX might be too short? ('diffusion anisotropy type', 'S30'), # XXX might be too short? ('diffusion', float, (3,)), @@ -265,7 +322,10 @@ # Extra image def fields for 4.2 compared to 4.1 image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ - ('label type', int,), # (imagekey!) + ( + 'label type', + int, + ), # (imagekey!) ] #: PAR header versions we claim to understand @@ -275,10 +335,9 @@ image_def_dtype = np.dtype(image_def_dtds['V4.2']) #: slice orientation codes -slice_orientation_codes = Recoder(( # code, label - (1, 'transverse'), - (2, 'sagittal'), - (3, 'coronal')), fields=('code', 'label')) +slice_orientation_codes = Recoder( + ((1, 'transverse'), (2, 'sagittal'), (3, 'coronal')), fields=('code', 'label') # code, label +) class PARRECError(Exception): @@ -290,11 +349,11 @@ class PARRECError(Exception): # Value after colon may be absent -GEN_RE = re.compile(r".\s+(.*?)\s*:\s*(.*)") +GEN_RE = re.compile(r'.\s+(.*?)\s*:\s*(.*)') def _split_header(fobj): - """ Split header into `version`, `gen_dict`, `image_lines` """ + """Split header into `version`, `gen_dict`, `image_lines`""" version = None gen_dict = {} image_lines = [] @@ -326,8 +385,7 @@ def _split_header(fobj): def _process_gen_dict(gen_dict): - """ Process `gen_dict` key, values into `general_info` - """ + """Process `gen_dict` key, values into `general_info`""" general_info = {} for key, value in gen_dict.items(): # get props for this hdr field @@ -347,8 +405,7 @@ def _process_gen_dict(gen_dict): def _process_image_lines(image_lines, version): - """ Process image information definition lines according to `version` - """ + """Process image information definition lines according to `version`""" # postproc image def props image_def_dtd = image_def_dtds[version] # create an array for all image defs @@ -368,7 +425,7 @@ def _process_image_lines(image_lines, version): elif len(props) == 3: name, np_type, shape = props nelements = np.prod(shape) - value = items[item_counter:item_counter + nelements] + value = items[item_counter : item_counter + nelements] value = [np_type(v) for v in value] item_counter += nelements image_defs[name][i] = value @@ -376,7 +433,7 @@ def _process_image_lines(image_lines, version): def vol_numbers(slice_nos): - """ Calculate volume numbers inferred from slice numbers `slice_nos` + """Calculate volume numbers inferred from slice numbers `slice_nos` The volume number for each slice is the number of times this slice number has occurred previously in the `slice_nos` sequence @@ -402,7 +459,7 @@ def vol_numbers(slice_nos): def vol_is_full(slice_nos, slice_max, slice_min=1): - """ Vector with True for slices in complete volume, False otherwise + """Vector with True for slices in complete volume, False otherwise Parameters ---------- @@ -440,10 +497,11 @@ def vol_is_full(slice_nos, slice_max, slice_min=1): def _truncation_checks(general_info, image_defs, permit_truncated): - """ Check for presence of 
truncation in PAR file parameters + """Check for presence of truncation in PAR file parameters Raise error if truncation present and `permit_truncated` is False. """ + def _err_or_warn(msg): if not permit_truncated: raise PARRECError(msg) @@ -457,8 +515,9 @@ def _chk_trunc(idef_name, gdef_max_name): n_expected = general_info[gdef_max_name] if n_have != n_expected: _err_or_warn( - f"Header inconsistency: Found {n_have} {idef_name} " - f"values, but expected {n_expected}") + f'Header inconsistency: Found {n_have} {idef_name} ' + f'values, but expected {n_expected}' + ) _chk_trunc('slice', 'max_slices') _chk_trunc('echo', 'max_echoes') @@ -467,13 +526,12 @@ def _chk_trunc(idef_name, gdef_max_name): _chk_trunc('gradient orientation', 'max_gradient_orient') # Final check for partial volumes - if not np.all(vol_is_full(image_defs['slice number'], - general_info['max_slices'])): - _err_or_warn("Found one or more partial volume(s)") + if not np.all(vol_is_full(image_defs['slice number'], general_info['max_slices'])): + _err_or_warn('Found one or more partial volume(s)') def one_line(long_str): - """ Make maybe mutli-line `long_str` into one long line """ + """Make maybe mutli-line `long_str` into one long line""" return ' '.join(line.strip() for line in long_str.splitlines()) @@ -496,18 +554,22 @@ def parse_PAR_header(fobj): # single pass through the header version, gen_dict, image_lines = _split_header(fobj) if version not in supported_versions: - warnings.warn(one_line( - f""" PAR/REC version '{version}' is currently not supported -- making an + warnings.warn( + one_line( + f""" PAR/REC version '{version}' is currently not supported -- making an attempt to read nevertheless. Please email the NiBabel mailing list, if you are interested in adding support for this version. - """)) + """ + ) + ) general_info = _process_gen_dict(gen_dict) image_defs = _process_image_lines(image_lines, version) return general_info, image_defs -def _data_from_rec(rec_fileobj, in_shape, dtype, slice_indices, out_shape, - scalings=None, mmap=True): +def _data_from_rec( + rec_fileobj, in_shape, dtype, slice_indices, out_shape, scalings=None, mmap=True +): """Load and return array data from REC file Parameters @@ -564,10 +626,8 @@ def exts2pars(exts_source): element contains a PARRECHeader read from the contained extensions. 
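[Editor's note] For reference, the two helpers that drive the partial-volume check above read as follows in use (a sketch; the slice numbers are invented):

    from nibabel.parrec import vol_is_full, vol_numbers

    slice_nos = [1, 2, 3, 1, 2, 3, 1, 2]        # third volume truncated
    print(vol_numbers(slice_nos))               # [0, 0, 0, 1, 1, 1, 2, 2]
    print(vol_is_full(slice_nos, slice_max=3))  # last two entries are False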
""" headers = [] - exts_source = (exts_source.header if hasattr(exts_source, 'header') - else exts_source) - exts_source = (exts_source.extensions if hasattr(exts_source, 'extensions') - else exts_source) + exts_source = exts_source.header if hasattr(exts_source, 'header') else exts_source + exts_source = exts_source.extensions if hasattr(exts_source, 'extensions') else exts_source for extension in exts_source: content = extension.get_content() content = content.decode(getpreferredencoding(False)) @@ -579,9 +639,8 @@ def exts2pars(exts_source): class PARRECArrayProxy: - def __init__(self, file_like, header, *, mmap=True, scaling='dv'): - """ Initialize PARREC array proxy + """Initialize PARREC array proxy Parameters ---------- @@ -663,16 +722,15 @@ def _get_scaled(self, dtype, slicer): # Slice scaling to give output shape return raw_data * slopes[slicer].astype(final_type) + inters[slicer].astype(final_type) - def get_unscaled(self): - """ Read data from file + """Read data from file This is an optional part of the proxy API """ return self._get_unscaled(slicer=()) def __array__(self, dtype=None): - """ Read data from file and apply scaling, casting to ``dtype`` + """Read data from file and apply scaling, casting to ``dtype`` If ``dtype`` is unspecified, the dtype of the returned array is the narrowest dtype that can represent the data without overflow. @@ -700,8 +758,7 @@ def __getitem__(self, slicer): class PARRECHeader(SpatialHeader): """PAR/REC header""" - def __init__(self, info, image_defs, permit_truncated=False, - strict_sort=False): + def __init__(self, info, image_defs, permit_truncated=False, strict_sort=False): """ Parameters ---------- @@ -730,13 +787,15 @@ def __init__(self, info, image_defs, permit_truncated=False, # dtype bitpix = self._get_unique_image_prop('image pixel size') if bitpix not in (8, 16): - raise PARRECError(f'Only 8- and 16-bit data supported (not {bitpix}) ' - 'please report this to the nibabel developers') + raise PARRECError( + f'Only 8- and 16-bit data supported (not {bitpix}) ' + 'please report this to the nibabel developers' + ) # REC data always little endian dt = np.dtype('uint' + str(bitpix)).newbyteorder('<') - super(PARRECHeader, self).__init__(data_dtype=dt, - shape=self._calc_data_shape(), - zooms=self._calc_zooms()) + super(PARRECHeader, self).__init__( + data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() + ) @classmethod def from_header(klass, header=None): @@ -744,20 +803,20 @@ def from_header(klass, header=None): raise PARRECError('Cannot create PARRECHeader from air.') if type(header) == klass: return header.copy() - raise PARRECError('Cannot create PARREC header from ' - 'non-PARREC header.') + raise PARRECError('Cannot create PARREC header from ' 'non-PARREC header.') @classmethod - def from_fileobj(klass, fileobj, permit_truncated=False, - strict_sort=False): + def from_fileobj(klass, fileobj, permit_truncated=False, strict_sort=False): info, image_defs = parse_PAR_header(fileobj) return klass(info, image_defs, permit_truncated, strict_sort) def copy(self): - return PARRECHeader(deepcopy(self.general_info), - self.image_defs.copy(), - self.permit_truncated, - self.strict_sort) + return PARRECHeader( + deepcopy(self.general_info), + self.image_defs.copy(), + self.permit_truncated, + self.strict_sort, + ) def as_analyze_map(self): """Convert PAR parameters to NIFTI1 format""" @@ -765,12 +824,15 @@ def as_analyze_map(self): # the NIfTI1 header, specifically in nifti1.py `header_dtd` defs. 
# Here we set the parameters we can to simplify PAR/REC # to NIfTI conversion. - descr = (f"{self.general_info['exam_name']};" - f"{self.general_info['patient_name']};" - f"{self.general_info['exam_date'].replace(' ', '')};" - f"{self.general_info['protocol_name']}" - )[:80] # max len - is_fmri = (self.general_info['max_dynamics'] > 1) + descr = ( + f"{self.general_info['exam_name']};" + f"{self.general_info['patient_name']};" + f"{self.general_info['exam_date'].replace(' ', '')};" + f"{self.general_info['protocol_name']}" + )[ + :80 + ] # max len + is_fmri = self.general_info['max_dynamics'] > 1 # PAR/REC uses msec, but in _calc_zooms we convert to sec t = 'sec' if is_fmri else 'unknown' xyzt_units = unit_codes['mm'] + unit_codes[t] @@ -821,14 +883,14 @@ def get_bvals_bvecs(self): else: n_slices, n_vols = self.get_data_shape()[-2:] bvals = self.image_defs['diffusion_b_factor'][reorder].reshape( - (n_slices, n_vols), order='F') + (n_slices, n_vols), order='F' + ) # All bvals within volume should be the same assert not np.any(np.diff(bvals, axis=0)) bvals = bvals[0] if 'diffusion' not in self.image_defs.dtype.names: return bvals, None - bvecs = self.image_defs['diffusion'][reorder].reshape( - (n_slices, n_vols, 3), order='F') + bvecs = self.image_defs['diffusion'][reorder].reshape((n_slices, n_vols, 3), order='F') # All 3 values of bvecs should be same within volume assert not np.any(np.diff(bvecs, axis=0)) bvecs = bvecs[0] @@ -838,12 +900,12 @@ def get_bvals_bvecs(self): return bvals, bvecs def get_def(self, name): - """Return a single image definition field (or None if missing) """ + """Return a single image definition field (or None if missing)""" idef = self.image_defs return idef[name] if name in idef.dtype.names else None def _get_unique_image_prop(self, name): - """ Scan image definitions and return unique value of a property. + """Scan image definitions and return unique value of a property. * Get array for named field of ``self.image_defs``; * Check that all rows in the array are the same and raise error @@ -866,18 +928,19 @@ def _get_unique_image_prop(self, name): """ props = self.image_defs[name] if np.any(np.diff(props, axis=0)): - raise PARRECError(f'Varying {name} in image sequence ' - f'({props}). This is not supported.') + raise PARRECError( + f'Varying {name} in image sequence ' f'({props}). This is not supported.' + ) return props[0] def get_data_offset(self): - """ PAR header always has 0 data offset (into REC file) """ + """PAR header always has 0 data offset (into REC file)""" return 0 def set_data_offset(self, offset): - """ PAR header always has 0 data offset (into REC file) """ + """PAR header always has 0 data offset (into REC file)""" if offset != 0: - raise PARRECError("PAR header assumes offset 0") + raise PARRECError('PAR header assumes offset 0') def _calc_zooms(self): """Compute image zooms from header data. @@ -906,8 +969,8 @@ def _calc_zooms(self): # If 4D dynamic scan, convert time from milliseconds to seconds if len(zooms) > 3 and self.general_info['dyn_scan']: if len(self.general_info['repetition_time']) > 1: - warnings.warn("multiple TRs found in .PAR file") - zooms[3] = self.general_info['repetition_time'][0] / 1000. 
+ warnings.warn('multiple TRs found in .PAR file') + zooms[3] = self.general_info['repetition_time'][0] / 1000.0 return zooms def get_affine(self, origin='scanner'): @@ -948,13 +1011,12 @@ def get_affine(self, origin='scanner'): """ # shape, zooms in original data ordering (ijk ordering) ijk_shape = np.array(self.get_data_shape()[:3]) - to_center = from_matvec(np.eye(3), -(ijk_shape - 1) / 2.) + to_center = from_matvec(np.eye(3), -(ijk_shape - 1) / 2.0) zoomer = np.diag(list(self.get_zooms()[:3]) + [1]) slice_orientation = self.get_slice_orientation() permute_to_psl = ACQ_TO_PSL.get(slice_orientation) if permute_to_psl is None: - raise PARRECError( - f"Unknown slice orientation ({slice_orientation}).") + raise PARRECError(f'Unknown slice orientation ({slice_orientation}).') # hdr has deg, we need radians # Order is [ap, fh, rl] ap_rot, fh_rot, rl_rot = self.general_info['angulation'] * DEG2RAD @@ -974,18 +1036,18 @@ def get_affine(self, origin='scanner'): return np.dot(PSL_TO_RAS, psl_aff) def _get_n_slices(self): - """ Get number of slices for output data """ + """Get number of slices for output data""" return len(set(self.image_defs['slice number'])) def _get_n_vols(self): - """ Get number of volumes for output data """ + """Get number of volumes for output data""" slice_nos = self.image_defs['slice number'] vol_nos = vol_numbers(slice_nos) is_full = vol_is_full(slice_nos, self.general_info['max_slices']) return len(set(np.array(vol_nos)[is_full])) def _calc_data_shape(self): - """ Calculate the output shape of the image data + """Calculate the output shape of the image data Returns length 3 tuple for 3D image, length 4 tuple for 4D. @@ -1010,7 +1072,7 @@ def _calc_data_shape(self): n_vols = self._get_n_vols() return shape + (n_vols,) if n_vols > 1 else shape - def get_data_scaling(self, method="dv"): + def get_data_scaling(self, method='dv'): """Returns scaling slope and intercept. Parameters @@ -1074,7 +1136,7 @@ def get_rec_shape(self): return inplane_shape + (len(self.image_defs),) def _strict_sort_order(self): - """ Determine the sort order based on several image definition fields. + """Determine the sort order based on several image definition fields. 
The fields taken into consideration, if present, are (in order from slowest to fastest variation after sorting): @@ -1112,8 +1174,7 @@ def _strict_sort_order(self): image_type = idefs['image_type_mr'] # sort keys only present in a subset of .PAR files - asl_keys = ((idefs['label type'], ) if 'label type' in - idefs.dtype.names else ()) + asl_keys = (idefs['label type'],) if 'label type' in idefs.dtype.names else () if self.general_info['diffusion'] != 0: bvals = self.get_def('diffusion b value number') if bvals is None: @@ -1121,22 +1182,20 @@ def _strict_sort_order(self): bvecs = self.get_def('gradient orientation number') if bvecs is None: # no b-vectors available - diffusion_keys = (bvals, ) + diffusion_keys = (bvals,) else: diffusion_keys = (bvecs, bvals) else: diffusion_keys = () # initial sort (last key is highest precedence) - keys = (slice_nos, echos, phases) + \ - diffusion_keys + asl_keys + (dynamics, image_type) + keys = (slice_nos, echos, phases) + diffusion_keys + asl_keys + (dynamics, image_type) initial_sort_order = np.lexsort(keys) # sequentially number the volumes based on the initial sort vol_nos = vol_numbers(slice_nos[initial_sort_order]) # identify truncated volumes - is_full = vol_is_full(slice_nos[initial_sort_order], - self.general_info['max_slices']) + is_full = vol_is_full(slice_nos[initial_sort_order], self.general_info['max_slices']) # second stage of sorting return initial_sort_order[np.lexsort((vol_nos, is_full))] @@ -1182,7 +1241,7 @@ def get_sorted_slice_indices(self): return sort_order[:n_used] def get_volume_labels(self): - """ Dynamic labels corresponding to the final data dimension(s). + """Dynamic labels corresponding to the final data dimension(s). This is useful for custom data sorting. A subset of the info in ``self.image_defs`` is returned in an order that matches the final @@ -1200,18 +1259,19 @@ def get_volume_labels(self): image_defs = self.image_defs # define which keys which might vary across image volumes - dynamic_keys = ['cardiac phase number', - 'echo number', - 'label type', - 'image_type_mr', - 'dynamic scan number', - 'scanning sequence', - 'gradient orientation number', - 'diffusion b value number'] + dynamic_keys = [ + 'cardiac phase number', + 'echo number', + 'label type', + 'image_type_mr', + 'dynamic scan number', + 'scanning sequence', + 'gradient orientation number', + 'diffusion b value number', + ] # remove dynamic keys that may not be present in older .PAR versions - dynamic_keys = [d for d in dynamic_keys if d in - image_defs.dtype.fields] + dynamic_keys = [d for d in dynamic_keys if d in image_defs.dtype.fields] non_unique_keys = [] for key in dynamic_keys: @@ -1219,7 +1279,7 @@ def get_volume_labels(self): if ndim == 1: num_unique = len(np.unique(image_defs[key])) else: - raise ValueError("unexpected image_defs shape > 1D") + raise ValueError('unexpected image_defs shape > 1D') if num_unique > 1: non_unique_keys.append(key) @@ -1235,6 +1295,7 @@ def get_volume_labels(self): class PARRECImage(SpatialImage): """PAR/REC image""" + header_class = PARRECHeader valid_exts = ('.rec', '.par') files_types = (('image', '.rec'), ('header', '.par')) @@ -1245,9 +1306,10 @@ class PARRECImage(SpatialImage): ImageArrayProxy = PARRECArrayProxy @classmethod - def from_file_map(klass, file_map, *, mmap=True, permit_truncated=False, - scaling='dv', strict_sort=False): - """ Create PARREC image from file map `file_map` + def from_file_map( + klass, file_map, *, mmap=True, permit_truncated=False, scaling='dv', strict_sort=False + ): + """Create 
PARREC image from file map `file_map` Parameters ---------- @@ -1275,19 +1337,17 @@ def from_file_map(klass, file_map, *, mmap=True, permit_truncated=False, """ with file_map['header'].get_prepare_fileobj('rt') as hdr_fobj: hdr = klass.header_class.from_fileobj( - hdr_fobj, - permit_truncated=permit_truncated, - strict_sort=strict_sort) + hdr_fobj, permit_truncated=permit_truncated, strict_sort=strict_sort + ) rec_fobj = file_map['image'].get_prepare_fileobj() - data = klass.ImageArrayProxy(rec_fobj, hdr, - mmap=mmap, scaling=scaling) - return klass(data, hdr.get_affine(), header=hdr, extra=None, - file_map=file_map) + data = klass.ImageArrayProxy(rec_fobj, hdr, mmap=mmap, scaling=scaling) + return klass(data, hdr.get_affine(), header=hdr, extra=None, file_map=file_map) @classmethod - def from_filename(klass, filename, *, mmap=True, permit_truncated=False, - scaling='dv', strict_sort=False): - """ Create PARREC image from filename `filename` + def from_filename( + klass, filename, *, mmap=True, permit_truncated=False, scaling='dv', strict_sort=False + ): + """Create PARREC image from filename `filename` Parameters ---------- @@ -1313,11 +1373,13 @@ def from_filename(klass, filename, *, mmap=True, permit_truncated=False, the slices appear in the .PAR file. """ file_map = klass.filespec_to_file_map(filename) - return klass.from_file_map(file_map, - mmap=mmap, - permit_truncated=permit_truncated, - scaling=scaling, - strict_sort=strict_sort) + return klass.from_file_map( + file_map, + mmap=mmap, + permit_truncated=permit_truncated, + scaling=scaling, + strict_sort=strict_sort, + ) load = from_filename diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index e28cc6e28d..4d0257f4d6 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -6,13 +6,12 @@ def _cmp(a, b): - """ Implementation of ``cmp`` for Python 3 - """ + """Implementation of ``cmp`` for Python 3""" return (a > b) - (a < b) def cmp_pkg_version(version_str, pkg_version_str=__version__): - """ Compare ``version_str`` to current package version + """Compare ``version_str`` to current package version This comparator follows `PEP-440`_ conventions for determining version ordering. @@ -63,7 +62,7 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__): def pkg_commit_hash(pkg_path=None): - """ Get short form of commit hash + """Get short form of commit hash Versioneer placed a ``_version.py`` file in the package directory. This file gets updated on installation or ``git archive``. @@ -98,7 +97,7 @@ def pkg_commit_hash(pkg_path=None): def get_pkg_info(pkg_path): - """ Return dict describing the context of this package + """Return dict describing the context of this package Parameters ---------- @@ -112,6 +111,7 @@ def get_pkg_info(pkg_path): """ src, hsh = pkg_commit_hash() import numpy + return dict( pkg_path=pkg_path, commit_source=src, @@ -119,4 +119,5 @@ def get_pkg_info(pkg_path): sys_version=sys.version, sys_executable=sys.executable, sys_platform=sys.platform, - np_version=numpy.__version__) + np_version=numpy.__version__, + ) diff --git a/nibabel/processing.py b/nibabel/processing.py index b7abfb8c75..336e9b40f1 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Image processing functions for: +"""Image processing functions for: * smoothing * resampling @@ -19,6 +19,7 @@ import numpy.linalg as npl from .optpkg import optional_package + spnd, _, _ = optional_package('scipy.ndimage') from .affines import AffineError, to_matvec, from_matvec, append_diag, rescale_affine @@ -31,7 +32,7 @@ def fwhm2sigma(fwhm): - """ Convert a FWHM value to sigma in a Gaussian kernel. + """Convert a FWHM value to sigma in a Gaussian kernel. Parameters ---------- @@ -54,7 +55,7 @@ def fwhm2sigma(fwhm): def sigma2fwhm(sigma): - """ Convert a sigma in a Gaussian kernel to a FWHM value + """Convert a sigma in a Gaussian kernel to a FWHM value Parameters ---------- @@ -77,7 +78,7 @@ def sigma2fwhm(sigma): def adapt_affine(affine, n_dim): - """ Adapt input / output dimensions of spatial `affine` for `n_dims` + """Adapt input / output dimensions of spatial `affine` for `n_dims` Adapts a spatial (4, 4) affine that is being applied to an image with fewer than 3 spatial dimensions, or more than 3 dimensions. If there are more @@ -112,13 +113,10 @@ def adapt_affine(affine, n_dim): return adapted -def resample_from_to(from_img, - to_vox_map, - order=3, - mode='constant', - cval=0., - out_class=Nifti1Image): - """ Resample image `from_img` to mapped voxel space `to_vox_map` +def resample_from_to( + from_img, to_vox_map, order=3, mode='constant', cval=0.0, out_class=Nifti1Image +): + """Resample image `from_img` to mapped voxel space `to_vox_map` Resample using N-d spline interpolation. @@ -156,8 +154,9 @@ def resample_from_to(from_img, """ # This check requires `shape` attribute of image if not spatial_axes_first(from_img): - raise ValueError('Cannot predict position of spatial axes for Image ' - 'type ' + str(type(from_img))) + raise ValueError( + 'Cannot predict position of spatial axes for Image ' 'type ' + str(type(from_img)) + ) try: to_shape, to_affine = to_vox_map.shape, to_vox_map.affine except AttributeError: @@ -171,23 +170,16 @@ def resample_from_to(from_img, a_from_affine = adapt_affine(from_img.affine, from_n_dim) to_vox2from_vox = npl.inv(a_from_affine).dot(a_to_affine) rzs, trans = to_matvec(to_vox2from_vox) - data = spnd.affine_transform(from_img.dataobj, - rzs, - trans, - to_shape, - order=order, - mode=mode, - cval=cval) + data = spnd.affine_transform( + from_img.dataobj, rzs, trans, to_shape, order=order, mode=mode, cval=cval + ) return out_class(data, to_affine, from_img.header) -def resample_to_output(in_img, - voxel_sizes=None, - order=3, - mode='constant', - cval=0., - out_class=Nifti1Image): - """ Resample image `in_img` to output voxel axes (world space) +def resample_to_output( + in_img, voxel_sizes=None, order=3, mode='constant', cval=0.0, out_class=Nifti1Image +): + """Resample image `in_img` to output voxel axes (world space) Parameters ---------- @@ -243,12 +235,8 @@ def resample_to_output(in_img, return resample_from_to(in_img, out_vox_map, order, mode, cval, out_class) -def smooth_image(img, - fwhm, - mode='nearest', - cval=0., - out_class=Nifti1Image): - """ Smooth image `img` along voxel axes by FWHM `fwhm` millimeters +def smooth_image(img, fwhm, mode='nearest', cval=0.0, out_class=Nifti1Image): + """Smooth image `img` along voxel axes by FWHM `fwhm` millimeters Parameters ---------- @@ -287,8 +275,9 @@ def smooth_image(img, """ # This check requires `shape` attribute of image if not spatial_axes_first(img): - raise ValueError('Cannot predict position of spatial 
axes for Image ' - 'type ' + str(type(img))) + raise ValueError( + 'Cannot predict position of spatial axes for Image ' 'type ' + str(type(img)) + ) if out_class is None: out_class = img.__class__ n_dim = len(img.shape) @@ -301,26 +290,25 @@ def smooth_image(img, fwhm[:3] = fwhm_scalar # Voxel sizes RZS = img.affine[:, :n_dim] - vox = np.sqrt(np.sum(RZS ** 2, 0)) + vox = np.sqrt(np.sum(RZS**2, 0)) # Smoothing in terms of voxels vox_fwhm = fwhm / vox vox_sd = fwhm2sigma(vox_fwhm) # Do the smoothing - sm_data = spnd.gaussian_filter(img.dataobj, - vox_sd, - mode=mode, - cval=cval) + sm_data = spnd.gaussian_filter(img.dataobj, vox_sd, mode=mode, cval=cval) return out_class(sm_data, img.affine, img.header) -def conform(from_img, - out_shape=(256, 256, 256), - voxel_size=(1.0, 1.0, 1.0), - order=3, - cval=0.0, - orientation='RAS', - out_class=None): - """ Resample image to ``out_shape`` with voxels of size ``voxel_size``. +def conform( + from_img, + out_shape=(256, 256, 256), + voxel_size=(1.0, 1.0, 1.0), + order=3, + cval=0.0, + orientation='RAS', + out_class=None, +): + """Resample image to ``out_shape`` with voxels of size ``voxel_size``. Using the default arguments, this function is meant to replicate most parts of FreeSurfer's ``mri_convert --conform`` command. Specifically, this @@ -367,11 +355,11 @@ def conform(from_img, # are written. required_ndim = 3 if from_img.ndim != required_ndim: - raise ValueError("Only 3D images are supported.") + raise ValueError('Only 3D images are supported.') elif len(out_shape) != required_ndim: - raise ValueError(f"`out_shape` must have {required_ndim} values") + raise ValueError(f'`out_shape` must have {required_ndim} values') elif len(voxel_size) != required_ndim: - raise ValueError(f"`voxel_size` must have {required_ndim} values") + raise ValueError(f'`voxel_size` must have {required_ndim} values') start_ornt = io_orientation(from_img.affine) end_ornt = axcodes2ornt(orientation) @@ -384,7 +372,12 @@ def conform(from_img, # Resample input image. out_img = resample_from_to( - from_img=from_img, to_vox_map=(out_shape, out_aff), order=order, mode="constant", - cval=cval, out_class=out_class) + from_img=from_img, + to_vox_map=(out_shape, out_aff), + order=order, + mode='constant', + cval=cval, + out_class=out_class, + ) return out_img diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index 5f827e2bbf..a58c2fdba9 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -1,4 +1,4 @@ -""" Adapter module for working with pydicom < 1.0 and >= 1.0 +"""Adapter module for working with pydicom < 1.0 and >= 1.0 In what follows, "dicom is available" means we can import either a) ``dicom`` (pydicom < 1.0) or or b) ``pydicom`` (pydicom >= 1.0). 
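As a check on the smoothing hunks above: `fwhm2sigma` encodes sigma = FWHM / sqrt(8 ln 2), so the two converters round-trip. A minimal sketch (the 6 mm value is illustrative):

    import numpy as np
    from nibabel.processing import fwhm2sigma, sigma2fwhm

    fwhm = 6.0                                  # mm, illustrative
    sigma = fwhm2sigma(fwhm)                    # 6 / sqrt(8 ln 2) ~= 2.548
    assert np.isclose(sigma, fwhm / np.sqrt(8 * np.log(2)))
    assert np.isclose(sigma2fwhm(sigma), fwhm)  # inverse recovers the input
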
@@ -35,6 +35,7 @@ else: # pydicom module available from pydicom.dicomio import read_file from pydicom.sequence import Sequence + # Values not imported by default import pydicom.values @@ -42,9 +43,11 @@ tag_for_keyword = pydicom.datadict.tag_for_keyword -@deprecate_with_version("dicom_test has been moved to nibabel.nicom.tests", - since="3.1", until="5.0") +@deprecate_with_version( + 'dicom_test has been moved to nibabel.nicom.tests', since='3.1', until='5.0' +) def dicom_test(func): # Import locally to avoid circular dependency from .nicom.tests import dicom_test + return dicom_test(func) diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 1b8e8b0454..7ae9a3c63a 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -33,7 +33,7 @@ def fillpositive(xyz, w2_thresh=None): - """ Compute unit quaternion from last 3 values + """Compute unit quaternion from last 3 values Parameters ---------- @@ -104,7 +104,7 @@ def fillpositive(xyz, w2_thresh=None): def quat2mat(q): - """ Calculate rotation matrix corresponding to quaternion + """Calculate rotation matrix corresponding to quaternion Parameters ---------- @@ -147,13 +147,17 @@ def quat2mat(q): wX, wY, wZ = w * X, w * Y, w * Z xX, xY, xZ = x * X, x * Y, x * Z yY, yZ, zZ = y * Y, y * Z, z * Z - return np.array([[1.0 - (yY + zZ), xY - wZ, xZ + wY], - [xY + wZ, 1.0 - (xX + zZ), yZ - wX], - [xZ - wY, yZ + wX, 1.0 - (xX + yY)]]) + return np.array( + [ + [1.0 - (yY + zZ), xY - wZ, xZ + wY], + [xY + wZ, 1.0 - (xX + zZ), yZ - wX], + [xZ - wY, yZ + wX, 1.0 - (xX + yY)], + ] + ) def mat2quat(M): - """ Calculate quaternion corresponding to given rotation matrix + """Calculate quaternion corresponding to given rotation matrix Parameters ---------- @@ -201,12 +205,17 @@ def mat2quat(M): # M[0,1]. The notation is from the Wikipedia article. 
Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = M.flat # Fill only lower half of symmetric matrix - K = np.array([ - [Qxx - Qyy - Qzz, 0, 0, 0], - [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0], - [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0], - [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz]] - ) / 3.0 + K = ( + np.array( + [ + [Qxx - Qyy - Qzz, 0, 0, 0], + [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0], + [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0], + [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz], + ] + ) + / 3.0 + ) # Use Hermitian eigenvectors, values for speed vals, vecs = np.linalg.eigh(K) # Select largest eigenvector, reorder to w,x,y,z quaternion @@ -219,7 +228,7 @@ def mat2quat(M): def mult(q1, q2): - """ Multiply two quaternions + """Multiply two quaternions Parameters ---------- @@ -244,7 +253,7 @@ def mult(q1, q2): def conjugate(q): - """ Conjugate of quaternion + """Conjugate of quaternion Parameters ---------- @@ -260,7 +269,7 @@ def conjugate(q): def norm(q): - """ Return norm of quaternion + """Return norm of quaternion Parameters ---------- @@ -276,12 +285,12 @@ def norm(q): def isunit(q): - """ Return True is this is very nearly a unit quaternion """ + """Return True is this is very nearly a unit quaternion""" return np.allclose(norm(q), 1) def inverse(q): - """ Return multiplicative inverse of quaternion `q` + """Return multiplicative inverse of quaternion `q` Parameters ---------- @@ -297,12 +306,12 @@ def inverse(q): def eye(): - """ Return identity quaternion """ + """Return identity quaternion""" return np.array([1.0, 0, 0, 0]) def rotate_vector(v, q): - """ Apply transformation in quaternion `q` to vector `v` + """Apply transformation in quaternion `q` to vector `v` Parameters ---------- @@ -328,7 +337,7 @@ def rotate_vector(v, q): def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): - """ Returns True if `q1` and `q2` give near equivalent transforms + """Returns True if `q1` and `q2` give near equivalent transforms `q1` may be nearly numerically equal to `q2`, or nearly equal to `q2` * -1 (because a quaternion multiplied by -1 gives the same transform). 
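A small sanity sketch for the quaternion helpers above, using the w, x, y, z ordering the module documents; the 90-degree rotation is illustrative:

    import numpy as np
    from nibabel import quaternions as nq

    theta = np.pi / 2                   # 90 degrees about the x axis
    q = np.array([np.cos(theta / 2), np.sin(theta / 2), 0, 0])
    M = nq.quat2mat(q)                  # 3x3 rotation matrix
    assert np.allclose(M @ [0, 1, 0], [0, 0, 1])    # y maps to z
    # mat2quat may return -q, but q and -q encode the same rotation
    assert nq.nearly_equivalent(nq.mat2quat(M), q)
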
@@ -363,7 +372,7 @@ def nearly_equivalent(q1, q2, rtol=1e-5, atol=1e-8): def angle_axis2quat(theta, vector, is_normalized=False): - """ Quaternion for rotation of angle `theta` around `vector` + """Quaternion for rotation of angle `theta` around `vector` Parameters ---------- @@ -398,12 +407,11 @@ def angle_axis2quat(theta, vector, is_normalized=False): vector = vector / math.sqrt(np.dot(vector, vector)) t2 = theta / 2.0 st2 = math.sin(t2) - return np.concatenate(([math.cos(t2)], - vector * st2)) + return np.concatenate(([math.cos(t2)], vector * st2)) def angle_axis2mat(theta, vector, is_normalized=False): - """ Rotation matrix of angle `theta` around `vector` + """Rotation matrix of angle `theta` around `vector` Parameters ---------- @@ -435,13 +443,17 @@ def angle_axis2mat(theta, vector, is_normalized=False): xs, ys, zs = x * s, y * s, z * s xC, yC, zC = x * C, y * C, z * C xyC, yzC, zxC = x * yC, y * zC, z * xC - return np.array([[x * xC + c, xyC - zs, zxC + ys], - [xyC + zs, y * yC + c, yzC - xs], - [zxC - ys, yzC + xs, z * zC + c]]) + return np.array( + [ + [x * xC + c, xyC - zs, zxC + ys], + [xyC + zs, y * yC + c, yzC - xs], + [zxC - ys, yzC + xs, z * zC + c], + ] + ) def quat2angle_axis(quat, identity_thresh=None): - """ Convert quaternion to rotation of angle around axis + """Convert quaternion to rotation of angle around axis Parameters ---------- diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py index 1e4033b676..a63894cef8 100644 --- a/nibabel/rstutils.py +++ b/nibabel/rstutils.py @@ -1,4 +1,4 @@ -""" ReStructured Text utilities +"""ReStructured Text utilities * Make ReST table given array of values """ @@ -6,14 +6,10 @@ import numpy as np -def rst_table(cell_values, - row_names=None, - col_names=None, - title='', - val_fmt='{0:5.2f}', - format_chars=None - ): - """ Return string for ReST table with entries `cell_values` +def rst_table( + cell_values, row_names=None, col_names=None, title='', val_fmt='{0:5.2f}', format_chars=None +): + """Return string for ReST table with entries `cell_values` Parameters ---------- @@ -82,36 +78,26 @@ def rst_table(cell_values, if max_len > col_len: col_len = max_len row_str_list.append(row_strs) - row_name_fmt = "{0:<" + str(row_len) + "}" + row_name_fmt = '{0:<' + str(row_len) + '}' row_names = [row_name_fmt.format(name) for name in row_names] - col_name_fmt = "{0:^" + str(col_len) + "}" + col_name_fmt = '{0:^' + str(col_len) + '}' col_names = [col_name_fmt.format(name) for name in col_names] col_headings = [' ' * row_len] + col_names col_header = down_joiner.join(col_headings) row_val_fmt = '{0:<' + str(col_len) + '}' table_strs = [] if title != '': - table_strs += [title_heading * len(title), - title, - title_heading * len(title), - ''] + table_strs += [title_heading * len(title), title, title_heading * len(title), ''] along_headings = [along * len(h) for h in col_headings] - crossed_line = (cross_starter + - cross_joiner.join(along_headings) + - cross_ender) + crossed_line = cross_starter + cross_joiner.join(along_headings) + cross_ender thick_long_headings = [thick_long * len(h) for h in col_headings] - crossed_thick_line = (cross_thick_starter + - cross_thick_joiner.join(thick_long_headings) + - cross_thick_ender) - table_strs += [crossed_line, - down_starter + col_header + down_ender, - crossed_thick_line] + crossed_thick_line = ( + cross_thick_starter + cross_thick_joiner.join(thick_long_headings) + cross_thick_ender + ) + table_strs += [crossed_line, down_starter + col_header + down_ender, crossed_thick_line] for row_no, row_name 
in enumerate(row_names): - row_vals = [row_val_fmt.format(row_str) - for row_str in row_str_list[row_no]] - row_line = (down_starter + - down_joiner.join([row_name] + row_vals) + - down_ender) + row_vals = [row_val_fmt.format(row_str) for row_str in row_str_list[row_no]] + row_line = down_starter + down_joiner.join([row_name] + row_vals) + down_ender table_strs.append(row_line) table_strs.append(crossed_line) return '\n'.join(table_strs) diff --git a/nibabel/spaces.py b/nibabel/spaces.py index dac8fdd049..d06a39b0ed 100644 --- a/nibabel/spaces.py +++ b/nibabel/spaces.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Routines to work with spaces +"""Routines to work with spaces A space is defined by coordinate axes. @@ -28,7 +28,7 @@ def vox2out_vox(mapped_voxels, voxel_sizes=None): - """ output-aligned shape, affine for input implied by `mapped_voxels` + """output-aligned shape, affine for input implied by `mapped_voxels` The input (voxel) space, and the affine mapping to output space, are given in `mapped_voxels`. @@ -95,7 +95,7 @@ def vox2out_vox(mapped_voxels, voxel_sizes=None): def slice2volume(index, axis, shape=None): - """ Affine expressing selection of a single slice from 3D volume + """Affine expressing selection of a single slice from 3D volume Imagine we have taken a slice from an image data array, ``s = data[:, :, index]``. This function returns the affine to map the array coordinates of @@ -129,9 +129,9 @@ def slice2volume(index, axis, shape=None): the embedded volume """ if index < 0: - raise ValueError("Cannot handle negative index") + raise ValueError('Cannot handle negative index') if not 0 <= axis <= 2: - raise ValueError("Axis should be between 0 and 2") + raise ValueError('Axis should be between 0 and 2') axes = list(range(4)) axes.remove(axis) slice_aff = np.eye(4)[:, axes] diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 09744d0149..7977943ffd 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" A simple spatial image class +"""A simple spatial image class The image class maintains the association between a 3D (or greater) array, and an affine transform that maps voxel coordinates to some world space. 
@@ -127,7 +127,6 @@ >>> img3 = nib.AnalyzeImage.from_file_map(file_map) >>> np.all(img3.get_fdata(dtype=np.float32) == data) True - """ import numpy as np @@ -142,22 +141,20 @@ class HeaderDataError(Exception): - """ Class to indicate error in getting or setting header data """ + """Class to indicate error in getting or setting header data""" class HeaderTypeError(Exception): - """ Class to indicate error in parameters into header functions """ + """Class to indicate error in parameters into header functions""" class SpatialHeader(FileBasedHeader): - """ Template class to implement header protocol """ + """Template class to implement header protocol""" + default_x_flip = True data_layout = 'F' - def __init__(self, - data_dtype=np.float32, - shape=(0,), - zooms=None): + def __init__(self, data_dtype=np.float32, shape=(0,), zooms=None): self.set_data_dtype(data_dtype) self._zooms = () self.set_data_shape(shape) @@ -174,9 +171,7 @@ def from_header(klass, header=None): # different field names if type(header) == klass: return header.copy() - return klass(header.get_data_dtype(), - header.get_data_shape(), - header.get_zooms()) + return klass(header.get_data_dtype(), header.get_data_shape(), header.get_zooms()) @classmethod def from_fileobj(klass, fileobj): @@ -186,18 +181,17 @@ def write_to(self, fileobj): raise NotImplementedError def __eq__(self, other): - return ((self.get_data_dtype(), - self.get_data_shape(), - self.get_zooms()) == - (other.get_data_dtype(), - other.get_data_shape(), - other.get_zooms())) + return (self.get_data_dtype(), self.get_data_shape(), self.get_zooms()) == ( + other.get_data_dtype(), + other.get_data_shape(), + other.get_zooms(), + ) def __ne__(self, other): return not self == other def copy(self): - """ Copy object to independent representation + """Copy object to independent representation The copy should not be affected by any changes to the original object. 
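The `SpatialHeader` hunks above define value-based equality and an independent `copy`; a short sketch of that behavior (shape and zooms are illustrative):

    import numpy as np
    from nibabel.spatialimages import SpatialHeader

    hdr = SpatialHeader(np.float32, shape=(4, 5, 6), zooms=(2.0, 2.0, 2.0))
    hdr2 = hdr.copy()
    hdr2.set_zooms((1.0, 1.0, 1.0))
    print(hdr.get_zooms())   # (2.0, 2.0, 2.0): the copy is independent
    print(hdr == hdr2)       # False: equality compares dtype, shape, zooms
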
@@ -232,8 +226,7 @@ def set_zooms(self, zooms): shape = self.get_data_shape() ndim = len(shape) if len(zooms) != ndim: - raise HeaderDataError('Expecting %d zoom values for ndim %d' - % (ndim, ndim)) + raise HeaderDataError('Expecting %d zoom values for ndim %d' % (ndim, ndim)) if len([z for z in zooms if z < 0]): raise HeaderDataError('zooms must be positive') self._zooms = zooms @@ -241,13 +234,12 @@ def set_zooms(self, zooms): def get_base_affine(self): shape = self.get_data_shape() zooms = self.get_zooms() - return shape_zoom_affine(shape, zooms, - self.default_x_flip) + return shape_zoom_affine(shape, zooms, self.default_x_flip) get_best_affine = get_base_affine def data_to_fileobj(self, data, fileobj, rescale=True): - """ Write array data `data` as binary to `fileobj` + """Write array data `data` as binary to `fileobj` Parameters ---------- @@ -264,7 +256,7 @@ def data_to_fileobj(self, data, fileobj, rescale=True): fileobj.write(data.astype(dtype).tobytes(order=self.data_layout)) def data_from_fileobj(self, fileobj): - """ Read binary image data from `fileobj` """ + """Read binary image data from `fileobj`""" dtype = self.get_data_dtype() shape = self.get_data_shape() data_size = int(np.prod(shape) * dtype.itemsize) @@ -273,7 +265,7 @@ def data_from_fileobj(self, fileobj): def supported_np_types(obj): - """ Numpy data types that instance `obj` supports + """Numpy data types that instance `obj` supports Parameters ---------- @@ -308,16 +300,20 @@ class ImageDataError(Exception): class SpatialFirstSlicer: - """ Slicing interface that returns a new image with an updated affine + """Slicing interface that returns a new image with an updated affine Checks that an image's first three axes are spatial """ + def __init__(self, img): # Local import to avoid circular import on module load from .imageclasses import spatial_axes_first + if not spatial_axes_first(img): - raise ValueError("Cannot predict position of spatial axes for " - "Image type " + img.__class__.__name__) + raise ValueError( + 'Cannot predict position of spatial axes for ' + 'Image type ' + img.__class__.__name__ + ) self.img = img def __getitem__(self, slicer): @@ -328,13 +324,13 @@ def __getitem__(self, slicer): dataobj = self.img.dataobj[slicer] if any(dim == 0 for dim in dataobj.shape): - raise IndexError("Empty slice requested") + raise IndexError('Empty slice requested') affine = self.slice_affine(slicer) return self.img.__class__(dataobj.copy(), affine, self.img.header) def check_slicing(self, slicer, return_spatial=False): - """ Canonicalize slicers and check for scalar indices in spatial dims + """Canonicalize slicers and check for scalar indices in spatial dims Parameters ---------- @@ -357,14 +353,15 @@ def check_slicing(self, slicer, return_spatial=False): spatial_slices = slicer[:3] for subslicer in spatial_slices: if subslicer is None: - raise IndexError("New axis not permitted in spatial dimensions") + raise IndexError('New axis not permitted in spatial dimensions') elif isinstance(subslicer, int): - raise IndexError("Scalar indices disallowed in spatial dimensions; " - "Use `[x]` or `x:x+1`.") + raise IndexError( + 'Scalar indices disallowed in spatial dimensions; ' 'Use `[x]` or `x:x+1`.' + ) return spatial_slices if return_spatial else slicer def slice_affine(self, slicer): - """ Retrieve affine for current image, if sliced by a given index + """Retrieve affine for current image, if sliced by a given index Applies scaling if down-sampling is applied, and adjusts the intercept to account for any cropping. 
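The slicer machinery above produces cropped and subsampled images with a corrected affine: slice steps become scales on the diagonal, and crop offsets move the origin. A minimal sketch (array shape and slices are illustrative):

    import numpy as np
    import nibabel as nib

    img = nib.Nifti1Image(np.zeros((10, 20, 30), dtype=np.float32), np.eye(4))
    sub = img.slicer[2:8, ::2, :]   # crop axis 0, subsample axis 1
    print(sub.shape)                # (6, 10, 30)
    print(sub.affine[1, 1])         # 2.0: slice step scales axis 1
    print(sub.affine[:3, 3])        # [2. 0. 0.]: origin shifted by the crop
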
@@ -392,7 +389,7 @@ def slice_affine(self, slicer): for i, subslicer in enumerate(slicer): if isinstance(subslicer, slice): if subslicer.step == 0: - raise ValueError("slice step cannot be 0") + raise ValueError('slice step cannot be 0') transform[i, i] = subslicer.step if subslicer.step is not None else 1 transform[i, 3] = subslicer.start or 0 # If slicer is None, nothing to do @@ -401,13 +398,13 @@ def slice_affine(self, slicer): class SpatialImage(DataobjImage): - """ Template class for volumetric (3D/4D) images """ + """Template class for volumetric (3D/4D) images""" + header_class = SpatialHeader ImageSlicer = SpatialFirstSlicer - def __init__(self, dataobj, affine, header=None, - extra=None, file_map=None): - """ Initialize image + def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): + """Initialize image The image is a combination of (array-like, affine matrix, header), with optional metadata in `extra`, and filename / file-like objects @@ -432,8 +429,7 @@ def __init__(self, dataobj, affine, header=None, file_map : mapping, optional mapping giving file information for this image format """ - super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, - file_map=file_map) + super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) if affine is not None: # Check that affine is array-like 4,4. Maybe this is too strict at # this abstract level, but so far I think all image formats we know @@ -458,7 +454,7 @@ def affine(self): return self._affine def update_header(self): - """ Harmonize header with image data and affine + """Harmonize header with image data and affine >>> data = np.zeros((2,3,4)) >>> affine = np.diag([1.0,2.0,3.0,1.0]) @@ -487,7 +483,7 @@ def update_header(self): self._affine2header() def _affine2header(self): - """ Unconditionally set affine into the header """ + """Unconditionally set affine into the header""" RZS = self._affine[:3, :3] vox = np.sqrt(np.sum(RZS * RZS, axis=0)) hdr = self._header @@ -499,12 +495,16 @@ def _affine2header(self): def __str__(self): shape = self.shape affine = self.affine - return '\n'.join((str(self.__class__), - f'data shape {shape}', - 'affine: ', - str(affine), - 'metadata:', - str(self._header))) + return '\n'.join( + ( + str(self.__class__), + f'data shape {shape}', + 'affine: ', + str(affine), + 'metadata:', + str(self._header), + ) + ) def get_data_dtype(self): return self._header.get_data_dtype() @@ -514,7 +514,7 @@ def set_data_dtype(self, dtype): @classmethod def from_image(klass, img): - """ Class method to create new instance of own class from `img` + """Class method to create new instance of own class from `img` Parameters ---------- @@ -527,14 +527,16 @@ def from_image(klass, img): cimg : ``spatialimage`` instance Image, of our own class """ - return klass(img.dataobj, - img.affine, - klass.header_class.from_header(img.header), - extra=img.extra.copy()) + return klass( + img.dataobj, + img.affine, + klass.header_class.from_header(img.header), + extra=img.extra.copy(), + ) @property def slicer(self): - """ Slicer object that returns cropped and subsampled images + """Slicer object that returns cropped and subsampled images The image is resliced in the current orientation; no rotation or resampling is performed, and no attempt is made to filter the image @@ -553,16 +555,17 @@ def slicer(self): return self.ImageSlicer(self) def __getitem__(self, idx): - """ No slicing or dictionary interface for images + """No slicing or dictionary interface for images Use the 
slicer attribute to perform cropping and subsampling at your own risk. """ raise TypeError( - "Cannot slice image objects; consider using `img.slicer[slice]` " - "to generate a sliced image (see documentation for caveats) or " - "slicing image array data with `img.dataobj[slice]` or " - "`img.get_fdata()[slice]`") + 'Cannot slice image objects; consider using `img.slicer[slice]` ' + 'to generate a sliced image (see documentation for caveats) or ' + 'slicing image array data with `img.dataobj[slice]` or ' + '`img.get_fdata()[slice]`' + ) def orthoview(self): """Plot the image using OrthoSlicer3D @@ -578,8 +581,7 @@ def orthoview(self): consider using viewer.show() (equivalently plt.show()) to show the figure. """ - return OrthoSlicer3D(self.dataobj, self.affine, - title=self.get_filename()) + return OrthoSlicer3D(self.dataobj, self.affine, title=self.get_filename()) def as_reoriented(self, ornt): """Apply an orientation change and return a new image diff --git a/nibabel/spm2analyze.py b/nibabel/spm2analyze.py index 6786b19a0c..67389403b9 100644 --- a/nibabel/spm2analyze.py +++ b/nibabel/spm2analyze.py @@ -6,24 +6,20 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to SPM2 version of analyze image format """ +"""Read / write access to SPM2 version of analyze image format""" import numpy as np from . import spm99analyze as spm99 # module import image_dimension_dtd = spm99.image_dimension_dtd[:] -image_dimension_dtd[ - image_dimension_dtd.index(('funused2', 'f4')) -] = ('scl_inter', 'f4') +image_dimension_dtd[image_dimension_dtd.index(('funused2', 'f4'))] = ('scl_inter', 'f4') # Full header numpy dtype combined across sub-fields -header_dtype = np.dtype(spm99.header_key_dtd + - image_dimension_dtd + - spm99.data_history_dtd) +header_dtype = np.dtype(spm99.header_key_dtd + image_dimension_dtd + spm99.data_history_dtd) class Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader): - """ Class for SPM2 variant of basic Analyze header + """Class for SPM2 variant of basic Analyze header SPM2 variant adds the following to basic Analyze format: @@ -36,7 +32,7 @@ class Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader): template_dtype = header_dtype def get_slope_inter(self): - """ Get data scaling (slope) and intercept from header data + """Get data scaling (slope) and intercept from header data Uses the algorithm from SPM2 spm_vol_ana.m by John Ashburner @@ -118,16 +114,19 @@ def may_contain_header(klass, binaryblock): if len(binaryblock) < klass.sizeof_hdr: return False - hdr_struct = np.ndarray(shape=(), dtype=header_dtype, - buffer=binaryblock[:klass.sizeof_hdr]) + hdr_struct = np.ndarray( + shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr] + ) bs_hdr_struct = hdr_struct.byteswap() - return (binaryblock[344:348] not in (b'ni1\x00', b'n+1\x00') and - 348 in (hdr_struct['sizeof_hdr'], bs_hdr_struct['sizeof_hdr'])) + return binaryblock[344:348] not in (b'ni1\x00', b'n+1\x00') and 348 in ( + hdr_struct['sizeof_hdr'], + bs_hdr_struct['sizeof_hdr'], + ) class Spm2AnalyzeImage(spm99.Spm99AnalyzeImage): - """ Class for SPM2 variant of basic Analyze image - """ + """Class for SPM2 variant of basic Analyze image""" + header_class = Spm2AnalyzeHeader diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index b858a5efff..1f9d7a3589 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Read / write access to SPM99 version of analyze image format """ +"""Read / write access to SPM99 version of analyze image format""" import warnings import numpy as np @@ -17,29 +17,25 @@ from .batteryrunners import Report from . import analyze # module import from .optpkg import optional_package + have_scipy = optional_package('scipy')[1] """ Support subtle variations of SPM version of Analyze """ header_key_dtd = analyze.header_key_dtd # funused1 in dime subfield is scalefactor image_dimension_dtd = analyze.image_dimension_dtd[:] -image_dimension_dtd[ - image_dimension_dtd.index(('funused1', 'f4')) -] = ('scl_slope', 'f4') +image_dimension_dtd[image_dimension_dtd.index(('funused1', 'f4'))] = ('scl_slope', 'f4') # originator text field used as image origin (translations) data_history_dtd = analyze.data_history_dtd[:] -data_history_dtd[ - data_history_dtd.index(('originator', 'S10')) -] = ('origin', 'i2', (5,)) +data_history_dtd[data_history_dtd.index(('originator', 'S10'))] = ('origin', 'i2', (5,)) # Full header numpy dtype combined across sub-fields -header_dtype = np.dtype(header_key_dtd + - image_dimension_dtd + - data_history_dtd) +header_dtype = np.dtype(header_key_dtd + image_dimension_dtd + data_history_dtd) class SpmAnalyzeHeader(analyze.AnalyzeHeader): - """ Basic scaling Spm Analyze header """ + """Basic scaling Spm Analyze header""" + # Copies of module level definitions template_dtype = header_dtype @@ -49,13 +45,13 @@ class SpmAnalyzeHeader(analyze.AnalyzeHeader): @classmethod def default_structarr(klass, endianness=None): - """ Create empty header binary block with given endianness """ + """Create empty header binary block with given endianness""" hdr_data = super(SpmAnalyzeHeader, klass).default_structarr(endianness) hdr_data['scl_slope'] = 1 return hdr_data def get_slope_inter(self): - """ Get scalefactor and intercept + """Get scalefactor and intercept If scalefactor is 0.0 return None to indicate no scalefactor. Intercept is always None because SPM99 analyze cannot store intercepts. @@ -67,7 +63,7 @@ def get_slope_inter(self): return slope, None def set_slope_inter(self, slope, inter=None): - """ Set slope and / or intercept into header + """Set slope and / or intercept into header Set slope and intercept for image data, such that, if the image data is ``arr``, then the scaled image data will be ``(arr * @@ -93,12 +89,11 @@ def set_slope_inter(self, slope, inter=None): self._structarr['scl_slope'] = slope if inter in (None, 0) or np.isnan(inter): return - raise HeaderTypeError('Cannot set non-zero intercept ' - 'for SPM headers') + raise HeaderTypeError('Cannot set non-zero intercept ' 'for SPM headers') class Spm99AnalyzeHeader(SpmAnalyzeHeader): - """ Class for SPM99 variant of basic Analyze header + """Class for SPM99 variant of basic Analyze header SPM99 variant adds the following to basic Analyze format: @@ -107,7 +102,7 @@ class Spm99AnalyzeHeader(SpmAnalyzeHeader): """ def get_origin_affine(self): - """ Get affine from header, using SPM origin field if sensible + """Get affine from header, using SPM origin field if sensible The default translations are got from the ``origin`` field, if set, or from the center of the image otherwise. 
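Per the slope/intercept hunks above, SPM99-style headers carry only a scalefactor; a sketch of that behavior (values illustrative):

    from nibabel.spm99analyze import Spm99AnalyzeHeader

    hdr = Spm99AnalyzeHeader()
    hdr.set_slope_inter(2.0)       # stored in the scl_slope field
    print(hdr.get_slope_inter())   # (2.0, None): no intercept support
    hdr.set_slope_inter(2.0, 3.0)  # raises HeaderTypeError
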
@@ -146,8 +141,7 @@ def get_origin_affine(self): # Remember that the origin is for matlab (1-based indexing) origin = hdr['origin'][:3] dims = hdr['dim'][1:4] - if (np.any(origin) and - np.all(origin > -dims) and np.all(origin < dims * 2)): + if np.any(origin) and np.all(origin > -dims) and np.all(origin < dims * 2): origin = origin - 1 else: origin = (dims - 1) / 2.0 @@ -159,7 +153,7 @@ def get_origin_affine(self): get_best_affine = get_origin_affine def set_origin_from_affine(self, affine): - """ Set SPM origin to header from affine matrix. + """Set SPM origin to header from affine matrix. The ``origin`` field was read but not written by SPM99 and 2. It was used for storing a central voxel coordinate, that could be used in @@ -221,8 +215,7 @@ def _chk_origin(hdr, fix=False): rep = Report(HeaderDataError) origin = hdr['origin'][0:3] dims = hdr['dim'][1:4] - if (not np.any(origin) or - (np.all(origin > -dims) and np.all(origin < dims * 2))): + if not np.any(origin) or (np.all(origin > -dims) and np.all(origin < dims * 2)): return hdr, rep rep.problem_level = 20 rep.problem_msg = 'very large origin values relative to dims' @@ -232,19 +225,17 @@ def _chk_origin(hdr, fix=False): class Spm99AnalyzeImage(analyze.AnalyzeImage): - """ Class for SPM99 variant of basic Analyze image - """ + """Class for SPM99 variant of basic Analyze image""" + header_class = Spm99AnalyzeHeader - files_types = (('image', '.img'), - ('header', '.hdr'), - ('mat', '.mat')) + files_types = (('image', '.img'), ('header', '.hdr'), ('mat', '.mat')) has_affine = True makeable = True rw = have_scipy @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): - """ Class method to create image from mapping in ``file_map`` + """Class method to create image from mapping in ``file_map`` Parameters ---------- @@ -275,7 +266,8 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): """ ret = super(Spm99AnalyzeImage, klass).from_file_map( - file_map, mmap=mmap, keep_file_open=keep_file_open) + file_map, mmap=mmap, keep_file_open=keep_file_open + ) try: matf = file_map['mat'].get_prepare_fileobj() except OSError: @@ -286,12 +278,12 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): if len(contents) == 0: return ret import scipy.io as sio + mats = sio.loadmat(BytesIO(contents)) if 'mat' in mats: # this overrides a 'M', and includes any flip mat = mats['mat'] if mat.ndim > 2: - warnings.warn('More than one affine in "mat" matrix, ' - 'using first') + warnings.warn('More than one affine in "mat" matrix, ' 'using first') mat = mat[:, :, 0] ret._affine = mat elif 'M' in mats: # the 'M' matrix does not include flips @@ -309,7 +301,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): return ret def to_file_map(self, file_map=None, dtype=None): - """ Write image to `file_map` or contained ``self.file_map`` + """Write image to `file_map` or contained ``self.file_map`` Extends Analyze ``to_file_map`` method by writing ``mat`` file @@ -326,6 +318,7 @@ def to_file_map(self, file_map=None, dtype=None): if mat is None: return import scipy.io as sio + hdr = self._header if hdr.default_x_flip: M = np.dot(np.diag([-1, 1, 1, 1]), mat) diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 860ae2cb39..5e8d87b671 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -1,4 +1,4 @@ -""" Multiformat-capable streamline format read / write interface +"""Multiformat-capable streamline format read / write 
interface """ import os import warnings @@ -12,13 +12,11 @@ from .tck import TckFile # List of all supported formats -FORMATS = {".trk": TrkFile, - ".tck": TckFile - } +FORMATS = {'.trk': TrkFile, '.tck': TckFile} def is_supported(fileobj): - """ Checks if the file-like object if supported by NiBabel. + """Checks if the file-like object if supported by NiBabel. Parameters ---------- @@ -35,7 +33,7 @@ def is_supported(fileobj): def detect_format(fileobj): - """ Returns the StreamlinesFile object guessed from the file-like object. + """Returns the StreamlinesFile object guessed from the file-like object. Parameters ---------- @@ -64,7 +62,7 @@ def detect_format(fileobj): def load(fileobj, lazy_load=False): - """ Loads streamlines in *RAS+* and *mm* space from a file-like object. + """Loads streamlines in *RAS+* and *mm* space from a file-like object. Parameters ---------- @@ -96,7 +94,7 @@ def load(fileobj, lazy_load=False): def save(tractogram, filename, **kwargs): - r""" Saves a tractogram to a file. + r"""Saves a tractogram to a file. Parameters ---------- @@ -123,15 +121,15 @@ def save(tractogram, filename, **kwargs): else: # Assume it's a TractogramFile object. tractogram_file = tractogram - if (tractogram_file_class is None or - not isinstance(tractogram_file, tractogram_file_class)): - msg = ("The extension you specified is unusual for the provided" - " 'TractogramFile' object.") + if tractogram_file_class is None or not isinstance(tractogram_file, tractogram_file_class): + msg = ( + 'The extension you specified is unusual for the provided' + " 'TractogramFile' object." + ) warnings.warn(msg, ExtensionWarning) if kwargs: - msg = ("A 'TractogramFile' object was provided, no need for" - " keyword arguments.") + msg = "A 'TractogramFile' object was provided, no need for" ' keyword arguments.' raise ValueError(msg) tractogram_file.save(filename) diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index cff930aaee..bb03e6bfd0 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -1,4 +1,3 @@ - import numbers from operator import mul from functools import reduce @@ -9,7 +8,7 @@ def is_array_sequence(obj): - """ Return True if `obj` is an array sequence. """ + """Return True if `obj` is an array sequence.""" try: return obj.is_array_sequence except AttributeError: @@ -17,9 +16,9 @@ def is_array_sequence(obj): def is_ndarray_of_int_or_bool(obj): - return (isinstance(obj, np.ndarray) and - (np.issubdtype(obj.dtype, np.integer) or - np.issubdtype(obj.dtype, np.bool_))) + return isinstance(obj, np.ndarray) and ( + np.issubdtype(obj.dtype, np.integer) or np.issubdtype(obj.dtype, np.bool_) + ) class _BuildCache: @@ -31,8 +30,7 @@ def __init__(self, arr_seq, common_shape, dtype): # Use the passed dtype only if null data array self.dtype = dtype if arr_seq._data.size == 0 else arr_seq._data.dtype if arr_seq.common_shape != () and common_shape != arr_seq.common_shape: - raise ValueError( - "All dimensions, except the first one, must match exactly") + raise ValueError('All dimensions, except the first one, must match exactly') self.common_shape = common_shape n_in_row = reduce(mul, common_shape, 1) bytes_per_row = n_in_row * dtype.itemsize @@ -44,25 +42,29 @@ def update_seq(self, arr_seq): def _define_operators(cls): - """ Decorator which adds support for some Python operators. 
""" - def _wrap(cls, op, inplace=False, unary=False): + """Decorator which adds support for some Python operators.""" + def _wrap(cls, op, inplace=False, unary=False): def fn_unary_op(self): try: return self._op(op) except SystemError as e: - message = ("Numpy returned an uninformative error. It possibly should be " - "'Integers to negative integer powers are not allowed.' " - "See https://github.com/numpy/numpy/issues/19634 for details.") + message = ( + 'Numpy returned an uninformative error. It possibly should be ' + "'Integers to negative integer powers are not allowed.' " + 'See https://github.com/numpy/numpy/issues/19634 for details.' + ) raise ValueError(message) from e def fn_binary_op(self, value): try: return self._op(op, value, inplace=inplace) except SystemError as e: - message = ("Numpy returned an uninformative error. It possibly should be " - "'Integers to negative integer powers are not allowed.' " - "See https://github.com/numpy/numpy/issues/19634 for details.") + message = ( + 'Numpy returned an uninformative error. It possibly should be ' + "'Integers to negative integer powers are not allowed.' " + 'See https://github.com/numpy/numpy/issues/19634 for details.' + ) raise ValueError(message) from e setattr(cls, op, fn_unary_op if unary else fn_binary_op) @@ -70,16 +72,27 @@ def fn_binary_op(self, value): fn.__name__ = op fn.__doc__ = getattr(np.ndarray, op).__doc__ - for op in ["__add__", "__sub__", "__mul__", "__mod__", "__pow__", - "__floordiv__", "__truediv__", "__lshift__", "__rshift__", - "__or__", "__and__", "__xor__"]: + for op in [ + '__add__', + '__sub__', + '__mul__', + '__mod__', + '__pow__', + '__floordiv__', + '__truediv__', + '__lshift__', + '__rshift__', + '__or__', + '__and__', + '__xor__', + ]: _wrap(cls, op=op, inplace=False) _wrap(cls, op=f"__i{op.strip('_')}__", inplace=True) - for op in ["__eq__", "__ne__", "__lt__", "__le__", "__gt__", "__ge__"]: + for op in ['__eq__', '__ne__', '__lt__', '__le__', '__gt__', '__ge__']: _wrap(cls, op) - for op in ["__neg__", "__abs__", "__invert__"]: + for op in ['__neg__', '__abs__', '__invert__']: _wrap(cls, op, unary=True) return cls @@ -87,7 +100,7 @@ def fn_binary_op(self, value): @_define_operators class ArraySequence: - """ Sequence of ndarrays having variable first dimension sizes. + """Sequence of ndarrays having variable first dimension sizes. This is a container that can store multiple ndarrays where each ndarray might have a different first dimension size but a *common* size for the @@ -100,7 +113,7 @@ class ArraySequence: """ def __init__(self, iterable=None, buffer_size=4): - """ Initialize array sequence instance + """Initialize array sequence instance Parameters ---------- @@ -144,16 +157,16 @@ def is_array_sequence(self): @property def common_shape(self): - """ Matching shape of the elements in this array sequence. """ + """Matching shape of the elements in this array sequence.""" return self._data.shape[1:] @property def total_nb_rows(self): - """ Total number of rows in this array sequence. """ + """Total number of rows in this array sequence.""" return np.sum(self._lengths) def get_data(self): - """ Returns a *copy* of the elements in this array sequence. + """Returns a *copy* of the elements in this array sequence. Notes ----- @@ -164,31 +177,31 @@ def get_data(self): return self.copy()._data def _check_shape(self, arrseq): - """ Check whether this array sequence is compatible with another. 
""" - msg = "cannot perform operation - array sequences have different" + """Check whether this array sequence is compatible with another.""" + msg = 'cannot perform operation - array sequences have different' if len(self._lengths) != len(arrseq._lengths): - msg += f" lengths: {len(self._lengths)} vs. {len(arrseq._lengths)}." + msg += f' lengths: {len(self._lengths)} vs. {len(arrseq._lengths)}.' raise ValueError(msg) if self.total_nb_rows != arrseq.total_nb_rows: - msg += f" amount of data: {self.total_nb_rows} vs. {arrseq.total_nb_rows}." + msg += f' amount of data: {self.total_nb_rows} vs. {arrseq.total_nb_rows}.' raise ValueError(msg) if self.common_shape != arrseq.common_shape: - msg += f" common shape: {self.common_shape} vs. {arrseq.common_shape}." + msg += f' common shape: {self.common_shape} vs. {arrseq.common_shape}.' raise ValueError(msg) return True def _get_next_offset(self): - """ Offset in ``self._data`` at which to write next rowelement """ + """Offset in ``self._data`` at which to write next rowelement""" if len(self._offsets) == 0: return 0 imax = np.argmax(self._offsets) return self._offsets[imax] + self._lengths[imax] def append(self, element, cache_build=False): - """ Appends `element` to this array sequence. + """Appends `element` to this array sequence. Append can be a lot faster if it knows that it is appending several elements instead of a single element. In that case it can cache the @@ -242,7 +255,7 @@ def append(self, element, cache_build=False): build_cache.update_seq(self) def finalize_append(self): - """ Finalize process of appending several elements to `self` + """Finalize process of appending several elements to `self` :meth:`append` can be a lot faster if it knows that it is appending several elements instead of a single element. To tell the append @@ -257,7 +270,7 @@ def finalize_append(self): self.shrink_data() def _resize_data_to(self, n_rows, build_cache): - """ Resize data array if required """ + """Resize data array if required""" # Calculate new data shape, rounding up to nearest buffer size n_bufs = np.ceil(n_rows / build_cache.rows_per_buf) extended_n_rows = int(n_bufs * build_cache.rows_per_buf) @@ -272,11 +285,10 @@ def _resize_data_to(self, n_rows, build_cache): self._data.resize(new_shape, refcheck=False) def shrink_data(self): - self._data.resize((self._get_next_offset(),) + self.common_shape, - refcheck=False) + self._data.resize((self._get_next_offset(),) + self.common_shape, refcheck=False) def extend(self, elements): - """ Appends all `elements` to this array sequence. + """Appends all `elements` to this array sequence. Parameters ---------- @@ -307,8 +319,7 @@ def extend(self, elements): e0 = np.asarray(elements[0]) n_elements = np.sum([len(e) for e in elements]) self._build_cache = _BuildCache(self, e0.shape[1:], e0.dtype) - self._resize_data_to(self._get_next_offset() + n_elements, - self._build_cache) + self._resize_data_to(self._get_next_offset() + n_elements, self._build_cache) for e in elements: self.append(e, cache_build=True) @@ -316,7 +327,7 @@ def extend(self, elements): self.finalize_append() def copy(self): - """ Creates a copy of this :class:`ArraySequence` object. + """Creates a copy of this :class:`ArraySequence` object. 
Returns ------- @@ -331,15 +342,14 @@ def copy(self): """ seq = self.__class__() total_lengths = np.sum(self._lengths) - seq._data = np.empty((total_lengths,) + self._data.shape[1:], - dtype=self._data.dtype) + seq._data = np.empty((total_lengths,) + self._data.shape[1:], dtype=self._data.dtype) next_offset = 0 offsets = [] for offset, length in zip(self._offsets, self._lengths): offsets.append(next_offset) - chunk = self._data[offset:offset + length] - seq._data[next_offset:next_offset + length] = chunk + chunk = self._data[offset : offset + length] + seq._data[next_offset : next_offset + length] = chunk next_offset += length seq._offsets = np.asarray(offsets) @@ -348,7 +358,7 @@ def copy(self): return seq def __getitem__(self, idx): - """ Get sequence(s) through standard or advanced numpy indexing. + """Get sequence(s) through standard or advanced numpy indexing. Parameters ---------- @@ -368,7 +378,7 @@ def __getitem__(self, idx): """ if isinstance(idx, (numbers.Integral, np.integer)): start = self._offsets[idx] - return self._data[start:start + self._lengths[idx]] + return self._data[start : start + self._lengths[idx]] seq = self.__class__() seq._is_view = True @@ -390,11 +400,13 @@ def __getitem__(self, idx): seq._lengths = self._lengths[off_idx] return seq - raise TypeError("Index must be either an int, a slice, a list of int" - " or a ndarray of bool! Not " + str(type(idx))) + raise TypeError( + 'Index must be either an int, a slice, a list of int' + ' or a ndarray of bool! Not ' + str(type(idx)) + ) def __setitem__(self, idx, elements): - """ Set sequence(s) through standard or advanced numpy indexing. + """Set sequence(s) through standard or advanced numpy indexing. Parameters ---------- @@ -411,7 +423,7 @@ def __setitem__(self, idx, elements): """ if isinstance(idx, (numbers.Integral, np.integer)): start = self._offsets[idx] - self._data[start:start + self._lengths[idx]] = elements + self._data[start : start + self._lengths[idx]] = elements return if isinstance(idx, tuple): @@ -431,31 +443,33 @@ def __setitem__(self, idx, elements): lengths = self._lengths[off_idx] else: - raise TypeError("Index must be either an int, a slice, a list of int" - " or a ndarray of bool! Not " + str(type(idx))) + raise TypeError( + 'Index must be either an int, a slice, a list of int' + ' or a ndarray of bool! Not ' + str(type(idx)) + ) if is_array_sequence(elements): if len(lengths) != len(elements): - msg = f"Trying to set {len(lengths)} sequences with {len(elements)} sequences." + msg = f'Trying to set {len(lengths)} sequences with {len(elements)} sequences.' raise ValueError(msg) if sum(lengths) != elements.total_nb_rows: - msg = f"Trying to set {sum(lengths)} points with {elements.total_nb_rows} points." + msg = f'Trying to set {sum(lengths)} points with {elements.total_nb_rows} points.' raise ValueError(msg) for o1, l1, o2, l2 in zip(offsets, lengths, elements._offsets, elements._lengths): - data[o1:o1 + l1] = elements._data[o2:o2 + l2] + data[o1 : o1 + l1] = elements._data[o2 : o2 + l2] elif isinstance(elements, numbers.Number): for o1, l1 in zip(offsets, lengths): - data[o1:o1 + l1] = elements + data[o1 : o1 + l1] = elements else: # Try to iterate over it. for o1, l1, element in zip(offsets, lengths, elements): - data[o1:o1 + l1] = element + data[o1 : o1 + l1] = element def _op(self, op, value=None, inplace=False): - """ Applies some operator to this arraysequence. + """Applies some operator to this arraysequence. 
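For the `ArraySequence` hunks above, a minimal usage sketch showing the ragged first axis and the operators installed by `_define_operators` (element shapes are illustrative):

    import numpy as np
    from nibabel.streamlines.array_sequence import ArraySequence

    seq = ArraySequence([np.zeros((3, 3)), np.ones((5, 3))])
    print(len(seq), seq.common_shape)   # 2 (3,)
    print(seq[1].shape)                 # (5, 3)
    seq2 = seq * 2                      # elementwise, via __mul__ above
    print(seq2[1][0])                   # [2. 2. 2.]
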
This handles both unary and binary operators with a scalar or another array sequence. Operations are performed directly on the underlying @@ -475,18 +489,25 @@ def _op(self, op, value=None, inplace=False): seq = self if inplace else self.copy() if is_array_sequence(value) and seq._check_shape(value): - elements = zip(seq._offsets, seq._lengths, - self._offsets, self._lengths, - value._offsets, value._lengths) + elements = zip( + seq._offsets, + seq._lengths, + self._offsets, + self._lengths, + value._offsets, + value._lengths, + ) # Change seq.dtype to match the operation resulting type. o0, l0, o1, l1, o2, l2 = next(elements) - tmp = getattr(self._data[o1:o1 + l1], op)(value._data[o2:o2 + l2]) + tmp = getattr(self._data[o1 : o1 + l1], op)(value._data[o2 : o2 + l2]) seq._data = seq._data.astype(tmp.dtype) - seq._data[o0:o0 + l0] = tmp + seq._data[o0 : o0 + l0] = tmp for o0, l0, o1, l1, o2, l2 in elements: - seq._data[o0:o0 + l0] = getattr(self._data[o1:o1 + l1], op)(value._data[o2:o2 + l2]) + seq._data[o0 : o0 + l0] = getattr(self._data[o1 : o1 + l1], op)( + value._data[o2 : o2 + l2] + ) else: args = [] if value is None else [value] # Dealing with unary and binary ops. @@ -494,22 +515,23 @@ def _op(self, op, value=None, inplace=False): # Change seq.dtype to match the operation resulting type. o0, l0, o1, l1 = next(elements) - tmp = getattr(self._data[o1:o1 + l1], op)(*args) + tmp = getattr(self._data[o1 : o1 + l1], op)(*args) seq._data = seq._data.astype(tmp.dtype) - seq._data[o0:o0 + l0] = tmp + seq._data[o0 : o0 + l0] = tmp for o0, l0, o1, l1 in elements: - seq._data[o0:o0 + l0] = getattr(self._data[o1:o1 + l1], op)(*args) + seq._data[o0 : o0 + l0] = getattr(self._data[o1 : o1 + l1], op)(*args) return seq def __iter__(self): if len(self._lengths) != len(self._offsets): - raise ValueError("ArraySequence object corrupted:" - " len(self._lengths) != len(self._offsets)") + raise ValueError( + 'ArraySequence object corrupted:' ' len(self._lengths) != len(self._offsets)' + ) for offset, lengths in zip(self._offsets, self._lengths): - yield self._data[offset: offset + lengths] + yield self._data[offset : offset + lengths] def __len__(self): return len(self._offsets) @@ -519,33 +541,30 @@ def __repr__(self): # Show only the first and last edgeitems. edgeitems = np.get_printoptions()['edgeitems'] data = str(list(self[:edgeitems]))[:-1] - data += ", ..., " + data += ', ..., ' data += str(list(self[-edgeitems:]))[1:] else: data = str(list(self)) - return f"{self.__class__.__name__}({data})" + return f'{self.__class__.__name__}({data})' def save(self, filename): - """ Saves this :class:`ArraySequence` object to a .npz file. """ - np.savez(filename, - data=self._data, - offsets=self._offsets, - lengths=self._lengths) + """Saves this :class:`ArraySequence` object to a .npz file.""" + np.savez(filename, data=self._data, offsets=self._offsets, lengths=self._lengths) @classmethod def load(cls, filename): - """ Loads a :class:`ArraySequence` object from a .npz file. 
""" + """Loads a :class:`ArraySequence` object from a .npz file.""" content = np.load(filename) seq = cls() - seq._data = content["data"] - seq._offsets = content["offsets"] - seq._lengths = content["lengths"] + seq._data = content['data'] + seq._offsets = content['offsets'] + seq._lengths = content['lengths'] return seq def create_arraysequences_from_generator(gen, n, buffer_sizes=None): - """ Creates :class:`ArraySequence` objects from a generator yielding tuples + """Creates :class:`ArraySequence` objects from a generator yielding tuples Parameters ---------- @@ -572,7 +591,7 @@ def create_arraysequences_from_generator(gen, n, buffer_sizes=None): def concatenate(seqs, axis): - """ Concatenates multiple :class:`ArraySequence` objects along an axis. + """Concatenates multiple :class:`ArraySequence` objects along an axis. Parameters ---------- diff --git a/nibabel/streamlines/header.py b/nibabel/streamlines/header.py index 523035f3ee..2aed10c62c 100644 --- a/nibabel/streamlines/header.py +++ b/nibabel/streamlines/header.py @@ -1,22 +1,23 @@ -""" Field class defining common header fields in tractogram files +"""Field class defining common header fields in tractogram files """ class Field: - """ Header fields common to multiple streamline file formats. + """Header fields common to multiple streamline file formats. In IPython, use `nibabel.streamlines.Field??` to list them. """ - NB_STREAMLINES = "nb_streamlines" - STEP_SIZE = "step_size" - METHOD = "method" - NB_SCALARS_PER_POINT = "nb_scalars_per_point" - NB_PROPERTIES_PER_STREAMLINE = "nb_properties_per_streamline" - NB_POINTS = "nb_points" - VOXEL_SIZES = "voxel_sizes" - DIMENSIONS = "dimensions" - MAGIC_NUMBER = "magic_number" - ORIGIN = "origin" - VOXEL_TO_RASMM = "voxel_to_rasmm" - VOXEL_ORDER = "voxel_order" - ENDIANNESS = "endianness" + + NB_STREAMLINES = 'nb_streamlines' + STEP_SIZE = 'step_size' + METHOD = 'method' + NB_SCALARS_PER_POINT = 'nb_scalars_per_point' + NB_PROPERTIES_PER_STREAMLINE = 'nb_properties_per_streamline' + NB_POINTS = 'nb_points' + VOXEL_SIZES = 'voxel_sizes' + DIMENSIONS = 'dimensions' + MAGIC_NUMBER = 'magic_number' + ORIGIN = 'origin' + VOXEL_TO_RASMM = 'voxel_to_rasmm' + VOXEL_ORDER = 'voxel_order' + ENDIANNESS = 'endianness' diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 37bdbe3ffb..7fb5cde8b3 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -1,4 +1,4 @@ -""" Read / write access to TCK streamlines format. +"""Read / write access to TCK streamlines format. TCK format is defined at http://mrtrix.readthedocs.io/en/latest/getting_started/image_data.html?highlight=format#tracks-file-format-tck @@ -23,7 +23,7 @@ class TckFile(TractogramFile): - """ Convenience class to encapsulate TCK file format. + """Convenience class to encapsulate TCK file format. Notes ----- @@ -42,8 +42,9 @@ class TckFile(TractogramFile): .. [#] http://www.nitrc.org/pipermail/mrtrix-discussion/2014-January/000859.html .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ + # Constants - MAGIC_NUMBER = b"mrtrix tracks" + MAGIC_NUMBER = b'mrtrix tracks' SUPPORTS_DATA_PER_POINT = False # Not yet SUPPORTS_DATA_PER_STREAMLINE = False # Not yet @@ -73,7 +74,7 @@ def __init__(self, tractogram, header=None): @classmethod def is_correct_format(cls, fileobj): - """ Check if the file is in TCK format. + """Check if the file is in TCK format. 
Parameters ---------- @@ -97,18 +98,18 @@ def is_correct_format(cls, fileobj): @classmethod def create_empty_header(cls): - """ Return an empty compliant TCK header as dict """ + """Return an empty compliant TCK header as dict""" header = {} # Default values header[Field.MAGIC_NUMBER] = cls.MAGIC_NUMBER header[Field.NB_STREAMLINES] = 0 - header['datatype'] = "Float32LE" + header['datatype'] = 'Float32LE' return header @classmethod def load(cls, fileobj, lazy_load=False): - """ Loads streamlines from a filename or file-like object. + """Loads streamlines from a filename or file-like object. Parameters ---------- @@ -139,6 +140,7 @@ def load(cls, fileobj, lazy_load=False): hdr = cls._read_header(fileobj) if lazy_load: + def _read(): for pts in cls._read(fileobj, hdr): yield TractogramItem(pts, {}, {}) @@ -162,7 +164,7 @@ def _finalize_header(self, f, header, offset=0): self._write_header(f, header) def save(self, fileobj): - """ Save tractogram to a filename or file-like object using TCK format. + """Save tractogram to a filename or file-like object using TCK format. Parameters ---------- @@ -181,7 +183,7 @@ def save(self, fileobj): # Keep counts for correcting incoherent fields or warn. nb_streamlines = 0 - with Opener(fileobj, mode="wb") as f: + with Opener(fileobj, mode='wb') as f: # Keep track of the beginning of the header. beginning = f.tell() @@ -209,16 +211,20 @@ def save(self, fileobj): data_for_streamline = first_item.data_for_streamline if len(data_for_streamline) > 0: - keys = ", ".join(data_for_streamline.keys()) - msg = ("TCK format does not support saving additional " - f"data alongside streamlines. Dropping: {keys}") + keys = ', '.join(data_for_streamline.keys()) + msg = ( + 'TCK format does not support saving additional ' + f'data alongside streamlines. Dropping: {keys}' + ) warnings.warn(msg, DataWarning) data_for_points = first_item.data_for_points if len(data_for_points) > 0: - keys = ", ".join(data_for_points.keys()) - msg = ("TCK format does not support saving additional " - f"data alongside points. Dropping: {keys}") + keys = ', '.join(data_for_points.keys()) + msg = ( + 'TCK format does not support saving additional ' + f'data alongside points. Dropping: {keys}' + ) warnings.warn(msg, DataWarning) for t in tractogram: @@ -234,7 +240,7 @@ def save(self, fileobj): @staticmethod def _write_header(fileobj, header): - """ Write TCK header to file-like object. + """Write TCK header to file-like object. Parameters ---------- @@ -243,32 +249,36 @@ def _write_header(fileobj, header): ready to read from the beginning of the TCK header). """ # Fields to exclude - exclude = [Field.MAGIC_NUMBER, # Handled separately. - Field.NB_STREAMLINES, # Handled separately. - Field.ENDIANNESS, # Handled separately. - Field.VOXEL_TO_RASMM, # Streamlines are always in RAS+ mm. - "count", "datatype", "file"] # Fields being replaced. + exclude = [ + Field.MAGIC_NUMBER, # Handled separately. + Field.NB_STREAMLINES, # Handled separately. + Field.ENDIANNESS, # Handled separately. + Field.VOXEL_TO_RASMM, # Streamlines are always in RAS+ mm. + 'count', + 'datatype', + 'file', + ] # Fields being replaced. lines = [ - f"count: {header[Field.NB_STREAMLINES]:010}", - "datatype: Float32LE", # Always Float32LE. + f'count: {header[Field.NB_STREAMLINES]:010}', + 'datatype: Float32LE', # Always Float32LE. 
] - lines.extend(f"{k}: {v}" - for k, v in header.items() - if k not in exclude and not k.startswith("_")) - out = "\n".join(lines) + lines.extend( + f'{k}: {v}' for k, v in header.items() if k not in exclude and not k.startswith('_') + ) + out = '\n'.join(lines) # Check the header is well formatted. - if out.count("\n") > len(lines) - 1: # \n only allowed between lines. + if out.count('\n') > len(lines) - 1: # \n only allowed between lines. msg = f"Key-value pairs cannot contain '\\n':\n{out}" raise HeaderError(msg) - if out.count(":") > len(lines): + if out.count(':') > len(lines): # : only one per line (except the last one which contains END). msg = f"Key-value pairs cannot contain ':':\n{out}" raise HeaderError(msg) - out = header[Field.MAGIC_NUMBER] + b"\n" + out.encode('utf-8') + out = header[Field.MAGIC_NUMBER] + b'\n' + out.encode('utf-8') # Compute data offset considering the offset string representation # headers + "file" header + END + \n's @@ -284,7 +294,7 @@ def _write_header(fileobj, header): @classmethod def _read_header(cls, fileobj): - """ Reads a TCK header from a file. + """Reads a TCK header from a file. Parameters ---------- @@ -316,7 +326,7 @@ def _read_header(cls, fileobj): magic_number = f.read(len(cls.MAGIC_NUMBER)) if magic_number != cls.MAGIC_NUMBER: - raise HeaderError(f"Invalid magic number: {magic_number}") + raise HeaderError(f'Invalid magic number: {magic_number}') hdr[Field.MAGIC_NUMBER] = magic_number @@ -331,18 +341,18 @@ def _read_header(cls, fileobj): if not line: # Skip empty lines continue - if line == "END": # End of the header + if line == 'END': # End of the header found_end = True break if ':' not in line: # Invalid header line - raise HeaderError(f"Invalid header (line {n_line}): {line}") + raise HeaderError(f'Invalid header (line {n_line}): {line}') - key, value = line.split(":", 1) + key, value = line.split(':', 1) hdr[key.strip()] = value.strip() if not found_end: - raise HeaderError("Missing END in the header.") + raise HeaderError('Missing END in the header.') offset_data = f.tell() @@ -352,14 +362,15 @@ def _read_header(cls, fileobj): # Check integrity of TCK header. if 'datatype' not in hdr: - msg = ("Missing 'datatype' attribute in TCK header." - " Assuming it is Float32LE.") + msg = "Missing 'datatype' attribute in TCK header." ' Assuming it is Float32LE.' warnings.warn(msg, HeaderWarning) - hdr['datatype'] = "Float32LE" + hdr['datatype'] = 'Float32LE' if not hdr['datatype'].startswith('Float32'): - msg = ("TCK only supports float32 dtype but 'datatype: " - f"{hdr['datatype']}' was specified in the header.") + msg = ( + "TCK only supports float32 dtype but 'datatype: " + f"{hdr['datatype']}' was specified in the header." + ) raise HeaderError(msg) if 'file' not in hdr: @@ -368,8 +379,10 @@ def _read_header(cls, fileobj): hdr['file'] = f'. {offset_data}' if hdr['file'].split()[0] != '.': - msg = ("TCK only supports single-file - in other words the filename part must be " - f"specified as '.' but '{hdr['file'].split()[0]}' was specified.") + msg = ( + 'TCK only supports single-file - in other words the filename part must be ' + f"specified as '.' but '{hdr['file'].split()[0]}' was specified." + ) raise HeaderError("Missing 'file' attribute in TCK header.") # Set endianness and _dtype attributes in the header. 
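The TCK reader and writer above exchange a plain-text header: the magic line, 'key: value' pairs (one colon per line, no embedded newlines), a 'file: . <offset>' pointer to the binary data, then END. A well-formed sketch, with an illustrative byte offset:

    hdr_text = (
        b'mrtrix tracks\n'
        b'count: 0000000002\n'
        b'datatype: Float32LE\n'
        b'file: . 67\n'     # '.' means the data follows in this same file
        b'END\n'
    )
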
@@ -384,7 +397,7 @@ def _read_header(cls, fileobj): @classmethod def _read(cls, fileobj, header, buffer_size=4): - """ Return generator that reads TCK data from `fileobj` given `header` + """Return generator that reads TCK data from `fileobj` given `header` Parameters ---------- @@ -403,7 +416,7 @@ def _read(cls, fileobj, header, buffer_size=4): points : ndarray of shape (n_pts, 3) Streamline points """ - dtype = header["_dtype"] + dtype = header['_dtype'] coordinate_size = 3 * dtype.itemsize # Make buffer_size an integer and a multiple of coordinate_size. buffer_size = int(buffer_size * MEGABYTE) @@ -413,7 +426,7 @@ def _read(cls, fileobj, header, buffer_size=4): start_position = f.tell() # Set the file position at the beginning of the data. - f.seek(header["_offset_data"], os.SEEK_SET) + f.seek(header['_offset_data'], os.SEEK_SET) eof = False leftover = np.empty((0, 3), dtype='=' hdr_size = trk_struct['hdr_size'] @@ -197,8 +208,7 @@ def test_load_complex_file_in_big_endian(self): assert hdr_size == 1000 for lazy_load in [False, True]: - trk = TrkFile.load(DATA['complex_trk_big_endian_fname'], - lazy_load=lazy_load) + trk = TrkFile.load(DATA['complex_trk_big_endian_fname'], lazy_load=lazy_load) with pytest.warns(Warning) if lazy_load else error_warnings(): assert_tractogram_equal(trk.tractogram, DATA['complex_tractogram']) @@ -225,8 +235,7 @@ def test_write_empty_file(self): assert trk_file.read() == open(DATA['empty_trk_fname'], 'rb').read() def test_write_simple_file(self): - tractogram = Tractogram(DATA['streamlines'], - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram(DATA['streamlines'], affine_to_rasmm=np.eye(4)) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -244,9 +253,9 @@ def test_write_simple_file(self): def test_write_complex_file(self): # With scalars - tractogram = Tractogram(DATA['streamlines'], - data_per_point=DATA['data_per_point'], - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], data_per_point=DATA['data_per_point'], affine_to_rasmm=np.eye(4) + ) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -258,9 +267,9 @@ def test_write_complex_file(self): # With properties data_per_streamline = DATA['data_per_streamline'] - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], data_per_streamline=data_per_streamline, affine_to_rasmm=np.eye(4) + ) trk = TrkFile(tractogram) trk_file = BytesIO() @@ -272,10 +281,12 @@ def test_write_complex_file(self): # With scalars and properties data_per_streamline = DATA['data_per_streamline'] - tractogram = Tractogram(DATA['streamlines'], - data_per_point=DATA['data_per_point'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], + data_per_point=DATA['data_per_point'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -292,9 +303,11 @@ def test_write_complex_file(self): assert trk_file.read() == open(DATA['complex_trk_fname'], 'rb').read() def test_load_write_file(self): - for fname in [DATA['empty_trk_fname'], - DATA['simple_trk_fname'], - DATA['complex_trk_fname']]: + for fname in [ + DATA['empty_trk_fname'], + DATA['simple_trk_fname'], + DATA['complex_trk_fname'], + ]: for lazy_load in [False, True]: trk = TrkFile.load(fname, lazy_load=lazy_load) trk_file = BytesIO() @@ -332,7 +345,7 @@ def test_load_write_LPS_file(self): # For TRK file format, the 
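
# The _read generator above streams the data section in bounded buffers.
# Conceptually, TCK data is a flat run of float32 (x, y, z) triplets in which
# a NaN triplet terminates each streamline and an Inf triplet terminates the
# file. A simplified in-memory sketch of that split (the real reader never
# materializes the whole file at once):
import numpy as np

raw = np.array(
    [[0, 0, 0], [1, 0, 0],             # first streamline (2 points)
     [np.nan] * 3,                      # NaN triplet ends a streamline
     [0, 1, 0], [0, 2, 0], [0, 3, 0],   # second streamline (3 points)
     [np.inf] * 3],                     # Inf triplet ends the file
    dtype='<f4',
)

pts = raw[: np.flatnonzero(np.isinf(raw[:, 0]))[0]]  # drop the terminator
breaks = np.flatnonzero(np.isnan(pts[:, 0]))         # streamline boundaries
streamlines = [s[~np.isnan(s[:, 0])] for s in np.split(pts, breaks + 1)]
assert [len(s) for s in streamlines] == [2, 3]
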
default voxel order is LPS. header = copy.deepcopy(trk_LPS.header) - header[Field.VOXEL_ORDER] = b"" + header[Field.VOXEL_ORDER] = b'' trk = TrkFile(trk_LPS.tractogram, header) trk.save(trk_file) @@ -361,7 +374,7 @@ def test_write_optional_header_fields(self): trk_file.seek(0, os.SEEK_SET) new_trk = TrkFile.load(trk_file) - assert "extra" not in new_trk.header + assert 'extra' not in new_trk.header def test_write_too_many_scalars_and_properties(self): # TRK supports up to 10 data_per_point. @@ -369,9 +382,9 @@ def test_write_too_many_scalars_and_properties(self): for i in range(10): data_per_point[f'#{i}'] = DATA['fa'] - tractogram = Tractogram(DATA['streamlines'], - data_per_point=data_per_point, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4) + ) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -384,9 +397,9 @@ def test_write_too_many_scalars_and_properties(self): # More than 10 data_per_point should raise an error. data_per_point[f'#{i + 1}'] = DATA['fa'] - tractogram = Tractogram(DATA['streamlines'], - data_per_point=data_per_point, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4) + ) trk = TrkFile(tractogram) with pytest.raises(ValueError): @@ -397,9 +410,11 @@ def test_write_too_many_scalars_and_properties(self): for i in range(10): data_per_streamline[f'#{i}'] = DATA['mean_torsion'] - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + tractogram = Tractogram( + DATA['streamlines'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk_file = BytesIO() trk = TrkFile(tractogram) @@ -412,8 +427,7 @@ def test_write_too_many_scalars_and_properties(self): # More than 10 data_per_streamline should raise an error. data_per_streamline[f'#{i + 1}'] = DATA['mean_torsion'] - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline) + tractogram = Tractogram(DATA['streamlines'], data_per_streamline=data_per_streamline) trk = TrkFile(tractogram) with pytest.raises(ValueError): @@ -426,10 +440,10 @@ def test_write_scalars_and_properties_name_too_long(self): # So in reality we allow name of 18 characters, otherwise # the name is truncated and warning is issue. for nb_chars in range(22): - data_per_point = {'A'*nb_chars: DATA['colors']} - tractogram = Tractogram(DATA['streamlines'], - data_per_point=data_per_point, - affine_to_rasmm=np.eye(4)) + data_per_point = {'A' * nb_chars: DATA['colors']} + tractogram = Tractogram( + DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4) + ) trk = TrkFile(tractogram) if nb_chars > 18: @@ -438,10 +452,10 @@ def test_write_scalars_and_properties_name_too_long(self): else: trk.save(BytesIO()) - data_per_point = {'A'*nb_chars: DATA['fa']} - tractogram = Tractogram(DATA['streamlines'], - data_per_point=data_per_point, - affine_to_rasmm=np.eye(4)) + data_per_point = {'A' * nb_chars: DATA['fa']} + tractogram = Tractogram( + DATA['streamlines'], data_per_point=data_per_point, affine_to_rasmm=np.eye(4) + ) trk = TrkFile(tractogram) if nb_chars > 20: @@ -456,10 +470,12 @@ def test_write_scalars_and_properties_name_too_long(self): # So in reality we allow name of 18 characters, otherwise # the name is truncated and warning is issue. 
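
# The write tests above all follow the same in-memory round-trip pattern;
# the equivalent minimal usage (streamline shapes and values here are
# arbitrary):
import os
from io import BytesIO

import numpy as np
from nibabel.streamlines import Tractogram
from nibabel.streamlines.trk import TrkFile

streamlines = [np.random.rand(n, 3).astype('f4') for n in (3, 5)]
tractogram = Tractogram(streamlines, affine_to_rasmm=np.eye(4))

trk_file = BytesIO()
TrkFile(tractogram).save(trk_file)
trk_file.seek(0, os.SEEK_SET)

loaded = TrkFile.load(trk_file, lazy_load=False)
assert len(loaded.tractogram.streamlines) == 2
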
for nb_chars in range(22): - data_per_streamline = {'A'*nb_chars: DATA['mean_colors']} - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + data_per_streamline = {'A' * nb_chars: DATA['mean_colors']} + tractogram = Tractogram( + DATA['streamlines'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk = TrkFile(tractogram) if nb_chars > 18: @@ -468,10 +484,12 @@ def test_write_scalars_and_properties_name_too_long(self): else: trk.save(BytesIO()) - data_per_streamline = {'A'*nb_chars: DATA['mean_torsion']} - tractogram = Tractogram(DATA['streamlines'], - data_per_streamline=data_per_streamline, - affine_to_rasmm=np.eye(4)) + data_per_streamline = {'A' * nb_chars: DATA['mean_torsion']} + tractogram = Tractogram( + DATA['streamlines'], + data_per_streamline=data_per_streamline, + affine_to_rasmm=np.eye(4), + ) trk = TrkFile(tractogram) if nb_chars > 20: diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index 5b67af1ab3..cf9a099fe4 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -10,17 +10,17 @@ def is_data_dict(obj): - """ True if `obj` seems to implement the :class:`DataDict` API """ + """True if `obj` seems to implement the :class:`DataDict` API""" return hasattr(obj, 'store') def is_lazy_dict(obj): - """ True if `obj` seems to implement the :class:`LazyDict` API """ + """True if `obj` seems to implement the :class:`LazyDict` API""" return is_data_dict(obj) and callable(list(obj.store.values())[0]) class SliceableDataDict(MutableMapping): - r""" Dictionary for which key access can do slicing on the values. + r"""Dictionary for which key access can do slicing on the values. This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -33,6 +33,7 @@ class SliceableDataDict(MutableMapping): Positional and keyword arguments, passed straight through the ``dict`` constructor. """ + def __init__(self, *args, **kwargs): self.store = dict() self.update(dict(*args, **kwargs)) @@ -73,7 +74,7 @@ def __len__(self): class PerArrayDict(SliceableDataDict): - r""" Dictionary for which key access can do slicing on the values. + r"""Dictionary for which key access can do slicing on the values. This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -93,6 +94,7 @@ class PerArrayDict(SliceableDataDict): Positional and keyword arguments, passed straight through the ``dict`` constructor. """ + def __init__(self, n_rows=0, *args, **kwargs): self.n_rows = n_rows super(PerArrayDict, self).__init__(*args, **kwargs) @@ -102,24 +104,24 @@ def __setitem__(self, key, value): if value.ndim == 1 and value.dtype != object: # Reshape without copy - value.shape = ((len(value), 1)) + value.shape = (len(value), 1) if value.ndim != 2: - raise ValueError("data_per_streamline must be a 2D array.") + raise ValueError('data_per_streamline must be a 2D array.') # We make sure there is the right amount of values if 0 < self.n_rows != len(value): - msg = f"The number of values ({len(value)}) should match n_elements ({self.n_rows})." + msg = f'The number of values ({len(value)}) should match n_elements ({self.n_rows}).' raise ValueError(msg) self.store[key] = value def _extend_entry(self, key, value): - """ Appends the `value` to the entry specified by `key`. 
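
# The SliceableDataDict hunks above extend normal dict access so that a
# non-key index slices every stored array. A toy sketch of that idea
# (illustrative only -- not the nibabel class, which handles more cases):
import numpy as np

class SliceableDict(dict):
    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except (KeyError, TypeError):
            # Fall back to treating `key` as an index into every value.
            return {k: v[key] for k, v in self.items()}

d = SliceableDict(fa=np.arange(5), md=np.arange(5) * 2)
assert list(d[1:3]['fa']) == [1, 2]  # the slice applies to all values
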
""" + """Appends the `value` to the entry specified by `key`.""" self[key] = np.concatenate([self[key], value]) def extend(self, other): - """ Appends the elements of another :class:`PerArrayDict`. + """Appends the elements of another :class:`PerArrayDict`. That is, for each entry in this dictionary, we append the elements coming from the other dictionary at the corresponding entry. @@ -137,11 +139,12 @@ def extend(self, other): ----- The keys in both dictionaries must be the same. """ - if (len(self) > 0 and len(other) > 0 and - sorted(self.keys()) != sorted(other.keys())): - msg = ("Entry mismatched between the two PerArrayDict objects. " - f"This PerArrayDict contains '{sorted(self.keys())}' " - f"whereas the other contains '{sorted(other.keys())}'.") + if len(self) > 0 and len(other) > 0 and sorted(self.keys()) != sorted(other.keys()): + msg = ( + 'Entry mismatched between the two PerArrayDict objects. ' + f"This PerArrayDict contains '{sorted(self.keys())}' " + f"whereas the other contains '{sorted(other.keys())}'." + ) raise ValueError(msg) self.n_rows += other.n_rows @@ -153,7 +156,7 @@ def extend(self, other): class PerArraySequenceDict(PerArrayDict): - """ Dictionary for which key access can do slicing on the values. + """Dictionary for which key access can do slicing on the values. This container behaves like a standard dictionary but extends key access to allow keys for key access to be indices slicing into the contained ndarray @@ -163,29 +166,31 @@ class PerArraySequenceDict(PerArrayDict): sequences matches the number of elements given at the instantiation of the instance. """ + def __setitem__(self, key, value): value = ArraySequence(value) # We make sure there is the right amount of data. if 0 < self.n_rows != value.total_nb_rows: - msg = f"The number of values ({value.total_nb_rows}) should match ({self.n_rows})." + msg = f'The number of values ({value.total_nb_rows}) should match ({self.n_rows}).' raise ValueError(msg) self.store[key] = value def _extend_entry(self, key, value): - """ Appends the `value` to the entry specified by `key`. """ + """Appends the `value` to the entry specified by `key`.""" self[key].extend(value) class LazyDict(MutableMapping): - """ Dictionary of generator functions. + """Dictionary of generator functions. This container behaves like a dictionary but it makes sure its elements are callable objects that it assumes are generator functions yielding values. When getting the element associated with a given key, the element (i.e. a generator function) is first called before being returned. """ + def __init__(self, *args, **kwargs): self.store = dict() # Use the 'update' method to set the keys. @@ -204,9 +209,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if not callable(value): - msg = ("Values in a `LazyDict` must be generator functions." - " These are functions which, when called, return an" - " instantiated generator.") + msg = ( + 'Values in a `LazyDict` must be generator functions.' + ' These are functions which, when called, return an' + ' instantiated generator.' + ) raise TypeError(msg) self.store[key] = value @@ -221,7 +228,7 @@ def __len__(self): class TractogramItem: - """ Class containing information about one streamline. + """Class containing information about one streamline. :class:`TractogramItem` objects have three public attributes: `streamline`, `data_for_streamline`, and `data_for_points`. 
@@ -241,6 +248,7 @@ class TractogramItem: (Nt, Mk), where ``Nt`` is the number of points of this streamline and ``Mk`` is the dimension of the data associated with key ``k``. """ + def __init__(self, streamline, data_for_streamline, data_for_points): self.streamline = np.asarray(streamline) self.data_for_streamline = data_for_streamline @@ -254,7 +262,7 @@ def __len__(self): class Tractogram: - """ Container for streamlines and their data information. + """Container for streamlines and their data information. Streamlines of a tractogram can be in any coordinate system of your choice as long as you provide the correct `affine_to_rasmm` matrix, at @@ -292,10 +300,10 @@ class Tractogram: .. [#] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ - def __init__(self, streamlines=None, - data_per_streamline=None, - data_per_point=None, - affine_to_rasmm=None): + + def __init__( + self, streamlines=None, data_per_streamline=None, data_per_point=None, affine_to_rasmm=None + ): """ Parameters ---------- @@ -341,7 +349,8 @@ def data_per_streamline(self): @data_per_streamline.setter def data_per_streamline(self, value): self._data_per_streamline = PerArrayDict( - len(self.streamlines), {} if value is None else value) + len(self.streamlines), {} if value is None else value + ) @property def data_per_point(self): @@ -350,11 +359,12 @@ def data_per_point(self): @data_per_point.setter def data_per_point(self, value): self._data_per_point = PerArraySequenceDict( - self.streamlines.total_nb_rows, {} if value is None else value) + self.streamlines.total_nb_rows, {} if value is None else value + ) @property def affine_to_rasmm(self): - """ Affine bringing streamlines in this tractogram to RAS+mm. """ + """Affine bringing streamlines in this tractogram to RAS+mm.""" return copy.deepcopy(self._affine_to_rasmm) @affine_to_rasmm.setter @@ -362,8 +372,10 @@ def affine_to_rasmm(self, value): if value is not None: value = np.array(value) if value.shape != (4, 4): - msg = ("Affine matrix has a shape of (4, 4) but a ndarray with " - f"shape {value.shape} was provided instead.") + msg = ( + 'Affine matrix has a shape of (4, 4) but a ndarray with ' + f'shape {value.shape} was provided instead.' + ) raise ValueError(msg) self._affine_to_rasmm = value @@ -386,18 +398,19 @@ def __getitem__(self, idx): if isinstance(idx, (numbers.Integral, np.integer)): return TractogramItem(pts, data_per_streamline, data_per_point) - return Tractogram(pts, data_per_streamline, data_per_point, - affine_to_rasmm=self.affine_to_rasmm) + return Tractogram( + pts, data_per_streamline, data_per_point, affine_to_rasmm=self.affine_to_rasmm + ) def __len__(self): return len(self.streamlines) def copy(self): - """ Returns a copy of this :class:`Tractogram` object. """ + """Returns a copy of this :class:`Tractogram` object.""" return copy.deepcopy(self) def apply_affine(self, affine, lazy=False): - """ Applies an affine transformation on the points of each streamline. + """Applies an affine transformation on the points of each streamline. If `lazy` is not specified, this is performed *in-place*. @@ -438,13 +451,12 @@ def apply_affine(self, affine, lazy=False): if self.affine_to_rasmm is not None: # Update the affine that brings back the streamlines to RASmm. 
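
# nibabel's apply_affine helper, used throughout the hunks above, maps each
# point row-wise as aff[:3, :3] @ p + aff[:3, 3]:
import numpy as np
from nibabel.affines import apply_affine

affine = np.diag([2.0, 2.0, 2.0, 1.0])  # scale voxel indices to mm ...
affine[:3, 3] = [10.0, 0.0, 0.0]        # ... then translate along x
points = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
expected = points * 2.0 + [10.0, 0.0, 0.0]
np.testing.assert_array_equal(apply_affine(affine, points), expected)
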
- self.affine_to_rasmm = np.dot(self.affine_to_rasmm, - np.linalg.inv(affine)) + self.affine_to_rasmm = np.dot(self.affine_to_rasmm, np.linalg.inv(affine)) return self def to_world(self, lazy=False): - """ Brings the streamlines to world space (i.e. RAS+ and mm). + """Brings the streamlines to world space (i.e. RAS+ and mm). If `lazy` is not specified, this is performed *in-place*. @@ -464,14 +476,16 @@ def to_world(self, lazy=False): :class:`Tractogram` object with updated streamlines. """ if self.affine_to_rasmm is None: - msg = ("Streamlines are in a unknown space. This error can be" - " avoided by setting the 'affine_to_rasmm' property.") + msg = ( + 'Streamlines are in a unknown space. This error can be' + " avoided by setting the 'affine_to_rasmm' property." + ) raise ValueError(msg) return self.apply_affine(self.affine_to_rasmm, lazy=lazy) def extend(self, other): - """ Appends the data of another :class:`Tractogram`. + """Appends the data of another :class:`Tractogram`. Data that will be appended includes the streamlines and the content of both dictionaries `data_per_streamline` and `data_per_point`. @@ -506,7 +520,7 @@ def __add__(self, other): class LazyTractogram(Tractogram): - """ Lazy container for streamlines and their data information. + """Lazy container for streamlines and their data information. This container behaves lazily as it uses generator functions to manage streamlines and their data information. This container is thus memory @@ -557,10 +571,10 @@ class LazyTractogram(Tractogram): .. [#] http://nipy.org/nibabel/coordinate_systems.html#naming-reference-spaces .. [#] http://nipy.org/nibabel/coordinate_systems.html#voxel-coordinates-are-in-voxel-space """ - def __init__(self, streamlines=None, - data_per_streamline=None, - data_per_point=None, - affine_to_rasmm=None): + + def __init__( + self, streamlines=None, data_per_streamline=None, data_per_point=None, affine_to_rasmm=None + ): """ Parameters ---------- @@ -589,17 +603,16 @@ def __init__(self, streamlines=None, refers to the center of the voxel. By default, the streamlines are in an unknown space, i.e. affine_to_rasmm is None. """ - super(LazyTractogram, self).__init__(streamlines, - data_per_streamline, - data_per_point, - affine_to_rasmm) + super(LazyTractogram, self).__init__( + streamlines, data_per_streamline, data_per_point, affine_to_rasmm + ) self._nb_streamlines = None self._data = None self._affine_to_apply = np.eye(4) @classmethod def from_tractogram(cls, tractogram): - """ Creates a :class:`LazyTractogram` object from a :class:`Tractogram` object. + """Creates a :class:`LazyTractogram` object from a :class:`Tractogram` object. Parameters ---------- @@ -633,7 +646,7 @@ def _gen(key): @classmethod def from_data_func(cls, data_func): - """ Creates an instance from a generator function. + """Creates an instance from a generator function. The generator function must yield :class:`TractogramItem` objects. @@ -650,7 +663,7 @@ def from_data_func(cls, data_func): New lazy tractogram. 
""" if not callable(data_func): - raise TypeError("`data_func` must be a generator function.") + raise TypeError('`data_func` must be a generator function.') lazy_tractogram = cls() lazy_tractogram._data = data_func @@ -660,8 +673,7 @@ def from_data_func(cls, data_func): # Set data_per_streamline using data_func def _gen(key): - return lambda: (t.data_for_streamline[key] - for t in data_func()) + return lambda: (t.data_for_streamline[key] for t in data_func()) data_per_streamline_keys = first_item.data_for_streamline.keys() for k in data_per_streamline_keys: @@ -690,6 +702,7 @@ def streamlines(self): # Check if we need to apply an affine. if not np.allclose(self._affine_to_apply, np.eye(4)): + def _apply_affine(): for s in streamlines_gen: yield apply_affine(self._affine_to_apply, s) @@ -700,9 +713,11 @@ def _apply_affine(): def _set_streamlines(self, value): if value is not None and not callable(value): - msg = ("`streamlines` must be a generator function. That is a" - " function which, when called, returns an instantiated" - " generator.") + msg = ( + '`streamlines` must be a generator function. That is a' + ' function which, when called, returns an instantiated' + ' generator.' + ) raise TypeError(msg) self._streamlines = value @@ -768,28 +783,33 @@ def __iter__(self): def __len__(self): # Check if we know how many streamlines there are. if self._nb_streamlines is None: - warn("Number of streamlines will be determined manually by looping" - " through the streamlines. If you know the actual number of" - " streamlines, you might want to set it beforehand via" - " `self.header.nb_streamlines`.", Warning) + warn( + 'Number of streamlines will be determined manually by looping' + ' through the streamlines. If you know the actual number of' + ' streamlines, you might want to set it beforehand via' + ' `self.header.nb_streamlines`.', + Warning, + ) # Count the number of streamlines. self._nb_streamlines = sum(1 for _ in self.streamlines) return self._nb_streamlines def copy(self): - """ Returns a copy of this :class:`LazyTractogram` object. """ - tractogram = LazyTractogram(self._streamlines, - self._data_per_streamline, - self._data_per_point, - self.affine_to_rasmm) + """Returns a copy of this :class:`LazyTractogram` object.""" + tractogram = LazyTractogram( + self._streamlines, + self._data_per_streamline, + self._data_per_point, + self.affine_to_rasmm, + ) tractogram._nb_streamlines = self._nb_streamlines tractogram._data = self._data tractogram._affine_to_apply = self._affine_to_apply.copy() return tractogram def apply_affine(self, affine, lazy=True): - """ Applies an affine transformation to the streamlines. + """Applies an affine transformation to the streamlines. The transformation given by the `affine` matrix is applied after any other pending transformations to the streamline points. @@ -809,7 +829,7 @@ def apply_affine(self, affine, lazy=True): transformation to be applied on the streamlines. """ if not lazy: - msg = "LazyTractogram only supports lazy transformations." + msg = 'LazyTractogram only supports lazy transformations.' raise ValueError(msg) tractogram = self.copy() # New instance. @@ -819,12 +839,11 @@ def apply_affine(self, affine, lazy=True): if tractogram.affine_to_rasmm is not None: # Update the affine that brings back the streamlines to RASmm. 
- tractogram.affine_to_rasmm = np.dot(self.affine_to_rasmm, - np.linalg.inv(affine)) + tractogram.affine_to_rasmm = np.dot(self.affine_to_rasmm, np.linalg.inv(affine)) return tractogram def to_world(self, lazy=True): - """ Brings the streamlines to world space (i.e. RAS+ and mm). + """Brings the streamlines to world space (i.e. RAS+ and mm). The transformation is applied after any other pending transformations to the streamline points. @@ -842,8 +861,10 @@ def to_world(self, lazy=True): transformation to be applied on the streamlines. """ if self.affine_to_rasmm is None: - msg = ("Streamlines are in a unknown space. This error can be" - " avoided by setting the 'affine_to_rasmm' property.") + msg = ( + 'Streamlines are in a unknown space. This error can be' + " avoided by setting the 'affine_to_rasmm' property." + ) raise ValueError(msg) return self.apply_affine(self.affine_to_rasmm, lazy=lazy) diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index f8184c8ba9..321ea3d2ad 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -1,4 +1,4 @@ -""" Define abstract interface for Tractogram file classes +"""Define abstract interface for Tractogram file classes """ from abc import ABC, abstractmethod @@ -6,23 +6,23 @@ class ExtensionWarning(Warning): - """ Base class for warnings about tractogram file extension. """ + """Base class for warnings about tractogram file extension.""" class HeaderWarning(Warning): - """ Base class for warnings about tractogram file header. """ + """Base class for warnings about tractogram file header.""" class DataWarning(Warning): - """ Base class for warnings about tractogram file data. """ + """Base class for warnings about tractogram file data.""" class HeaderError(Exception): - """ Raised when a tractogram file header contains invalid information. """ + """Raised when a tractogram file header contains invalid information.""" class DataError(Exception): - """ Raised when data is missing or inconsistent in a tractogram file. """ + """Raised when data is missing or inconsistent in a tractogram file.""" class abstractclassmethod(classmethod): @@ -34,7 +34,7 @@ def __init__(self, callable): class TractogramFile(ABC): - """ Convenience class to encapsulate tractogram file format. """ + """Convenience class to encapsulate tractogram file format.""" def __init__(self, tractogram, header=None): self._tractogram = tractogram @@ -54,12 +54,12 @@ def header(self): @property def affine(self): - """ voxmm -> rasmm affine. """ + """voxmm -> rasmm affine.""" return self.header.get(Field.VOXEL_TO_RASMM) @abstractclassmethod def is_correct_format(cls, fileobj): - """ Checks if the file has the right streamlines file format. + """Checks if the file has the right streamlines file format. Parameters ---------- @@ -78,12 +78,12 @@ def is_correct_format(cls, fileobj): @classmethod def create_empty_header(cls): - """ Returns an empty header for this streamlines file format. """ + """Returns an empty header for this streamlines file format.""" return {} @abstractclassmethod def load(cls, fileobj, lazy_load=True): - """ Loads streamlines from a filename or file-like object. + """Loads streamlines from a filename or file-like object. Parameters ---------- @@ -105,7 +105,7 @@ def load(cls, fileobj, lazy_load=True): @abstractmethod def save(self, fileobj): - """ Saves streamlines to a filename or file-like object. + """Saves streamlines to a filename or file-like object. 
Parameters ---------- diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index 6b45aae122..eb382af4d0 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -1,4 +1,3 @@ - # Definition of trackvis header structure: # http://www.trackvis.org/docs/?subsect=fileformat @@ -13,8 +12,8 @@ import nibabel as nib from nibabel.openers import Opener -from nibabel.volumeutils import (native_code, swapped_code, endian_codes) -from nibabel.orientations import (aff2axcodes, axcodes2ornt) +from nibabel.volumeutils import native_code, swapped_code, endian_codes +from nibabel.orientations import aff2axcodes, axcodes2ornt from .array_sequence import create_arraysequences_from_generator from .tractogram_file import TractogramFile @@ -32,38 +31,38 @@ # coordinates (axes L->R, P->A, I->S). If (0 based) value [3, 3] from # this matrix is 0, this means the matrix is not recorded. # See http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -header_2_dtd = [(Field.MAGIC_NUMBER, 'S6'), - (Field.DIMENSIONS, 'h', 3), - (Field.VOXEL_SIZES, 'f4', 3), - (Field.ORIGIN, 'f4', 3), - (Field.NB_SCALARS_PER_POINT, 'h'), - ('scalar_name', 'S20', MAX_NB_NAMED_SCALARS_PER_POINT), - (Field.NB_PROPERTIES_PER_STREAMLINE, 'h'), - ('property_name', 'S20', - MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE), - (Field.VOXEL_TO_RASMM, 'f4', (4, 4)), # New in version 2. - ('reserved', 'S444'), - (Field.VOXEL_ORDER, 'S4'), - ('pad2', 'S4'), - ('image_orientation_patient', 'f4', 6), - ('pad1', 'S2'), - ('invert_x', 'S1'), - ('invert_y', 'S1'), - ('invert_z', 'S1'), - ('swap_xy', 'S1'), - ('swap_yz', 'S1'), - ('swap_zx', 'S1'), - (Field.NB_STREAMLINES, 'i4'), - ('version', 'i4'), - ('hdr_size', 'i4'), - ] +header_2_dtd = [ + (Field.MAGIC_NUMBER, 'S6'), + (Field.DIMENSIONS, 'h', 3), + (Field.VOXEL_SIZES, 'f4', 3), + (Field.ORIGIN, 'f4', 3), + (Field.NB_SCALARS_PER_POINT, 'h'), + ('scalar_name', 'S20', MAX_NB_NAMED_SCALARS_PER_POINT), + (Field.NB_PROPERTIES_PER_STREAMLINE, 'h'), + ('property_name', 'S20', MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE), + (Field.VOXEL_TO_RASMM, 'f4', (4, 4)), # New in version 2. + ('reserved', 'S444'), + (Field.VOXEL_ORDER, 'S4'), + ('pad2', 'S4'), + ('image_orientation_patient', 'f4', 6), + ('pad1', 'S2'), + ('invert_x', 'S1'), + ('invert_y', 'S1'), + ('invert_z', 'S1'), + ('swap_xy', 'S1'), + ('swap_yz', 'S1'), + ('swap_zx', 'S1'), + (Field.NB_STREAMLINES, 'i4'), + ('version', 'i4'), + ('hdr_size', 'i4'), +] # Full header numpy dtypes header_2_dtype = np.dtype(header_2_dtd) def get_affine_trackvis_to_rasmm(header): - """ Get affine mapping trackvis voxelmm space to RAS+ mm space + """Get affine mapping trackvis voxelmm space to RAS+ mm space The streamlines in a trackvis file are in 'voxelmm' space, where the coordinates refer to the corner of the voxel. @@ -106,7 +105,7 @@ def get_affine_trackvis_to_rasmm(header): # Input header can be dict or structured array if hasattr(vox_order, 'item'): # structured array vox_order = header[Field.VOXEL_ORDER].item() - affine_ornt = "".join(aff2axcodes(header[Field.VOXEL_TO_RASMM])) + affine_ornt = ''.join(aff2axcodes(header[Field.VOXEL_TO_RASMM])) header_ornt = axcodes2ornt(vox_order.decode('latin1').upper()) affine_ornt = axcodes2ornt(affine_ornt) ornt = nib.orientations.ornt_transform(header_ornt, affine_ornt) @@ -125,7 +124,7 @@ def get_affine_rasmm_to_trackvis(header): def encode_value_in_name(value, name, max_name_len=20): - """ Return `name` as fixed-length string, appending `value` as string. 
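
# A quick sanity check on the structured dtype defined above: its fields
# pack to exactly 1000 bytes, matching TrkFile.HEADER_SIZE and the value
# that the 'hdr_size' field must hold in a valid TRK header.
from nibabel.streamlines.trk import TrkFile, header_2_dtype

assert header_2_dtype.itemsize == TrkFile.HEADER_SIZE == 1000
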
+ """Return `name` as fixed-length string, appending `value` as string. Form output from `name` if `value <= 1` else `name` + ``\x00`` + str(value). @@ -157,16 +156,18 @@ def encode_value_in_name(value, name, max_name_len=20): raise ValueError(msg) encoded_name = name if value <= 1 else name + '\x00' + str(value) if len(encoded_name) > max_name_len: - msg = (f"Data information named '{name}' is too long (need to be less" - f" than {max_name_len - (len(str(value)) + 1)} characters " - "when storing more than one value for a given data information.") + msg = ( + f"Data information named '{name}' is too long (need to be less" + f' than {max_name_len - (len(str(value)) + 1)} characters ' + 'when storing more than one value for a given data information.' + ) raise ValueError(msg) # Fill to the end with zeros return encoded_name.ljust(max_name_len, '\x00').encode('latin1') def decode_value_from_name(encoded_name): - """ Decodes a value that has been encoded in the last bytes of a string. + """Decodes a value that has been encoded in the last bytes of a string. Check :func:`encode_value_in_name` to see how the value has been encoded. @@ -194,15 +195,17 @@ def decode_value_from_name(encoded_name): value = int(splits[1]) # Decode value. elif len(splits) > 2: # The remaining bytes are not \x00, raising. - msg = (f"Wrong scalar_name or property_name: '{encoded_name}'. " - "Unused characters should be \\x00.") + msg = ( + f"Wrong scalar_name or property_name: '{encoded_name}'. " + 'Unused characters should be \\x00.' + ) raise HeaderError(msg) return name, value class TrkFile(TractogramFile): - """ Convenience class to encapsulate TRK file format. + """Convenience class to encapsulate TRK file format. Notes ----- @@ -216,7 +219,7 @@ class TrkFile(TractogramFile): """ # Constants - MAGIC_NUMBER = b"TRACK" + MAGIC_NUMBER = b'TRACK' HEADER_SIZE = 1000 SUPPORTS_DATA_PER_POINT = True SUPPORTS_DATA_PER_STREAMLINE = True @@ -241,7 +244,7 @@ def __init__(self, tractogram, header=None): @classmethod def is_correct_format(cls, fileobj): - """ Check if the file is in TRK format. + """Check if the file is in TRK format. 
Parameters ---------- @@ -265,8 +268,7 @@ def is_correct_format(cls, fileobj): @classmethod def _default_structarr(cls, endianness=None): - """ Return an empty compliant TRK header as numpy structured array - """ + """Return an empty compliant TRK header as numpy structured array""" dt = header_2_dtype if endianness is not None: endianness = endian_codes[endianness] @@ -275,10 +277,10 @@ def _default_structarr(cls, endianness=None): # Default values st_arr[Field.MAGIC_NUMBER] = cls.MAGIC_NUMBER - st_arr[Field.VOXEL_SIZES] = np.array((1, 1, 1), dtype="f4") - st_arr[Field.DIMENSIONS] = np.array((1, 1, 1), dtype="h") - st_arr[Field.VOXEL_TO_RASMM] = np.eye(4, dtype="f4") - st_arr[Field.VOXEL_ORDER] = b"RAS" + st_arr[Field.VOXEL_SIZES] = np.array((1, 1, 1), dtype='f4') + st_arr[Field.DIMENSIONS] = np.array((1, 1, 1), dtype='h') + st_arr[Field.VOXEL_TO_RASMM] = np.eye(4, dtype='f4') + st_arr[Field.VOXEL_ORDER] = b'RAS' st_arr['version'] = 2 st_arr['hdr_size'] = cls.HEADER_SIZE @@ -286,14 +288,13 @@ def _default_structarr(cls, endianness=None): @classmethod def create_empty_header(cls, endianness=None): - """ Return an empty compliant TRK header as dict - """ + """Return an empty compliant TRK header as dict""" st_arr = cls._default_structarr(endianness) return dict(zip(st_arr.dtype.names, st_arr.tolist())) @classmethod def load(cls, fileobj, lazy_load=False): - """ Loads streamlines from a filename or file-like object. + """Loads streamlines from a filename or file-like object. Parameters ---------- @@ -357,15 +358,14 @@ def load(cls, fileobj, lazy_load=False): data_per_streamline_slice['properties'] = slice_obj if lazy_load: + def _read(): for pts, scals, props in cls._read(fileobj, hdr): items = data_per_point_slice.items() data_for_points = dict((k, scals[:, v]) for k, v in items) items = data_per_streamline_slice.items() data_for_streamline = dict((k, props[v]) for k, v in items) - yield TractogramItem(pts, - data_for_streamline, - data_for_points) + yield TractogramItem(pts, data_for_streamline, data_for_points) tractogram = LazyTractogram.from_data_func(_read) @@ -381,12 +381,11 @@ def _read(): # Buffer size is in mega bytes. mbytes = size // (1024 * 1024) sizes = [mbytes, 4, 4] - if hdr["nb_scalars_per_point"] > 0: + if hdr['nb_scalars_per_point'] > 0: sizes = [mbytes // 2, mbytes // 2, 4] trk_reader = cls._read(fileobj, hdr) - arr_seqs = create_arraysequences_from_generator(trk_reader, n=3, - buffer_sizes=sizes) + arr_seqs = create_arraysequences_from_generator(trk_reader, n=3, buffer_sizes=sizes) streamlines, scalars, properties = arr_seqs properties = np.asarray(properties) # Actually a 2d array. tractogram = Tractogram(streamlines) @@ -403,7 +402,7 @@ def _read(): return cls(tractogram, header=hdr) def save(self, fileobj): - """ Save tractogram to a filename or file-like object using TRK format. + """Save tractogram to a filename or file-like object using TRK format. Parameters ---------- @@ -422,8 +421,8 @@ def save(self, fileobj): # By default, the voxel order is LPS. # http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates - if header[Field.VOXEL_ORDER] == b"": - header[Field.VOXEL_ORDER] = b"LPS" + if header[Field.VOXEL_ORDER] == b'': + header[Field.VOXEL_ORDER] = b'LPS' # Keep counts for correcting incoherent fields or warn. 
nb_streamlines = 0 @@ -431,15 +430,15 @@ def save(self, fileobj): nb_scalars = 0 nb_properties = 0 - with Opener(fileobj, mode="wb") as f: + with Opener(fileobj, mode='wb') as f: # Keep track of the beginning of the header. beginning = f.tell() # Write temporary header that we will update at the end f.write(header.tobytes()) - i4_dtype = np.dtype(" MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE: - msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} named " - "data_per_streamline (also known as 'properties' in the " - "TRK format).") + msg = ( + f'Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} named ' + "data_per_streamline (also known as 'properties' in the " + 'TRK format).' + ) raise ValueError(msg) data_for_streamline_keys = sorted(data_for_streamline.keys()) - property_name = np.zeros(MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE, - dtype='S20') + property_name = np.zeros(MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE, dtype='S20') for i, name in enumerate(data_for_streamline_keys): # Append number of values as ascii to zero-terminated name # to encode number of values into trackvis name. @@ -489,9 +489,11 @@ def save(self, fileobj): # Update field 'scalar_name' using 'tractogram.data_per_point'. data_for_points = first_item.data_for_points if len(data_for_points) > MAX_NB_NAMED_SCALARS_PER_POINT: - msg = (f"Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} " - "named data_per_point (also known as 'scalars' in " - "the TRK format).") + msg = ( + f'Can only store {MAX_NB_NAMED_SCALARS_PER_POINT} ' + "named data_per_point (also known as 'scalars' in " + 'the TRK format).' + ) raise ValueError(msg) data_for_points_keys = sorted(data_for_points.keys()) @@ -504,23 +506,27 @@ def save(self, fileobj): header['scalar_name'][:] = scalar_name for t in tractogram: - if any((len(d) != len(t.streamline) - for d in t.data_for_points.values())): - raise DataError("Missing scalars for some points!") + if any((len(d) != len(t.streamline) for d in t.data_for_points.values())): + raise DataError('Missing scalars for some points!') points = np.asarray(t.streamline) - scalars = [np.asarray(t.data_for_points[k]) - for k in data_for_points_keys] - scalars = np.concatenate([np.ndarray((len(points), 0),) - ] + scalars, axis=1) - properties = [np.asarray(t.data_for_streamline[k]) - for k in data_for_streamline_keys] - properties = np.concatenate( - [np.array([])] + properties).astype(f4_dtype) + scalars = [np.asarray(t.data_for_points[k]) for k in data_for_points_keys] + scalars = np.concatenate( + [ + np.ndarray( + (len(points), 0), + ) + ] + + scalars, + axis=1, + ) + properties = [ + np.asarray(t.data_for_streamline[k]) for k in data_for_streamline_keys + ] + properties = np.concatenate([np.array([])] + properties).astype(f4_dtype) data = struct.pack(i4_dtype.str[:-1], len(points)) - pts_scalars = np.concatenate( - [points, scalars], axis=1).astype(f4_dtype) + pts_scalars = np.concatenate([points, scalars], axis=1).astype(f4_dtype) data += pts_scalars.tobytes() data += properties.tobytes() f.write(data) @@ -536,12 +542,11 @@ def save(self, fileobj): # Check for errors if nb_scalars_per_point != int(nb_scalars_per_point): - msg = "Nb. of scalars differs from one point to another!" + msg = 'Nb. of scalars differs from one point to another!' raise DataError(msg) if nb_properties_per_streamline != int(nb_properties_per_streamline): - msg = ("Nb. of properties differs from one streamline to" - " another!") + msg = 'Nb. of properties differs from one streamline to' ' another!' 
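
# Each TRK record written above is: an int32 point count, then float32
# (x, y, z[, scalars...]) rows, then float32 per-streamline properties.
# A little-endian sketch of packing one record:
import struct
import numpy as np

def pack_trk_record(points, properties=()):
    pts = np.asarray(points, dtype='<f4')
    data = struct.pack('<i', len(pts))  # number of points
    data += pts.tobytes()               # point (and scalar) rows
    data += np.asarray(properties, dtype='<f4').tobytes()
    return data

rec = pack_trk_record([[0, 0, 0], [1, 1, 1]], properties=[0.5])
assert len(rec) == 4 + 2 * 3 * 4 + 4
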
raise DataError(msg) header[Field.NB_STREAMLINES] = nb_streamlines @@ -554,7 +559,7 @@ def save(self, fileobj): @staticmethod def _read_header(fileobj): - """ Reads a TRK header from a file. + """Reads a TRK header from a file. Parameters ---------- @@ -586,21 +591,26 @@ def _read_header(fileobj): # Swap byte order header_rec = header_rec.newbyteorder() if header_rec['hdr_size'] != TrkFile.HEADER_SIZE: - msg = (f"Invalid hdr_size: {header_rec['hdr_size']} " - f"instead of {TrkFile.HEADER_SIZE}") + msg = ( + f"Invalid hdr_size: {header_rec['hdr_size']} " + f'instead of {TrkFile.HEADER_SIZE}' + ) raise HeaderError(msg) if header_rec['version'] == 1: # There is no 4x4 matrix for voxel to RAS transformation. header_rec[Field.VOXEL_TO_RASMM] = np.zeros((4, 4)) elif header_rec['version'] == 3: - warnings.warn('Parsing a TRK v3 file as v2. Some features may not ' - 'be handled correctly.', HeaderWarning) + warnings.warn( + 'Parsing a TRK v3 file as v2. Some features may not ' 'be handled correctly.', + HeaderWarning, + ) elif header_rec['version'] in (2, 3): pass # Nothing more to do. else: - raise HeaderError('NiBabel only supports versions 1 and 2 of ' - 'the Trackvis file format') + raise HeaderError( + 'NiBabel only supports versions 1 and 2 of ' 'the Trackvis file format' + ) # Convert the first record of `header_rec` into a dictionary header = dict(zip(header_rec.dtype.names, header_rec[0])) @@ -609,26 +619,35 @@ def _read_header(fileobj): # If vox_to_ras[3][3] is 0, it means the matrix is not recorded. if header[Field.VOXEL_TO_RASMM][3][3] == 0: header[Field.VOXEL_TO_RASMM] = np.eye(4, dtype=np.float32) - warnings.warn(("Field 'vox_to_ras' in the TRK's header was" - " not recorded. Will continue assuming it's" - " the identity."), HeaderWarning) + warnings.warn( + ( + "Field 'vox_to_ras' in the TRK's header was" + " not recorded. Will continue assuming it's" + ' the identity.' + ), + HeaderWarning, + ) # Check that the 'vox_to_ras' affine is valid, i.e. should be # able to determine the axis directions. axcodes = aff2axcodes(header[Field.VOXEL_TO_RASMM]) if None in axcodes: - msg = ("The 'vox_to_ras' affine is invalid! Could not" - " determine the axis directions from it.\n" - f"{header[Field.VOXEL_TO_RASMM]}") + msg = ( + "The 'vox_to_ras' affine is invalid! Could not" + ' determine the axis directions from it.\n' + f'{header[Field.VOXEL_TO_RASMM]}' + ) raise HeaderError(msg) # By default, the voxel order is LPS. # http://trackvis.org/blog/forum/diffusion-toolkit-usage/interpretation-of-track-point-coordinates - if header[Field.VOXEL_ORDER] == b"": - msg = ("Voxel order is not specified, will assume 'LPS' since" - " it is Trackvis software's default.") + if header[Field.VOXEL_ORDER] == b'': + msg = ( + "Voxel order is not specified, will assume 'LPS' since" + " it is Trackvis software's default." + ) warnings.warn(msg, HeaderWarning) - header[Field.VOXEL_ORDER] = b"LPS" + header[Field.VOXEL_ORDER] = b'LPS' # Keep the file position where the data begin. 
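
# The header reader above detects byte order from 'hdr_size': read it in
# native order and byte-swap if it is not 1000. A sketch on the raw bytes
# (offset 996 is derived from the header_2_dtype field layout, where
# 'hdr_size' is the final int32 of the 1000-byte header):
import numpy as np

def detect_byte_order(raw_header):
    hdr_size = np.frombuffer(raw_header[996:1000], dtype='<i4')[0]
    if hdr_size == 1000:
        return '<'  # little-endian
    if hdr_size.byteswap() == 1000:
        return '>'  # big-endian
    raise ValueError(f'Invalid hdr_size: {hdr_size}')

assert detect_byte_order(b'\x00' * 996 + (1000).to_bytes(4, 'little')) == '<'
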
header['_offset_data'] = f.tell() @@ -641,7 +660,7 @@ def _read_header(fileobj): @staticmethod def _read(fileobj, header): - """ Return generator that reads TRK data from `fileobj` given `header` + """Return generator that reads TRK data from `fileobj` given `header` Parameters ---------- @@ -663,20 +682,19 @@ def _read(fileobj, header): * scalars: ndarray of shape (n_pts, nb_scalars_per_point) * properties: ndarray of shape (nb_properties_per_point,) """ - i4_dtype = np.dtype(header[Field.ENDIANNESS] + "i4") - f4_dtype = np.dtype(header[Field.ENDIANNESS] + "f4") + i4_dtype = np.dtype(header[Field.ENDIANNESS] + 'i4') + f4_dtype = np.dtype(header[Field.ENDIANNESS] + 'f4') with Opener(fileobj) as f: start_position = f.tell() - nb_pts_and_scalars = int(3 + - header[Field.NB_SCALARS_PER_POINT]) + nb_pts_and_scalars = int(3 + header[Field.NB_SCALARS_PER_POINT]) pts_and_scalars_size = int(nb_pts_and_scalars * f4_dtype.itemsize) nb_properties = header[Field.NB_PROPERTIES_PER_STREAMLINE] properties_size = int(nb_properties * f4_dtype.itemsize) # Set the file position at the beginning of the data. - f.seek(header["_offset_data"], os.SEEK_SET) + f.seek(header['_offset_data'], os.SEEK_SET) # If 'count' field is 0, i.e. not provided, we have to loop # until the EOF. @@ -700,16 +718,16 @@ def _read(fileobj, header): points_and_scalars = np.ndarray( shape=(nb_pts, nb_pts_and_scalars), dtype=f4_dtype, - buffer=f.read(nb_pts * pts_and_scalars_size)) + buffer=f.read(nb_pts * pts_and_scalars_size), + ) points = points_and_scalars[:, :3] scalars = points_and_scalars[:, 3:] # Read properties properties = np.ndarray( - shape=(nb_properties,), - dtype=f4_dtype, - buffer=f.read(properties_size)) + shape=(nb_properties,), dtype=f4_dtype, buffer=f.read(properties_size) + ) yield points, scalars, properties count += 1 @@ -721,7 +739,7 @@ def _read(fileobj, header): f.seek(start_position, os.SEEK_CUR) def __str__(self): - """ Gets a formatted string of the header of a TRK file. + """Gets a formatted string of the header of a TRK file. Returns ------- @@ -736,19 +754,14 @@ def __str__(self): vars[attr] = vars[hdr_field] nb_scalars = self.header[Field.NB_SCALARS_PER_POINT] - scalar_names = [asstr(s) - for s in vars['scalar_name'][:nb_scalars] - if len(s) > 0] + scalar_names = [asstr(s) for s in vars['scalar_name'][:nb_scalars] if len(s) > 0] vars['scalar_names'] = '\n '.join(scalar_names) nb_properties = self.header[Field.NB_PROPERTIES_PER_STREAMLINE] - property_names = [asstr(s) - for s in vars['property_name'][:nb_properties] - if len(s) > 0] - vars['property_names'] = "\n ".join(property_names) + property_names = [asstr(s) for s in vars['property_name'][:nb_properties] if len(s) > 0] + vars['property_names'] = '\n '.join(property_names) # Make all byte strings into strings # Fixes recursion error on Python 3.3 - vars = dict((k, asstr(v) if hasattr(v, 'decode') else v) - for k, v in vars.items()) + vars = dict((k, asstr(v) if hasattr(v, 'decode') else v) for k, v in vars.items()) return """\ MAGIC NUMBER: {MAGIC_NUMBER} v.{version} @@ -771,4 +784,6 @@ def __str__(self): swap_yz: {swap_yz} swap_zx: {swap_zx} n_count: {NB_STREAMLINES} -hdr_size: {hdr_size}""".format(**vars) +hdr_size: {hdr_size}""".format( + **vars + ) diff --git a/nibabel/streamlines/utils.py b/nibabel/streamlines/utils.py index 085179da9e..80764700f2 100644 --- a/nibabel/streamlines/utils.py +++ b/nibabel/streamlines/utils.py @@ -4,7 +4,7 @@ def get_affine_from_reference(ref): - """ Returns the affine defining the reference space. 
+ """Returns the affine defining the reference space. Parameters ---------- @@ -24,7 +24,7 @@ def get_affine_from_reference(ref): if hasattr(ref, 'shape'): if ref.shape != (4, 4): - msg = "`ref` needs to be a numpy array with shape (4, 4)!" + msg = '`ref` needs to be a numpy array with shape (4, 4)!' raise ValueError(msg) return ref @@ -34,7 +34,7 @@ def get_affine_from_reference(ref): def peek_next(iterable): - """ Peek next element of iterable. + """Peek next element of iterable. Parameters ---------- diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 8c9411ec91..44cc82890b 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utilities for testing """ +"""Utilities for testing""" import re import os @@ -34,7 +34,7 @@ def test_data(subdir=None, fname=None): elif subdir in ('gifti', 'nicom', 'externals'): resource = os.path.join(subdir, 'tests', 'data') else: - raise ValueError(f"Unknown test data directory: {subdir}") + raise ValueError(f'Unknown test data directory: {subdir}') if fname is not None: resource = os.path.join(resource, fname) @@ -47,7 +47,7 @@ def test_data(subdir=None, fname=None): def assert_dt_equal(a, b): - """ Assert two numpy dtype specifiers are equal + """Assert two numpy dtype specifiers are equal Avoids failed comparison between int32 / int64 and intp """ @@ -55,8 +55,7 @@ def assert_dt_equal(a, b): def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): - """ Allclose in integers go all wrong for large integers - """ + """Allclose in integers go all wrong for large integers""" a = np.atleast_1d(a) # 0d arrays cannot be indexed a, b = np.broadcast_arrays(a, b) if match_nans: @@ -78,21 +77,20 @@ def assert_allclose_safely(a, b, match_nans=True, rtol=1e-5, atol=1e-8): def assert_arrays_equal(arrays1, arrays2): - """ Check two iterables yield the same sequence of arrays. """ + """Check two iterables yield the same sequence of arrays.""" for arr1, arr2 in zip_longest(arrays1, arrays2, fillvalue=None): - assert (arr1 is not None and arr2 is not None) + assert arr1 is not None and arr2 is not None assert_array_equal(arr1, arr2) def assert_re_in(regex, c, flags=0): - """Assert that container (list, str, etc) contains entry matching the regex - """ + """Assert that container (list, str, etc) contains entry matching the regex""" if not isinstance(c, (list, tuple)): c = [c] for e in c: if re.match(regex, e, flags=flags): return - raise AssertionError(f"Not a single entry matched {regex!r} in {c!r}") + raise AssertionError(f'Not a single entry matched {regex!r} in {c!r}') def get_fresh_mod(mod_name=__name__): @@ -106,7 +104,7 @@ def get_fresh_mod(mod_name=__name__): class clear_and_catch_warnings(warnings.catch_warnings): - """ Context manager that resets warning registry for catching warnings + """Context manager that resets warning registry for catching warnings Warnings can be slippery, because, whenever a warning is triggered, Python adds a ``__warningregistry__`` member to the *calling* module. This makes @@ -146,6 +144,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): ... warnings.simplefilter('always') ... 
# do something that raises a warning in np.core.fromnumeric """ + class_modules = () def __init__(self, record=True, modules=()): @@ -171,7 +170,7 @@ def __exit__(self, *exc_info): class error_warnings(clear_and_catch_warnings): - """ Context manager to check for warnings as errors. Usually used with + """Context manager to check for warnings as errors. Usually used with ``assert_raises`` in the with block Examples @@ -183,6 +182,7 @@ class error_warnings(clear_and_catch_warnings): ... print('I consider myself warned') I consider myself warned """ + filter = 'error' def __enter__(self): @@ -192,8 +192,8 @@ def __enter__(self): class suppress_warnings(error_warnings): - """ Version of ``catch_warnings`` class that suppresses warnings - """ + """Version of ``catch_warnings`` class that suppresses warnings""" + filter = 'ignore' @@ -202,12 +202,11 @@ class suppress_warnings(error_warnings): def runif_extra_has(test_str): """Decorator checks to see if NIPY_EXTRA_TESTS env var contains test_str""" - return unittest.skipUnless(test_str in EXTRA_SET, f"Skip {test_str} tests.") + return unittest.skipUnless(test_str in EXTRA_SET, f'Skip {test_str} tests.') def assert_arr_dict_equal(dict1, dict2): - """ Assert that two dicts are equal, where dicts contain arrays - """ + """Assert that two dicts are equal, where dicts contain arrays""" assert set(dict1) == set(dict2) for key, value1 in dict1.items(): value2 = dict2[key] @@ -215,19 +214,20 @@ def assert_arr_dict_equal(dict1, dict2): class BaseTestCase(unittest.TestCase): - """ TestCase that does not attempt to run if prefixed with a ``_`` + """TestCase that does not attempt to run if prefixed with a ``_`` This restores the nose-like behavior of skipping so-named test cases in test runners like pytest. """ + def setUp(self): if self.__class__.__name__.startswith('_'): - raise unittest.SkipTest("Base test case - subclass to run") + raise unittest.SkipTest('Base test case - subclass to run') super().setUp() def expires(version): - "Decorator to mark a test as xfail with ExpiredDeprecationError after version" + """Decorator to mark a test as xfail with ExpiredDeprecationError after version""" from packaging.version import Version from nibabel import __version__ as nbver from nibabel.deprecator import ExpiredDeprecationError diff --git a/nibabel/testing/helpers.py b/nibabel/testing/helpers.py index 49112fddfb..35b13049f1 100644 --- a/nibabel/testing/helpers.py +++ b/nibabel/testing/helpers.py @@ -1,17 +1,18 @@ -""" Helper functions for tests +"""Helper functions for tests """ from io import BytesIO import numpy as np from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy.io') from numpy.testing import assert_array_equal def bytesio_filemap(klass): - """ Return bytes io filemap for this image class `klass` """ + """Return bytes io filemap for this image class `klass`""" file_map = klass.make_file_map() for name, fileholder in file_map.items(): fileholder.fileobj = BytesIO() @@ -20,8 +21,7 @@ def bytesio_filemap(klass): def bytesio_round_trip(img): - """ Save then load image from bytesio - """ + """Save then load image from bytesio""" klass = img.__class__ bytes_map = bytesio_filemap(klass) img.to_file_map(bytes_map) @@ -29,7 +29,7 @@ def bytesio_round_trip(img): def assert_data_similar(arr, params): - """ Check data is the same if recorded, otherwise check summaries + """Check data is the same if recorded, otherwise check summaries Helper function to test image array data `arr` against record in `params`, where record can be the 
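
# error_warnings above promotes warnings to exceptions inside the context,
# as in its docstring:
import warnings
from nibabel.testing import error_warnings

with error_warnings():
    try:
        warnings.warn('this is promoted to an exception')
    except UserWarning:
        print('I consider myself warned')
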
array itself, or summary values from the array. @@ -50,4 +50,5 @@ def assert_data_similar(arr, params): real_arr = np.asarray(arr) assert np.allclose( (real_arr.min(), real_arr.max(), real_arr.mean()), - (summary['min'], summary['max'], summary['mean'])) + (summary['min'], summary['max'], summary['mean']), + ) diff --git a/nibabel/testing/np_features.py b/nibabel/testing/np_features.py index 8919542d1c..c0739a8502 100644 --- a/nibabel/testing/np_features.py +++ b/nibabel/testing/np_features.py @@ -1,11 +1,11 @@ -""" Look for changes in numpy behavior over versions +"""Look for changes in numpy behavior over versions """ import numpy as np def memmap_after_ufunc(): - """ Return True if ufuncs on memmap arrays always return memmap arrays + """Return True if ufuncs on memmap arrays always return memmap arrays This should be True for numpy < 1.12, False otherwise. @@ -20,4 +20,5 @@ def memmap_after_ufunc(): memmap_after_ufunc.result = isinstance(mm_arr + 1, np.memmap) return memmap_after_ufunc.result + memmap_after_ufunc.result = None diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index ffee1f3829..17b36bd6dd 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -1,4 +1,4 @@ -""" Test differences in affines by reslicing +"""Test differences in affines by reslicing Should be run from directory containing .PAR _and_ matching .REC files from Michael's PAR / REC dataset at: @@ -38,13 +38,10 @@ def resample_img2img(img_to, img_from, order=1, out_class=nib.Nifti1Image): raise Exception('Scipy must be installed to run resample_img2img.') from scipy import ndimage as spnd + vox2vox = npl.inv(img_from.affine).dot(img_to.affine) rzs, trans = to_matvec(vox2vox) - data = spnd.affine_transform(img_from.get_fdata(), - rzs, - trans, - img_to.shape, - order=order) + data = spnd.affine_transform(img_from.get_fdata(), rzs, trans, img_to.shape, order=order) return out_class(data, img_to.affine) @@ -56,14 +53,14 @@ def gmean_norm(data): if __name__ == '__main__': np.set_printoptions(suppress=True, precision=4) - normal_fname = "Phantom_EPI_3mm_tra_SENSE_6_1.PAR" + normal_fname = 'Phantom_EPI_3mm_tra_SENSE_6_1.PAR' normal_img = parrec.load(normal_fname) normal_data = normal_img.get_fdata() normal_normed = gmean_norm(normal_data) - print(f"RMS of standard image {normal_fname:<44}: {np.sqrt(np.sum(normal_normed ** 2))}") + print(f'RMS of standard image {normal_fname:<44}: {np.sqrt(np.sum(normal_normed ** 2))}') - for parfile in glob.glob("*.PAR"): + for parfile in glob.glob('*.PAR'): if parfile == normal_fname: continue funny_img = parrec.load(parfile) diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 477e687224..2d736fb445 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -1,4 +1,4 @@ -""" Generate mask and testing tractogram in known formats: +"""Generate mask and testing tractogram in known formats: * mask: standard.nii.gz * tractogram: @@ -13,7 +13,7 @@ def mark_the_spot(mask): - """ Marks every nonzero voxel using streamlines to form a 3D 'X' inside. + """Marks every nonzero voxel using streamlines to form a 3D 'X' inside. Generates streamlines forming a 3D 'X' inside every nonzero voxel. @@ -27,6 +27,7 @@ def mark_the_spot(mask): list of ndarrays All streamlines needed to mark every nonzero voxel in the `mask`. 
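
# bytesio_round_trip above saves an image to in-memory file objects and
# loads it back, which lets format tests avoid touching disk:
import numpy as np
import nibabel as nib
from nibabel.testing.helpers import bytesio_round_trip

img = nib.Nifti1Image(np.zeros((2, 3, 4), dtype=np.float32), np.eye(4))
rt_img = bytesio_round_trip(img)
assert rt_img.shape == img.shape
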
""" + def _gen_straight_streamline(start, end, steps=3): coords = [] for s, e in zip(start, end): @@ -57,11 +58,11 @@ def _gen_straight_streamline(start, end, steps=3): height = 5 # Sagittal depth = 7 # Axial - voxel_size = np.array((1., 3., 2.)) + voxel_size = np.array((1.0, 3.0, 2.0)) # Generate a random mask with voxel order RAS+. mask = rng.rand(width, height, depth) > 0.8 - mask = (255*mask).astype(np.uint8) + mask = (255 * mask).astype(np.uint8) # Build tractogram streamlines = mark_the_spot(mask) @@ -70,16 +71,18 @@ def _gen_straight_streamline(start, end, steps=3): # Build header affine = np.eye(4) affine[range(3), range(3)] = voxel_size - header = {Field.DIMENSIONS: (width, height, depth), - Field.VOXEL_SIZES: voxel_size, - Field.VOXEL_TO_RASMM: affine, - Field.VOXEL_ORDER: 'RAS'} + header = { + Field.DIMENSIONS: (width, height, depth), + Field.VOXEL_SIZES: voxel_size, + Field.VOXEL_TO_RASMM: affine, + Field.VOXEL_ORDER: 'RAS', + } # Save the standard mask. nii = nib.Nifti1Image(mask, affine=affine) - nib.save(nii, "standard.nii.gz") + nib.save(nii, 'standard.nii.gz') # Save the standard tractogram in every available file format. for ext, cls in FORMATS.items(): tfile = cls(tractogram, header) - nib.streamlines.save(tfile, "standard" + ext) + nib.streamlines.save(tfile, 'standard' + ext) diff --git a/nibabel/tests/data/make_moved_anat.py b/nibabel/tests/data/make_moved_anat.py index ec0817885c..aee20eda97 100644 --- a/nibabel/tests/data/make_moved_anat.py +++ b/nibabel/tests/data/make_moved_anat.py @@ -1,4 +1,4 @@ -""" Make anatomical image with altered affine +"""Make anatomical image with altered affine * Add some rotations and translations to affine; * Save as ``.nii`` file so SPM can read it. @@ -16,8 +16,6 @@ img = nib.load('anatomical.nii') some_rotations = euler2mat(0.1, 0.2, 0.3) extra_affine = from_matvec(some_rotations, [3, 4, 5]) - moved_anat = nib.Nifti1Image(img.dataobj, - extra_affine.dot(img.affine), - img.header) + moved_anat = nib.Nifti1Image(img.dataobj, extra_affine.dot(img.affine), img.header) moved_anat.set_data_dtype(np.float32) nib.save(moved_anat, 'anat_moved.nii') diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 663d7845a8..06e5540674 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -1,4 +1,4 @@ -""" Functions / decorators for finding / requiring nibabel-data directory +"""Functions / decorators for finding / requiring nibabel-data directory """ from os import environ, listdir @@ -8,7 +8,7 @@ def get_nibabel_data(): - """ Return path to nibabel-data or empty string if missing + """Return path to nibabel-data or empty string if missing First use ``NIBABEL_DATA_DIR`` environment variable. 
@@ -24,7 +24,7 @@ def get_nibabel_data(): def needs_nibabel_data(subdir=None): - """ Decorator for tests needing nibabel-data + """Decorator for tests needing nibabel-data Parameters ---------- @@ -39,11 +39,10 @@ def needs_nibabel_data(subdir=None): """ nibabel_data = get_nibabel_data() if nibabel_data == '': - return unittest.skip("Need nibabel-data directory for this test") + return unittest.skip('Need nibabel-data directory for this test') if subdir is None: return lambda x: x required_path = pjoin(nibabel_data, subdir) # Path should not be empty (as is the case for not-updated submodules) have_files = exists(required_path) and len(listdir(required_path)) > 0 - return unittest.skipUnless(have_files, - f"Need files in {required_path} for these tests") + return unittest.skipUnless(have_files, f'Need files in {required_path} for these tests') diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 1bffd01929..474eeceb2c 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -1,4 +1,4 @@ -""" Module to help tests check script output +"""Module to help tests check script output Provides class to be instantiated in tests that check scripts. Usually works something like this in a test module:: @@ -14,7 +14,7 @@ """ import sys import os -from os.path import (dirname, join as pjoin, isfile, isdir, realpath, pathsep) +from os.path import dirname, join as pjoin, isfile, isdir, realpath, pathsep from subprocess import Popen, PIPE @@ -23,8 +23,7 @@ def local_script_dir(script_sdir): - """ Get local script directory if running in development dir, else None - """ + """Get local script directory if running in development dir, else None""" # Check for presence of scripts in development directory. ``realpath`` # allows for the situation where the development directory has been linked # into the path. @@ -37,8 +36,7 @@ def local_script_dir(script_sdir): def local_module_dir(module_name): - """ Get local module directory if running in development dir, else None - """ + """Get local module directory if running in development dir, else None""" mod = __import__(module_name) containing_path = dirname(dirname(realpath(mod.__file__))) if containing_path == realpath(os.getcwd()): @@ -47,19 +45,20 @@ def local_module_dir(module_name): class ScriptRunner: - """ Class to run scripts and return output + """Class to run scripts and return output Finds local scripts and local modules if running in the development directory, otherwise finds system scripts and modules. """ - def __init__(self, - script_sdir='scripts', - module_sdir=MY_PACKAGE, - debug_print_var=None, - output_processor=lambda x: x - ): - """ Init ScriptRunner instance + def __init__( + self, + script_sdir='scripts', + module_sdir=MY_PACKAGE, + debug_print_var=None, + output_processor=lambda x: x, + ): + """Init ScriptRunner instance Parameters ---------- @@ -85,7 +84,7 @@ def __init__(self, self.output_processor = output_processor def run_command(self, cmd, check_code=True): - """ Run command sequence `cmd` returning exit code, stdout, stderr + """Run command sequence `cmd` returning exit code, stdout, stderr Parameters ---------- @@ -113,8 +112,7 @@ def run_command(self, cmd, check_code=True): # Unix, we might have the wrong incantation for the Python interpreter # in the hash bang first line in the source file. 
So, either way, run # the script through the Python interpreter - cmd = [sys.executable, - pjoin(self.local_script_dir, cmd[0])] + cmd[1:] + cmd = [sys.executable, pjoin(self.local_script_dir, cmd[0])] + cmd[1:] if os.name == 'nt': # Quote any arguments with spaces. The quotes delimit the arguments # on Windows, and the arguments might be file paths with spaces. @@ -146,6 +144,7 @@ def run_command(self, cmd, check_code=True): stderr ------ {stderr} - """) + """ + ) opp = self.output_processor return proc.returncode, opp(stdout), opp(stderr) diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 0209ff3e69..08166df6e8 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -6,14 +6,22 @@ import numpy as np from ..eulerangles import euler2mat -from ..affines import (AffineError, apply_affine, append_diag, to_matvec, - from_matvec, dot_reduce, voxel_sizes, obliquity, rescale_affine) +from ..affines import ( + AffineError, + apply_affine, + append_diag, + to_matvec, + from_matvec, + dot_reduce, + voxel_sizes, + obliquity, + rescale_affine, +) from ..orientations import aff2axcodes import pytest -from numpy.testing import assert_array_equal, assert_almost_equal, \ - assert_array_almost_equal +from numpy.testing import assert_array_equal, assert_almost_equal, assert_array_almost_equal def validated_apply_affine(T, xyz): @@ -33,11 +41,9 @@ def test_apply_affine(): rng = np.random.RandomState(20110903) aff = np.diag([2, 3, 4, 1]) pts = rng.uniform(size=(4, 3)) - assert_array_equal(apply_affine(aff, pts), - pts * [[2, 3, 4]]) + assert_array_equal(apply_affine(aff, pts), pts * [[2, 3, 4]]) aff[:3, 3] = [10, 11, 12] - assert_array_equal(apply_affine(aff, pts), - pts * [[2, 3, 4]] + [[10, 11, 12]]) + assert_array_equal(apply_affine(aff, pts), pts * [[2, 3, 4]] + [[10, 11, 12]]) aff[:3, :] = rng.normal(size=(3, 4)) exp_res = np.concatenate((pts.T, np.ones((1, 4))), axis=0) exp_res = np.dot(aff, exp_res)[:3, :].T @@ -103,35 +109,29 @@ def test_matrix_vector(): def test_append_diag(): # Routine for appending diagonal elements - assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), - np.diag([2, 3, 1, 1])) - assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), - np.diag([2, 3, 1, 1, 1])) - aff = np.array([[2, 0, 0], - [0, 3, 0], - [0, 0, 1], - [0, 0, 1]]) - assert_array_equal(append_diag(aff, [5], [9]), - [[2, 0, 0, 0], - [0, 3, 0, 0], - [0, 0, 0, 1], - [0, 0, 5, 9], - [0, 0, 0, 1]]) - assert_array_equal(append_diag(aff, [5, 6], [9, 10]), - [[2, 0, 0, 0, 0], - [0, 3, 0, 0, 0], - [0, 0, 0, 0, 1], - [0, 0, 5, 0, 9], - [0, 0, 0, 6, 10], - [0, 0, 0, 0, 1]]) - aff = np.array([[2, 0, 0, 0], - [0, 3, 0, 0], - [0, 0, 0, 1]]) - assert_array_equal(append_diag(aff, [5], [9]), - [[2, 0, 0, 0, 0], - [0, 3, 0, 0, 0], - [0, 0, 0, 5, 9], - [0, 0, 0, 0, 1]]) + assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), np.diag([2, 3, 1, 1])) + assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), np.diag([2, 3, 1, 1, 1])) + aff = np.array([[2, 0, 0], [0, 3, 0], [0, 0, 1], [0, 0, 1]]) + assert_array_equal( + append_diag(aff, [5], [9]), + [[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1], [0, 0, 5, 9], [0, 0, 0, 1]], + ) + assert_array_equal( + append_diag(aff, [5, 6], [9, 10]), + [ + [2, 0, 0, 0, 0], + [0, 3, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 0, 5, 0, 9], + [0, 0, 0, 6, 10], + [0, 0, 0, 0, 1], + ], + ) + aff = np.array([[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1]]) + assert_array_equal( + append_diag(aff, [5], [9]), + [[2, 0, 0, 0, 0], [0, 3, 0, 0, 0], [0, 0, 0, 5, 9], 
[0, 0, 0, 0, 1]], + ) # Length of starts has to match length of steps with pytest.raises(AffineError): append_diag(aff, [5, 6], [9]) @@ -152,10 +152,15 @@ def test_dot_reduce(): assert_array_equal(dot_reduce(vec, mat), np.dot(vec, mat)) assert_array_equal(dot_reduce(mat, vec), np.dot(mat, vec)) mat2 = np.arange(13, 22).reshape((3, 3)) - assert_array_equal(dot_reduce(mat2, vec, mat), - np.dot(mat2, np.dot(vec, mat))) - assert_array_equal(dot_reduce(mat, vec, mat2, ), - np.dot(mat, np.dot(vec, mat2))) + assert_array_equal(dot_reduce(mat2, vec, mat), np.dot(mat2, np.dot(vec, mat))) + assert_array_equal( + dot_reduce( + mat, + vec, + mat2, + ), + np.dot(mat, np.dot(vec, mat2)), + ) def test_voxel_sizes(): @@ -177,8 +182,7 @@ def test_voxel_sizes(): new_row = np.vstack((np.zeros(n + 1), aff)) assert_almost_equal(voxel_sizes(new_row), vox_sizes) new_col = np.c_[np.zeros(n + 1), aff] - assert_almost_equal(voxel_sizes(new_col), - [0] + list(vox_sizes)) + assert_almost_equal(voxel_sizes(new_col), [0] + list(vox_sizes)) if n < 3: continue # Rotations do not change the voxel size @@ -192,13 +196,13 @@ def test_voxel_sizes(): def test_obliquity(): """Check the calculation of inclination of an affine axes.""" from math import pi + aligned = np.diag([2.0, 2.0, 2.3, 1.0]) aligned[:-1, -1] = [-10, -10, -7] R = from_matvec(euler2mat(x=0.09, y=0.001, z=0.001), [0.0, 0.0, 0.0]) oblique = R.dot(aligned) assert_almost_equal(obliquity(aligned), [0.0, 0.0, 0.0]) - assert_almost_equal(obliquity(oblique) * 180 / pi, - [0.0810285, 5.1569949, 5.1569376]) + assert_almost_equal(obliquity(oblique) * 180 / pi, [0.0810285, 5.1569949, 5.1569376]) def test_rescale_affine(): diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 7f32e2d8a7..2cea69413f 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test Analyze headers +"""Test Analyze headers See test_wrapstruct.py for tests of the wrapped structarr-ness of the Analyze header @@ -21,8 +21,7 @@ import numpy as np from io import BytesIO, StringIO -from ..spatialimages import (HeaderDataError, HeaderTypeError, - supported_np_types) +from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types from ..analyze import AnalyzeHeader, AnalyzeImage from ..nifti1 import Nifti1Header from ..loadsave import read_img_data @@ -33,15 +32,20 @@ from ..optpkg import optional_package import pytest -from numpy.testing import (assert_array_equal, assert_array_almost_equal) +from numpy.testing import assert_array_equal, assert_array_almost_equal -from ..testing import (data_path, suppress_warnings, assert_dt_equal, - bytesio_filemap, bytesio_round_trip) +from ..testing import ( + data_path, + suppress_warnings, + assert_dt_equal, + bytesio_filemap, + bytesio_round_trip, +) from . import test_wrapstruct as tws from . 
import test_spatialimages as tsi -HAVE_ZSTD = optional_package("pyzstd")[1] +HAVE_ZSTD = optional_package('pyzstd')[1] header_file = os.path.join(data_path, 'analyze.hdr') @@ -60,12 +64,7 @@ class TestAnalyzeHeader(tws._TestLabeledWrapStruct): header_class = AnalyzeHeader example_file = header_file sizeof_hdr = AnalyzeHeader.sizeof_hdr - supported_np_types = set((np.uint8, - np.int16, - np.int32, - np.float32, - np.float64, - np.complex64)) + supported_np_types = set((np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64)) add_intp(supported_np_types) def test_supported_types(self): @@ -88,8 +87,7 @@ def test_general_init(self): # is True (which it is by default). We have to be careful of the # translations though - these arise from SPM's use of the origin # field, and the center of the image. - assert_array_equal(np.diag(hdr.get_base_affine()), - [-1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) # But zooms only go with number of dimensions assert hdr.get_zooms() == (1.0,) @@ -144,8 +142,10 @@ def test_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 30) assert fhdr['sizeof_hdr'] == self.sizeof_hdr - assert (message == f'sizeof_hdr should be {self.sizeof_hdr}; ' - f'set sizeof_hdr to {self.sizeof_hdr}') + assert ( + message == f'sizeof_hdr should be {self.sizeof_hdr}; ' + f'set sizeof_hdr to {self.sizeof_hdr}' + ) pytest.raises(*raiser) # RGB datatype does not raise error hdr = HC() @@ -195,8 +195,10 @@ def test_pixdim_log_checks(self): fhdr, message, raiser = self.log_chk(hdr, 35) assert fhdr['pixdim'][1] == 1 assert fhdr['pixdim'][2] == 2 - assert message == ('pixdim[1,2,3] should be non-zero and pixdim[1,2,3] should be ' - 'positive; setting 0 dims to 1 and setting to abs of pixdim values') + assert message == ( + 'pixdim[1,2,3] should be non-zero and pixdim[1,2,3] should be ' + 'positive; setting 0 dims to 1 and setting to abs of pixdim values' + ) pytest.raises(*raiser) def test_no_scaling_fixes(self): @@ -238,8 +240,9 @@ def test_logger_error(self): # Check log message appears in new logger imageglobals.logger = logger hdr.copy().check_fix() - assert str_io.getvalue() == ('bitpix does not match datatype; ' - 'setting bitpix to match datatype\n') + assert str_io.getvalue() == ( + 'bitpix does not match datatype; ' 'setting bitpix to match datatype\n' + ) # Check that error_level in fact causes error to be raised imageglobals.error_level = 10 with pytest.raises(HeaderDataError): @@ -250,15 +253,15 @@ def test_logger_error(self): def test_data_dtype(self): # check getting and setting of data type # codes / types supported by all binary headers - all_supported_types = ((2, np.uint8), - (4, np.int16), - (8, np.int32), - (16, np.float32), - (32, np.complex64), - (64, np.float64), - (128, np.dtype([('R', 'u1'), - ('G', 'u1'), - ('B', 'u1')]))) + all_supported_types = ( + (2, np.uint8), + (4, np.int16), + (8, np.int32), + (16, np.float32), + (32, np.complex64), + (64, np.float64), + (128, np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])), + ) # and unsupported - here using some labels instead all_unsupported_types = (np.void, 'none', 'all', 0) @@ -266,6 +269,7 @@ def assert_set_dtype(dt_spec, np_dtype): hdr = self.header_class() hdr.set_data_dtype(dt_spec) assert_dt_equal(hdr.get_data_dtype(), np_dtype) + # Test code, type known to be supported by all types for code, npt in all_supported_types: # Can set with code value @@ -461,23 +465,19 @@ def test_data_shape_zooms_affine(self): hdr.set_data_shape((1, 2, 3)) 
assert_array_equal(hdr.get_zooms(), (4, 5, 1)) # Setting zooms changes affine - assert_array_equal(np.diag(hdr.get_base_affine()), - [-4, 5, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-4, 5, 1, 1]) hdr.set_zooms((1, 1, 1)) - assert_array_equal(np.diag(hdr.get_base_affine()), - [-1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) def test_default_x_flip(self): hdr = self.header_class() hdr.default_x_flip = True hdr.set_data_shape((1, 2, 3)) hdr.set_zooms((1, 1, 1)) - assert_array_equal(np.diag(hdr.get_base_affine()), - [-1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [-1, 1, 1, 1]) hdr.default_x_flip = False # Check avoids translations - assert_array_equal(np.diag(hdr.get_base_affine()), - [1, 1, 1, 1]) + assert_array_equal(np.diag(hdr.get_base_affine()), [1, 1, 1, 1]) def test_from_eg_file(self): fileobj = open(self.example_file, 'rb') @@ -524,12 +524,15 @@ def test_from_header(self): assert hdr is not copy class C: + def get_data_dtype(self): + return np.dtype('i2') - def get_data_dtype(self): return np.dtype('i2') + def get_data_shape(self): + return (5, 4, 3) - def get_data_shape(self): return (5, 4, 3) + def get_zooms(self): + return (10.0, 9.0, 8.0) - def get_zooms(self): return (10.0, 9.0, 8.0) converted = klass.from_header(C()) assert isinstance(converted, klass) assert converted.get_data_dtype() == np.dtype('i2') @@ -544,24 +547,33 @@ def test_base_affine(self): assert hdr.default_x_flip assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr.set_data_shape((3, 5)) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -0.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr.set_data_shape((3, 5, 7)) assert_array_almost_equal( hdr.get_base_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) def test_scaling(self): # Test integer scaling from float @@ -598,15 +610,17 @@ def test_scaling(self): def test_slope_inter(self): hdr = self.header_class() assert hdr.get_slope_inter() == (None, None) - for slinter in ((None,), - (None, None), - (np.nan, np.nan), - (np.nan, None), - (None, np.nan), - (1.0,), - (1.0, None), - (None, 0), - (1.0, 0)): + for slinter in ( + (None,), + (None, None), + (np.nan, np.nan), + (np.nan, None), + (None, np.nan), + (1.0,), + (1.0, None), + (None, 0), + (1.0, 0), + ): hdr.set_slope_inter(*slinter) assert hdr.get_slope_inter() == (None, None) with pytest.raises(HeaderTypeError): @@ -621,27 +635,28 @@ def test_from_analyze_map(self): class H1: pass + with pytest.raises(AttributeError): klass.from_header(H1()) class H2: - def get_data_dtype(self): return np.dtype('u1') + with pytest.raises(AttributeError): klass.from_header(H2()) class H3(H2): - def get_data_shape(self): return (2, 3, 4) + with pytest.raises(AttributeError): klass.from_header(H3()) class H4(H3): - def get_zooms(self): - return 4., 5., 6. 
+ return 4.0, 5.0, 6.0 + exp_hdr = klass() exp_hdr.set_data_dtype(np.dtype('u1')) exp_hdr.set_data_shape((2, 3, 4)) @@ -650,30 +665,29 @@ def get_zooms(self): # cal_max, cal_min get properly set from ``as_analyze_map`` class H5(H4): - def as_analyze_map(self): return dict(cal_min=-100, cal_max=100) + exp_hdr['cal_min'] = -100 exp_hdr['cal_max'] = 100 assert klass.from_header(H5()) == exp_hdr # set_* methods override fields from header class H6(H5): - def as_analyze_map(self): - return dict(datatype=4, bitpix=32, - cal_min=-100, cal_max=100) + return dict(datatype=4, bitpix=32, cal_min=-100, cal_max=100) + assert klass.from_header(H6()) == exp_hdr # Any mapping will do, including a Nifti header class H7(H5): - def as_analyze_map(self): n_hdr = Nifti1Header() n_hdr.set_data_dtype(np.dtype('i2')) n_hdr['cal_min'] = -100 n_hdr['cal_max'] = 100 return n_hdr + # Values from methods still override values from header (shape, dtype, # zooms still at defaults from n_hdr header fields above) assert klass.from_header(H7()) == exp_hdr diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 54c1c0fd95..2382847da4 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,16 +1,17 @@ -""" Metaclass and class for validating instance APIs +"""Metaclass and class for validating instance APIs """ import os import pytest class validator2test(type): - """ Wrap ``validator_*`` methods with test method testing instances + """Wrap ``validator_*`` methods with test method testing instances * Find methods with names starting with 'validate_' * Create test method with same name * Test method iterates, running validate method over all obj, param pairs """ + def __new__(mcs, name, bases, dict): klass = type.__new__(mcs, name, bases, dict) @@ -18,11 +19,13 @@ def make_test(name, validator): def meth(self): for imaker, params in self.obj_params(): validator(self, imaker, params) - meth.__name__ = 'test_' + name[len('validate_'):] + + meth.__name__ = 'test_' + name[len('validate_') :] meth.__doc__ = f'autogenerated test from {klass.__name__}.{name}' if hasattr(validator, 'pytestmark'): meth.pytestmark = validator.pytestmark return meth + for name in dir(klass): if not name.startswith('validate_'): continue @@ -33,7 +36,7 @@ def meth(self): class ValidateAPI(metaclass=validator2test): - """ A class to validate APIs + """A class to validate APIs Your job is twofold: @@ -53,10 +56,10 @@ class ValidateAPI(metaclass=validator2test): class TestValidateSomething(ValidateAPI): - """ Example implementing an API validator test class """ + """Example implementing an API validator test class""" def obj_params(self): - """ Iterator returning (obj, params) pairs + """Iterator returning (obj, params) pairs ``obj`` is some instance for which we want to check the API. @@ -64,8 +67,8 @@ def obj_params(self): against ``obj``. See the :meth:`validate_something` method for an example. 
""" - class C: + class C: def __init__(self, var): self.var = var @@ -76,7 +79,7 @@ def get_var(self): yield C('easypeasy'), {'var': 'easypeasy'} def validate_something(self, obj, params): - """ Do some checks of the `obj` API against `params` + """Do some checks of the `obj` API against `params` The metaclass sets up a ``test_something`` function that runs these checks on each ( @@ -86,14 +89,15 @@ def validate_something(self, obj, params): @pytest.mark.xfail( - os.getenv("PYTEST_XDIST_WORKER") is not None, - reason="Execution in the same scope cannot be guaranteed" + os.getenv('PYTEST_XDIST_WORKER') is not None, + reason='Execution in the same scope cannot be guaranteed', ) class TestRunAllTests(ValidateAPI): - """ Class to test that each validator test gets run + """Class to test that each validator test gets run We check this in the module teardown function """ + run_tests = [] def obj_params(self): diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index eb296b516f..e4d16e7dd8 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for arrayproxy module +"""Tests for arrayproxy module """ import warnings @@ -21,7 +21,7 @@ import numpy as np from .. import __version__ -from ..arrayproxy import (ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype) +from ..arrayproxy import ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype from ..openers import ImageOpener from ..nifti1 import Nifti1Header from ..deprecator import ExpiredDeprecationError @@ -37,7 +37,6 @@ class FunkyHeader: - def __init__(self, shape): self.shape = shape @@ -114,18 +113,14 @@ def test_tuplespec(): bio.write(arr.tobytes(order='F')) # Create equivalent header and tuple specs hdr = FunkyHeader(shape) - tuple_spec = (hdr.get_data_shape(), hdr.get_data_dtype(), - hdr.get_data_offset(), 1., 0.) 
+ tuple_spec = (hdr.get_data_shape(), hdr.get_data_dtype(), hdr.get_data_offset(), 1.0, 0.0) ap_header = ArrayProxy(bio, hdr) ap_tuple = ArrayProxy(bio, tuple_spec) # Header and tuple specs produce identical behavior for prop in ('shape', 'dtype', 'offset', 'slope', 'inter', 'is_proxy'): assert getattr(ap_header, prop) == getattr(ap_tuple, prop) - for method, args in (('get_unscaled', ()), ('__array__', ()), - ('__getitem__', ((0, 2, 1), )) - ): - assert_array_equal(getattr(ap_header, method)(*args), - getattr(ap_tuple, method)(*args)) + for method, args in (('get_unscaled', ()), ('__array__', ()), ('__getitem__', ((0, 2, 1),))): + assert_array_equal(getattr(ap_header, method)(*args), getattr(ap_tuple, method)(*args)) # Partial tuples of length 2-4 are also valid for n in range(2, 5): ArrayProxy(bio, tuple_spec[:n]) @@ -167,8 +162,8 @@ def test_nifti1_init(): assert_array_equal(np.asarray(ap), arr * 2.0 + 10) -@pytest.mark.parametrize("n_dim", (1, 2, 3)) -@pytest.mark.parametrize("offset", (0, 20)) +@pytest.mark.parametrize('n_dim', (1, 2, 3)) +@pytest.mark.parametrize('offset', (0, 20)) def test_proxy_slicing(n_dim, offset): shape = (15, 16, 17)[:n_dim] arr = np.arange(np.prod(shape)).reshape(shape) @@ -203,7 +198,7 @@ def test_proxy_slicing_with_scaling(): assert_array_equal(arr[sliceobj] * 2.0 + 1.0, prox[sliceobj]) -@pytest.mark.parametrize("order", ("C", "F")) +@pytest.mark.parametrize('order', ('C', 'F')) def test_order_override(order): shape = (15, 16, 17) arr = np.arange(np.prod(shape)).reshape(shape) @@ -260,6 +255,7 @@ def test_is_proxy(): class NP: is_proxy = False + assert not is_proxy(NP()) @@ -272,21 +268,17 @@ def test_reshape_dataobj(): arr = np.arange(np.prod(shape), dtype=prox.dtype).reshape(shape) bio.write(b'\x00' * prox.offset + arr.tobytes(order='F')) assert_array_equal(prox, arr) - assert_array_equal(reshape_dataobj(prox, (2, 3, 4)), - np.reshape(arr, (2, 3, 4))) + assert_array_equal(reshape_dataobj(prox, (2, 3, 4)), np.reshape(arr, (2, 3, 4))) assert prox.shape == shape assert arr.shape == shape - assert_array_equal(reshape_dataobj(arr, (2, 3, 4)), - np.reshape(arr, (2, 3, 4))) + assert_array_equal(reshape_dataobj(arr, (2, 3, 4)), np.reshape(arr, (2, 3, 4))) assert arr.shape == shape class ArrGiver: - def __array__(self): return arr - assert_array_equal(reshape_dataobj(ArrGiver(), (2, 3, 4)), - np.reshape(arr, (2, 3, 4))) + assert_array_equal(reshape_dataobj(ArrGiver(), (2, 3, 4)), np.reshape(arr, (2, 3, 4))) assert arr.shape == shape @@ -326,15 +318,16 @@ def test_get_obj_dtype(): class ArrGiver: def __array__(self): return arr + assert get_obj_dtype(ArrGiver()) == np.dtype('int16') def test_get_unscaled(): # Test fetch of raw array class FunkyHeader2(FunkyHeader): - def get_slope_inter(self): return 2.1, 3.14 + shape = (2, 3, 4) hdr = FunkyHeader2(shape) bio = BytesIO() @@ -354,10 +347,8 @@ def test_mmap(): check_mmap(hdr, hdr.get_data_offset(), ArrayProxy) -def check_mmap(hdr, offset, proxy_class, - has_scaling=False, - unscaled_is_view=True): - """ Assert that array proxies return memory maps as expected +def check_mmap(hdr, offset, proxy_class, has_scaling=False, unscaled_is_view=True): + """Assert that array proxies return memory maps as expected Parameters ---------- @@ -392,14 +383,15 @@ def check_mmap(hdr, offset, proxy_class, fobj.write(b' ' * offset) fobj.write(arr.tobytes(order='F')) for mmap, expected_mode in ( - # mmap value, expected memmap mode - # mmap=None -> no mmap value - # expected mode=None -> no memmap returned - (None, 'c'), - (True, 'c'), - 
('c', 'c'), - ('r', 'r'), - (False, None)): + # mmap value, expected memmap mode + # mmap=None -> no mmap value + # expected mode=None -> no memmap returned + (None, 'c'), + (True, 'c'), + ('c', 'c'), + ('r', 'r'), + (False, None), + ): kwargs = {} if mmap is not None: kwargs['mmap'] = mmap @@ -407,7 +399,7 @@ def check_mmap(hdr, offset, proxy_class, unscaled = prox.get_unscaled() back_data = np.asanyarray(prox) unscaled_is_mmap = isinstance(unscaled, np.memmap) - back_is_mmap = isinstance(back_data, np.memmap) + back_is_mmap = isinstance(back_data, np.memmap) if expected_mode is None: assert not unscaled_is_mmap assert not back_is_mmap @@ -431,6 +423,7 @@ def check_mmap(hdr, offset, proxy_class, # created class CountingImageOpener(ImageOpener): num_openers = 0 + def __init__(self, *args, **kwargs): super(CountingImageOpener, self).__init__(*args, **kwargs) CountingImageOpener.num_openers += 1 @@ -472,32 +465,32 @@ def test_keep_file_open_true_false_invalid(): # - expected value for internal ArrayProxy._keep_file_open flag tests = [ # open file handle - kfo and have_igzip are both irrelevant - ('open', False, False, False, False), - ('open', False, True, False, False), - ('open', True, False, False, False), - ('open', True, True, False, False), + ('open', False, False, False, False), + ('open', False, True, False, False), + ('open', True, False, False, False), + ('open', True, True, False, False), # non-gzip file - have_igzip is irrelevant, decision should be made # solely from kfo flag - ('bin', False, False, False, False), - ('bin', False, True, False, False), - ('bin', True, False, True, True), - ('bin', True, True, True, True), + ('bin', False, False, False, False), + ('bin', False, True, False, False), + ('bin', True, False, True, True), + ('bin', True, True, True, True), # gzip file. If igzip is present, we persist the ImageOpener. 
- ('gz', False, False, False, False), - ('gz', False, True, True, False), - ('gz', True, False, True, True), - ('gz', True, True, True, True), - ] + ('gz', False, False, False, False), + ('gz', False, True, True, False), + ('gz', True, False, True, True), + ('gz', True, True, True, True), + ] dtype = np.float32 - data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) + data = np.arange(1000, dtype=dtype).reshape((10, 10, 10)) voxels = np.random.randint(0, 10, (10, 3)) for test in tests: filetype, kfo, have_igzip, exp_persist, exp_kfo = test - with InTemporaryDirectory(), \ - mock.patch('nibabel.openers.ImageOpener', CountingImageOpener), \ - patch_indexed_gzip(have_igzip): + with InTemporaryDirectory(), mock.patch( + 'nibabel.openers.ImageOpener', CountingImageOpener + ), patch_indexed_gzip(have_igzip): fname = f'testdata.{filetype}' # create the test data file if filetype == 'gz': @@ -516,8 +509,7 @@ def test_keep_file_open_true_false_invalid(): fobj1 = fname fobj2 = fname try: - proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), - keep_file_open=kfo) + proxy = ArrayProxy(fobj1, ((10, 10, 10), dtype), keep_file_open=kfo) # We also test that we get the same behaviour when the # KEEP_FILE_OPEN_DEFAULT flag is changed with patch_keep_file_open_default(kfo): @@ -560,8 +552,7 @@ def test_keep_file_open_true_false_invalid(): for invalid_kfo in (55, 'auto', 'cauto'): with pytest.raises(ValueError): - ArrayProxy(fname, ((10, 10, 10), dtype), - keep_file_open=invalid_kfo) + ArrayProxy(fname, ((10, 10, 10), dtype), keep_file_open=invalid_kfo) with patch_keep_file_open_default(invalid_kfo): with pytest.raises(ValueError): ArrayProxy(fname, ((10, 10, 10), dtype)) diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index de55cd334b..1fbaa38916 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -1,4 +1,4 @@ -""" Testing array writer objects +"""Testing array writer objects See docstring of :mod:`nibabel.arraywriters` for API. 
""" @@ -8,9 +8,15 @@ import numpy as np from io import BytesIO -from ..arraywriters import (SlopeInterArrayWriter, SlopeArrayWriter, - WriterError, ScalingError, ArrayWriter, - make_array_writer, get_slope_inter) +from ..arraywriters import ( + SlopeInterArrayWriter, + SlopeArrayWriter, + WriterError, + ScalingError, + ArrayWriter, + make_array_writer, + get_slope_inter, +) from ..casting import int_abs, type_info, shared_range, on_powerpc from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max @@ -101,13 +107,11 @@ def test_arraywriter_check_scaling(): def test_no_scaling(): # Test arraywriter when writing different types without scaling for in_dtype, out_dtype, awt in itertools.product( - NUMERIC_TYPES, - NUMERIC_TYPES, - (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): + NUMERIC_TYPES, NUMERIC_TYPES, (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter) + ): mn_in, mx_in = _dt_min_max(in_dtype) arr = np.array([mn_in, 0, 1, mx_in], dtype=in_dtype) - kwargs = (dict(check_scaling=False) if awt == ArrayWriter - else dict(calc_scale=False)) + kwargs = dict(check_scaling=False) if awt == ArrayWriter else dict(calc_scale=False) aw = awt(arr, out_dtype, **kwargs) with suppress_warnings(): back_arr = round_trip(aw) @@ -127,8 +131,7 @@ def test_no_scaling(): exp_back = np.clip(exp_back, 0, 1) else: # Clip to shared range of working precision - exp_back = np.clip(exp_back, - *shared_range(float, out_dtype)) + exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) else: # iu input and output type # No scaling, never gets converted to float. # Does get clipped to range of output type @@ -136,9 +139,7 @@ def test_no_scaling(): if (mn_in, mx_in) != (mn_out, mx_out): # Use smaller of input, output range to avoid np.clip # upcasting the array because of large clip limits. 
- exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) + exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out)) elif in_dtype in COMPLEX_TYPES: # always cast to real from complex with suppress_warnings(): @@ -244,16 +245,14 @@ def test_special_rt(): ArrayWriter(in_arr, out_dtt) aw = ArrayWriter(in_arr, out_dtt, check_scaling=False) mn, mx = shared_range(float, out_dtt) - assert np.allclose(round_trip(aw).astype(float), - [mx, 0, mn]) + assert np.allclose(round_trip(aw).astype(float), [mx, 0, mn]) for klass in (SlopeArrayWriter, SlopeInterArrayWriter): aw = klass(in_arr, out_dtt) assert get_slope_inter(aw) == (1, 0) assert_array_equal(round_trip(aw), 0) for in_dtt, out_dtt, awt in itertools.product( - FLOAT_TYPES, - IUINT_TYPES, - (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)): + FLOAT_TYPES, IUINT_TYPES, (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter) + ): arr = np.zeros((3,), dtype=in_dtt) aw = awt(arr, out_dtt) assert get_slope_inter(aw) == (1, 0) @@ -365,8 +364,10 @@ def test_calculate_scale(): def test_resets(): # Test reset of values, caching of scales - for klass, inp, outp in ((SlopeInterArrayWriter, (1, 511), (2.0, 1.0)), - (SlopeArrayWriter, (0, 510), (2.0, 0.0))): + for klass, inp, outp in ( + (SlopeInterArrayWriter, (1, 511), (2.0, 1.0)), + (SlopeArrayWriter, (0, 510), (2.0, 0.0)), + ): arr = np.array(inp) outp = np.array(outp) aw = klass(arr, np.uint8) @@ -390,13 +391,15 @@ def test_no_offset_scale(): # Specific tests of no-offset scaling SAW = SlopeArrayWriter # Floating point - for data in ((-128, 127), - (-128, 126), - (-128, -127), - (-128, 0), - (-128, -1), - (126, 127), - (-127, 127)): + for data in ( + (-128, 127), + (-128, 126), + (-128, -127), + (-128, 0), + (-128, -1), + (126, 127), + (-127, 127), + ): aw = SAW(np.array(data, dtype=np.float32), np.int8) assert aw.slope == 1.0 aw = SAW(np.array([-126, 127 * 2.0], dtype=np.float32), np.int8) @@ -404,7 +407,7 @@ def test_no_offset_scale(): aw = SAW(np.array([-128 * 2.0, 127], dtype=np.float32), np.int8) assert aw.slope == 2 # Test that nasty abs behavior does not upset us - n = -2**15 + n = -(2**15) aw = SAW(np.array([n, n], dtype=np.int16), np.uint8) assert_array_almost_equal(aw.slope, n / 255.0, 5) @@ -431,22 +434,17 @@ def test_io_scaling(): # and from float to integer. bio = BytesIO() for in_type, out_type in itertools.product( - (np.int16, np.uint16, np.float32), - (np.int8, np.uint8, np.int16, np.uint16)): + (np.int16, np.uint16, np.float32), (np.int8, np.uint8, np.int16, np.uint16) + ): out_dtype = np.dtype(out_type) info = type_info(in_type) imin, imax = info['min'], info['max'] if imin == 0: # unsigned int - val_tuples = ((0, imax), - (100, imax)) + val_tuples = ((0, imax), (100, imax)) else: - val_tuples = ((imin, 0, imax), - (imin, 0), - (0, imax), - (imin, 100, imax)) + val_tuples = ((imin, 0, imax), (imin, 0), (0, imax), (imin, 100, imax)) if imin != 0: - val_tuples += ((imin, 0), - (0, imax)) + val_tuples += ((imin, 0), (0, imax)) for vals in val_tuples: arr = np.array(vals, dtype=in_type) aw = SlopeInterArrayWriter(arr, out_dtype) @@ -455,7 +453,7 @@ def test_io_scaling(): arr3 = apply_read_scaling(arr2, aw.slope, aw.inter) # Max rounding error for integer type # Slope might be negative - max_miss = np.abs(aw.slope) / 2. 
+ max_miss = np.abs(aw.slope) / 2.0 abs_err = np.abs(arr - arr3) assert np.all(abs_err <= max_miss) if out_type in UINT_TYPES and 0 in (min(arr), max(arr)): @@ -471,16 +469,14 @@ def test_input_ranges(): bio = BytesIO() working_type = np.float32 work_eps = np.finfo(working_type).eps - for out_type, offset in itertools.product( - IUINT_TYPES, - range(-1000, 1000, 100)): + for out_type, offset in itertools.product(IUINT_TYPES, range(-1000, 1000, 100)): aw = SlopeInterArrayWriter(arr, out_type) aw.to_fileobj(bio) arr2 = array_from_file(arr.shape, out_type, bio) arr3 = apply_read_scaling(arr2, aw.slope, aw.inter) # Max rounding error for integer type # Slope might be negative - max_miss = np.abs(aw.slope) / working_type(2.) + work_eps * 10 + max_miss = np.abs(aw.slope) / working_type(2.0) + work_eps * 10 abs_err = np.abs(arr - arr3) max_err = np.abs(arr) * work_eps + max_miss assert np.all(abs_err <= max_err) @@ -496,10 +492,12 @@ def test_nan2zero(): # nan2zero as argument to `to_fileobj` deprecated, raises error if not the # same as input nan2zero - meaning that by default, nan2zero of False will # raise an error. - arr = np.array([np.nan, 99.], dtype=np.float32) - for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)), - (SlopeArrayWriter, dict(calc_scale=False)), - (SlopeInterArrayWriter, dict(calc_scale=False))): + arr = np.array([np.nan, 99.0], dtype=np.float32) + for awt, kwargs in ( + (ArrayWriter, dict(check_scaling=False)), + (SlopeArrayWriter, dict(calc_scale=False)), + (SlopeInterArrayWriter, dict(calc_scale=False)), + ): # nan2zero default is True # nan2zero ignored for floats aw = awt(arr, np.float32, **kwargs) @@ -527,8 +525,7 @@ def test_byte_orders(): dt = np.dtype(tp) for code in '<>': ndt = dt.newbyteorder(code) - for klass in (SlopeInterArrayWriter, SlopeArrayWriter, - ArrayWriter): + for klass in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter): aw = klass(arr, ndt) data_back = round_trip(aw) assert_array_almost_equal(arr, data_back) @@ -568,8 +565,7 @@ def test_to_float(): arr[-1] = mx for out_type in CFLOAT_TYPES: out_info = type_info(out_type) - for klass in (SlopeInterArrayWriter, SlopeArrayWriter, - ArrayWriter): + for klass in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter): if in_type in COMPLEX_TYPES and out_type in FLOAT_TYPES: with pytest.raises(WriterError): klass(arr, out_type) @@ -678,8 +674,7 @@ def test_int_int_slope(): if kinds in ('ii', 'uu', 'ui'): arrs = (np.array([iinf.min, iinf.max], dtype=in_dt),) elif kinds == 'iu': - arrs = (np.array([iinf.min, 0], dtype=in_dt), - np.array([0, iinf.max], dtype=in_dt)) + arrs = (np.array([iinf.min, 0], dtype=in_dt), np.array([0, iinf.max], dtype=in_dt)) for arr in arrs: try: aw = SlopeArrayWriter(arr, out_dt) @@ -696,17 +691,14 @@ def test_int_int_slope(): def test_float_int_spread(): # Test rounding error for spread of values powers = np.arange(-10, 10, 0.5) - arr = np.concatenate((-10**powers, 10**powers)) + arr = np.concatenate((-(10**powers), 10**powers)) for in_dt in (np.float32, np.float64): arr_t = arr.astype(in_dt) for out_dt in IUINT_TYPES: aw = SlopeInterArrayWriter(arr_t, out_dt) arr_back_sc = round_trip(aw) # Get estimate for error - max_miss = rt_err_estimate(arr_t, - arr_back_sc.dtype, - aw.slope, - aw.inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, aw.slope, aw.inter) # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) @@ -717,7 +709,7 @@ def rt_err_estimate(arr_t, out_dtype, slope, inter): # Error attributable to 
rounding slope = 1 if slope is None else slope inter = 1 if inter is None else inter - max_int_miss = slope / 2. + max_int_miss = slope / 2.0 # Estimate error attributable to floating point slope / inter; # Remove inter / slope, put in a float type to simulate the type # promotion for the multiplication, apply slope / inter @@ -741,10 +733,7 @@ def test_rt_bias(): arr_back_sc = round_trip(aw) bias = np.mean(arr_t - arr_back_sc) # Get estimate for error - max_miss = rt_err_estimate(arr_t, - arr_back_sc.dtype, - aw.slope, - aw.inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, aw.slope, aw.inter) # Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) assert np.abs(bias) < bias_thresh @@ -774,7 +763,7 @@ def test_nan2zero_scaling(): # Skip impossible combinations if in_info['min'] == 0 and sign == -1: continue - mx = min(in_info['max'], out_info['max'] * 2., 2**32) + mx = min(in_info['max'], out_info['max'] * 2.0, 2**32) vals = [np.nan] + [100, mx] nan_arr = np.array(vals, dtype=in_dt) * sign # Check that nan scales to same value as zero within same array @@ -814,16 +803,18 @@ def test_finite_range_nan(): ([[], []], (np.inf, -np.inf)), # empty array (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)), (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0, 3)), + ([0.0, 1, 2, 3], (0, 3)), # Complex comparison works as if they are floats ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), ): - for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)), - (SlopeArrayWriter, {}), - (SlopeArrayWriter, dict(calc_scale=False)), - (SlopeInterArrayWriter, {}), - (SlopeInterArrayWriter, dict(calc_scale=False))): + for awt, kwargs in ( + (ArrayWriter, dict(check_scaling=False)), + (SlopeArrayWriter, {}), + (SlopeArrayWriter, dict(calc_scale=False)), + (SlopeInterArrayWriter, {}), + (SlopeInterArrayWriter, dict(calc_scale=False)), + ): for out_type in NUMERIC_TYPES: has_nan = np.any(np.isnan(in_arr)) try: @@ -849,7 +840,7 @@ def test_finite_range_nan(): assert aw.has_nan == has_nan assert aw.finite_range() == res # Structured type cannot be nan and we can test this - a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')]) + a = np.array([[1.0, 0, 1], [2, 3, 4]]).view([('f1', 'f')]) aw = awt(a, a.dtype, **kwargs) with pytest.raises(TypeError): aw.finite_range() diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index 586f277150..d260d2db76 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for BatteryRunner and Report objects +"""Tests for BatteryRunner and Report objects """ from io import StringIO diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index 9f3bfdd93c..ff9e91520e 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -26,51 +26,41 @@ fname=pjoin(data_path, 'example4d+orig.BRIK.gz'), shape=(33, 41, 25, 3), dtype=np.int16, - affine=np.array([[-3.0,0,0,49.5], - [0,-3.0,0,82.312], - [0,0,3.0,-52.3511], - [0,0,0,1.0]]), - zooms=(3., 3., 3., 3.), - data_summary=dict( - min=0, - max=13722, - mean=4266.76024636), + affine=np.array( + [[-3.0, 0, 0, 49.5], [0, -3.0, 0, 82.312], [0, 0, 3.0, -52.3511], [0, 0, 0, 1.0]] + ), + zooms=(3.0, 3.0, 3.0, 3.0), + data_summary=dict(min=0, max=13722, mean=4266.76024636), is_proxy=True, space='ORIG', labels=['#0', '#1', '#2'], - scaling=None), + scaling=None, + ), dict( head=pjoin(data_path, 'scaled+tlrc.HEAD'), fname=pjoin(data_path, 'scaled+tlrc.BRIK'), - shape=(47, 54, 43, 1.), + shape=(47, 54, 43, 1.0), dtype=np.int16, - affine=np.array([[3.0,0,0,-66.], - [0,3.0,0,-87.], - [0,0,3.0,-54.], - [0,0,0,1.0]]), - zooms=(3., 3., 3., 0.), + affine=np.array( + [[3.0, 0, 0, -66.0], [0, 3.0, 0, -87.0], [0, 0, 3.0, -54.0], [0, 0, 0, 1.0]] + ), + zooms=(3.0, 3.0, 3.0, 0.0), data_summary=dict( - min=1.9416814999999998e-07, - max=0.0012724615542099998, - mean=0.00023919645351876782), + min=1.9416814999999998e-07, max=0.0012724615542099998, mean=0.00023919645351876782 + ), is_proxy=True, space='TLRC', labels=['#0'], - scaling=np.array([ 3.88336300e-08]), - ) + scaling=np.array([3.88336300e-08]), + ), ] EXAMPLE_BAD_IMAGES = [ - dict( - head=pjoin(data_path, 'bad_datatype+orig.HEAD'), - err=brikhead.AFNIImageError - ), - dict( - head=pjoin(data_path, 'bad_attribute+orig.HEAD'), - err=brikhead.AFNIHeaderError - ) + dict(head=pjoin(data_path, 'bad_datatype+orig.HEAD'), err=brikhead.AFNIImageError), + dict(head=pjoin(data_path, 'bad_attribute+orig.HEAD'), err=brikhead.AFNIHeaderError), ] + class TestAFNIHeader: module = brikhead test_files = EXAMPLE_IMAGES @@ -139,8 +129,10 @@ def test_brikheadfile(self): class TestBadVars: module = brikhead - vars = ['type = badtype-attribute\nname = BRICK_TYPES\ncount = 1\n1\n', - 'type = integer-attribute\ncount = 1\n1\n'] + vars = [ + 'type = badtype-attribute\nname = BRICK_TYPES\ncount = 1\n1\n', + 'type = integer-attribute\ncount = 1\n1\n', + ] def test_unpack_var(self): for var in self.vars: diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index b8f56454b5..d16541b352 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -1,16 +1,26 @@ -""" Test casting utilities +"""Test casting utilities """ import os from platform import machine import numpy as np -from ..casting import (float_to_int, shared_range, CastingError, int_to_float, - as_int, int_abs, floor_log2, able_int_type, best_float, - ulp, longdouble_precision_improved) +from ..casting import ( + float_to_int, + shared_range, + CastingError, + int_to_float, + as_int, + int_abs, + floor_log2, + able_int_type, + best_float, + ulp, + longdouble_precision_improved, +) from ..testing import suppress_warnings -from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest @@ -148,7 +158,7 @@ def test_int_abs(): def test_floor_log2(): assert floor_log2(2**9 + 1) == 9 - 
assert floor_log2(-2**9 + 1) == 8 + assert floor_log2(-(2**9) + 1) == 8 assert floor_log2(2) == 1 assert floor_log2(1) == 0 assert floor_log2(0.5) == -1 @@ -161,19 +171,20 @@ def test_floor_log2(): def test_able_int_type(): # The integer type cabable of containing values for vals, exp_out in ( - ([0, 1], np.uint8), - ([0, 255], np.uint8), - ([-1, 1], np.int8), - ([0, 256], np.uint16), - ([-1, 128], np.int16), - ([0.1, 1], None), - ([0, 2**16], np.uint32), - ([-1, 2**15], np.int32), - ([0, 2**32], np.uint64), - ([-1, 2**31], np.int64), - ([-1, 2**64 - 1], None), - ([0, 2**64 - 1], np.uint64), - ([0, 2**64], None)): + ([0, 1], np.uint8), + ([0, 255], np.uint8), + ([-1, 1], np.int8), + ([0, 256], np.uint16), + ([-1, 128], np.int16), + ([0.1, 1], None), + ([0, 2**16], np.uint32), + ([-1, 2**15], np.int32), + ([0, 2**32], np.uint64), + ([-1, 2**31], np.int64), + ([-1, 2**64 - 1], None), + ([0, 2**64 - 1], np.uint64), + ([0, 2**64], None), + ): assert able_int_type(vals) == exp_out @@ -200,7 +211,7 @@ def test_able_casting(): def test_best_float(): # Finds the most capable floating point type - """ most capable type will be np.longdouble except when + """most capable type will be np.longdouble except when * np.longdouble has float64 precision (MSVC compiled numpy) * machine is sparc64 (float128 very slow) @@ -213,9 +224,11 @@ def test_best_float(): assert end_of_ints == end_of_ints + 1 # longdouble may have more, but not on 32 bit windows, at least end_of_ints = np.longdouble(2**53) - if (end_of_ints == (end_of_ints + 1) or # off continuous integers - machine() == 'sparc64' or # crippling slow longdouble on sparc - longdouble_precision_improved()): # Windows precisions can change + if ( + end_of_ints == (end_of_ints + 1) + or machine() == 'sparc64' # off continuous integers + or longdouble_precision_improved() # crippling slow longdouble on sparc + ): # Windows precisions can change assert best == np.float64 else: assert best == np.longdouble @@ -224,6 +237,7 @@ def test_best_float(): def test_longdouble_precision_improved(): # Just check that this can only be True on windows, msvc from numpy.distutils.ccompiler import get_default_compiler + if not (os.name == 'nt' and get_default_compiler() == 'msvc'): assert not longdouble_precision_improved() @@ -248,8 +262,8 @@ def test_ulp(): assert np.isnan(ulp(-np.inf)) assert np.isnan(ulp(np.nan)) # 0 gives subnormal smallest - subn64 = np.float64(2**(-1022 - 52)) - subn32 = np.float32(2**(-126 - 23)) + subn64 = np.float64(2 ** (-1022 - 52)) + subn32 = np.float32(2 ** (-126 - 23)) assert ulp(0.0) == subn64 assert ulp(np.float64(0)) == subn64 assert ulp(np.float32(0)) == subn32 diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 0c1671dfbf..0fbadc6af0 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -1,16 +1,23 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Tests for data module """ +"""Tests for data module""" import os from os.path import join as pjoin from os import environ as env import sys import tempfile -from ..data import (get_data_path, find_data_dir, - DataError, _cfg_value, make_datasource, - Datasource, VersionedDatasource, Bomber, - datasource_or_bomber) +from ..data import ( + get_data_path, + find_data_dir, + DataError, + _cfg_value, + make_datasource, + Datasource, + VersionedDatasource, + Bomber, + datasource_or_bomber, +) from ..tmpdirs import TemporaryDirectory @@ -182,8 +189,7 @@ def test_find_data_dir(): def 
test_make_datasource(with_nimd_env): - pkg_def = dict( - relpath='pkg') + pkg_def = dict(relpath='pkg') with TemporaryDirectory() as tmpdir: nibd.get_data_path = lambda: [tmpdir] with pytest.raises(DataError): diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index e0f042939a..dfbb0fe4cb 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -1,4 +1,4 @@ -""" Testing dataobj_images module +"""Testing dataobj_images module """ import numpy as np @@ -43,7 +43,7 @@ def set_data_dtype(self, dtype): class TestDataobjAPI(_TFI, DataInterfaceMixin): - """ Validation for DataobjImage instances - """ + """Validation for DataobjImage instances""" + # A callable returning an image from ``image_maker(data, header)`` image_maker = DoNumpyImage diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index c09fda4988..cd56f507f9 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -1,12 +1,11 @@ -""" Testing `deprecated` module +"""Testing `deprecated` module """ import warnings import pytest from nibabel import pkg_info -from nibabel.deprecated import (ModuleProxy, FutureWarningMixin, - deprecate_with_version) +from nibabel.deprecated import ModuleProxy, FutureWarningMixin, deprecate_with_version from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF @@ -33,7 +32,6 @@ def test_module_proxy(): def test_futurewarning_mixin(): # Test mixin for FutureWarning class C: - def __init__(self, val): self.val = val @@ -44,7 +42,8 @@ class D(FutureWarningMixin, C): pass class E(FutureWarningMixin, C): - warn_message = "Oh no, not this one" + warn_message = 'Oh no, not this one' + with warnings.catch_warnings(record=True) as warns: c = C(42) assert c.meth() == 42 @@ -53,8 +52,7 @@ class E(FutureWarningMixin, C): assert d.meth() == 42 warn = warns.pop(0) assert warn.category == FutureWarning - assert (str(warn.message) == - 'This class will be removed in future versions') + assert str(warn.message) == 'This class will be removed in future versions' e = E(42) assert e.meth() == 42 warn = warns.pop(0) @@ -63,7 +61,7 @@ class E(FutureWarningMixin, C): class TestNibabelDeprecator(_TestDF): - """ Test deprecations against nibabel version """ + """Test deprecations against nibabel version""" dep_func = deprecate_with_version diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 0280692299..31b61f5153 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -1,4 +1,4 @@ -""" Testing deprecator module / Deprecator class +"""Testing deprecator module / Deprecator class """ import sys @@ -8,9 +8,14 @@ import pytest -from nibabel.deprecator import (_ensure_cr, _add_dep_doc, - ExpiredDeprecationError, Deprecator, - TESTSETUP, TESTCLEANUP) +from nibabel.deprecator import ( + _ensure_cr, + _add_dep_doc, + ExpiredDeprecationError, + Deprecator, + TESTSETUP, + TESTCLEANUP, +) from ..testing import clear_and_catch_warnings @@ -35,18 +40,22 @@ def test__add_dep_doc(): assert _add_dep_doc(' bar', 'foo\n') == ' bar\n\nfoo\n' assert _add_dep_doc('bar\n\n', 'foo') == 'bar\n\nfoo\n' assert _add_dep_doc('bar\n \n', 'foo') == 'bar\n\nfoo\n' - assert (_add_dep_doc(' bar\n\nSome explanation', 'foo\nbaz') == - ' bar\n\nfoo\nbaz\n\nSome explanation\n') - assert (_add_dep_doc(' bar\n\n Some explanation', 'foo\nbaz') == - ' bar\n \n foo\n baz\n \n Some explanation\n') + assert ( + _add_dep_doc(' bar\n\nSome explanation', 
'foo\nbaz') + == ' bar\n\nfoo\nbaz\n\nSome explanation\n' + ) + assert ( + _add_dep_doc(' bar\n\n Some explanation', 'foo\nbaz') + == ' bar\n \n foo\n baz\n \n Some explanation\n' + ) class CustomError(Exception): - """ Custom error class for testing expired deprecation errors """ + """Custom error class for testing expired deprecation errors""" def cmp_func(v): - """ Comparison func tests against version 2.0 """ + """Comparison func tests against version 2.0""" return (float(v) > 2) - (float(v) < 2) @@ -55,15 +64,15 @@ def func_no_doc(): def func_doc(i): - "A docstring" + """A docstring""" def func_doc_long(i, j): - "A docstring\n\n Some text" + """A docstring\n\n Some text""" class TestDeprecatorFunc: - """ Test deprecator function specified in ``dep_func`` """ + """Test deprecator function specified in ``dep_func``""" dep_func = Deprecator(cmp_func) @@ -83,9 +92,11 @@ def test_dep_func(self): with pytest.deprecated_call() as w: assert func(1, 2) is None assert len(w) == 1 - assert (func.__doc__ == - f'A docstring\n \n foo\n \n{indent(TESTSETUP, " ", lambda x: True)}' - f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}') + assert ( + func.__doc__ + == f'A docstring\n \n foo\n \n{indent(TESTSETUP, " ", lambda x: True)}' + f' Some text\n{indent(TESTCLEANUP, " ", lambda x: True)}' + ) # Try some since and until versions func = dec('foo', '1.1')(func_no_doc) @@ -97,21 +108,24 @@ def test_dep_func(self): with pytest.deprecated_call() as w: assert func() is None assert len(w) == 1 - assert (func.__doc__ == - f'foo\n\n* Will raise {ExpiredDeprecationError} as of version: 99.4\n') + assert ( + func.__doc__ == f'foo\n\n* Will raise {ExpiredDeprecationError} as of version: 99.4\n' + ) func = dec('foo', until='1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() - assert (func.__doc__ == - f'foo\n\n* Raises {ExpiredDeprecationError} as of version: 1.8\n') + assert func.__doc__ == f'foo\n\n* Raises {ExpiredDeprecationError} as of version: 1.8\n' func = dec('foo', '1.2', '1.8')(func_no_doc) with pytest.raises(ExpiredDeprecationError): func() - assert (func.__doc__ == - 'foo\n\n* deprecated from version: 1.2\n* Raises ' - f'{ExpiredDeprecationError} as of version: 1.8\n') + assert ( + func.__doc__ == 'foo\n\n* deprecated from version: 1.2\n* Raises ' + f'{ExpiredDeprecationError} as of version: 1.8\n' + ) func = dec('foo', '1.2', '1.8')(func_doc_long) - assert func.__doc__ == f"""\ + assert ( + func.__doc__ + == f"""\ A docstring foo @@ -119,6 +133,7 @@ def test_dep_func(self): * deprecated from version: 1.2 * Raises {ExpiredDeprecationError} as of version: 1.8 """ + ) with pytest.raises(ExpiredDeprecationError): func() @@ -140,7 +155,7 @@ def test_dep_func(self): class TestDeprecatorMaker: - """ Test deprecator class creation with custom warnings and errors """ + """Test deprecator class creation with custom warnings and errors""" dep_maker = partial(Deprecator, cmp_func) diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index 61e031b8d3..b43b2762f7 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -1,4 +1,4 @@ -""" Testing dft +"""Testing dft """ import os @@ -32,15 +32,16 @@ def setUpModule(): class Test_DBclass: """Some tests on the database manager class that don't get exercised through the API""" + def setup_method(self): - self._db = dft._DB(fname=":memory:", verbose=False) + self._db = dft._DB(fname=':memory:', verbose=False) def test_repr(self): assert repr(self._db) == "" def test_cursor_conflict(self): rwc = 
self._db.readwrite_cursor - statement = ("INSERT INTO directory (path, mtime) VALUES (?, ?)", ("/tmp", 0)) + statement = ('INSERT INTO directory (path, mtime) VALUES (?, ?)', ('/tmp', 0)) with pytest.raises(sqlite3.IntegrityError): # Whichever exits first will commit and make the second violate uniqueness with rwc() as c1, rwc() as c2: @@ -52,8 +53,8 @@ def test_cursor_conflict(self): def db(monkeypatch): """Build a dft database in memory to avoid cross-process races and not modify the host filesystem.""" - database = dft._DB(fname=":memory:") - monkeypatch.setattr(dft, "DB", database) + database = dft._DB(fname=':memory:') + monkeypatch.setattr(dft, 'DB', database) yield database @@ -69,8 +70,7 @@ def test_study(db): for base_dir in (data_dir, None): studies = dft.get_studies(base_dir) assert len(studies) == 1 - assert (studies[0].uid == - '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022') + assert studies[0].uid == '1.3.12.2.1107.5.2.32.35119.30000010011408520750000000022' assert studies[0].date == '20100114' assert studies[0].time == '121314.000000' assert studies[0].comments == 'dft study comments' @@ -84,8 +84,7 @@ def test_series(db): studies = dft.get_studies(data_dir) assert len(studies[0].series) == 1 ser = studies[0].series[0] - assert (ser.uid == - '1.3.12.2.1107.5.2.32.35119.2010011420292594820699190.0.0.0') + assert ser.uid == '1.3.12.2.1107.5.2.32.35119.2010011420292594820699190.0.0.0' assert ser.number == '12' assert ser.description == 'CBU_DTI_64D_1A' assert ser.rows == 256 @@ -100,10 +99,8 @@ def test_storage_instances(db): assert len(sis) == 2 assert sis[0].instance_number == 1 assert sis[1].instance_number == 2 - assert (sis[0].uid == - '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.0') - assert (sis[1].uid == - '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1') + assert sis[0].uid == '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.0' + assert sis[1].uid == '1.3.12.2.1107.5.2.32.35119.2010011420300180088599504.1' @unittest.skipUnless(have_pil, 'could not import PIL.Image') diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index 4897198668..b1f05177bb 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -1,9 +1,9 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test diff +"""Test diff """ -from os.path import (dirname, join as pjoin, abspath) +from os.path import dirname, join as pjoin, abspath import numpy as np @@ -23,18 +23,18 @@ def test_diff_values_int(): def test_diff_values_float(): - assert not are_values_different(0., 0.) - assert not are_values_different(0., 0., 0.) # can take more + assert not are_values_different(0.0, 0.0) + assert not are_values_different(0.0, 0.0, 0.0) # can take more assert not are_values_different(1.1, 1.1) - assert are_values_different(0., 1.1) - assert are_values_different(0., 0, 1.1) - assert are_values_different(1., 2.) 
+ assert are_values_different(0.0, 1.1) + assert are_values_different(0.0, 0, 1.1) + assert are_values_different(1.0, 2.0) def test_diff_values_mixed(): assert are_values_different(1.0, 1) - assert are_values_different(1.0, "1") - assert are_values_different(1, "1") + assert are_values_different(1.0, '1') + assert are_values_different(1, '1') assert are_values_different(1, None) assert are_values_different(np.ndarray([0]), 'hey') assert not are_values_different(None, None) @@ -42,6 +42,7 @@ def test_diff_values_mixed(): def test_diff_values_array(): from numpy import nan, array, inf + a_int = array([1, 2]) a_float = a_int.astype(float) diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 9e56fd73c7..875e06c0a7 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -13,8 +13,14 @@ import numpy as np from ..openers import Opener -from ..ecat import (EcatHeader, EcatSubHeader, EcatImage, read_mlist, - get_frame_order, get_series_framenumbers) +from ..ecat import ( + EcatHeader, + EcatSubHeader, + EcatImage, + read_mlist, + get_frame_order, + get_series_framenumbers, +) from unittest import TestCase import pytest @@ -62,8 +68,7 @@ def test_header_codes(self): newhdr = hdr.from_fileobj(fid) fid.close() assert newhdr.get_filetype() == 'ECAT7_VOLUME16' - assert (newhdr.get_patient_orient() == - 'ECAT7_Unknown_Orientation') + assert newhdr.get_patient_orient() == 'ECAT7_Unknown_Orientation' def test_update(self): hdr = self.header_class() @@ -98,18 +103,16 @@ def test_mlist(self): assert get_frame_order(mlist)[0][0] == 0 assert get_frame_order(mlist)[0][1] == 16842758.0 # test badly ordered mlist - badordermlist = np.array([[1.68427540e+07, 3.00000000e+00, - 1.20350000e+04, 1.00000000e+00], - [1.68427530e+07, 1.20360000e+04, - 2.40680000e+04, 1.00000000e+00], - [1.68427550e+07, 2.40690000e+04, - 3.61010000e+04, 1.00000000e+00], - [1.68427560e+07, 3.61020000e+04, - 4.81340000e+04, 1.00000000e+00], - [1.68427570e+07, 4.81350000e+04, - 6.01670000e+04, 1.00000000e+00], - [1.68427580e+07, 6.01680000e+04, - 7.22000000e+04, 1.00000000e+00]]) + badordermlist = np.array( + [ + [1.68427540e07, 3.00000000e00, 1.20350000e04, 1.00000000e00], + [1.68427530e07, 1.20360000e04, 2.40680000e04, 1.00000000e00], + [1.68427550e07, 2.40690000e04, 3.61010000e04, 1.00000000e00], + [1.68427560e07, 3.61020000e04, 4.81340000e04, 1.00000000e00], + [1.68427570e07, 4.81350000e04, 6.01670000e04, 1.00000000e00], + [1.68427580e07, 6.01680000e04, 7.22000000e04, 1.00000000e00], + ] + ) with suppress_warnings(): # STORED order assert get_frame_order(badordermlist)[0][0] == 1 @@ -118,18 +121,16 @@ def test_mlist_errors(self): hdr = self.header_class.from_fileobj(fid) hdr['num_frames'] = 6 mlist = read_mlist(fid, hdr.endianness) - mlist = np.array([[1.68427540e+07, 3.00000000e+00, - 1.20350000e+04, 1.00000000e+00], - [1.68427530e+07, 1.20360000e+04, - 2.40680000e+04, 1.00000000e+00], - [1.68427550e+07, 2.40690000e+04, - 3.61010000e+04, 1.00000000e+00], - [1.68427560e+07, 3.61020000e+04, - 4.81340000e+04, 1.00000000e+00], - [1.68427570e+07, 4.81350000e+04, - 6.01670000e+04, 1.00000000e+00], - [1.68427580e+07, 6.01680000e+04, - 7.22000000e+04, 1.00000000e+00]]) + mlist = np.array( + [ + [1.68427540e07, 3.00000000e00, 1.20350000e04, 1.00000000e00], + [1.68427530e07, 1.20360000e04, 2.40680000e04, 1.00000000e00], + [1.68427550e07, 2.40690000e04, 3.61010000e04, 1.00000000e00], + [1.68427560e07, 3.61020000e04, 4.81340000e04, 1.00000000e00], + [1.68427570e07, 4.81350000e04, 6.01670000e04, 
1.00000000e00], + [1.68427580e07, 6.01680000e04, 7.22000000e04, 1.00000000e00], + ] + ) with suppress_warnings(): # STORED order series_framenumbers = get_series_framenumbers(mlist) # first frame stored was actually 2nd frame acquired @@ -162,15 +163,15 @@ def test_subheader_size(self): def test_subheader(self): assert self.subhdr.get_shape() == (10, 10, 3) assert self.subhdr.get_nframes() == 1 - assert (self.subhdr.get_nframes() == - len(self.subhdr.subheaders)) + assert self.subhdr.get_nframes() == len(self.subhdr.subheaders) assert self.subhdr._check_affines() is True - assert_array_almost_equal(np.diag(self.subhdr.get_frame_affine()), - np.array([2.20241979, 2.20241979, 3.125, 1.])) + assert_array_almost_equal( + np.diag(self.subhdr.get_frame_affine()), np.array([2.20241979, 2.20241979, 3.125, 1.0]) + ) assert self.subhdr.get_zooms()[0] == 2.20241978764534 assert self.subhdr.get_zooms()[2] == 3.125 assert self.subhdr._get_data_dtype(0) == np.int16 - #assert_equal(self.subhdr._get_frame_offset(), 1024) + # assert_equal(self.subhdr._get_frame_offset(), 1024) assert self.subhdr._get_frame_offset() == 1536 dat = self.subhdr.raw_data_from_fileobj() assert dat.shape == self.subhdr.get_shape() @@ -185,10 +186,8 @@ class TestEcatImage(TestCase): img = image_class.load(example_file) def test_file(self): - assert (self.img.file_map['header'].filename == - self.example_file) - assert (self.img.file_map['image'].filename == - self.example_file) + assert self.img.file_map['header'].filename == self.example_file + assert self.img.file_map['image'].filename == self.example_file def test_save(self): tmp_file = 'tinypet_tmp.v' @@ -229,11 +228,13 @@ def test_array_proxy_slicing(self): def test_isolation(self): # Test image isolated from external changes to affine img_klass = self.image_class - arr, aff, hdr, sub_hdr, mlist = (self.img.get_fdata(), - self.img.affine, - self.img.header, - self.img.get_subheaders(), - self.img.get_mlist()) + arr, aff, hdr, sub_hdr, mlist = ( + self.img.get_fdata(), + self.img.affine, + self.img.header, + self.img.get_subheaders(), + self.img.get_mlist(), + ) img = img_klass(arr, aff, hdr, sub_hdr, mlist) assert_array_equal(img.affine, aff) aff[0, 0] = 99 @@ -242,11 +243,13 @@ def test_isolation(self): def test_float_affine(self): # Check affines get converted to float img_klass = self.image_class - arr, aff, hdr, sub_hdr, mlist = (self.img.get_fdata(), - self.img.affine, - self.img.header, - self.img.get_subheaders(), - self.img.get_mlist()) + arr, aff, hdr, sub_hdr, mlist = ( + self.img.get_fdata(), + self.img.affine, + self.img.header, + self.img.get_subheaders(), + self.img.get_mlist(), + ) img = img_klass(arr, aff.astype(np.float32), hdr, sub_hdr, mlist) assert img.affine.dtype == np.dtype(np.float64) img = img_klass(arr, aff.astype(np.int16), hdr, sub_hdr, mlist) @@ -255,9 +258,7 @@ def test_float_affine(self): def test_data_regression(self): # Test whether data read has changed since 1.3.0 # These values came from reading the example image using nibabel 1.3.0 - vals = dict(max=248750736458.0, - min=1125342630.0, - mean=117907565661.46666) + vals = dict(max=248750736458.0, min=1125342630.0, mean=117907565661.46666) data = self.img.get_fdata() assert data.max() == vals['max'] assert data.min() == vals['min'] @@ -265,5 +266,4 @@ def test_data_regression(self): def test_mlist_regression(self): # Test mlist is as same as for nibabel 1.3.0 - assert_array_equal(self.img.get_mlist(), - [[16842758, 3, 3011, 1]]) + assert_array_equal(self.img.get_mlist(), [[16842758, 3, 3011, 
1]]) diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index dcd812c52d..de4164cd3c 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test we can correctly import example ECAT files +"""Test we can correctly import example ECAT files """ import os @@ -17,7 +17,7 @@ from .nibabel_data import get_nibabel_data, needs_nibabel_data from ..ecat import load -from numpy.testing import (assert_array_equal, assert_almost_equal) +from numpy.testing import assert_array_equal, assert_almost_equal ECAT_TEST_PATH = pjoin(get_nibabel_data(), 'nipy-ecattest') diff --git a/nibabel/tests/test_endiancodes.py b/nibabel/tests/test_endiancodes.py index a9af11f052..ce460efbb3 100644 --- a/nibabel/tests/test_endiancodes.py +++ b/nibabel/tests/test_endiancodes.py @@ -6,11 +6,11 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for endiancodes module """ +"""Tests for endiancodes module""" import sys -from ..volumeutils import (endian_codes, native_code, swapped_code) +from ..volumeutils import endian_codes, native_code, swapped_code def test_native_swapped(): diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index 19891a607b..5742edef43 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -1,4 +1,4 @@ -""" Testing environment settings +"""Testing environment settings """ import os @@ -44,7 +44,7 @@ def test_user_dir(with_environment): if USER_KEY in env: del env[USER_KEY] home_dir = nibe.get_home_dir() - if os.name == "posix": + if os.name == 'posix': exp = pjoin(home_dir, '.nipy') else: exp = pjoin(home_dir, '_nipy') diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index d60b0b8b2e..25e4c776d2 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for Euler angles """ +"""Tests for Euler angles""" import math import numpy as np @@ -32,28 +32,19 @@ def x_only(x): cosx = np.cos(x) sinx = np.sin(x) - return np.array( - [[1, 0, 0], - [0, cosx, -sinx], - [0, sinx, cosx]]) + return np.array([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) def y_only(y): cosy = np.cos(y) siny = np.sin(y) - return np.array( - [[cosy, 0, siny], - [0, 1, 0], - [-siny, 0, cosy]]) + return np.array([[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]]) def z_only(z): cosz = np.cos(z) sinz = np.sin(z) - return np.array( - [[cosz, -sinz, 0], - [sinz, cosz, 0], - [0, 0, 1]]) + return np.array([[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]]) def sympy_euler(z, y, x): @@ -63,10 +54,16 @@ def sympy_euler(z, y, x): # the following copy / pasted from Sympy - see derivations subdirectory return [ [cos(y) * cos(z), -cos(y) * sin(z), sin(y)], - [cos(x) * sin(z) + cos(z) * sin(x) * sin(y), cos(x) * - cos(z) - sin(x) * sin(y) * sin(z), -cos(y) * sin(x)], - [sin(x) * sin(z) - cos(x) * cos(z) * sin(y), cos(z) * - sin(x) + cos(x) * sin(y) * sin(z), cos(x) * cos(y)] + [ + cos(x) * sin(z) + cos(z) * sin(x) * sin(y), + cos(x) * cos(z) - sin(x) * sin(y) * sin(z), + -cos(y) * sin(x), + ], + [ + sin(x) * sin(z) - cos(x) * cos(z) * sin(y), + cos(z) * sin(x) + cos(x) * sin(y) * sin(z), + cos(x) * cos(y), + ], ] @@ -100,15 +97,15 @@ def test_basic_euler(): assert np.all(nea.euler2mat(0, 0, xr) == nea.euler2mat(x=xr)) # Applying an opposite rotation same as inverse (the inverse is # the same as the transpose, but just for clarity) - assert np.allclose(nea.euler2mat(x=-xr), - np.linalg.inv(nea.euler2mat(x=xr))) + assert np.allclose(nea.euler2mat(x=-xr), np.linalg.inv(nea.euler2mat(x=xr))) def test_euler_mat_1(): M = nea.euler2mat() assert_array_equal(M, np.eye(3)) -@pytest.mark.parametrize("x, y, z", eg_rots) + +@pytest.mark.parametrize('x, y, z', eg_rots) def test_euler_mat_2(x, y, z): M1 = nea.euler2mat(z, y, x) M2 = sympy_euler(z, y, x) @@ -128,14 +125,16 @@ def sympy_euler2quat(z=0, y=0, x=0): cos = math.cos sin = math.sin # the following copy / pasted from Sympy output - return (cos(0.5 * x) * cos(0.5 * y) * cos(0.5 * z) - sin(0.5 * x) * sin(0.5 * y) * sin(0.5 * z), - cos(0.5 * x) * sin(0.5 * y) * sin(0.5 * z) + cos(0.5 * y) * cos(0.5 * z) * sin(0.5 * x), - cos(0.5 * x) * cos(0.5 * z) * sin(0.5 * y) - cos(0.5 * y) * sin(0.5 * x) * sin(0.5 * z), - cos(0.5 * x) * cos(0.5 * y) * sin(0.5 * z) + cos(0.5 * z) * sin(0.5 * x) * sin(0.5 * y)) + return ( + cos(0.5 * x) * cos(0.5 * y) * cos(0.5 * z) - sin(0.5 * x) * sin(0.5 * y) * sin(0.5 * z), + cos(0.5 * x) * sin(0.5 * y) * sin(0.5 * z) + cos(0.5 * y) * cos(0.5 * z) * sin(0.5 * x), + cos(0.5 * x) * cos(0.5 * z) * sin(0.5 * y) - cos(0.5 * y) * sin(0.5 * x) * sin(0.5 * z), + cos(0.5 * x) * cos(0.5 * y) * sin(0.5 * z) + cos(0.5 * z) * sin(0.5 * x) * sin(0.5 * y), + ) def crude_mat2euler(M): - """ The simplest possible - ignoring atan2 instability """ + """The simplest possible - ignoring atan2 instability""" r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat return math.atan2(-r12, r11), math.asin(r13), math.atan2(-r23, r33) @@ -159,7 +158,7 @@ def test_euler_instability(): assert not np.allclose(M_e, M_e_back) -@pytest.mark.parametrize("x, y, z", eg_rots) +@pytest.mark.parametrize('x, y, z', eg_rots) def test_quats(x, y, z): M1 = nea.euler2mat(z, y, x) quatM = nq.mat2quat(M1) diff --git a/nibabel/tests/test_filebasedimages.py 
b/nibabel/tests/test_filebasedimages.py index d01440eb65..aee02f5a68 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -1,4 +1,4 @@ -""" Testing filebasedimages module +"""Testing filebasedimages module """ from itertools import product @@ -59,8 +59,8 @@ class SerializableNumpyImage(FBNumpyImage, SerializableImage): class TestFBImageAPI(GenericImageAPI): - """ Validation for FileBasedImage instances - """ + """Validation for FileBasedImage instances""" + # A callable returning an image from ``image_maker(data, header)`` image_maker = FBNumpyImage # A callable returning a header from ``header_maker()`` @@ -80,11 +80,7 @@ def obj_params(self): arr = np.arange(np.prod(shape), dtype=dtype).reshape(shape) hdr = self.header_maker() func = self.make_imaker(arr.copy(), hdr) - params = dict( - dtype=dtype, - data=arr, - shape=shape, - is_proxy=False) + params = dict(dtype=dtype, data=arr, shape=shape, is_proxy=False) yield func, params @@ -93,8 +89,8 @@ class TestSerializableImageAPI(TestFBImageAPI, SerializeMixin): @staticmethod def _header_eq(header_a, header_b): - """ FileBasedHeader is an abstract class, so __eq__ is undefined. - Checking for the same header type is sufficient, here. """ + """FileBasedHeader is an abstract class, so __eq__ is undefined. + Checking for the same header type is sufficient, here.""" return type(header_a) == type(header_b) == FileBasedHeader @@ -102,7 +98,6 @@ def test_filebased_header(): # Test stuff about the default FileBasedHeader class H(FileBasedHeader): - def __init__(self, seq=None): if seq is None: seq = [] diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index ed1e80e70a..73698b23ac 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -21,7 +21,7 @@ from ..nifti1 import Nifti1Image -@unittest.skipIf(SOFT_LIMIT > 4900, "It would take too long to test filehandles") +@unittest.skipIf(SOFT_LIMIT > 4900, 'It would take too long to test filehandles') def test_multiload(): # Make a tiny image, save, load many times. If we are leaking filehandles, # this will cause us to run out and generate an error diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index e31a6efcbc..a0e50e4133 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -1,4 +1,4 @@ -""" Testing fileholders +"""Testing fileholders """ from io import BytesIO diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index 49112036d9..b4a816a137 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -6,10 +6,9 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for filename container """ +"""Tests for filename container""" -from ..filename_parser import (types_filenames, TypesFilenamesError, - parse_filename, splitext_addext) +from ..filename_parser import types_filenames, TypesFilenamesError, parse_filename, splitext_addext import pytest @@ -40,9 +39,9 @@ def test_filenames(): types_filenames('test.img.gz', types_exts, ()) # if we don't know about .gz extension, and not enforcing, then we # get something a bit odd - tfns = types_filenames('test.img.gz', types_exts, - trailing_suffixes=(), - enforce_extensions=False) + tfns = types_filenames( + 'test.img.gz', types_exts, trailing_suffixes=(), enforce_extensions=False + ) assert tfns == {'header': 'test.img.hdr', 'image': 'test.img.gz'} # the suffixes we remove and replaces can be any suffixes. tfns = types_filenames('test.img.bzr', types_exts, ('.bzr',)) @@ -50,9 +49,9 @@ def test_filenames(): # If we specifically pass the remove / replace suffixes, then we # don't remove / replace the .gz and .bz2, unless they are passed # specifically. - tfns = types_filenames('test.img.bzr', types_exts, - trailing_suffixes=('.bzr',), - enforce_extensions=False) + tfns = types_filenames( + 'test.img.bzr', types_exts, trailing_suffixes=('.bzr',), enforce_extensions=False + ) assert tfns == {'header': 'test.hdr.bzr', 'image': 'test.img.bzr'} # but, just .gz or .bz2 as extension gives an error, if enforcing is on with pytest.raises(TypesFilenamesError): @@ -61,8 +60,7 @@ def test_filenames(): types_filenames('test.bz2', types_exts) # if enforcing is off, it tries to work out what the other files # should be assuming the passed filename is of the first input type - tfns = types_filenames('test.gz', types_exts, - enforce_extensions=False) + tfns = types_filenames('test.gz', types_exts, enforce_extensions=False) assert tfns == {'image': 'test.gz', 'header': 'test.hdr.gz'} # case (in)sensitivity, and effect of uppercase, lowercase tfns = types_filenames('test.IMG', types_exts) @@ -76,41 +74,29 @@ def test_filenames(): def test_parse_filename(): types_exts = (('t1', 'ext1'), ('t2', 'ext2')) exp_in_outs = ( - (('/path/fname.funny', ()), - ('/path/fname', '.funny', None, None)), - (('/path/fnameext2', ()), - ('/path/fname', 'ext2', None, 't2')), - (('/path/fnameext2', ('.gz',)), - ('/path/fname', 'ext2', None, 't2')), - (('/path/fnameext2.gz', ('.gz',)), - ('/path/fname', 'ext2', '.gz', 't2')) + (('/path/fname.funny', ()), ('/path/fname', '.funny', None, None)), + (('/path/fnameext2', ()), ('/path/fname', 'ext2', None, 't2')), + (('/path/fnameext2', ('.gz',)), ('/path/fname', 'ext2', None, 't2')), + (('/path/fnameext2.gz', ('.gz',)), ('/path/fname', 'ext2', '.gz', 't2')), ) for inps, exps in exp_in_outs: pth, sufs = inps res = parse_filename(pth, types_exts, sufs) assert res == exps upth = pth.upper() - uexps = (exps[0].upper(), exps[1].upper(), - exps[2].upper() if exps[2] else None, - exps[3]) + uexps = (exps[0].upper(), exps[1].upper(), exps[2].upper() if exps[2] else None, exps[3]) res = parse_filename(upth, types_exts, sufs) assert res == uexps # test case sensitivity - res = parse_filename('/path/fnameext2.GZ', - types_exts, - ('.gz',), False) # case insensitive again + res = parse_filename( + '/path/fnameext2.GZ', types_exts, ('.gz',), False + ) # case insensitive again assert res == ('/path/fname', 'ext2', '.GZ', 't2') - res = parse_filename('/path/fnameext2.GZ', - types_exts, - ('.gz',), True) # case sensitive + res = 
parse_filename('/path/fnameext2.GZ', types_exts, ('.gz',), True) # case sensitive assert res == ('/path/fnameext2', '.GZ', None, None) - res = parse_filename('/path/fnameEXT2.gz', - types_exts, - ('.gz',), False) # case insensitive + res = parse_filename('/path/fnameEXT2.gz', types_exts, ('.gz',), False) # case insensitive assert res == ('/path/fname', 'EXT2', '.gz', 't2') - res = parse_filename('/path/fnameEXT2.gz', - types_exts, - ('.gz',), True) # case sensitive + res = parse_filename('/path/fnameEXT2.gz', types_exts, ('.gz',), True) # case sensitive assert res == ('/path/fnameEXT2', '', '.gz', None) diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index d3c895618e..80c4a0ab92 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing filesets - a draft - +"""Testing filesets - a draft """ import numpy as np @@ -20,12 +19,14 @@ from numpy.testing import assert_array_equal import pytest + def test_files_spatialimages(): # test files creation in image classes arr = np.zeros((2, 3, 4)) aff = np.eye(4) - klasses = [klass for klass in all_image_classes - if klass.rw and issubclass(klass, SpatialImage)] + klasses = [ + klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage) + ] for klass in klasses: file_map = klass.make_file_map() for key, value in file_map.items(): @@ -88,8 +89,11 @@ def test_round_trip_spatialimages(): # write an image to files data = np.arange(24, dtype='i4').reshape((2, 3, 4)) aff = np.eye(4) - klasses = [klass for klass in all_image_classes - if klass.rw and klass.makeable and issubclass(klass, SpatialImage)] + klasses = [ + klass + for klass in all_image_classes + if klass.rw and klass.makeable and issubclass(klass, SpatialImage) + ] for klass in klasses: file_map = klass.make_file_map() for key in file_map: diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 35c61e149b..e98fd473a0 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,4 +1,4 @@ -""" Test slicing of file-like objects """ +"""Test slicing of file-like objects""" from io import BytesIO @@ -9,12 +9,24 @@ import numpy as np -from ..fileslice import (is_fancy, canonical_slicers, fileslice, - predict_shape, read_segments, _positive_slice, - threshold_heuristic, optimize_slicer, slice2len, - fill_slicer, optimize_read_slicers, slicers2segments, - calc_slicedefs, _simple_fileslice, slice2outax, - strided_scalar) +from ..fileslice import ( + is_fancy, + canonical_slicers, + fileslice, + predict_shape, + read_segments, + _positive_slice, + threshold_heuristic, + optimize_slicer, + slice2len, + fill_slicer, + optimize_read_slicers, + slicers2segments, + calc_slicedefs, + _simple_fileslice, + slice2outax, + strided_scalar, +) import pytest from numpy.testing import assert_array_equal @@ -52,13 +64,7 @@ def test_is_fancy(): def test_canonical_slicers(): # Check transformation of sliceobj into canonical form - slicers = (slice(None), - slice(9), - slice(0, 9), - slice(1, 10), - slice(1, 10, 2), - 2, - np.array(2)) + slicers = (slice(None), slice(9), slice(0, 9), slice(1, 10), slice(1, 10, 2), 2, np.array(2)) shape = (10, 10) for slice0 in slicers: @@ -71,8 +77,9 @@ def test_canonical_slicers(): # Check None passes through assert canonical_slicers(sliceobj + (None,), shape) == sliceobj + (None,) assert 
canonical_slicers((None,) + sliceobj, shape) == (None,) + sliceobj - assert (canonical_slicers((None,) + sliceobj + (None,), shape) == - (None,) + sliceobj + (None,)) + assert canonical_slicers((None,) + sliceobj + (None,), shape) == (None,) + sliceobj + ( + None, + ) # Check Ellipsis assert canonical_slicers((Ellipsis,), shape) == (slice(None), slice(None)) assert canonical_slicers((Ellipsis, None), shape) == (slice(None), slice(None), None) @@ -80,8 +87,13 @@ def test_canonical_slicers(): assert canonical_slicers((1, Ellipsis), shape) == (1, slice(None)) # Ellipsis at end does nothing assert canonical_slicers((1, 1, Ellipsis), shape) == (1, 1) - assert (canonical_slicers((1, Ellipsis, 2), (10, 1, 2, 3, 11)) == - (1, slice(None), slice(None), slice(None), 2)) + assert canonical_slicers((1, Ellipsis, 2), (10, 1, 2, 3, 11)) == ( + 1, + slice(None), + slice(None), + slice(None), + 2, + ) with pytest.raises(ValueError): canonical_slicers((Ellipsis, 1, Ellipsis), (2, 3, 4, 5)) # Check full slices get expanded @@ -109,7 +121,14 @@ def test_canonical_slicers(): canonical_slicers((1, 10), shape, True) # Unless check_inds is False assert canonical_slicers((10,), shape, False) == (10, slice(None)) - assert canonical_slicers((1, 10,), shape, False) == (1, 10) + assert canonical_slicers( + ( + 1, + 10, + ), + shape, + False, + ) == (1, 10) # Check negative -> positive assert canonical_slicers(-1, shape) == (9, slice(None)) assert canonical_slicers((slice(None), -1), shape) == (slice(None), 9) @@ -150,20 +169,15 @@ def _slices_for_len(L): # Example slices for a dimension of length L if L == 0: raise ValueError('Need length > 0') - sdefs = [ - 0, - L // 2, - L - 1, - -1, - slice(None), - slice(L - 1)] + sdefs = [0, L // 2, L - 1, -1, slice(None), slice(L - 1)] if L > 1: sdefs += [ -2, slice(1, L - 1), slice(1, L - 1, 2), slice(L - 1, 1, -1), - slice(L - 1, 1, -2)] + slice(L - 1, 1, -2), + ] return tuple(sdefs) @@ -276,24 +290,26 @@ def test_optimize_slicer(): for is_slowest in (True, False): # following tests not affected by all_full or optimization # full - always passes through - assert ( - optimize_slicer(slice(None), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) + assert optimize_slicer(slice(None), 10, all_full, is_slowest, 4, heuristic) == ( + slice(None), + slice(None), + ) # Even if full specified with explicit values - assert ( - optimize_slicer(slice(10), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) - assert ( - optimize_slicer(slice(0, 10), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) - assert ( - optimize_slicer(slice(0, 10, 1), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None))) + assert optimize_slicer(slice(10), 10, all_full, is_slowest, 4, heuristic) == ( + slice(None), + slice(None), + ) + assert optimize_slicer(slice(0, 10), 10, all_full, is_slowest, 4, heuristic) == ( + slice(None), + slice(None), + ) + assert optimize_slicer( + slice(0, 10, 1), 10, all_full, is_slowest, 4, heuristic + ) == (slice(None), slice(None)) # Reversed full is still full, but with reversed post_slice - assert ( - optimize_slicer( - slice(None, None, -1), 10, all_full, is_slowest, 4, heuristic) == - (slice(None), slice(None, None, -1))) + assert optimize_slicer( + slice(None, None, -1), 10, all_full, is_slowest, 4, heuristic + ) == (slice(None), slice(None, None, -1)) # Contiguous is contiguous unless heuristic kicks in, in which case it may # be 'full' assert optimize_slicer(slice(9), 10, False, False, 4, 
_always) == (slice(0, 9, 1), slice(None)) @@ -303,48 +319,78 @@ def test_optimize_slicer(): assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # Nor if the heuristic won't update assert optimize_slicer(slice(9), 10, True, False, 4, _never) == (slice(0, 9, 1), slice(None)) - assert (optimize_slicer(slice(1, 10), 10, True, False, 4, _never) == - (slice(1, 10, 1), slice(None))) + assert optimize_slicer(slice(1, 10), 10, True, False, 4, _never) == ( + slice(1, 10, 1), + slice(None), + ) # Reversed contiguous still contiguous - assert (optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == - (slice(0, 9, 1), slice(None, None, -1))) - assert (optimize_slicer(slice(8, None, -1), 10, True, False, 4, _always) == - (slice(None), slice(8, None, -1))) - assert (optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == - (slice(0, 9, 1), slice(None, None, -1))) - assert (optimize_slicer(slice(9, 0, -1), 10, False, False, 4, _never) == - (slice(1, 10, 1), slice(None, None, -1))) + assert optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == ( + slice(0, 9, 1), + slice(None, None, -1), + ) + assert optimize_slicer(slice(8, None, -1), 10, True, False, 4, _always) == ( + slice(None), + slice(8, None, -1), + ) + assert optimize_slicer(slice(8, None, -1), 10, False, False, 4, _never) == ( + slice(0, 9, 1), + slice(None, None, -1), + ) + assert optimize_slicer(slice(9, 0, -1), 10, False, False, 4, _never) == ( + slice(1, 10, 1), + slice(None, None, -1), + ) # Non-contiguous - assert (optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _never) == - (slice(0, 10, 2), slice(None))) + assert optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _never) == ( + slice(0, 10, 2), + slice(None), + ) # all_full triggers optimization, but optimization does nothing - assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _never) == - (slice(0, 10, 2), slice(None))) + assert optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _never) == ( + slice(0, 10, 2), + slice(None), + ) # all_full triggers optimization, optimization does something - assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == - (slice(None), slice(0, 10, 2))) + assert optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == ( + slice(None), + slice(0, 10, 2), + ) # all_full disables optimization, optimization does something - assert (optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _always) == - (slice(0, 10, 2), slice(None))) + assert optimize_slicer(slice(0, 10, 2), 10, False, False, 4, _always) == ( + slice(0, 10, 2), + slice(None), + ) # Non contiguous, reversed - assert (optimize_slicer(slice(10, None, -2), 10, False, False, 4, _never) == - (slice(1, 10, 2), slice(None, None, -1))) - assert (optimize_slicer(slice(10, None, -2), 10, True, False, 4, _always) == - (slice(None), slice(9, None, -2))) + assert optimize_slicer(slice(10, None, -2), 10, False, False, 4, _never) == ( + slice(1, 10, 2), + slice(None, None, -1), + ) + assert optimize_slicer(slice(10, None, -2), 10, True, False, 4, _always) == ( + slice(None), + slice(9, None, -2), + ) # Short non-contiguous - assert (optimize_slicer(slice(2, 8, 2), 10, False, False, 4, _never) == - (slice(2, 8, 2), slice(None))) + assert optimize_slicer(slice(2, 8, 2), 10, False, False, 4, _never) == ( + slice(2, 8, 2), + slice(None), + ) # with partial read - assert (optimize_slicer(slice(2, 8, 2), 10, True, False, 4, _partial) == - (slice(2, 8, 1), slice(None, None, 2))) + assert 
optimize_slicer(slice(2, 8, 2), 10, True, False, 4, _partial) == ( + slice(2, 8, 1), + slice(None, None, 2), + ) # If this is the slowest changing dimension, heuristic can upgrade None to # contiguous, but not (None, contiguous) to full # we've done this one already - assert (optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == - (slice(None), slice(0, 10, 2))) + assert optimize_slicer(slice(0, 10, 2), 10, True, False, 4, _always) == ( + slice(None), + slice(0, 10, 2), + ) # if slowest, just upgrade to contiguous - assert (optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always) == - (slice(0, 10, 1), slice(None, None, 2))) + assert optimize_slicer(slice(0, 10, 2), 10, True, True, 4, _always) == ( + slice(0, 10, 1), + slice(None, None, 2), + ) # contiguous does not upgrade to full assert optimize_slicer(slice(9), 10, True, True, 4, _always) == (slice(0, 9, 1), slice(None)) # integer @@ -365,70 +411,109 @@ def test_optimize_slicer(): def test_optimize_read_slicers(): # Test function to optimize read slicers assert optimize_read_slicers((1,), (10,), 4, _never) == ((1,), ()) - assert (optimize_read_slicers((slice(None),), (10,), 4, _never) == - ((slice(None),), (slice(None),))) - assert (optimize_read_slicers((slice(9),), (10,), 4, _never) == - ((slice(0, 9, 1),), (slice(None),))) + assert optimize_read_slicers((slice(None),), (10,), 4, _never) == ( + (slice(None),), + (slice(None),), + ) + assert optimize_read_slicers((slice(9),), (10,), 4, _never) == ( + (slice(0, 9, 1),), + (slice(None),), + ) # optimize cannot update a continuous to a full if last - assert (optimize_read_slicers((slice(9),), (10,), 4, _always) == - ((slice(0, 9, 1),), (slice(None),))) + assert optimize_read_slicers((slice(9),), (10,), 4, _always) == ( + (slice(0, 9, 1),), + (slice(None),), + ) # optimize can update non-contiguous to continuous even if last # not optimizing - assert (optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _never) == - ((slice(0, 9, 2),), (slice(None),))) + assert optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _never) == ( + (slice(0, 9, 2),), + (slice(None),), + ) # optimizing - assert (optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _always) == - ((slice(0, 9, 1),), (slice(None, None, 2),))) + assert optimize_read_slicers((slice(0, 9, 2),), (10,), 4, _always) == ( + (slice(0, 9, 1),), + (slice(None, None, 2),), + ) # Optimize does nothing for integer when last assert optimize_read_slicers((1,), (10,), 4, _always) == ((1,), ()) # 2D - assert (optimize_read_slicers((slice(None), slice(None)), (10, 6), 4, _never) == - ((slice(None), slice(None)), (slice(None), slice(None)))) - assert (optimize_read_slicers((slice(None), 1), (10, 6), 4, _never) == - ((slice(None), 1), (slice(None),))) - assert (optimize_read_slicers((1, slice(None)), (10, 6), 4, _never) == - ((1, slice(None)), (slice(None),))) + assert optimize_read_slicers((slice(None), slice(None)), (10, 6), 4, _never) == ( + (slice(None), slice(None)), + (slice(None), slice(None)), + ) + assert optimize_read_slicers((slice(None), 1), (10, 6), 4, _never) == ( + (slice(None), 1), + (slice(None),), + ) + assert optimize_read_slicers((1, slice(None)), (10, 6), 4, _never) == ( + (1, slice(None)), + (slice(None),), + ) # Not optimizing a partial slice - assert (optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _never) == - ((slice(0, 9, 1), slice(None)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _never) == ( + (slice(0, 9, 1), slice(None)), + (slice(None), 
slice(None)), + ) # Optimizing a partial slice - assert (optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _always) == - ((slice(None), slice(None)), (slice(0, 9, 1), slice(None)))) + assert optimize_read_slicers((slice(9), slice(None)), (10, 6), 4, _always) == ( + (slice(None), slice(None)), + (slice(0, 9, 1), slice(None)), + ) # Optimize cannot update a continuous to a full if last - assert (optimize_read_slicers((slice(None), slice(5)), (10, 6), 4, _always) == - ((slice(None), slice(0, 5, 1)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(None), slice(5)), (10, 6), 4, _always) == ( + (slice(None), slice(0, 5, 1)), + (slice(None), slice(None)), + ) # optimize can update non-contiguous to full if not last # not optimizing - assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _never) == - ((slice(0, 9, 3), slice(None)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _never) == ( + (slice(0, 9, 3), slice(None)), + (slice(None), slice(None)), + ) # optimizing full - assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _always) == - ((slice(None), slice(None)), (slice(0, 9, 3), slice(None)))) + assert optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _always) == ( + (slice(None), slice(None)), + (slice(0, 9, 3), slice(None)), + ) # optimizing partial - assert (optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _partial) == - ((slice(0, 9, 1), slice(None)), (slice(None, None, 3), slice(None)))) + assert optimize_read_slicers((slice(0, 9, 3), slice(None)), (10, 6), 4, _partial) == ( + (slice(0, 9, 1), slice(None)), + (slice(None, None, 3), slice(None)), + ) # optimize can update non-contiguous to continuous even if last # not optimizing - assert (optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _never) == - ((slice(None), slice(0, 5, 2)), (slice(None), slice(None)))) + assert optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _never) == ( + (slice(None), slice(0, 5, 2)), + (slice(None), slice(None)), + ) # optimizing - assert (optimize_read_slicers((slice(None), slice(0, 5, 2),), (10, 6), 4, _always) == - ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2)))) + assert optimize_read_slicers( + ( + slice(None), + slice(0, 5, 2), + ), + (10, 6), + 4, + _always, + ) == ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2))) # Optimize does nothing for integer when last - assert (optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == - ((slice(None), 1), (slice(None),))) + assert optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == ( + (slice(None), 1), + (slice(None),), + ) # Check gap threshold with 3D _depends0 = partial(threshold_heuristic, skip_thresh=10 * 4 - 1) _depends1 = partial(threshold_heuristic, skip_thresh=10 * 4) - assert (optimize_read_slicers( - (slice(9), slice(None), slice(None)), (10, 6, 2), 4, _depends0) == - ((slice(None), slice(None), slice(None)), (slice(0, 9, 1), slice(None), slice(None)))) - assert (optimize_read_slicers( - (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0) == - ((slice(None), slice(0, 5, 1), slice(None)), (slice(None), slice(None), slice(None)))) - assert (optimize_read_slicers( - (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1) == - ((slice(None), slice(None), slice(None)), (slice(None), slice(0, 5, 1), slice(None)))) + assert optimize_read_slicers( + (slice(9), slice(None), slice(None)), (10, 6, 
2), 4, _depends0 + ) == ((slice(None), slice(None), slice(None)), (slice(0, 9, 1), slice(None), slice(None))) + assert optimize_read_slicers( + (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends0 + ) == ((slice(None), slice(0, 5, 1), slice(None)), (slice(None), slice(None), slice(None))) + assert optimize_read_slicers( + (slice(None), slice(5), slice(None)), (10, 6, 2), 4, _depends1 + ) == ((slice(None), slice(None), slice(None)), (slice(None), slice(0, 5, 1), slice(None))) # Check longs as integer slices sn = slice(None) assert optimize_read_slicers((1, 2, 3), (2, 3, 4), 4, _always) == ((sn, sn, 3), (1, 2)) @@ -440,94 +525,85 @@ def test_slicers2segments(): assert slicers2segments((0, 1), (10, 6), 7, 4) == [[7 + 10 * 4, 4]] assert slicers2segments((0, 1, 2), (10, 6, 4), 7, 4) == [[7 + 10 * 4 + 10 * 6 * 2 * 4, 4]] assert slicers2segments((slice(None),), (10,), 7, 4) == [[7, 10 * 4]] - assert (slicers2segments((0, slice(None)), (10, 6), 7, 4) == - [[7 + 10 * 4 * i, 4] for i in range(6)]) + assert slicers2segments((0, slice(None)), (10, 6), 7, 4) == [ + [7 + 10 * 4 * i, 4] for i in range(6) + ] assert slicers2segments((slice(None), 0), (10, 6), 7, 4) == [[7, 10 * 4]] assert slicers2segments((slice(None), slice(None)), (10, 6), 7, 4) == [[7, 10 * 6 * 4]] - assert (slicers2segments((slice(None), slice(None), 2), (10, 6, 4), 7, 4) == - [[7 + 10 * 6 * 2 * 4, 10 * 6 * 4]]) + assert slicers2segments((slice(None), slice(None), 2), (10, 6, 4), 7, 4) == [ + [7 + 10 * 6 * 2 * 4, 10 * 6 * 4] + ] def test_calc_slicedefs(): # Check get_segments routine. The tests aren't well organized because I # wrote them after the code. We live and (fail to) learn - segments, out_shape, new_slicing = calc_slicedefs( - (1,), (10,), 4, 7, 'F', _never) + segments, out_shape, new_slicing = calc_slicedefs((1,), (10,), 4, 7, 'F', _never) assert segments == [[11, 4]] assert new_slicing == () assert out_shape == () - assert ( - calc_slicedefs((slice(None),), (10,), 4, 7, 'F', _never) == - ([[7, 40]], - (10,), - (), - )) - assert ( - calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never) == - ([[7, 36]], - (9,), - (), - )) - assert ( - calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never) == - ([[11, 32]], - (8,), - (), - )) + assert calc_slicedefs((slice(None),), (10,), 4, 7, 'F', _never) == ( + [[7, 40]], + (10,), + (), + ) + assert calc_slicedefs((slice(9),), (10,), 4, 7, 'F', _never) == ( + [[7, 36]], + (9,), + (), + ) + assert calc_slicedefs((slice(1, 9),), (10,), 4, 7, 'F', _never) == ( + [[11, 32]], + (8,), + (), + ) # Two dimensions, single slice - assert ( - calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never) == - ([[7, 4], [47, 4], [87, 4], [127, 4], [167, 4], [207, 4]], - (6,), - (), - )) - assert ( - calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never) == - ([[7, 6 * 4]], - (6,), - (), - )) + assert calc_slicedefs((0,), (10, 6), 4, 7, 'F', _never) == ( + [[7, 4], [47, 4], [87, 4], [127, 4], [167, 4], [207, 4]], + (6,), + (), + ) + assert calc_slicedefs((0,), (10, 6), 4, 7, 'C', _never) == ( + [[7, 6 * 4]], + (6,), + (), + ) # Two dimensions, contiguous not full - assert ( - calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never) == - ([[51, 4], [91, 4], [131, 4], [171, 4]], - (4,), - (), - )) - assert ( - calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', _never) == - ([[7 + 7 * 4, 16]], - (4,), - (), - )) + assert calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'F', _never) == ( + [[51, 4], [91, 4], [131, 4], [171, 4]], + (4,), + (), + ) + assert calc_slicedefs((1, slice(1, 5)), (10, 6), 4, 7, 'C', 
_never) == ( + [[7 + 7 * 4, 16]], + (4,), + (), + ) # With full slice first - assert ( - calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never) == - ([[47, 160]], - (10, 4), - (), - )) + assert calc_slicedefs((slice(None), slice(1, 5)), (10, 6), 4, 7, 'F', _never) == ( + [[47, 160]], + (10, 4), + (), + ) # Check effect of heuristic on calc_slicedefs # Even integer slices can generate full when heuristic says so - assert ( - calc_slicedefs((1, slice(None)), (10, 6), 4, 7, 'F', _always) == - ([[7, 10 * 6 * 4]], - (10, 6), - (1, slice(None)), - )) + assert calc_slicedefs((1, slice(None)), (10, 6), 4, 7, 'F', _always) == ( + [[7, 10 * 6 * 4]], + (10, 6), + (1, slice(None)), + ) # Except when last - assert ( - calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always) == - ([[7 + 10 * 4, 10 * 4]], - (10,), - (), - )) + assert calc_slicedefs((slice(None), 1), (10, 6), 4, 7, 'F', _always) == ( + [[7 + 10 * 4, 10 * 4]], + (10,), + (), + ) def test_predict_shape(): shapes = (15, 16, 17, 18) for n_dim in range(len(shapes)): - shape = shapes[:n_dim + 1] + shape = shapes[: n_dim + 1] arr = np.arange(np.prod(shape)).reshape(shape) slicers_list = [] for i in range(n_dim): @@ -548,8 +624,16 @@ def test_predict_shape(): def test_strided_scalar(): # Utility to make numpy array of given shape from scalar using striding for shape, scalar in product( - ((2,), (2, 3,), (2, 3, 4)), - (1, 2, np.int16(3))): + ( + (2,), + ( + 2, + 3, + ), + (2, 3, 4), + ), + (1, 2, np.int16(3)), + ): expected = np.zeros(shape, dtype=np.array(scalar).dtype) + scalar observed = strided_scalar(shape, scalar) assert_array_equal(observed, expected) @@ -563,6 +647,7 @@ def test_strided_scalar(): def setval(x): x[..., 0] = 99 + # RuntimeError for numpy < 1.10 with pytest.raises((RuntimeError, ValueError)): setval(observed) @@ -582,10 +667,8 @@ def test_read_segments(): fobj.write(arr.tobytes()) _check_bytes(read_segments(fobj, [(0, 200)], 200), arr) _check_bytes(read_segments(fobj, [(0, 100), (100, 100)], 200), arr) - _check_bytes(read_segments(fobj, [(0, 50), (100, 50)], 100), - np.r_[arr[:25], arr[50:75]]) - _check_bytes(read_segments(fobj, [(10, 40), (100, 50)], 90), - np.r_[arr[5:25], arr[50:75]]) + _check_bytes(read_segments(fobj, [(0, 50), (100, 50)], 100), np.r_[arr[:25], arr[50:75]]) + _check_bytes(read_segments(fobj, [(10, 40), (100, 50)], 90), np.r_[arr[5:25], arr[50:75]]) _check_bytes(read_segments(fobj, [], 0), arr[0:0]) # Error conditions with pytest.raises(ValueError): @@ -626,7 +709,7 @@ def random_segments(nsegs): # Get the data that should be returned for the given segments def get_expected(segs): - segs = [arr[off:off + length] for off, length in segs] + segs = [arr[off : off + length] for off, length in segs] return np.concatenate(segs) # Read from the file, check the result. We do this task simultaneously in @@ -658,8 +741,7 @@ def _check_slicer(sliceobj, arr, fobj, offset, order, heuristic=threshold_heuris def slicer_samples(shape): - """ Generator returns slice samples for given `shape` - """ + """Generator returns slice samples for given `shape`""" ndim = len(shape) slicers_list = [] for i in range(ndim): @@ -742,5 +824,6 @@ def test_fileslice_heuristic(): _check_slicer(sliceobj, arr, fobj, 0, order, heuristic) # Check _simple_fileslice while we're at it - si como no? 
new_slice = _simple_fileslice( - fobj, sliceobj, arr.shape, arr.dtype, 0, order, heuristic) + fobj, sliceobj, arr.shape, arr.dtype, 0, order, heuristic + ) assert_array_equal(arr[sliceobj], new_slice) diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index ffd7d91b6a..3544b88977 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing fileutils module +"""Testing fileutils module """ diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index a08a24d102..62df671aca 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -1,14 +1,25 @@ -""" Test floating point deconstructions and floor methods +"""Test floating point deconstructions and floor methods """ import sys import numpy as np -from ..casting import (floor_exact, ceil_exact, as_int, FloatingError, - int_to_float, floor_log2, type_info, _check_nmant, - _check_maxexp, ok_floats, on_powerpc, have_binary128, - longdouble_precision_improved) +from ..casting import ( + floor_exact, + ceil_exact, + as_int, + FloatingError, + int_to_float, + floor_log2, + type_info, + _check_nmant, + _check_maxexp, + ok_floats, + on_powerpc, + have_binary128, + longdouble_precision_improved, +) from ..testing import suppress_warnings import pytest @@ -19,13 +30,17 @@ def dtt2dict(dtt): - """ Create info dictionary from numpy type - """ + """Create info dictionary from numpy type""" info = np.finfo(dtt) - return dict(min=info.min, max=info.max, - nexp=info.nexp, nmant=info.nmant, - minexp=info.minexp, maxexp=info.maxexp, - width=np.dtype(dtt).itemsize) + return dict( + min=info.min, + max=info.max, + nexp=info.nexp, + nmant=info.nmant, + minexp=info.minexp, + maxexp=info.maxexp, + width=np.dtype(dtt).itemsize, + ) def test_type_info(): @@ -33,10 +48,18 @@ def test_type_info(): for dtt in np.sctypes['int'] + np.sctypes['uint']: info = np.iinfo(dtt) infod = type_info(dtt) - assert dict(min=info.min, max=info.max, - nexp=None, nmant=None, - minexp=None, maxexp=None, - width=np.dtype(dtt).itemsize) == infod + assert ( + dict( + min=info.min, + max=info.max, + nexp=None, + nmant=None, + minexp=None, + maxexp=None, + width=np.dtype(dtt).itemsize, + ) + == infod + ) assert infod['min'].dtype.type == dtt assert infod['max'].dtype.type == dtt for dtt in IEEE_floats + [np.complex64, np.complex64]: @@ -51,10 +74,13 @@ def test_type_info(): vals = tuple(ld_dict[k] for k in ('nmant', 'nexp', 'width')) # Information for PPC head / tail doubles from: # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html - if vals in ((52, 11, 8), # longdouble is same as double - (63, 15, 12), (63, 15, 16), # intel 80 bit - (112, 15, 16), # real float128 - (106, 11, 16)): # PPC head, tail doubles, expected values + if vals in ( + (52, 11, 8), # longdouble is same as double + (63, 15, 12), + (63, 15, 16), # intel 80 bit + (112, 15, 16), # real float128 + (106, 11, 16), + ): # PPC head, tail doubles, expected values pass elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles # min and max broken, copy from infod @@ -67,7 +93,7 @@ def test_type_info(): ld_dict = dbl_dict.copy() ld_dict['width'] = width else: - raise ValueError(f"Unexpected float type {np.longdouble} to test") + raise ValueError(f'Unexpected float type {np.longdouble} to test') assert ld_dict == infod @@ -122,7 +148,7 @@ def 
test_as_int(): except FloatingError: nmant = 63 # Unknown precision, let's hope it's at least 63 v = np.longdouble(2) ** (nmant + 1) - 1 - assert as_int(v) == 2**(nmant + 1) - 1 + assert as_int(v) == 2 ** (nmant + 1) - 1 # Check for predictable overflow nexp64 = floor_log2(type_info(np.float64)['max']) with np.errstate(over='ignore'): @@ -145,7 +171,7 @@ def test_int_to_float(): # IEEEs in this case are binary formats only nexp = floor_log2(type_info(ie3)['max']) # Values too large for the format - smn, smx = -2**(nexp + 1), 2**(nexp + 1) + smn, smx = -(2 ** (nexp + 1)), 2 ** (nexp + 1) if ie3 is np.float64: with pytest.raises(OverflowError): int_to_float(smn, ie3) @@ -165,7 +191,7 @@ def test_int_to_float(): assert int_to_float(-i, LD) == LD(-i) # Above max of float64, we're hosed nexp64 = floor_log2(type_info(np.float64)['max']) - smn64, smx64 = -2**(nexp64 + 1), 2**(nexp64 + 1) + smn64, smx64 = -(2 ** (nexp64 + 1)), 2 ** (nexp64 + 1) # The algorithm here implemented goes through float64, so supermax and # supermin will cause overflow errors with pytest.raises(OverflowError): @@ -177,7 +203,7 @@ def test_int_to_float(): except FloatingError: # don't know where to test return # test we recover precision just above nmant - i = 2**(nmant + 1) - 1 + i = 2 ** (nmant + 1) - 1 assert as_int(int_to_float(i, LD)) == i assert as_int(int_to_float(-i, LD)) == -i # If longdouble can cope with 2**64, test @@ -200,7 +226,7 @@ def test_as_int_np_fix(): def test_floor_exact_16(): # A normal integer can generate an inf in float16 assert floor_exact(2**31, np.float16) == np.inf - assert floor_exact(-2**31, np.float16) == -np.inf + assert floor_exact(-(2**31), np.float16) == -np.inf def test_floor_exact_64(): @@ -212,8 +238,8 @@ def test_floor_exact_64(): assert len(gaps) == 1 gap = gaps.pop() assert gap == int(gap) - test_val = 2**(e + 1) - 1 - assert floor_exact(test_val, np.float64) == 2**(e + 1) - int(gap) + test_val = 2 ** (e + 1) - 1 + assert floor_exact(test_val, np.float64) == 2 ** (e + 1) - int(gap) def test_floor_exact(): @@ -235,8 +261,8 @@ def test_floor_exact(): assert floor_exact(2**5000, t) == np.inf assert ceil_exact(2**5000, t) == np.inf # A number more negative returns -inf - assert floor_exact(-2**5000, t) == -np.inf - assert ceil_exact(-2**5000, t) == -np.inf + assert floor_exact(-(2**5000), t) == -np.inf + assert ceil_exact(-(2**5000), t) == -np.inf # Check around end of integer precision nmant = info['nmant'] for i in range(nmant + 1): @@ -247,16 +273,14 @@ def test_floor_exact(): assert func(-iv, t) == -iv assert func(iv - 1, t) == iv - 1 assert func(-iv + 1, t) == -iv + 1 - if t is np.longdouble and ( - on_powerpc() or - longdouble_precision_improved()): + if t is np.longdouble and (on_powerpc() or longdouble_precision_improved()): # The nmant value for longdouble on PPC appears to be conservative, # so that the tests for behavior above the nmant range fail. # windows longdouble can change from float64 to Intel80 in some # situations, in which case nmant will not be correct continue # Confirm to ourselves that 2**(nmant+1) can't be exactly represented - iv = 2**(nmant + 1) + iv = 2 ** (nmant + 1) assert int_flex(iv + 1, t) == iv assert int_ceex(iv + 1, t) == iv + 2 # negatives @@ -265,8 +289,8 @@ def test_floor_exact(): # The gap in representable numbers is 2 above 2**(nmant+1), 4 above # 2**(nmant+2), and so on. 
for i in range(5): - iv = 2**(nmant + 1 + i) - gap = 2**(i + 1) + iv = 2 ** (nmant + 1 + i) + gap = 2 ** (i + 1) assert as_int(t(iv) + t(gap)) == iv + gap for j in range(1, gap): assert int_flex(iv + j, t) == iv @@ -286,6 +310,8 @@ def test_usable_binary128(): yes = have_binary128() with np.errstate(over='ignore'): exp_test = np.longdouble(2) ** 16383 - assert yes == (exp_test.dtype.itemsize == 16 and - np.isfinite(exp_test) and - _check_nmant(np.longdouble, 112)) + assert yes == ( + exp_test.dtype.itemsize == 16 + and np.isfinite(exp_test) + and _check_nmant(np.longdouble, 112) + ) diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 44266f25fd..e1a7ec9264 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for image funcs """ +"""Test for image funcs""" import numpy as np @@ -37,9 +37,7 @@ def test_concat(): concat_images([]) # Build combinations of 3D, 4D w/size[3] == 1, and 4D w/size[3] == 3 - all_shapes_5D = ((1, 4, 5, 3, 3), - (7, 3, 1, 4, 5), - (0, 2, 1, 4, 5)) + all_shapes_5D = ((1, 4, 5, 3, 3), (7, 3, 1, 4, 5), (0, 2, 1, 4, 5)) affine = np.eye(4) for dim in range(2, 6): @@ -61,7 +59,7 @@ def test_concat(): img2_mem = Nifti1Image(data1, affine + 1) # bad affine # Loop over every possible axis, including None (explicit and implied) - for axis in (list(range(-(dim - 2), (dim - 1))) + [None, '__default__']): + for axis in list(range(-(dim - 2), (dim - 1))) + [None, '__default__']: # Allow testing default vs. passing explicit param if axis == '__default__': @@ -83,12 +81,12 @@ def test_concat(): # 3D and the same size) fails, so we also # have to expect errors for those. if axis is None: # 3D from here and below - all_data = np.concatenate([data0[..., np.newaxis], - data1[..., np.newaxis]], - **np_concat_kwargs) + all_data = np.concatenate( + [data0[..., np.newaxis], data1[..., np.newaxis]], + **np_concat_kwargs, + ) else: # both 3D, appending on final axis - all_data = np.concatenate([data0, data1], - **np_concat_kwargs) + all_data = np.concatenate([data0, data1], **np_concat_kwargs) expect_error = False except ValueError: # Shapes are not combinable @@ -102,12 +100,13 @@ def test_concat(): imgs_mixed = [imgs[0], img_files[1], imgs[2]] for img0, img1, img2 in (imgs, img_files, imgs_mixed): try: - all_imgs = concat_images([img0, img1], - **concat_imgs_kwargs) + all_imgs = concat_images([img0, img1], **concat_imgs_kwargs) except ValueError as ve: assert expect_error, str(ve) else: - assert not expect_error, "Expected a concatenation error, but got none." + assert ( + not expect_error + ), 'Expected a concatenation error, but got none.' assert_array_equal(all_imgs.get_fdata(), all_data) assert_array_equal(all_imgs.affine, affine) @@ -121,7 +120,9 @@ def test_concat(): except ValueError as ve: assert expect_error, str(ve) else: - assert not expect_error, "Expected a concatenation error, but got none." + assert ( + not expect_error + ), 'Expected a concatenation error, but got none.' 
assert_array_equal(all_imgs.get_fdata(), all_data) assert_array_equal(all_imgs.affine, affine) @@ -176,7 +177,8 @@ def test_closest_canonical(): # an axis swap aff = np.diag([1, 0, 0, 1]) - aff[1, 2] = 1; aff[2, 1] = 1 + aff[1, 2] = 1 + aff[2, 1] = 1 img = Nifti1Image(arr, aff) img.header.set_dim_info(0, 1, 2) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index a12227a894..57a0322cab 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -1,4 +1,4 @@ -""" Validate image API +"""Validate image API What is the image API? @@ -32,13 +32,24 @@ import numpy as np from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') _, have_h5py, _ = optional_package('h5py') -from .. import (AnalyzeImage, Spm99AnalyzeImage, Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, Nifti2Pair, Nifti2Image, - GiftiImage, - MGHImage, Minc1Image, Minc2Image, is_proxy) +from .. import ( + AnalyzeImage, + Spm99AnalyzeImage, + Spm2AnalyzeImage, + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Nifti2Image, + GiftiImage, + MGHImage, + Minc1Image, + Minc2Image, + is_proxy, +) from ..spatialimages import SpatialImage from .. import minc1, minc2, parrec, brikhead from ..deprecator import ExpiredDeprecationError @@ -47,8 +58,14 @@ import pytest from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose -from nibabel.testing import (bytesio_round_trip, bytesio_filemap, assert_data_similar, - clear_and_catch_warnings, nullcontext, expires) +from nibabel.testing import ( + bytesio_round_trip, + bytesio_filemap, + assert_data_similar, + clear_and_catch_warnings, + nullcontext, + expires, +) from ..tmpdirs import InTemporaryDirectory from .test_api_validators import ValidateAPI @@ -65,7 +82,8 @@ def maybe_deprecated(meth_name): class GenericImageAPI(ValidateAPI): - """ General image validation API """ + """General image validation API""" + # Whether this image type can do scaling of data has_scaling = False # Whether the image can be saved to disk / file objects @@ -75,7 +93,7 @@ class GenericImageAPI(ValidateAPI): standard_extension = '.img' def obj_params(self): - """ Return generator returning (`img_creator`, `img_params`) tuples + """Return generator returning (`img_creator`, `img_params`) tuples ``img_creator`` is a function taking no arguments and returning a fresh image. 
We need to return this ``img_creator`` function rather than an @@ -150,18 +168,18 @@ def validate_filenames(self, imaker, params): # to_ / from_ filename fname = 'another_image' + self.standard_extension for path in (fname, pathlib.Path(fname)): - with InTemporaryDirectory(): - # Validate that saving or loading a file doesn't use deprecated methods internally - with clear_and_catch_warnings() as w: - warnings.filterwarnings('error', - category=DeprecationWarning, - module=r"nibabel.*") - img.to_filename(path) - rt_img = img.__class__.from_filename(path) - assert_array_equal(img.shape, rt_img.shape) - assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) - assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) - del rt_img # to allow windows to delete the directory + with InTemporaryDirectory(): + # Validate that saving or loading a file doesn't use deprecated methods internally + with clear_and_catch_warnings() as w: + warnings.filterwarnings( + 'error', category=DeprecationWarning, module=r'nibabel.*' + ) + img.to_filename(path) + rt_img = img.__class__.from_filename(path) + assert_array_equal(img.shape, rt_img.shape) + assert_almost_equal(img.get_fdata(), rt_img.get_fdata()) + assert_almost_equal(np.asanyarray(img.dataobj), np.asanyarray(rt_img.dataobj)) + del rt_img # to allow windows to delete the directory def validate_no_slicing(self, imaker, params): img = imaker() @@ -170,7 +188,7 @@ def validate_no_slicing(self, imaker, params): with pytest.raises(TypeError): img[:] - @expires("5.0.0") + @expires('5.0.0') def validate_get_data_deprecated(self, imaker, params): img = imaker() with pytest.deprecated_call(): @@ -179,7 +197,7 @@ def validate_get_data_deprecated(self, imaker, params): class GetSetDtypeMixin: - """ Adds dtype tests + """Adds dtype tests Add this one if your image has ``get_data_dtype`` and ``set_data_dtype``. """ @@ -204,11 +222,12 @@ def validate_dtype(self, imaker, params): class DataInterfaceMixin(GetSetDtypeMixin): - """ Test dataobj interface for images with array backing + """Test dataobj interface for images with array backing Use this mixin if your image has a ``dataobj`` property that contains an array or an array-like thing. """ + meth_names = ('get_fdata',) def validate_data_interface(self, imaker, params): @@ -343,8 +362,7 @@ def _check_array_interface(self, imaker, meth_name): def _check_array_caching(self, imaker, meth_name, caching): img = imaker() method = getattr(img, meth_name) - get_data_func = (method if caching is None else - partial(method, caching=caching)) + get_data_func = method if caching is None else partial(method, caching=caching) assert isinstance(img.dataobj, np.ndarray) assert img.in_memory with maybe_deprecated(meth_name): @@ -445,7 +463,7 @@ def validate_mmap_parameter(self, imaker, params): class HeaderShapeMixin: - """ Tests that header shape can be set and got + """Tests that header shape can be set and got Add this one if your header supports ``get_data_shape`` and ``set_data_shape``. @@ -463,7 +481,7 @@ def validate_header_shape(self, imaker, params): class AffineMixin: - """ Adds test of affine property, method + """Adds test of affine property, method Add this one if your image has an ``affine`` property. 
""" @@ -498,17 +516,17 @@ def validate_file_stream_equivalence(self, imaker, params): fname = 'img' + self.standard_extension img.to_filename(fname) - with open("stream", "wb") as fobj: + with open('stream', 'wb') as fobj: img.to_stream(fobj) # Check that writing gets us the same thing contents1 = pathlib.Path(fname).read_bytes() - contents2 = pathlib.Path("stream").read_bytes() + contents2 = pathlib.Path('stream').read_bytes() assert contents1 == contents2 # Check that reading gets us the same thing img_a = klass.from_filename(fname) - with open(fname, "rb") as fobj: + with open(fname, 'rb') as fobj: img_b = klass.from_stream(fobj) # This needs to happen while the filehandle is open assert np.array_equal(img_a.get_fdata(), img_b.get_fdata()) @@ -548,9 +566,9 @@ def validate_from_url(self, imaker, params): img = imaker() img_bytes = img.to_bytes() - server.expect_oneshot_request("/img").respond_with_data(img_bytes) - url = server.url_for("/img") - assert url.startswith("http://") # Check we'll trigger an HTTP handler + server.expect_oneshot_request('/img').respond_with_data(img_bytes) + url = server.url_for('/img') + assert url.startswith('http://') # Check we'll trigger an HTTP handler rt_img = img.__class__.from_url(url) assert rt_img.to_bytes() == img_bytes @@ -564,20 +582,20 @@ def validate_from_file_url(self, imaker, params): img = imaker() import uuid + fname = tmp_path / f'img-{uuid.uuid4()}{self.standard_extension}' img.to_filename(fname) - rt_img = img.__class__.from_url(f"file:///{fname}") + rt_img = img.__class__.from_url(f'file:///{fname}') assert self._header_eq(img.header, rt_img.header) assert np.array_equal(img.get_fdata(), rt_img.get_fdata()) del img del rt_img - @staticmethod def _header_eq(header_a, header_b): - """ Header equality check that can be overridden by a subclass of this test + """Header equality check that can be overridden by a subclass of this test This allows us to retain the same tests above when testing an image that uses an abstract class as a header, namely when testing the FileBasedImage API, which @@ -586,11 +604,9 @@ def _header_eq(header_a, header_b): return header_a == header_b -class LoadImageAPI(GenericImageAPI, - DataInterfaceMixin, - AffineMixin, - GetSetDtypeMixin, - HeaderShapeMixin): +class LoadImageAPI( + GenericImageAPI, DataInterfaceMixin, AffineMixin, GetSetDtypeMixin, HeaderShapeMixin +): # Callable returning an image from a filename loader = None # Sequence of dictionaries, where dictionaries have keys @@ -613,8 +629,8 @@ def validate_path_maybe_image(self, imaker, params): class MakeImageAPI(LoadImageAPI): - """ Validation for images we can make with ``func(data, affine, header)`` - """ + """Validation for images we can make with ``func(data, affine, header)``""" + # A callable returning an image from ``image_maker(data, affine, header)`` image_maker = None # A callable returning a header from ``header_maker()`` @@ -635,7 +651,6 @@ def make_imaker(arr, aff, header=None): return lambda: self.image_maker(arr, aff, header) def make_prox_imaker(arr, aff, hdr): - def prox_imaker(): img = self.image_maker(arr, aff, hdr) rt_img = bytesio_round_trip(img) @@ -643,20 +658,14 @@ def prox_imaker(): return prox_imaker - for shape, stored_dtype in product(self.example_shapes, - self.storable_dtypes): + for shape, stored_dtype in product(self.example_shapes, self.storable_dtypes): # To make sure we do not trigger scaling, always use the # stored_dtype for the input array. 
arr = np.arange(np.prod(shape), dtype=stored_dtype).reshape(shape) hdr = self.header_maker() hdr.set_data_dtype(stored_dtype) func = make_imaker(arr.copy(), aff, hdr) - params = dict( - dtype=stored_dtype, - affine=aff, - data=arr, - shape=shape, - is_proxy=False) + params = dict(dtype=stored_dtype, affine=aff, data=arr, shape=shape, is_proxy=False) yield make_imaker(arr.copy(), aff, hdr), params if not self.can_save: continue @@ -667,7 +676,7 @@ def prox_imaker(): class DtypeOverrideMixin(GetSetDtypeMixin): - """ Test images that can accept ``dtype`` arguments to ``__init__`` and + """Test images that can accept ``dtype`` arguments to ``__init__`` and ``to_file_map`` """ @@ -707,8 +716,7 @@ def validate_to_file_dtype_override(self, imaker, params): class ImageHeaderAPI(MakeImageAPI): - """ When ``self.image_maker`` is an image class, make header from class - """ + """When ``self.image_maker`` is an image class, make header from class""" def header_maker(self): return self.image_maker.header_class() @@ -720,8 +728,8 @@ class TestSpatialImageAPI(ImageHeaderAPI): class TestAnalyzeAPI(TestSpatialImageAPI, DtypeOverrideMixin): - """ General image validation API instantiated for Analyze images - """ + """General image validation API instantiated for Analyze images""" + klass = image_maker = AnalyzeImage has_scaling = False can_save = True @@ -766,7 +774,6 @@ class TestMinc1API(ImageHeaderAPI): class TestMinc2API(TestMinc1API): - def setup_method(self): if not have_h5py: raise unittest.SkipTest('Need h5py for these tests') @@ -777,7 +784,6 @@ def setup_method(self): class TestPARRECAPI(LoadImageAPI): - def loader(self, fname): return parrec.load(fname) diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index c23d145a36..13c403285c 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for loader function """ +"""Tests for loader function""" from io import BytesIO import shutil @@ -22,9 +22,20 @@ from .. import spm2analyze as spm2 from .. import nifti1 as ni1 from .. import loadsave as nils -from .. import (Nifti1Image, Nifti1Header, Nifti1Pair, Nifti2Image, Nifti2Pair, - Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - AnalyzeImage, MGHImage, all_image_classes) +from .. import ( + Nifti1Image, + Nifti1Header, + Nifti1Pair, + Nifti2Image, + Nifti2Pair, + Minc1Image, + Minc2Image, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + AnalyzeImage, + MGHImage, + all_image_classes, +) from ..tmpdirs import InTemporaryDirectory from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package @@ -47,8 +58,9 @@ def round_trip(img): def test_conversion_spatialimages(caplog): shape = (2, 4, 6) affine = np.diag([1, 2, 3, 1]) - klasses = [klass for klass in all_image_classes - if klass.rw and issubclass(klass, SpatialImage)] + klasses = [ + klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage) + ] for npt in np.float32, np.int16: data = np.arange(np.prod(shape), dtype=npt).reshape(shape) for r_class in klasses: diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 50142cfc92..fd9927eb00 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -6,19 +6,29 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for is_image / may_contain_header functions """ +"""Tests for is_image / may_contain_header functions""" import copy from os.path import dirname, basename, join as pjoin import numpy as np -from .. import (Nifti1Image, Nifti1Header, Nifti1Pair, - Nifti2Image, Nifti2Header, Nifti2Pair, - AnalyzeImage, AnalyzeHeader, - Minc1Image, Minc2Image, - Spm2AnalyzeImage, Spm99AnalyzeImage, - MGHImage, all_image_classes) +from .. import ( + Nifti1Image, + Nifti1Header, + Nifti1Pair, + Nifti2Image, + Nifti2Header, + Nifti2Pair, + AnalyzeImage, + AnalyzeHeader, + Minc1Image, + Minc2Image, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + MGHImage, + all_image_classes, +) DATA_PATH = pjoin(dirname(__file__), 'data') @@ -35,13 +45,12 @@ def test_sniff_and_guessed_image_type(img_klasses=all_image_classes): # either work, or fail if we're doing bad stuff. # * When the file is a mismatch, the functions should not throw. def test_image_class(img_path, expected_img_klass): - """ Compare an image of one image class to all others. + """Compare an image of one image class to all others. The function should make sure that it loads the image with the expected class, but failing when given a bad sniff (when the sniff is used).""" - def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, - msg): + def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg): """Embedded function to do the actual checks expected.""" if sniff_mode == 'no_sniff': @@ -49,8 +58,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, is_img, new_sniff = img_klass.path_maybe_image(img_path) elif sniff_mode in ('empty', 'irrelevant', 'bad_sniff'): # Add img_path to binaryblock sniff parameters - is_img, new_sniff = img_klass.path_maybe_image( - img_path, (sniff, img_path)) + is_img, new_sniff = img_klass.path_maybe_image(img_path, (sniff, img_path)) else: # Pass a sniff, but don't reuse across images. is_img, new_sniff = img_klass.path_maybe_image(img_path, sniff) @@ -58,16 +66,16 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, if expect_success: # Check that the sniff returned is appropriate. new_msg = f'{img_klass.__name__} returned sniff==None ({msg})' - expected_sizeof_hdr = getattr(img_klass.header_class, - 'sizeof_hdr', 0) - current_sizeof_hdr = 0 if new_sniff is None else \ - len(new_sniff[0]) + expected_sizeof_hdr = getattr(img_klass.header_class, 'sizeof_hdr', 0) + current_sizeof_hdr = 0 if new_sniff is None else len(new_sniff[0]) assert current_sizeof_hdr >= expected_sizeof_hdr, new_msg # Check that the image type was recognized. - new_msg = (f"{basename(img_path)} ({msg}) image " - f"is{'' if is_img else ' not'} " - f"a {img_klass.__name__} image.") + new_msg = ( + f'{basename(img_path)} ({msg}) image ' + f"is{'' if is_img else ' not'} " + f'a {img_klass.__name__} image.' 
+ ) assert is_img, new_msg if sniff_mode == 'vanilla': @@ -78,40 +86,45 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, sizeof_hdr = getattr(expected_img_klass.header_class, 'sizeof_hdr', 0) for sniff_mode, sniff in dict( - vanilla=None, # use the sniff of the previous item - no_sniff=None, # Don't pass a sniff - none=None, # pass None as the sniff, should query in fn - empty=b'', # pass an empty sniff, should query in fn - irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query - bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail + vanilla=None, # use the sniff of the previous item + no_sniff=None, # Don't pass a sniff + none=None, # pass None as the sniff, should query in fn + empty=b'', # pass an empty sniff, should query in fn + irrelevant=b'a' * (sizeof_hdr - 1), # A too-small sniff, query + bad_sniff=b'a' * sizeof_hdr, # Bad sniff, should fail ).items(): for klass in img_klasses: if klass == expected_img_klass: # Class will load unless you pass a bad sniff, # or the header ignores the sniff - expect_success = (sniff_mode != 'bad_sniff' or - sizeof_hdr == 0) + expect_success = sniff_mode != 'bad_sniff' or sizeof_hdr == 0 else: expect_success = False # Not sure the relationships # Reuse the sniff... but it will only change for some # sniff_mode values. - msg = (f'{expected_img_klass.__name__}/ {sniff_mode}/ ' - f'{expect_success}') - sniff = check_img(img_path, klass, sniff_mode=sniff_mode, - sniff=sniff, expect_success=expect_success, - msg=msg) + msg = f'{expected_img_klass.__name__}/ {sniff_mode}/ ' f'{expect_success}' + sniff = check_img( + img_path, + klass, + sniff_mode=sniff_mode, + sniff=sniff, + expect_success=expect_success, + msg=msg, + ) # Test whether we can guess the image type from example files - for img_filename, image_klass in [('example4d.nii.gz', Nifti1Image), - ('nifti1.hdr', Nifti1Pair), - ('example_nifti2.nii.gz', Nifti2Image), - ('nifti2.hdr', Nifti2Pair), - ('tiny.mnc', Minc1Image), - ('small.mnc', Minc2Image), - ('test.mgz', MGHImage), - ('analyze.hdr', Spm2AnalyzeImage)]: + for img_filename, image_klass in [ + ('example4d.nii.gz', Nifti1Image), + ('nifti1.hdr', Nifti1Pair), + ('example_nifti2.nii.gz', Nifti2Image), + ('nifti2.hdr', Nifti2Pair), + ('tiny.mnc', Minc1Image), + ('small.mnc', Minc2Image), + ('test.mgz', MGHImage), + ('analyze.hdr', Spm2AnalyzeImage), + ]: # print('Testing: %s %s' % (img_filename, image_klass.__name__)) test_image_class(pjoin(DATA_PATH, img_filename), image_klass) diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 601414e012..472e1c5d63 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,4 +1,4 @@ -""" Testing imageclasses module +"""Testing imageclasses module """ from os.path import dirname, join as pjoin diff --git a/nibabel/tests/test_imageglobals.py b/nibabel/tests/test_imageglobals.py index 42cbe6fdce..ac043d192b 100644 --- a/nibabel/tests/test_imageglobals.py +++ b/nibabel/tests/test_imageglobals.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for imageglobals module +"""Tests for imageglobals module """ from .. import imageglobals as igs diff --git a/nibabel/tests/test_imagestats.py b/nibabel/tests/test_imagestats.py index e104013ddd..47dd2ecbd5 100644 --- a/nibabel/tests/test_imagestats.py +++ b/nibabel/tests/test_imagestats.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for image statistics """ +"""Tests for image statistics""" import numpy as np diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index 97f440497e..c227889e59 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -3,57 +3,56 @@ import pytest from unittest import mock -@pytest.mark.parametrize("verbose, v_args", [(-2, ["-qq"]), - (-1, ["-q"]), - (0, []), - (1, ["-v"]), - (2, ["-vv"])]) -@pytest.mark.parametrize("doctests", (True, False)) -@pytest.mark.parametrize("coverage", (True, False)) + +@pytest.mark.parametrize( + 'verbose, v_args', [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] +) +@pytest.mark.parametrize('doctests', (True, False)) +@pytest.mark.parametrize('coverage', (True, False)) def test_nibabel_test(verbose, v_args, doctests, coverage): - expected_args = v_args + ["--doctest-modules", "--cov", "nibabel", "--pyargs", "nibabel"] + expected_args = v_args + ['--doctest-modules', '--cov', 'nibabel', '--pyargs', 'nibabel'] if not doctests: - expected_args.remove("--doctest-modules") + expected_args.remove('--doctest-modules') if not coverage: expected_args[-4:-2] = [] - with mock.patch("pytest.main") as pytest_main: + with mock.patch('pytest.main') as pytest_main: nib.test(verbose=verbose, doctests=doctests, coverage=coverage) args, kwargs = pytest_main.call_args assert args == () - assert kwargs == {"args": expected_args} + assert kwargs == {'args': expected_args} def test_nibabel_test_errors(): with pytest.raises(NotImplementedError): - nib.test(label="fast") + nib.test(label='fast') with pytest.raises(NotImplementedError): nib.test(raise_warnings=[]) with pytest.raises(NotImplementedError): nib.test(timer=True) with pytest.raises(ValueError): - nib.test(verbose="-v") + nib.test(verbose='-v') def test_nibabel_bench(): - expected_args = ["-c", "--pyargs", "nibabel"] + expected_args = ['-c', '--pyargs', 'nibabel'] try: - expected_args.insert(1, resource_filename("nibabel", "benchmarks/pytest.benchmark.ini")) + expected_args.insert(1, resource_filename('nibabel', 'benchmarks/pytest.benchmark.ini')) except: - raise unittest.SkipTest("Not installed") + raise unittest.SkipTest('Not installed') - with mock.patch("pytest.main") as pytest_main: + with mock.patch('pytest.main') as pytest_main: nib.bench(verbose=0) args, kwargs = pytest_main.call_args assert args == () - assert kwargs == {"args": expected_args} + assert kwargs == {'args': expected_args} - with mock.patch("pytest.main") as pytest_main: + with mock.patch('pytest.main') as pytest_main: nib.bench(verbose=0, extra_argv=[]) args, kwargs = pytest_main.call_args assert args == () - assert kwargs == {"args": expected_args} + assert kwargs == {'args': expected_args} diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index 799952b57d..f8cc168cfd 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,4 +1,4 @@ -""" Testing loadsave module +"""Testing loadsave module """ from os.path import dirname, join as pjoin @@ -7,9 +7,14 @@ import numpy as np -from .. import (Spm99AnalyzeImage, Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, - Nifti2Pair, Nifti2Image) +from .. 
import ( + Spm99AnalyzeImage, + Spm2AnalyzeImage, + Nifti1Pair, + Nifti1Image, + Nifti2Pair, + Nifti2Image, +) from ..loadsave import load, read_img_data, _signature_matches_extension from ..filebasedimages import ImageFileError from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory @@ -17,18 +22,18 @@ from ..testing import expires from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') _, have_pyzstd, _ = optional_package('pyzstd') -from numpy.testing import (assert_almost_equal, - assert_array_equal) +from numpy.testing import assert_almost_equal, assert_array_equal import pytest data_path = pjoin(dirname(__file__), 'data') -@expires("5.0.0") +@expires('5.0.0') def test_read_img_data(): fnames_test = [ 'example4d.nii.gz', @@ -36,7 +41,7 @@ def test_read_img_data(): 'minc1_1_scale.mnc', 'minc1_4d.mnc', 'test.mgz', - 'tiny.mnc' + 'tiny.mnc', ] fnames_test += [pathlib.Path(p) for p in fnames_test] for fname in fnames_test: @@ -78,51 +83,51 @@ def test_load_empty_image(): assert str(err.value).startswith('Empty file: ') -@pytest.mark.parametrize("extension", [".gz", ".bz2", ".zst"]) +@pytest.mark.parametrize('extension', ['.gz', '.bz2', '.zst']) def test_load_bad_compressed_extension(tmp_path, extension): - if extension == ".zst" and not have_pyzstd: + if extension == '.zst' and not have_pyzstd: pytest.skip() - file_path = tmp_path / f"img.nii{extension}" - file_path.write_bytes(b"bad") - with pytest.raises(ImageFileError, match=".*is not a .* file"): + file_path = tmp_path / f'img.nii{extension}' + file_path.write_bytes(b'bad') + with pytest.raises(ImageFileError, match='.*is not a .* file'): load(file_path) -@pytest.mark.parametrize("extension", [".gz", ".bz2", ".zst"]) +@pytest.mark.parametrize('extension', ['.gz', '.bz2', '.zst']) def test_load_good_extension_with_bad_data(tmp_path, extension): - if extension == ".zst" and not have_pyzstd: + if extension == '.zst' and not have_pyzstd: pytest.skip() - file_path = tmp_path / f"img.nii{extension}" - with Opener(file_path, "wb") as fobj: - fobj.write(b"bad") - with pytest.raises(ImageFileError, match="Cannot work out file type of .*"): + file_path = tmp_path / f'img.nii{extension}' + with Opener(file_path, 'wb') as fobj: + fobj.write(b'bad') + with pytest.raises(ImageFileError, match='Cannot work out file type of .*'): load(file_path) def test_signature_matches_extension(tmp_path): - gz_signature = b"\x1f\x8b" - good_file = tmp_path / "good.gz" + gz_signature = b'\x1f\x8b' + good_file = tmp_path / 'good.gz' good_file.write_bytes(gz_signature) - bad_file = tmp_path / "bad.gz" - bad_file.write_bytes(b"bad") - matches, msg = _signature_matches_extension(tmp_path / "uncompressed.nii") + bad_file = tmp_path / 'bad.gz' + bad_file.write_bytes(b'bad') + matches, msg = _signature_matches_extension(tmp_path / 'uncompressed.nii') assert matches - assert msg == "" - matches, msg = _signature_matches_extension(tmp_path / "missing.gz") + assert msg == '' + matches, msg = _signature_matches_extension(tmp_path / 'missing.gz') assert not matches - assert msg.startswith("Could not read") + assert msg.startswith('Could not read') matches, msg = _signature_matches_extension(bad_file) assert not matches - assert "is not a" in msg + assert 'is not a' in msg matches, msg = _signature_matches_extension(good_file) assert matches - assert msg == "" - matches, msg = _signature_matches_extension(tmp_path / "missing.nii") + assert msg == '' + matches, msg = _signature_matches_extension(tmp_path / 'missing.nii') assert matches - 
assert msg == "" + assert msg == '' -@expires("5.0.0") +@expires('5.0.0') def test_read_img_data_nifti(): shape = (2, 3, 4) data = np.random.normal(size=shape) @@ -152,8 +157,7 @@ def test_read_img_data_nifti(): with pytest.deprecated_call(): assert_array_equal(data_back, read_img_data(img_back)) # This is the same as if we loaded the image and header separately - hdr_fname = (img.file_map['header'].filename - if 'header' in img.file_map else img_fname) + hdr_fname = img.file_map['header'].filename if 'header' in img.file_map else img_fname with open(hdr_fname, 'rb') as fobj: hdr_back = img_back.header_class.from_fileobj(fobj) with open(img_fname, 'rb') as fobj: @@ -182,12 +186,10 @@ def test_read_img_data_nifti(): new_inter = 0 # scaled scaling comes from new parameters in header with pytest.deprecated_call(): - assert np.allclose(actual_unscaled * 2.1 + new_inter, - read_img_data(img_back)) + assert np.allclose(actual_unscaled * 2.1 + new_inter, read_img_data(img_back)) # Unscaled array didn't change with pytest.deprecated_call(): - assert_array_equal(actual_unscaled, - read_img_data(img_back, prefer='unscaled')) + assert_array_equal(actual_unscaled, read_img_data(img_back, prefer='unscaled')) # Check the offset too img.header.set_data_offset(1024) # Delete arrays still pointing to file, so Windows can re-use diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 4fecf5782e..4556f76787 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -33,7 +33,7 @@ from . import test_spatialimages as tsi from .test_fileslice import slicer_samples -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') EG_FNAME = pjoin(data_path, 'tiny.mnc') @@ -44,62 +44,42 @@ fname=pjoin(data_path, 'tiny.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(2.0, 2.0, 2.0), # These values from SPM2 - data_summary=dict( - min=0.20784314, - max=0.74901961, - mean=0.60602819), - is_proxy=True), + data_summary=dict(min=0.20784314, max=0.74901961, mean=0.60602819), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc1_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2082842439, - max=0.2094327615, - mean=0.2091292083), - is_proxy=True), + data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc1_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(1., 2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2078431373, - max=1.498039216, - mean=0.9090422837), - is_proxy=True), + data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc1-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], - [0, 1.0, 0, 0], - [1.0, 0, 0, 0], - [0, 0, 0, 
1]]), - zooms=(1., 1., 1.), + affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]), + zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats - data_summary=dict( - min=0.20784314, - max=0.74901961, - mean=0.6061103), - is_proxy=True), + data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), + is_proxy=True, + ), ] @@ -129,14 +109,15 @@ def test_mincfile_slicing(self): mnc_obj = self.opener(tp['fname'], 'r') mnc = self.file_class(mnc_obj) data = mnc.get_scaled_data() - for slicedef in ((slice(None),), - (1,), - (slice(None), 1), - (1, slice(None)), - (slice(None), 1, 1), - (1, slice(None), 1), - (1, 1, slice(None)), - ): + for slicedef in ( + (slice(None),), + (1,), + (slice(None), 1), + (1, slice(None)), + (slice(None), 1, 1), + (1, slice(None), 1), + (1, 1, slice(None)), + ): sliced_data = mnc.get_scaled_data(slicedef) assert_array_equal(sliced_data, data[slicedef]) # Can't close mmapped NetCDF with live mmap arrays @@ -167,14 +148,12 @@ def test_array_proxy_slicing(self): class TestMinc1File(_TestMincFile): - def test_compressed(self): # we can read minc compressed # Not so for MINC2; hence this small sub-class for tp in self.test_files: content = open(tp['fname'], 'rb').read() - openers_exts = [(gzip.open, '.gz'), - (bz2.BZ2File, '.bz2')] + openers_exts = [(gzip.open, '.gz'), (bz2.BZ2File, '.bz2')] if HAVE_ZSTD: # add .zst to test if installed openers_exts += [(pyzstd.ZstdFile, '.zst')] with InTemporaryDirectory(): diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index 1842ca02f9..3e220ef2d1 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -28,80 +28,63 @@ fname=pjoin(data_path, 'small.mnc'), shape=(18, 28, 29), dtype=np.int16, - affine=np.array([[0, 0, 7.0, -98], - [0, 8.0, 0, -134], - [9.0, 0, 0, -72], - [0, 0, 0, 1]]), - zooms=(9., 8., 7.), + affine=np.array([[0, 0, 7.0, -98], [0, 8.0, 0, -134], [9.0, 0, 0, -72], [0, 0, 0, 1]]), + zooms=(9.0, 8.0, 7.0), # These values from mincstats - data_summary=dict( - min=0.1185331417, - max=92.87690699, - mean=31.2127952), - is_proxy=True), + data_summary=dict(min=0.1185331417, max=92.87690699, mean=31.2127952), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2082842439, - max=0.2094327615, - mean=0.2091292083), - is_proxy=True), + data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], - [0, 2.0, 0, -20], - [2.0, 0, 0, -10], - [0, 0, 0, 1]]), - zooms=(1., 2., 2., 2.), + affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats - data_summary=dict( - min=0.2078431373, - max=1.498039216, - mean=0.9090422837), - is_proxy=True), + data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], - [0, 1.0, 0, 0], - [1.0, 0, 0, 0], - [0, 0, 0, 1]]), - zooms=(1., 1., 1.), + affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], 
[1.0, 0, 0, 0], [0, 0, 0, 1]]), + zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats - data_summary=dict( - min=0.20784314, - max=0.74901961, - mean=0.6061103), - is_proxy=True), + data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), + is_proxy=True, + ), dict( fname=pjoin(data_path, 'minc2-4d-d.mnc'), shape=(5, 16, 16, 16), dtype=np.float64, - affine=np.array([[1., 0., 0., -6.96 ], - [0., 1., 0., -12.453], - [0., 0., 1., -9.48 ], - [0., 0., 0., 1.]]), - zooms=(1., 1., 1., 1.), + affine=np.array( + [ + [1.0, 0.0, 0.0, -6.96], + [0.0, 1.0, 0.0, -12.453], + [0.0, 0.0, 1.0, -9.48], + [0.0, 0.0, 0.0, 1.0], + ] + ), + zooms=(1.0, 1.0, 1.0, 1.0), # These values from mincstats - data_summary=dict( - min=0.0, - max=5.0, - mean=2.00078125), - is_proxy=True), + data_summary=dict(min=0.0, max=5.0, mean=2.00078125), + is_proxy=True, + ), ] if have_h5py: + class TestMinc2File(tm2._TestMincFile): module = minc2 file_class = Minc2File diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index fda6c1f8ec..03fb93cbea 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test we can correctly import example MINC2_PATH files +"""Test we can correctly import example MINC2_PATH files """ import os @@ -18,7 +18,7 @@ from .. import load as top_load, Nifti1Image from ..optpkg import optional_package -from numpy.testing import (assert_array_equal, assert_almost_equal) +from numpy.testing import assert_array_equal, assert_almost_equal h5py, have_h5py, setup_module = optional_package('h5py') @@ -37,30 +37,28 @@ def _make_affine(coses, zooms, starts): class TestEPIFrame: opener = staticmethod(top_load) x_cos = [1, 0, 0] - y_cos = [0., 1, 0] + y_cos = [0.0, 1, 0] z_cos = [0, 0, 1] - zooms = [-0.8984375, -0.8984375, 3.] 
+ zooms = [-0.8984375, -0.8984375, 3.0] starts = [117.25609125, 138.89861125, -54.442028] example_params = dict( fname=os.path.join(MINC2_PATH, 'mincex_EPI-frame.mnc'), shape=(40, 256, 256), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats - min=0., + min=0.0, max=1273, - mean=93.52085367) + mean=93.52085367, + ) @needs_nibabel_data('nitest-minc2') def test_load(self): # Check highest level load of minc works img = self.opener(self.example_params['fname']) assert img.shape == self.example_params['shape'] - assert_almost_equal(img.header.get_zooms(), - self.example_params['zooms'], 5) + assert_almost_equal(img.header.get_zooms(), self.example_params['zooms'], 5) assert_almost_equal(img.affine, self.example_params['affine'], 4) assert img.get_data_dtype().type == self.example_params['type'] # Check correspondence of data and recorded shape @@ -77,8 +75,8 @@ def test_load(self): class TestB0(TestEPIFrame): - x_cos = [0.9970527523765, 0., 0.0767190261828617] - y_cos = [0., 1., -6.9388939e-18] + x_cos = [0.9970527523765, 0.0, 0.0767190261828617] + y_cos = [0.0, 1.0, -6.9388939e-18] z_cos = [-0.0767190261828617, 6.9184432614435e-18, 0.9970527523765] zooms = [-0.8984375, -0.8984375, 6.49999990444107] starts = [105.473101260826, 151.74885125, -61.8714747993248] @@ -86,14 +84,13 @@ class TestB0(TestEPIFrame): fname=os.path.join(MINC2_PATH, 'mincex_diff-B0.mnc'), shape=(19, 256, 256), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats min=4.566971917, max=3260.121093, - mean=163.8305553) + mean=163.8305553, + ) class TestFA(TestEPIFrame): @@ -103,28 +100,28 @@ class TestFA(TestEPIFrame): # These values from mincstats min=0.008068881038, max=1.224754546, - mean=0.7520087469) + mean=0.7520087469, + ) example_params.update(new_params) class TestGado(TestEPIFrame): x_cos = [0.999695413509548, -0.0174524064372835, 0.0174497483512505] y_cos = [0.0174497483512505, 0.999847695156391, 0.000304586490452135] - z_cos = [-0.0174524064372835, 0., 0.999847695156391] + z_cos = [-0.0174524064372835, 0.0, 0.999847695156391] zooms = [1, -1, -1] starts = [-75.76775, 115.80462, 81.38605] example_params = dict( fname=os.path.join(MINC2_PATH, 'mincex_gado-contrast.mnc'), shape=(100, 170, 146), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats min=0, max=938668.8698, - mean=128169.3488) + mean=128169.3488, + ) class TestT1(TestEPIFrame): @@ -137,14 +134,13 @@ class TestT1(TestEPIFrame): fname=os.path.join(MINC2_PATH, 'mincex_t1.mnc'), shape=(110, 217, 181), type=np.int16, - affine=_make_affine((z_cos, y_cos, x_cos), - zooms[::-1], - starts[::-1]), + affine=_make_affine((z_cos, y_cos, x_cos), zooms[::-1], starts[::-1]), zooms=[abs(v) for v in zooms[::-1]], # These values from mincstats min=0, max=100, - mean=23.1659928) + mean=23.1659928, + ) class TestPD(TestEPIFrame): @@ -154,7 +150,8 @@ class TestPD(TestEPIFrame): # These values from mincstats min=0, max=102.5024482, - mean=23.82625718) + mean=23.82625718, + ) example_params.update(new_params) @@ -166,5 +163,6 @@ class 
TestMask(TestEPIFrame): # These values from mincstats min=0, max=1, - mean=0.3817466618) + mean=0.3817466618, + ) example_params.update(new_params) diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 8c6b198c95..082d053805 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing mriutils module +"""Testing mriutils module """ @@ -20,8 +20,7 @@ def test_calculate_dwell_time(): # Test dwell time calculation # This tests only that the calculation does what it appears to; needs some # external check - assert_almost_equal(calculate_dwell_time(3.3, 2, 3), - 3.3 / (42.576 * 3.4 * 3 * 3)) + assert_almost_equal(calculate_dwell_time(3.3, 2, 3), 3.3 / (42.576 * 3.4 * 3 * 3)) # Echo train length of 1 is valid, but returns 0 dwell time assert_almost_equal(calculate_dwell_time(3.3, 1, 3), 0) with pytest.raises(MRIError): diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index 86e94f5c34..ec97108e35 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -1,4 +1,4 @@ -""" Tests for ``get_nibabel_data`` +"""Tests for ``get_nibabel_data`` """ import os diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 63cf13c103..0018dfe842 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for nifti reading package """ +"""Tests for nifti reading package""" import os import warnings import struct @@ -18,10 +18,19 @@ from nibabel.casting import type_info, have_binary128 from nibabel.eulerangles import euler2mat from io import BytesIO -from nibabel.nifti1 import (load, Nifti1Header, Nifti1PairHeader, Nifti1Image, - Nifti1Pair, Nifti1Extension, Nifti1DicomExtension, - Nifti1Extensions, data_type_codes, extension_codes, - slice_order_codes) +from nibabel.nifti1 import ( + load, + Nifti1Header, + Nifti1PairHeader, + Nifti1Image, + Nifti1Pair, + Nifti1Extension, + Nifti1DicomExtension, + Nifti1Extensions, + data_type_codes, + extension_codes, + slice_order_codes, +) from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory from nibabel.optpkg import optional_package @@ -32,8 +41,7 @@ from .test_orientations import ALL_ORNTS from .nibabel_data import get_nibabel_data, needs_nibabel_data -from numpy.testing import (assert_array_equal, assert_array_almost_equal, - assert_almost_equal) +from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal from ..testing import ( clear_and_catch_warnings, @@ -41,7 +49,7 @@ runif_extra_has, suppress_warnings, bytesio_filemap, - bytesio_round_trip + bytesio_round_trip, ) import unittest @@ -53,8 +61,8 @@ header_file = os.path.join(data_path, 'nifti1.hdr') image_file = os.path.join(data_path, 'example4d.nii.gz') -pydicom, have_dicom, _ = optional_package("pydicom") -dicom_test = unittest.skipUnless(have_dicom, "Could not import pydicom") +pydicom, have_dicom, _ = optional_package('pydicom') +dicom_test = unittest.skipUnless(have_dicom, 'Could not import pydicom') # Example transformation matrix @@ -70,17 +78,11 @@ class TestNifti1PairHeader(tana.TestAnalyzeHeader, tspm.HeaderScalingMixin): header_class = Nifti1PairHeader example_file = header_file quat_dtype = np.float32 
- supported_np_types = tana.TestAnalyzeHeader.supported_np_types.union(( - np.int8, - np.uint16, - np.uint32, - np.int64, - np.uint64, - np.complex128)) + supported_np_types = tana.TestAnalyzeHeader.supported_np_types.union( + (np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128) + ) if have_binary128(): - supported_np_types = supported_np_types.union(( - np.longdouble, - np.longcomplex)) + supported_np_types = supported_np_types.union((np.longdouble, np.longcomplex)) tana.add_intp(supported_np_types) def test_empty(self): @@ -145,40 +147,41 @@ def test_slope_inter(self): HDE = HeaderDataError assert hdr.get_slope_inter() == (1.0, 0.0) for in_tup, exp_err, out_tup, raw_values in ( - # Null scalings - ((None, None), None, (None, None), (nan, nan)), - ((nan, None), None, (None, None), (nan, nan)), - ((None, nan), None, (None, None), (nan, nan)), - ((nan, nan), None, (None, None), (nan, nan)), - # Can only be one null - ((None, 0), HDE, (None, None), (nan, 0)), - ((nan, 0), HDE, (None, None), (nan, 0)), - ((1, None), HDE, (None, None), (1, nan)), - ((1, nan), HDE, (None, None), (1, nan)), - # Bad slope plus anything generates an error - ((0, 0), HDE, (None, None), (0, 0)), - ((0, None), HDE, (None, None), (0, nan)), - ((0, nan), HDE, (None, None), (0, nan)), - ((0, inf), HDE, (None, None), (0, inf)), - ((0, minf), HDE, (None, None), (0, minf)), - ((inf, 0), HDE, (None, None), (inf, 0)), - ((inf, None), HDE, (None, None), (inf, nan)), - ((inf, nan), HDE, (None, None), (inf, nan)), - ((inf, inf), HDE, (None, None), (inf, inf)), - ((inf, minf), HDE, (None, None), (inf, minf)), - ((minf, 0), HDE, (None, None), (minf, 0)), - ((minf, None), HDE, (None, None), (minf, nan)), - ((minf, nan), HDE, (None, None), (minf, nan)), - ((minf, inf), HDE, (None, None), (minf, inf)), - ((minf, minf), HDE, (None, None), (minf, minf)), - # Good slope and bad inter generates error for get_slope_inter - ((2, None), HDE, HDE, (2, nan)), - ((2, nan), HDE, HDE, (2, nan)), - ((2, inf), HDE, HDE, (2, inf)), - ((2, minf), HDE, HDE, (2, minf)), - # Good slope and inter - you guessed it - ((2, 0), None, (2, 0), (2, 0)), - ((2, 1), None, (2, 1), (2, 1))): + # Null scalings + ((None, None), None, (None, None), (nan, nan)), + ((nan, None), None, (None, None), (nan, nan)), + ((None, nan), None, (None, None), (nan, nan)), + ((nan, nan), None, (None, None), (nan, nan)), + # Can only be one null + ((None, 0), HDE, (None, None), (nan, 0)), + ((nan, 0), HDE, (None, None), (nan, 0)), + ((1, None), HDE, (None, None), (1, nan)), + ((1, nan), HDE, (None, None), (1, nan)), + # Bad slope plus anything generates an error + ((0, 0), HDE, (None, None), (0, 0)), + ((0, None), HDE, (None, None), (0, nan)), + ((0, nan), HDE, (None, None), (0, nan)), + ((0, inf), HDE, (None, None), (0, inf)), + ((0, minf), HDE, (None, None), (0, minf)), + ((inf, 0), HDE, (None, None), (inf, 0)), + ((inf, None), HDE, (None, None), (inf, nan)), + ((inf, nan), HDE, (None, None), (inf, nan)), + ((inf, inf), HDE, (None, None), (inf, inf)), + ((inf, minf), HDE, (None, None), (inf, minf)), + ((minf, 0), HDE, (None, None), (minf, 0)), + ((minf, None), HDE, (None, None), (minf, nan)), + ((minf, nan), HDE, (None, None), (minf, nan)), + ((minf, inf), HDE, (None, None), (minf, inf)), + ((minf, minf), HDE, (None, None), (minf, minf)), + # Good slope and bad inter generates error for get_slope_inter + ((2, None), HDE, HDE, (2, nan)), + ((2, nan), HDE, HDE, (2, nan)), + ((2, inf), HDE, HDE, (2, inf)), + ((2, minf), HDE, HDE, (2, minf)), + # Good slope and inter - 
you guessed it + ((2, 0), None, (2, 0), (2, 0)), + ((2, 1), None, (2, 1), (2, 1)), + ): hdr = self.header_class() if not exp_err is None: with pytest.raises(exp_err): @@ -250,15 +253,18 @@ def test_magic_offset_checks(self): hdr['magic'] = 'ooh' fhdr, message, raiser = self.log_chk(hdr, 45) assert fhdr['magic'] == b'ooh' - assert (message == - 'magic string "ooh" is not valid; ' - 'leaving as is, but future errors are likely') + assert ( + message == 'magic string "ooh" is not valid; ' + 'leaving as is, but future errors are likely' + ) # For pairs, any offset is OK, but should be divisible by 16 # Singles need offset of at least 352 (nifti1) or 540 (nifti2) bytes, # with the divide by 16 rule svo = hdr.single_vox_offset - for magic, ok, bad_spm in ((hdr.pair_magic, 32, 40), - (hdr.single_magic, svo + 32, svo + 40)): + for magic, ok, bad_spm in ( + (hdr.pair_magic, 32, 40), + (hdr.single_magic, svo + 32, svo + 40), + ): hdr['magic'] = magic hdr['vox_offset'] = 0 self.assert_no_log_err(hdr) @@ -267,18 +273,20 @@ def test_magic_offset_checks(self): hdr['vox_offset'] = bad_spm fhdr, message, raiser = self.log_chk(hdr, 30) assert fhdr['vox_offset'] == bad_spm - assert (message == - f'vox offset (={bad_spm:g}) not divisible by 16, ' - 'not SPM compatible; leaving at current value') + assert ( + message == f'vox offset (={bad_spm:g}) not divisible by 16, ' + 'not SPM compatible; leaving at current value' + ) # Check minimum offset (if offset set) hdr['magic'] = hdr.single_magic hdr['vox_offset'] = 10 fhdr, message, raiser = self.log_chk(hdr, 40) assert fhdr['vox_offset'] == hdr.single_vox_offset - assert (message == - 'vox offset 10 too low for single ' - 'file nifti1; setting to minimum value ' - 'of ' + str(hdr.single_vox_offset)) + assert ( + message == 'vox offset 10 too low for single ' + 'file nifti1; setting to minimum value ' + 'of ' + str(hdr.single_vox_offset) + ) def test_freesurfer_large_vector_hack(self): # For large vector images, Freesurfer appears to set dim[1] to -1 and @@ -360,14 +368,13 @@ def test_freesurfer_ico7_hack(self): pytest.raises(HeaderDataError, hdr.set_data_shape, (1, 1, 1, 163842)) # Test consistency of data in .mgh and mri_convert produced .nii nitest_path = os.path.join(get_nibabel_data(), 'nitest-freesurfer') - mgh = mghload(os.path.join(nitest_path, 'fsaverage', 'surf', - 'lh.orig.avg.area.mgh')) - nii = load(os.path.join(nitest_path, 'derivative', 'fsaverage', 'surf', - 'lh.orig.avg.area.nii')) + mgh = mghload(os.path.join(nitest_path, 'fsaverage', 'surf', 'lh.orig.avg.area.mgh')) + nii = load( + os.path.join(nitest_path, 'derivative', 'fsaverage', 'surf', 'lh.orig.avg.area.nii') + ) assert mgh.shape == nii.shape assert_array_equal(mgh.get_fdata(), nii.get_fdata()) - assert_array_equal(nii.header._structarr['dim'][1:4], - np.array([27307, 1, 6])) + assert_array_equal(nii.header._structarr['dim'][1:4], np.array([27307, 1, 6])) # Test writing produces consistent nii files with InTemporaryDirectory(): nii.to_filename('test.nii') @@ -393,8 +400,7 @@ def test_qform_sform(self): nasty_aff[0, 0] = 1 # Make full rank fixed_aff = unshear_44(nasty_aff) assert not np.allclose(fixed_aff, nasty_aff) - for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform), - (hdr.set_sform, hdr.get_sform)): + for in_meth, out_meth in ((hdr.set_qform, hdr.get_qform), (hdr.set_sform, hdr.get_sform)): in_meth(nice_aff, 2) aff, code = out_meth(coded=True) assert_array_equal(aff, nice_aff) @@ -507,13 +513,14 @@ def test_sform(self): def test_dim_info(self): ehdr = self.header_class() assert 
ehdr.get_dim_info() == (None, None, None) - for info in ((0, 2, 1), - (None, None, None), - (0, 2, None), - (0, None, None), - (None, 2, 1), - (None, None, 1), - ): + for info in ( + (0, 2, 1), + (None, None, None), + (0, 2, None), + (0, None, None), + (None, 2, 1), + (None, None, 1), + ): ehdr.set_dim_info(*info) assert ehdr.get_dim_info() == info @@ -537,27 +544,28 @@ def test_slice_times(self): _print_me = lambda s: list(map(_stringer, s)) # The following examples are from the nifti1.h documentation. hdr['slice_code'] = slice_order_codes['sequential increasing'] - assert (_print_me(hdr.get_slice_times()) == - ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6']) + assert _print_me(hdr.get_slice_times()) == [ + '0.0', + '0.1', + '0.2', + '0.3', + '0.4', + '0.5', + '0.6', + ] hdr['slice_start'] = 1 hdr['slice_end'] = 5 - assert (_print_me(hdr.get_slice_times()) == - [None, '0.0', '0.1', '0.2', '0.3', '0.4', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.0', '0.1', '0.2', '0.3', '0.4', None] hdr['slice_code'] = slice_order_codes['sequential decreasing'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.4', '0.3', '0.2', '0.1', '0.0', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.4', '0.3', '0.2', '0.1', '0.0', None] hdr['slice_code'] = slice_order_codes['alternating increasing'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.0', '0.3', '0.1', '0.4', '0.2', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.0', '0.3', '0.1', '0.4', '0.2', None] hdr['slice_code'] = slice_order_codes['alternating decreasing'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.2', '0.4', '0.1', '0.3', '0.0', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.2', '0.4', '0.1', '0.3', '0.0', None] hdr['slice_code'] = slice_order_codes['alternating increasing 2'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.2', '0.0', '0.3', '0.1', '0.4', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.2', '0.0', '0.3', '0.1', '0.4', None] hdr['slice_code'] = slice_order_codes['alternating decreasing 2'] - assert (_print_me(hdr.get_slice_times()) == - [None, '0.4', '0.1', '0.3', '0.0', '0.2', None]) + assert _print_me(hdr.get_slice_times()) == [None, '0.4', '0.1', '0.3', '0.0', '0.2', None] # test set hdr = self.header_class() hdr.set_dim_info(slice=2) @@ -583,8 +591,7 @@ def test_slice_times(self): # can't get single slice duration hdr.set_slice_times(funny_times) hdr.set_slice_times(times) - assert (hdr.get_value_label('slice_code') == - 'alternating decreasing') + assert hdr.get_value_label('slice_code') == 'alternating decreasing' assert hdr['slice_start'] == 1 assert hdr['slice_end'] == 5 assert_array_almost_equal(hdr['slice_duration'], 0.1) @@ -605,7 +612,6 @@ def test_slice_times(self): assert len(w) == 1 assert hdr2.get_value_label('slice_code') == 'sequential increasing' - def test_intents(self): ehdr = self.header_class() ehdr.set_intent('t test', (10,), name='some score') @@ -626,8 +632,7 @@ def test_intents(self): ehdr.set_intent('f test', (10,)) # check unset parameters are set to 0, and name to '' ehdr.set_intent('t test') - assert ((ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']) == - (0, 0, 0)) + assert (ehdr['intent_p1'], ehdr['intent_p2'], ehdr['intent_p3']) == (0, 0, 0) assert ehdr['intent_name'] == b'' ehdr.set_intent('t test', (10,)) assert (ehdr['intent_p2'], ehdr['intent_p3']) == (0, 0) @@ -647,7 +652,7 @@ def test_intents(self): with pytest.raises(HeaderDataError): ehdr.set_intent(999, (1,), 
allow_unknown=True) with pytest.raises(HeaderDataError): - ehdr.set_intent(999, (1,2), allow_unknown=True) + ehdr.set_intent(999, (1, 2), allow_unknown=True) def test_set_slice_times(self): hdr = self.header_class() @@ -776,7 +781,7 @@ def test_int64_warning(self): img_klass(data, np.eye(4)) # No warnings if we're explicit, though with clear_and_catch_warnings(): - warnings.simplefilter("error") + warnings.simplefilter('error') img_klass(data, np.eye(4), dtype=dtype) hdr = hdr_klass() hdr.set_data_dtype(dtype) @@ -854,8 +859,7 @@ def test_header_update_affine(self): assert hdr['qform_code'] == 2 def test_set_qform(self): - img = self.image_class(np.zeros((2, 3, 4)), - np.diag([2.2, 3.3, 4.3, 1])) + img = self.image_class(np.zeros((2, 3, 4)), np.diag([2.2, 3.3, 4.3, 1])) hdr = img.header new_affine = np.diag([1.1, 1.1, 1.1, 1]) # Affine is same as sform (best affine) @@ -988,7 +992,6 @@ def test_sqform_code_type(self): img.set_sform(None, img.get_sform(coded=True)[1]) img.set_qform(None, img.get_qform(coded=True)[1]) - def test_hdr_diff(self): # Check an offset beyond data does not raise an error img = self.image_class(np.zeros((2, 3, 4)), np.eye(4)) @@ -1019,8 +1022,9 @@ def test_load_save(self): assert isinstance(img3, img.__class__) assert_array_equal(img3.get_fdata(), data) assert img3.header == img.header - assert isinstance(np.asanyarray(img3.dataobj), - np.memmap if ext == '' else np.ndarray) + assert isinstance( + np.asanyarray(img3.dataobj), np.memmap if ext == '' else np.ndarray + ) # del to avoid windows errors of form 'The process cannot # access the file because it is being used' del img3 @@ -1111,40 +1115,41 @@ def _set_raw_scaling(self, hdr, slope, inter): def test_write_scaling(self): # Check we can set slope, inter on write for slope, inter, e_slope, e_inter in ( - (1, 0, 1, 0), - (2, 0, 2, 0), - (2, 1, 2, 1), - (0, 0, 1, 0), - (np.inf, 0, 1, 0)): + (1, 0, 1, 0), + (2, 0, 2, 0), + (2, 1, 2, 1), + (0, 0, 1, 0), + (np.inf, 0, 1, 0), + ): with np.errstate(invalid='ignore'): self._check_write_scaling(slope, inter, e_slope, e_inter) def test_dynamic_dtype_aliases(self): for in_dt, mn, mx, alias, effective_dt in [ - (np.uint8, 0, 255, 'compat', np.uint8), - (np.int8, 0, 127, 'compat', np.uint8), - (np.int8, -128, 127, 'compat', np.int16), - (np.int16, -32768, 32767, 'compat', np.int16), - (np.uint16, 0, 32767, 'compat', np.int16), - (np.uint16, 0, 65535, 'compat', np.int32), - (np.int32, -2**31, 2**31-1, 'compat', np.int32), - (np.uint32, 0, 2**31-1, 'compat', np.int32), - (np.uint32, 0, 2**32-1, 'compat', None), - (np.int64, -2**31, 2**31-1, 'compat', np.int32), - (np.uint64, 0, 2**31-1, 'compat', np.int32), - (np.int64, 0, 2**32-1, 'compat', None), - (np.uint64, 0, 2**32-1, 'compat', None), - (np.float32, 0, 1e30, 'compat', np.float32), - (np.float64, 0, 1e30, 'compat', np.float32), - (np.float64, 0, 1e40, 'compat', None), - (np.int64, 0, 255, 'smallest', np.uint8), - (np.int64, 0, 256, 'smallest', np.int16), - (np.int64, -1, 255, 'smallest', np.int16), - (np.int64, 0, 32768, 'smallest', np.int32), - (np.int64, 0, 4294967296, 'smallest', None), - (np.float32, 0, 1, 'smallest', None), - (np.float64, 0, 1, 'smallest', None) - ]: + (np.uint8, 0, 255, 'compat', np.uint8), + (np.int8, 0, 127, 'compat', np.uint8), + (np.int8, -128, 127, 'compat', np.int16), + (np.int16, -32768, 32767, 'compat', np.int16), + (np.uint16, 0, 32767, 'compat', np.int16), + (np.uint16, 0, 65535, 'compat', np.int32), + (np.int32, -(2**31), 2**31 - 1, 'compat', np.int32), + (np.uint32, 0, 2**31 - 1, 
'compat', np.int32),
+            (np.uint32, 0, 2**32 - 1, 'compat', None),
+            (np.int64, -(2**31), 2**31 - 1, 'compat', np.int32),
+            (np.uint64, 0, 2**31 - 1, 'compat', np.int32),
+            (np.int64, 0, 2**32 - 1, 'compat', None),
+            (np.uint64, 0, 2**32 - 1, 'compat', None),
+            (np.float32, 0, 1e30, 'compat', np.float32),
+            (np.float64, 0, 1e30, 'compat', np.float32),
+            (np.float64, 0, 1e40, 'compat', None),
+            (np.int64, 0, 255, 'smallest', np.uint8),
+            (np.int64, 0, 256, 'smallest', np.int16),
+            (np.int64, -1, 255, 'smallest', np.int16),
+            (np.int64, 0, 32768, 'smallest', np.int32),
+            (np.int64, 0, 4294967296, 'smallest', None),
+            (np.float32, 0, 1, 'smallest', None),
+            (np.float64, 0, 1, 'smallest', None),
+        ]:
             arr = np.arange(24, dtype=in_dt).reshape((2, 3, 4))
             arr[0, 0, :2] = [mn, mx]
             img = self.image_class(arr, np.eye(4), dtype=alias)
@@ -1167,8 +1172,8 @@ def test_dynamic_dtype_aliases(self):
 
     def test_static_dtype_aliases(self):
         for alias, effective_dt in [
-                ("mask", np.uint8),
-        ]:
+            ('mask', np.uint8),
+        ]:
             for orig_dt in ('u1', 'i8', 'f4'):
                 arr = np.arange(24, dtype=orig_dt).reshape((2, 3, 4))
                 img = self.image_class(arr, np.eye(4), dtype=alias)
@@ -1320,7 +1325,6 @@ def test_nifti_dicom_extension():
     assert dcmext.get_content().__class__ == pydicom.dataset.Dataset
     assert len(dcmext.get_content().values()) == 0
 
-
     # use a dataset if provided
     ds = pydicom.dataset.Dataset()
     ds.add_new((0x10, 0x20), 'LO', 'NiPy')
@@ -1330,9 +1334,9 @@
     assert dcmext.get_content().PatientID == 'NiPy'
 
     # create a single dicom tag (Patient ID, [0010,0020]) with Explicit VR / BE
-    dcmbytes_explicit_be = struct.pack('>2H2sH4s', 0x10, 0x20,
-                                       'LO'.encode('utf-8'), 4,
-                                       'NiPy'.encode('utf-8'))
+    dcmbytes_explicit_be = struct.pack(
+        '>2H2sH4s', 0x10, 0x20, 'LO'.encode('utf-8'), 4, 'NiPy'.encode('utf-8')
+    )
     hdr_be = Nifti1Header(endianness='>')  # Big Endian Nifti1Header
     dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be)
     assert dcmext.__class__ == Nifti1DicomExtension
@@ -1388,11 +1391,12 @@
 
 
 class TestNifti1General:
-    """ Test class to test nifti1 in general
+    """Test class to test nifti1 in general
 
     Tests here which mix the pair and the single type, and that should only be
     run once (not for each type) because they are slow
     """
+
     single_class = Nifti1Image
     pair_class = Nifti1Pair
     module = nifti1
@@ -1431,7 +1435,7 @@ def test_loadsave_cycle(self):
         lnim = bytesio_round_trip(wnim)
         assert lnim.get_data_dtype() == np.int16
         # Scaling applied
-        assert_array_equal(lnim.get_fdata(), data * 2. + 8.)
+ assert_array_equal(lnim.get_fdata(), data * 2.0 + 8.0) # slope, inter reset by image creation, but saved in proxy assert lnim.header.get_slope_inter() == (None, None) assert (lnim.dataobj.slope, lnim.dataobj.inter) == (2, 8) @@ -1471,7 +1475,7 @@ def test_float_int_spread(self): # Test rounding error for spread of values # Parallel test to arraywriters powers = np.arange(-10, 10, 0.5) - arr = np.concatenate((-10**powers, 10**powers)) + arr = np.concatenate((-(10**powers), 10**powers)) aff = np.eye(4) for in_dt in (np.float32, np.float64): arr_t = arr.astype(in_dt) @@ -1481,8 +1485,7 @@ def test_float_int_spread(self): arr_back_sc = img_back.get_fdata() slope, inter = img_back.header.get_slope_inter() # Get estimate for error - max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, - inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter) # Simulate allclose test with large atol diff = np.abs(arr_t - arr_back_sc) rdiff = diff / np.abs(arr_t) @@ -1505,8 +1508,7 @@ def test_rt_bias(self): slope, inter = img_back.header.get_slope_inter() bias = np.mean(arr_t - arr_back_sc) # Get estimate for error - max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, - inter) + max_miss = rt_err_estimate(arr_t, arr_back_sc.dtype, slope, inter) # Hokey use of max_miss as a std estimate bias_thresh = np.max([max_miss / np.sqrt(count), eps]) assert np.abs(bias) < bias_thresh @@ -1517,16 +1519,17 @@ def test_reoriented_dim_info(self): # Start as RAS aff = np.diag([2, 3, 4, 1]) simg = self.single_class(arr, aff) - for freq, phas, slic in ((0, 1, 2), - (0, 2, 1), - (1, 0, 2), - (2, 0, 1), - (None, None, None), - (0, 2, None), - (0, None, None), - (None, 2, 1), - (None, None, 1), - ): + for freq, phas, slic in ( + (0, 1, 2), + (0, 2, 1), + (1, 0, 2), + (2, 0, 1), + (None, None, None), + (0, 2, None), + (0, None, None), + (None, 2, 1), + (None, None, 1), + ): simg.header.set_dim_info(freq, phas, slic) fdir = 'RAS'[freq] if freq is not None else None pdir = 'RAS'[phas] if phas is not None else None @@ -1545,8 +1548,7 @@ def test_reoriented_dim_info(self): @runif_extra_has('slow') def test_large_nifti1(): image_shape = (91, 109, 91, 1200) - img = Nifti1Image(np.ones(image_shape, dtype=np.float32), - affine=np.eye(4)) + img = Nifti1Image(np.ones(image_shape, dtype=np.float32), affine=np.eye(4)) # Dump and load the large image. with InTemporaryDirectory(): img.to_filename('test.nii.gz') @@ -1554,5 +1556,5 @@ def test_large_nifti1(): data = load('test.nii.gz').get_fdata() # Check that the data are all ones assert image_shape == data.shape - n_ones = np.sum((data == 1.)) + n_ones = np.sum((data == 1.0)) assert np.prod(image_shape) == n_ones diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 106e3ec787..57a97a1322 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -6,15 +6,14 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for nifti2 reading package """ +"""Tests for nifti2 reading package""" import os import numpy as np from .. import nifti2 -from ..nifti1 import (Nifti1Header, Nifti1PairHeader, Nifti1Extension, - Nifti1Extensions) -from ..nifti2 import (Nifti2Header, Nifti2PairHeader, Nifti2Image, Nifti2Pair) +from ..nifti1 import Nifti1Header, Nifti1PairHeader, Nifti1Extension, Nifti1Extensions +from ..nifti2 import Nifti2Header, Nifti2PairHeader, Nifti2Image, Nifti2Pair from . 
import test_nifti1 as tn1 @@ -52,10 +51,11 @@ def test_eol_check(self): hdr['eol_check'] = (13, 10, 0, 10) fhdr, message, raiser = self.log_chk(hdr, 40) assert_array_equal(fhdr['eol_check'], good_eol) - assert (message == - 'EOL check not 0 or 13, 10, 26, 10; ' - 'data may be corrupted by EOL conversion; ' - 'setting EOL check to 13, 10, 26, 10') + assert ( + message == 'EOL check not 0 or 13, 10, 26, 10; ' + 'data may be corrupted by EOL conversion; ' + 'setting EOL check to 13, 10, 26, 10' + ) class TestNifti2PairHeader(_Nifti2Mixin, tn1.TestNifti1PairHeader): @@ -79,11 +79,12 @@ class TestNifti2Pair(tn1.TestNifti1Pair): class TestNifti2General(tn1.TestNifti1General): - """ Test class to test nifti2 in general + """Test class to test nifti2 in general Tests here which mix the pair and the single type, and that should only be run once (not for each type) because they are slow """ + single_class = Nifti2Image pair_class = Nifti2Pair module = nifti2 @@ -95,12 +96,14 @@ def test_nifti12_conversion(): dtype_type = np.int64 ext1 = Nifti1Extension(6, b'My comment') ext2 = Nifti1Extension(6, b'Fresh comment') - for in_type, out_type in ((Nifti1Header, Nifti2Header), - (Nifti1PairHeader, Nifti2Header), - (Nifti1PairHeader, Nifti2PairHeader), - (Nifti2Header, Nifti1Header), - (Nifti2PairHeader, Nifti1Header), - (Nifti2PairHeader, Nifti1PairHeader)): + for in_type, out_type in ( + (Nifti1Header, Nifti2Header), + (Nifti1PairHeader, Nifti2Header), + (Nifti1PairHeader, Nifti2PairHeader), + (Nifti2Header, Nifti1Header), + (Nifti2PairHeader, Nifti1Header), + (Nifti2PairHeader, Nifti1PairHeader), + ): in_hdr = in_type() in_hdr.set_data_shape(shape) in_hdr.set_data_dtype(dtype_type) diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index c1609980a3..2659b7fbbc 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -6,6 +6,7 @@ @expires('5.0.0') def test_setattr_on_read(): with pytest.deprecated_call(): + class MagicProp: @setattr_on_read def a(self): diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index b25dc2db6d..2a306079f4 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for openers module """ +"""Test for openers module""" import os import contextlib from gzip import GzipFile @@ -16,12 +16,13 @@ import time from numpy.compat.py3k import asstr, asbytes -from ..openers import (Opener, - ImageOpener, - HAVE_INDEXED_GZIP, - BZ2File, - DeterministicGzipFile, - ) +from ..openers import ( + Opener, + ImageOpener, + HAVE_INDEXED_GZIP, + BZ2File, + DeterministicGzipFile, +) from ..tmpdirs import InTemporaryDirectory from ..optpkg import optional_package @@ -30,7 +31,7 @@ import pytest from ..deprecator import ExpiredDeprecationError -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') class Lunk: @@ -74,16 +75,13 @@ def test_Opener(): def test_Opener_various(): # Check we can do all sorts of files here - message = b"Oh what a giveaway" + message = b'Oh what a giveaway' bz2_fileno = hasattr(BZ2File, 'fileno') if HAVE_INDEXED_GZIP: import indexed_gzip as igzip with InTemporaryDirectory(): sobj = BytesIO() - files_to_test = ['test.txt', - 'test.txt.gz', - 'test.txt.bz2', - sobj] + files_to_test = ['test.txt', 'test.txt.gz', 'test.txt.bz2', sobj] if HAVE_ZSTD: files_to_test += ['test.txt.zst'] for input in files_to_test: @@ -104,8 +102,11 @@ def test_Opener_various(): fobj.fileno() # indexed gzip is used by default, and drops file # handles by default, so we don't have a fileno. - elif input.endswith('gz') and HAVE_INDEXED_GZIP and \ - Version(igzip.__version__) >= Version('0.7.0'): + elif ( + input.endswith('gz') + and HAVE_INDEXED_GZIP + and Version(igzip.__version__) >= Version('0.7.0') + ): with pytest.raises(igzip.NoHandleError): fobj.fileno() else: @@ -127,9 +128,9 @@ def patch_indexed_gzip(state): values = (True, MockIndexedGzipFile) else: values = (False, GzipFile) - with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), \ - mock.patch('nibabel.openers.IndexedGzipFile', values[1], - create=True): + with mock.patch('nibabel.openers.HAVE_INDEXED_GZIP', values[0]), mock.patch( + 'nibabel.openers.IndexedGzipFile', values[1], create=True + ): yield @@ -148,14 +149,14 @@ def test_Opener_gzip_type(): # Each test is specified by a tuple containing: # (indexed_gzip present, Opener kwargs, expected file type) tests = [ - (False, {'mode' : 'rb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'rb', 'keep_open' : False}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (False, {'mode' : 'wb', 'keep_open' : False}, GzipFile), - (True, {'mode' : 'rb', 'keep_open' : True}, MockIndexedGzipFile), - (True, {'mode' : 'rb', 'keep_open' : False}, MockIndexedGzipFile), - (True, {'mode' : 'wb', 'keep_open' : True}, GzipFile), - (True, {'mode' : 'wb', 'keep_open' : False}, GzipFile), + (False, {'mode': 'rb', 'keep_open': True}, GzipFile), + (False, {'mode': 'rb', 'keep_open': False}, GzipFile), + (False, {'mode': 'wb', 'keep_open': True}, GzipFile), + (False, {'mode': 'wb', 'keep_open': False}, GzipFile), + (True, {'mode': 'rb', 'keep_open': True}, MockIndexedGzipFile), + (True, {'mode': 'rb', 'keep_open': False}, MockIndexedGzipFile), + (True, {'mode': 'wb', 'keep_open': True}, GzipFile), + (True, {'mode': 'wb', 'keep_open': False}, GzipFile), ] for test in tests: @@ -195,7 +196,7 @@ def file_opener(fileish, mode): def test_file_like_wrapper(): # Test wrapper using BytesIO (full API) - message = b"History of the nude in" + message = b'History of the nude in' sobj = BytesIO() fobj = Opener(sobj) assert 
fobj.tell() == 0 @@ -221,6 +222,7 @@ def test_compressionlevel(): class MyOpener(Opener): default_compresslevel = 5 + with InTemporaryDirectory(): for ext in ('gz', 'bz2', 'GZ', 'gZ', 'BZ2', 'Bz2'): for opener, default_val in ((Opener, 1), (MyOpener, 5)): @@ -245,6 +247,7 @@ def test_compressed_ext_case(): class StrictOpener(Opener): compress_ext_icase = False + exts = ('gz', 'bz2', 'GZ', 'gZ', 'BZ2', 'Bz2') if HAVE_ZSTD: exts += ('zst', 'ZST', 'Zst') @@ -283,11 +286,7 @@ def test_name(): sobj = BytesIO() lunk = Lunk('in ART') with InTemporaryDirectory(): - files_to_test = ['test.txt', - 'test.txt.gz', - 'test.txt.bz2', - sobj, - lunk] + files_to_test = ['test.txt', 'test.txt.gz', 'test.txt.bz2', sobj, lunk] if HAVE_ZSTD: files_to_test += ['test.txt.zst'] for input in files_to_test: @@ -307,6 +306,7 @@ def test_set_extensions(): class MyOpener(Opener): compress_ext_map = Opener.compress_ext_map.copy() compress_ext_map['.glrph'] = Opener.gz_def + with MyOpener('test.glrph', 'w') as fobj: assert hasattr(fobj.fobj, 'compress') @@ -316,11 +316,7 @@ def test_close_if_mine(): with InTemporaryDirectory(): sobj = BytesIO() lunk = Lunk('') - for input in ('test.txt', - 'test.txt.gz', - 'test.txt.bz2', - sobj, - lunk): + for input in ('test.txt', 'test.txt.gz', 'test.txt.bz2', sobj, lunk): fobj = Opener(input, 'wb') # gzip objects have no 'closed' attribute has_closed = hasattr(fobj.fobj, 'closed') @@ -334,18 +330,21 @@ def test_close_if_mine(): def test_iter(): # Check we can iterate over lines, if the underlying file object allows it - lines = \ - """On the + lines = """On the blue ridged mountains of virginia -""".split('\n') +""".split( + '\n' + ) with InTemporaryDirectory(): sobj = BytesIO() - files_to_test = [('test.txt', True), - ('test.txt.gz', False), - ('test.txt.bz2', False), - (sobj, True)] + files_to_test = [ + ('test.txt', True), + ('test.txt.gz', False), + ('test.txt.bz2', False), + (sobj, True), + ] if HAVE_ZSTD: files_to_test += [('test.txt.zst', False)] for input, does_t in files_to_test: @@ -366,7 +365,7 @@ def test_iter(): def md5sum(fname): - with open(fname, "rb") as fobj: + with open(fname, 'rb') as fobj: return hashlib.md5(fobj.read()).hexdigest() @@ -375,82 +374,82 @@ def test_DeterministicGzipFile(): msg = b"Hello, I'd like to have an argument." 
# No filename, no mtime - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj, mtime=0) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj, mtime=0) as gzobj: gzobj.write(msg) - anon_chksum = md5sum("ref.gz") + anon_chksum = md5sum('ref.gz') - with DeterministicGzipFile("default.gz", "wb") as fobj: + with DeterministicGzipFile('default.gz', 'wb') as fobj: internal_fobj = fobj.myfileobj fobj.write(msg) # Check that myfileobj is being closed by GzipFile.close() # This is in case GzipFile changes its internal implementation assert internal_fobj.closed - assert md5sum("default.gz") == anon_chksum + assert md5sum('default.gz') == anon_chksum # No filename, current mtime now = time.time() - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj, mtime=now) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj, mtime=now) as gzobj: gzobj.write(msg) - now_chksum = md5sum("ref.gz") + now_chksum = md5sum('ref.gz') - with DeterministicGzipFile("now.gz", "wb", mtime=now) as fobj: + with DeterministicGzipFile('now.gz', 'wb', mtime=now) as fobj: fobj.write(msg) - assert md5sum("now.gz") == now_chksum + assert md5sum('now.gz') == now_chksum # Change in default behavior - with mock.patch("time.time") as t: + with mock.patch('time.time') as t: t.return_value = now # GzipFile will use time.time() - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - assert md5sum("ref.gz") == now_chksum + assert md5sum('ref.gz') == now_chksum # DeterministicGzipFile will use 0 - with DeterministicGzipFile("now.gz", "wb") as fobj: + with DeterministicGzipFile('now.gz', 'wb') as fobj: fobj.write(msg) - assert md5sum("now.gz") == anon_chksum + assert md5sum('now.gz') == anon_chksum # GzipFile is filename dependent, DeterministicGzipFile is independent - with GzipFile("filenameA.gz", mode="wb", mtime=0) as gzobj: + with GzipFile('filenameA.gz', mode='wb', mtime=0) as gzobj: gzobj.write(msg) - fnameA_chksum = md5sum("filenameA.gz") + fnameA_chksum = md5sum('filenameA.gz') assert fnameA_chksum != anon_chksum - with DeterministicGzipFile("filenameA.gz", "wb") as fobj: + with DeterministicGzipFile('filenameA.gz', 'wb') as fobj: fobj.write(msg) # But the contents are the same with different filenames - assert md5sum("filenameA.gz") == anon_chksum + assert md5sum('filenameA.gz') == anon_chksum def test_DeterministicGzipFile_fileobj(): with InTemporaryDirectory(): msg = b"Hello, I'd like to have an argument." 
- with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", fileobj=fobj, mtime=0) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', fileobj=fobj, mtime=0) as gzobj: gzobj.write(msg) - ref_chksum = md5sum("ref.gz") + ref_chksum = md5sum('ref.gz') - with open("test.gz", "wb") as fobj: - with DeterministicGzipFile(filename="", mode="wb", fileobj=fobj) as gzobj: + with open('test.gz', 'wb') as fobj: + with DeterministicGzipFile(filename='', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum("test.gz") == ref_chksum + md5sum('test.gz') == ref_chksum - with open("test.gz", "wb") as fobj: - with DeterministicGzipFile(fileobj=fobj, mode="wb") as gzobj: + with open('test.gz', 'wb') as fobj: + with DeterministicGzipFile(fileobj=fobj, mode='wb') as gzobj: gzobj.write(msg) - md5sum("test.gz") == ref_chksum + md5sum('test.gz') == ref_chksum - with open("test.gz", "wb") as fobj: - with DeterministicGzipFile(filename="test.gz", mode="wb", fileobj=fobj) as gzobj: + with open('test.gz', 'wb') as fobj: + with DeterministicGzipFile(filename='test.gz', mode='wb', fileobj=fobj) as gzobj: gzobj.write(msg) - md5sum("test.gz") == ref_chksum + md5sum('test.gz') == ref_chksum def test_bitwise_determinism(): @@ -458,31 +457,29 @@ def test_bitwise_determinism(): msg = b"Hello, I'd like to have an argument." # Canonical reference: No filename, no mtime # Use default compresslevel - with open("ref.gz", "wb") as fobj: - with GzipFile(filename="", mode="wb", - compresslevel=1, fileobj=fobj, - mtime=0) as gzobj: + with open('ref.gz', 'wb') as fobj: + with GzipFile(filename='', mode='wb', compresslevel=1, fileobj=fobj, mtime=0) as gzobj: gzobj.write(msg) - anon_chksum = md5sum("ref.gz") + anon_chksum = md5sum('ref.gz') # Different times, different filenames now = time.time() - with mock.patch("time.time") as t: + with mock.patch('time.time') as t: t.return_value = now - with Opener("a.gz", "wb") as fobj: + with Opener('a.gz', 'wb') as fobj: fobj.write(msg) t.return_value = now + 1 - with Opener("b.gz", "wb") as fobj: + with Opener('b.gz', 'wb') as fobj: fobj.write(msg) - assert md5sum("a.gz") == anon_chksum - assert md5sum("b.gz") == anon_chksum + assert md5sum('a.gz') == anon_chksum + assert md5sum('b.gz') == anon_chksum # Users can still set mtime, but filenames will not be embedded - with Opener("filenameA.gz", "wb", mtime=0xCAFE10C0) as fobj: + with Opener('filenameA.gz', 'wb', mtime=0xCAFE10C0) as fobj: fobj.write(msg) - with Opener("filenameB.gz", "wb", mtime=0xCAFE10C0) as fobj: + with Opener('filenameB.gz', 'wb', mtime=0xCAFE10C0) as fobj: fobj.write(msg) - fnameA_chksum = md5sum("filenameA.gz") - fnameB_chksum = md5sum("filenameB.gz") + fnameA_chksum = md5sum('filenameA.gz') + fnameB_chksum = md5sum('filenameB.gz') assert fnameA_chksum == fnameB_chksum != anon_chksum diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index 72430aea37..875c32bbdf 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,4 +1,4 @@ -""" Testing optpkg module +"""Testing optpkg module """ from unittest import mock @@ -41,12 +41,15 @@ def test_basic(): # Only disrupt imports for "nottriedbefore" package orig_import = builtins.__import__ + def raise_Exception(*args, **kwargs): if args[0] == 'nottriedbefore': raise Exception( - "non ImportError could be thrown by some malfunctioning module " - "upon import, and optional_package should catch it too") + 'non ImportError could be thrown by some malfunctioning module ' + 'upon 
import, and optional_package should catch it too' + ) return orig_import(*args, **kwargs) + with mock.patch.object(builtins, '__import__', side_effect=raise_Exception): assert_bad('nottriedbefore') diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 0b3b8081d0..5d786c0eac 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing for orientations module """ +"""Testing for orientations module""" import numpy as np import warnings @@ -15,68 +15,54 @@ from numpy.testing import assert_array_equal -from ..orientations import (io_orientation, ornt_transform, inv_ornt_aff, - flip_axis, apply_orientation, OrientationError, - ornt2axcodes, axcodes2ornt, aff2axcodes) +from ..orientations import ( + io_orientation, + ornt_transform, + inv_ornt_aff, + flip_axis, + apply_orientation, + OrientationError, + ornt2axcodes, + axcodes2ornt, + aff2axcodes, +) from ..affines import from_matvec, to_matvec from ..testing import expires -IN_ARRS = [np.eye(4), - [[0, 0, 1, 0], - [0, 1, 0, 0], - [1, 0, 0, 0], - [0, 0, 0, 1]], - [[0, 1, 0, 0], - [0, 0, 1, 0], - [1, 0, 0, 0], - [0, 0, 0, 1]], - [[3, 1, 0, 0], - [1, 3, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]], - [[1, 3, 0, 0], - [3, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]], - ] - -OUT_ORNTS = [[[0, 1], - [1, 1], - [2, 1]], - [[2, 1], - [1, 1], - [0, 1]], - [[2, 1], - [0, 1], - [1, 1]], - [[0, 1], - [1, 1], - [2, 1]], - [[1, 1], - [0, 1], - [2, 1]], - ] - -IN_ARRS = IN_ARRS + [[[np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], - [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]] for i in range(4)] - -OUT_ORNTS = OUT_ORNTS + [[[0, 1], - [1, 1], - [2, 1]], - [[1, -1], - [0, 1], - [2, 1]], - [[0, -1], - [1, -1], - [2, 1]], - [[1, 1], - [0, -1], - [2, 1]] - ] +IN_ARRS = [ + np.eye(4), + [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], + [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]], + [[3, 1, 0, 0], [1, 3, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + [[1, 3, 0, 0], [3, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], +] + +OUT_ORNTS = [ + [[0, 1], [1, 1], [2, 1]], + [[2, 1], [1, 1], [0, 1]], + [[2, 1], [0, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]], + [[1, 1], [0, 1], [2, 1]], +] + +IN_ARRS = IN_ARRS + [ + [ + [np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], + [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + for i in range(4) +] + +OUT_ORNTS = OUT_ORNTS + [ + [[0, 1], [1, 1], [2, 1]], + [[1, -1], [0, 1], [2, 1]], + [[0, -1], [1, -1], [2, 1]], + [[1, 1], [0, -1], [2, 1]], +] IN_ARRS = [np.array(arr) for arr in IN_ARRS] @@ -84,15 +70,27 @@ _LABELS = ['RL', 'AP', 'SI'] -ALL_AXCODES = [(_LABELS[i0][j0], _LABELS[i1][j1], _LABELS[i2][j2]) - for i0 in range(3) for i1 in range(3) for i2 in range(3) - if i0 != i1 != i2 != i0 - for j0 in range(2) for j1 in range(2) for j2 in range(2)] - -ALL_ORNTS = [[[i0, j0], [i1, j1], [i2, j2]] - for i0 in range(3) for i1 in range(3) for i2 in range(3) - if i0 != i1 != i2 != i0 - for j0 in [1, -1] for j1 in [1, -1] for j2 in [1, -1]] +ALL_AXCODES = [ + (_LABELS[i0][j0], _LABELS[i1][j1], _LABELS[i2][j2]) + for i0 in range(3) + for i1 in range(3) + for i2 in range(3) + if i0 != i1 != i2 != i0 + for j0 in range(2) + for j1 in range(2) + for j2 in range(2) +] + 
+ALL_ORNTS = [ + [[i0, j0], [i1, j1], [i2, j2]] + for i0 in range(3) + for i1 in range(3) + for i2 in range(3) + if i0 != i1 != i2 != i0 + for j0 in [1, -1] + for j1 in [1, -1] + for j2 in [1, -1] +] def same_transform(taff, ornt, shape): @@ -162,32 +160,23 @@ def test_io_orientation(): rzs = np.c_[np.diag([2, 3, 4, 5]), np.zeros((4, 3))] arr = from_matvec(rzs, [15, 16, 17, 18]) ornt = io_orientation(arr) - assert_array_equal(ornt, [[0, 1], - [1, 1], - [2, 1], - [3, 1], - [np.nan, np.nan], - [np.nan, np.nan], - [np.nan, np.nan]]) + assert_array_equal( + ornt, + [[0, 1], [1, 1], [2, 1], [3, 1], [np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], + ) # Test behavior of thresholding - def_aff = np.array([[1., 1, 0, 0], - [0, 0, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]) - fail_tol = np.array([[0, 1], - [np.nan, np.nan], - [2, 1]]) - pass_tol = np.array([[0, 1], - [1, 1], - [2, 1]]) + def_aff = np.array([[1.0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) + fail_tol = np.array([[0, 1], [np.nan, np.nan], [2, 1]]) + pass_tol = np.array([[0, 1], [1, 1], [2, 1]]) eps = np.finfo(float).eps # Test that a Y axis appears as we increase the difference between the # first two columns - for y_val, has_y in ((0, False), - (eps, False), - (eps * 5, False), - (eps * 10, True), - ): + for y_val, has_y in ( + (0, False), + (eps, False), + (eps * 5, False), + (eps * 10, True), + ): def_aff[1, 1] = y_val res = pass_tol if has_y else fail_tol assert_array_equal(io_orientation(def_aff), res) @@ -202,68 +191,50 @@ def test_io_orientation(): aff_extra_col[-1, -1] = 1 # Not strictly necessary, but for completeness aff_extra_col[:3, :3] = mat aff_extra_col[:3, -1] = vec - assert_array_equal(io_orientation(aff_extra_col, tol=1e-5), - [[0, 1], - [np.nan, np.nan], - [2, 1], - [np.nan, np.nan]]) + assert_array_equal( + io_orientation(aff_extra_col, tol=1e-5), + [[0, 1], [np.nan, np.nan], [2, 1], [np.nan, np.nan]], + ) aff_extra_row = np.zeros((5, 4)) aff_extra_row[-1, -1] = 1 # Not strictly necessary, but for completeness aff_extra_row[:3, :3] = mat aff_extra_row[:3, -1] = vec - assert_array_equal(io_orientation(aff_extra_row, tol=1e-5), - [[0, 1], - [np.nan, np.nan], - [2, 1]]) + assert_array_equal(io_orientation(aff_extra_row, tol=1e-5), [[0, 1], [np.nan, np.nan], [2, 1]]) def test_ornt_transform(): - assert_array_equal(ornt_transform([[0, 1], [1, 1], [2, -1]], - [[1, 1], [0, 1], [2, 1]]), - [[1, 1], [0, 1], [2, -1]] - ) - assert_array_equal(ornt_transform([[0, 1], [1, 1], [2, 1]], - [[2, 1], [0, -1], [1, 1]]), - [[1, -1], [2, 1], [0, 1]] - ) + assert_array_equal( + ornt_transform([[0, 1], [1, 1], [2, -1]], [[1, 1], [0, 1], [2, 1]]), + [[1, 1], [0, 1], [2, -1]], + ) + assert_array_equal( + ornt_transform([[0, 1], [1, 1], [2, 1]], [[2, 1], [0, -1], [1, 1]]), + [[1, -1], [2, 1], [0, 1]], + ) # Must have same shape with pytest.raises(ValueError): ornt_transform([[0, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) # Must be (N,2) in shape with pytest.raises(ValueError): - ornt_transform([[0, 1, 1], [1, 1, 1]], - [[0, 1, 1], [1, 1, 1]]) + ornt_transform([[0, 1, 1], [1, 1, 1]], [[0, 1, 1], [1, 1, 1]]) # Target axes must exist in source with pytest.raises(ValueError): - ornt_transform([[0, 1], [1, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]]) + ornt_transform([[0, 1], [1, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) def test_ornt2axcodes(): # Recoding orientation to axis codes labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) - assert ornt2axcodes([[0, 1], - [1, 1], - [2, 1]], labels) == ('right', 'front', 'up') - assert 
ornt2axcodes([[0, -1], - [1, -1], - [2, -1]], labels) == ('left', 'back', 'down') - assert ornt2axcodes([[2, -1], - [1, -1], - [0, -1]], labels) == ('down', 'back', 'left') - assert ornt2axcodes([[1, 1], - [2, -1], - [0, 1]], labels) == ('front', 'down', 'right') + assert ornt2axcodes([[0, 1], [1, 1], [2, 1]], labels) == ('right', 'front', 'up') + assert ornt2axcodes([[0, -1], [1, -1], [2, -1]], labels) == ('left', 'back', 'down') + assert ornt2axcodes([[2, -1], [1, -1], [0, -1]], labels) == ('down', 'back', 'left') + assert ornt2axcodes([[1, 1], [2, -1], [0, 1]], labels) == ('front', 'down', 'right') # default is RAS output directions - assert ornt2axcodes([[0, 1], - [1, 1], - [2, 1]]) == ('R', 'A', 'S') + assert ornt2axcodes([[0, 1], [1, 1], [2, 1]]) == ('R', 'A', 'S') # dropped axes produce None - assert ornt2axcodes([[0, 1], - [np.nan, np.nan], - [2, 1]]) == ('R', None, 'S') + assert ornt2axcodes([[0, 1], [np.nan, np.nan], [2, 1]]) == ('R', None, 'S') # Non integer axes raises error with pytest.raises(ValueError): ornt2axcodes([[0.1, 1]]) @@ -278,61 +249,35 @@ def test_ornt2axcodes(): def test_axcodes2ornt(): # Go from axcodes back to orientations labels = (('left', 'right'), ('back', 'front'), ('down', 'up')) - assert_array_equal(axcodes2ornt(('right', 'front', 'up'), labels), - [[0, 1], - [1, 1], - [2, 1]] - ) - assert_array_equal(axcodes2ornt(('left', 'back', 'down'), labels), - [[0, -1], - [1, -1], - [2, -1]] - ) - assert_array_equal(axcodes2ornt(('down', 'back', 'left'), labels), - [[2, -1], - [1, -1], - [0, -1]] - ) - assert_array_equal(axcodes2ornt(('front', 'down', 'right'), labels), - [[1, 1], - [2, -1], - [0, 1]] - ) + assert_array_equal(axcodes2ornt(('right', 'front', 'up'), labels), [[0, 1], [1, 1], [2, 1]]) + assert_array_equal(axcodes2ornt(('left', 'back', 'down'), labels), [[0, -1], [1, -1], [2, -1]]) + assert_array_equal(axcodes2ornt(('down', 'back', 'left'), labels), [[2, -1], [1, -1], [0, -1]]) + assert_array_equal(axcodes2ornt(('front', 'down', 'right'), labels), [[1, 1], [2, -1], [0, 1]]) # default is RAS output directions default = np.c_[range(3), [1] * 3] assert_array_equal(axcodes2ornt(('R', 'A', 'S')), default) # dropped axes produce None - assert_array_equal(axcodes2ornt(('R', None, 'S')), - [[0, 1], - [np.nan, np.nan], - [2, 1]] - ) + assert_array_equal(axcodes2ornt(('R', None, 'S')), [[0, 1], [np.nan, np.nan], [2, 1]]) # Missing axcodes raise an error assert_array_equal(axcodes2ornt('RAS'), default) with pytest.raises(ValueError): axcodes2ornt('rAS') # None is OK as axis code - assert_array_equal(axcodes2ornt(('R', None, 'S')), - [[0, 1], - [np.nan, np.nan], - [2, 1]]) + assert_array_equal(axcodes2ornt(('R', None, 'S')), [[0, 1], [np.nan, np.nan], [2, 1]]) # Bad axis code with None also raises error. 
with pytest.raises(ValueError): axcodes2ornt(('R', None, 's')) # Axis codes checked with custom labels labels = ('SD', 'BF', 'lh') - assert_array_equal(axcodes2ornt('BlD', labels), - [[1, -1], - [2, -1], - [0, 1]]) + assert_array_equal(axcodes2ornt('BlD', labels), [[1, -1], [2, -1], [0, 1]]) with pytest.raises(ValueError): axcodes2ornt('blD', labels) # Duplicate labels - for labels in [('SD', 'BF', 'lD'),('SD', 'SF', 'lD')]: + for labels in [('SD', 'BF', 'lD'), ('SD', 'SF', 'lD')]: with pytest.raises(ValueError): axcodes2ornt('blD', labels) diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 22e805cb8f..0eca2fdca4 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -1,4 +1,4 @@ -""" Testing parrec module +"""Testing parrec module """ from os.path import join as pjoin, dirname, basename @@ -11,18 +11,24 @@ from .. import load as top_load from ..nifti1 import Nifti1Image, Nifti1Extension, Nifti1Header from .. import parrec -from ..parrec import (parse_PAR_header, PARRECHeader, PARRECError, vol_numbers, - vol_is_full, PARRECImage, PARRECArrayProxy, exts2pars) +from ..parrec import ( + parse_PAR_header, + PARRECHeader, + PARRECError, + vol_numbers, + vol_is_full, + PARRECImage, + PARRECArrayProxy, + exts2pars, +) from ..openers import ImageOpener from ..fileholders import FileHolder from ..volumeutils import array_from_file -from numpy.testing import (assert_almost_equal, - assert_array_equal) +from numpy.testing import assert_almost_equal, assert_array_equal import pytest -from ..testing import (clear_and_catch_warnings, suppress_warnings, - assert_arr_dict_equal) +from ..testing import clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal from .test_arrayproxy import check_mmap from . 
import test_spatialimages as tsi @@ -53,80 +59,120 @@ VARIANT_PAR = pjoin(DATA_PATH, 'variant_v4_2_header.PAR') # Affine as we determined it mid-2014 AN_OLD_AFFINE = np.array( - [[-3.64994708, 0., 1.83564171, 123.66276611], - [0., -3.75, 0., 115.617], - [0.86045705, 0., 7.78655376, -27.91161211], - [0., 0., 0., 1.]]) + [ + [-3.64994708, 0.0, 1.83564171, 123.66276611], + [0.0, -3.75, 0.0, 115.617], + [0.86045705, 0.0, 7.78655376, -27.91161211], + [0.0, 0.0, 0.0, 1.0], + ] +) # Affine from Philips-created NIfTI PHILIPS_AFFINE = np.array( - [[-3.65, -0.0016, 1.8356, 125.4881], - [0.0016, -3.75, -0.0004, 117.4916], - [0.8604, 0.0002, 7.7866, -28.3411], - [0., 0., 0., 1.]]) + [ + [-3.65, -0.0016, 1.8356, 125.4881], + [0.0016, -3.75, -0.0004, 117.4916], + [0.8604, 0.0002, 7.7866, -28.3411], + [0.0, 0.0, 0.0, 1.0], + ] +) # Affines generated by parrec.py from test data in many orientations # Data from http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 PREVIOUS_AFFINES = { - "Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1": - npa([[-3., 0., 0., 118.5], - [0., -0.77645714, -3.18755523, 72.82738377], - [0., -2.89777748, 0.85410285, 97.80720486], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_cor_SENSE_8_1": - npa([[-3., 0., 0., 118.5], - [0., 0., -3.3, 64.35], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_15AP_SENSE_13_1": - npa([[0., 0.77645714, 3.18755523, -92.82738377], - [-3., 0., 0., 118.5], - [0., -2.89777748, 0.85410285, 97.80720486], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_15FH_SENSE_12_1": - npa([[0.77645714, 0., 3.18755523, -92.82738377], - [-2.89777748, 0., 0.85410285, 97.80720486], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_15RL_SENSE_11_1": - npa([[0., 0., 3.3, -64.35], - [-2.89777748, -0.77645714, 0., 145.13226726], - [0.77645714, -2.89777748, 0., 83.79215357], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_sag_SENSE_7_1": - npa([[0., 0., 3.3, -64.35], - [-3., 0., 0., 118.5], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1": - npa([[0., 0., 3.3, -74.35], - [-3., 0., 0., 148.5], - [0., -3., 0., 138.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_15FH_SENSE_9_1": - npa([[0.77645714, 0., 3.18755523, -92.82738377], - [-2.89777748, 0., 0.85410285, 97.80720486], - [0., -3., 0., 118.5], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_15RL_SENSE_10_1": - npa([[0., 0., 3.3, -64.35], - [-2.89777748, -0.77645714, 0., 145.13226726], - [0.77645714, -2.89777748, 0., 83.79215357], - [0., 0., 0., 1.]]), - "Phantom_EPI_3mm_tra_SENSE_6_1": - npa([[-3., 0., 0., 118.5], - [0., -3., 0., 118.5], - [0., 0., 3.3, -64.35], - [0., 0., 0., 1.]]), + 'Phantom_EPI_3mm_cor_20APtrans_15RLrot_SENSE_15_1': npa( + [ + [-3.0, 0.0, 0.0, 118.5], + [0.0, -0.77645714, -3.18755523, 72.82738377], + [0.0, -2.89777748, 0.85410285, 97.80720486], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_cor_SENSE_8_1': npa( + [ + [-3.0, 0.0, 0.0, 118.5], + [0.0, 0.0, -3.3, 64.35], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_15AP_SENSE_13_1': npa( + [ + [0.0, 0.77645714, 3.18755523, -92.82738377], + [-3.0, 0.0, 0.0, 118.5], + [0.0, -2.89777748, 0.85410285, 97.80720486], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_15FH_SENSE_12_1': npa( + [ + [0.77645714, 0.0, 3.18755523, -92.82738377], + [-2.89777748, 0.0, 0.85410285, 97.80720486], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_15RL_SENSE_11_1': npa( + [ + [0.0, 0.0, 3.3, -64.35], + [-2.89777748, 
-0.77645714, 0.0, 145.13226726], + [0.77645714, -2.89777748, 0.0, 83.79215357], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_sag_SENSE_7_1': npa( + [ + [0.0, 0.0, 3.3, -64.35], + [-3.0, 0.0, 0.0, 118.5], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_-30AP_10RL_20FH_SENSE_14_1': npa( + [ + [0.0, 0.0, 3.3, -74.35], + [-3.0, 0.0, 0.0, 148.5], + [0.0, -3.0, 0.0, 138.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_15FH_SENSE_9_1': npa( + [ + [0.77645714, 0.0, 3.18755523, -92.82738377], + [-2.89777748, 0.0, 0.85410285, 97.80720486], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_15RL_SENSE_10_1': npa( + [ + [0.0, 0.0, 3.3, -64.35], + [-2.89777748, -0.77645714, 0.0, 145.13226726], + [0.77645714, -2.89777748, 0.0, 83.79215357], + [0.0, 0.0, 0.0, 1.0], + ] + ), + 'Phantom_EPI_3mm_tra_SENSE_6_1': npa( + [ + [-3.0, 0.0, 0.0, 118.5], + [0.0, -3.0, 0.0, 118.5], + [0.0, 0.0, 3.3, -64.35], + [0.0, 0.0, 0.0, 1.0], + ] + ), } # Original values for b values in DTI.PAR, still in PSL orientation -DTI_PAR_BVECS = np.array([[-0.667, -0.667, -0.333], - [-0.333, 0.667, -0.667], - [-0.667, 0.333, 0.667], - [-0.707, -0.000, -0.707], - [-0.707, 0.707, 0.000], - [-0.000, 0.707, 0.707], - [0.000, 0.000, 0.000], - [0.000, 0.000, 0.000]]) +DTI_PAR_BVECS = np.array( + [ + [-0.667, -0.667, -0.333], + [-0.333, 0.667, -0.667], + [-0.667, 0.333, 0.667], + [-0.707, -0.000, -0.707], + [-0.707, 0.707, 0.000], + [-0.000, 0.707, 0.707], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000], + ] +) # DTI.PAR values for bvecs DTI_PAR_BVALS = [1000] * 6 + [0, 1000] @@ -143,11 +189,9 @@ # use our own affine as determined from a previous load in nibabel affine=AN_OLD_AFFINE, zooms=(3.75, 3.75, 8.0, 2.0), - data_summary=dict( - min=0.0, - max=2299.4110643863678, - mean=194.95876256117265), - is_proxy=True) + data_summary=dict(min=0.0, max=2299.4110643863678, mean=194.95876256117265), + is_proxy=True, + ) ] @@ -179,8 +223,7 @@ def test_header(): assert hdr.get_data_dtype() == np.dtype('= ver), \ - "nibabel.info.VERSION does not match latest tag information" + fallback >= ver + ), 'nibabel.info.VERSION does not match latest tag information' def test_cmp_pkg_version_0(): @@ -56,42 +57,44 @@ def test_cmp_pkg_version_0(): assert cmp_pkg_version(stage2, stage1) == 1 -@pytest.mark.parametrize("test_ver, pkg_ver, exp_out", - [ - ('1.0', '1.0', 0), - ('1.0.0', '1.0', 0), - ('1.0', '1.0.0', 0), - ('1.1', '1.1', 0), - ('1.2', '1.1', 1), - ('1.1', '1.2', -1), - ('1.1.1', '1.1.1', 0), - ('1.1.2', '1.1.1', 1), - ('1.1.1', '1.1.2', -1), - ('1.1', '1.1dev', 1), - ('1.1dev', '1.1', -1), - ('1.2.1', '1.2.1rc1', 1), - ('1.2.1rc1', '1.2.1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1rc1', '1.2.1rc', 1), - ('1.2.1rc', '1.2.1rc1', -1), - ('1.2.1b', '1.2.1a', 1), - ('1.2.1a', '1.2.1b', -1), - ('1.2.0+1', '1.2', 1), - ('1.2', '1.2.0+1', -1), - ('1.2.1+1', '1.2.1', 1), - ('1.2.1', '1.2.1+1', -1), - ('1.2.1rc1+1', '1.2.1', -1), - ('1.2.1', '1.2.1rc1+1', 1), - ('1.2.1rc1+1', '1.2.1+1', -1), - ('1.2.1+1', '1.2.1rc1+1', 1), - ]) +@pytest.mark.parametrize( + 'test_ver, pkg_ver, exp_out', + [ + ('1.0', '1.0', 0), + ('1.0.0', '1.0', 0), + ('1.0', '1.0.0', 0), + ('1.1', '1.1', 0), + ('1.2', '1.1', 1), + ('1.1', '1.2', -1), + ('1.1.1', '1.1.1', 0), + ('1.1.2', '1.1.1', 1), + ('1.1.1', '1.1.2', -1), + ('1.1', '1.1dev', 1), + ('1.1dev', '1.1', -1), + ('1.2.1', '1.2.1rc1', 1), + ('1.2.1rc1', '1.2.1', -1), + ('1.2.1rc1', '1.2.1rc', 1), + ('1.2.1rc', 
'1.2.1rc1', -1), + ('1.2.1rc1', '1.2.1rc', 1), + ('1.2.1rc', '1.2.1rc1', -1), + ('1.2.1b', '1.2.1a', 1), + ('1.2.1a', '1.2.1b', -1), + ('1.2.0+1', '1.2', 1), + ('1.2', '1.2.0+1', -1), + ('1.2.1+1', '1.2.1', 1), + ('1.2.1', '1.2.1+1', -1), + ('1.2.1rc1+1', '1.2.1', -1), + ('1.2.1', '1.2.1rc1+1', 1), + ('1.2.1rc1+1', '1.2.1+1', -1), + ('1.2.1+1', '1.2.1rc1+1', 1), + ], +) def test_cmp_pkg_version_1(test_ver, pkg_ver, exp_out): # Test version comparator assert cmp_pkg_version(test_ver, pkg_ver) == exp_out -@pytest.mark.parametrize("args", [['foo.2'], ['foo.2', '1.0'], ['1.0', 'foo.2'], ['foo']]) +@pytest.mark.parametrize('args', [['foo.2'], ['foo.2', '1.0'], ['1.0', 'foo.2'], ['foo']]) def test_cmp_pkg_version_error(args): with pytest.raises(ValueError): cmp_pkg_version(*args) diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index 3c2a70a8c4..cd7c1830ea 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing processing module +"""Testing processing module """ from os.path import dirname, join as pjoin @@ -16,17 +16,23 @@ import numpy.linalg as npl from nibabel.optpkg import optional_package + spnd, have_scipy, _ = optional_package('scipy.ndimage') import nibabel as nib -from nibabel.processing import (sigma2fwhm, fwhm2sigma, adapt_affine, - resample_from_to, resample_to_output, smooth_image, - conform) +from nibabel.processing import ( + sigma2fwhm, + fwhm2sigma, + adapt_affine, + resample_from_to, + resample_to_output, + smooth_image, + conform, +) from nibabel.nifti1 import Nifti1Image from nibabel.nifti2 import Nifti2Image from nibabel.orientations import aff2axcodes, inv_ornt_aff -from nibabel.affines import (AffineError, from_matvec, to_matvec, apply_affine, - voxel_sizes) +from nibabel.affines import AffineError, from_matvec, to_matvec, apply_affine, voxel_sizes from nibabel.eulerangles import euler2mat from numpy.testing import assert_almost_equal, assert_array_equal @@ -44,9 +50,13 @@ from .test_imageclasses import MINC_3DS, MINC_4DS # Filenames of other images that should work correctly with processing -OTHER_IMGS = ('anatomical.nii', 'functional.nii', - 'example4d.nii.gz', 'example_nifti2.nii.gz', - 'phantom_EPI_asc_CLEAR_2_1.PAR') +OTHER_IMGS = ( + 'anatomical.nii', + 'functional.nii', + 'example4d.nii.gz', + 'example_nifti2.nii.gz', + 'phantom_EPI_asc_CLEAR_2_1.PAR', +) def test_sigma2fwhm(): @@ -68,27 +78,17 @@ def test_adapt_affine(): # For 4x4 affine, 3D image, no-op assert_array_equal(adapt_affine(aff_3d, 3), aff_3d) # For 4x4 affine, 4D image, add extra identity dimension - assert_array_equal(adapt_affine(aff_3d, 4), - [[ 0, 1, 2, 0, 11], - [ 3, 4, 5, 0, 12], - [ 6, 7, 8, 0, 13], - [ 0, 0, 0, 1, 0], - [ 0, 0, 0, 0, 1]]) + assert_array_equal( + adapt_affine(aff_3d, 4), + [[0, 1, 2, 0, 11], [3, 4, 5, 0, 12], [6, 7, 8, 0, 13], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]], + ) # For 5x5 affine, 4D image, identity aff_4d = from_matvec(np.arange(16).reshape((4, 4)), [11, 12, 13, 14]) assert_array_equal(adapt_affine(aff_4d, 4), aff_4d) # For 4x4 affine, 2D image, dropped column - assert_array_equal(adapt_affine(aff_3d, 2), - [[ 0, 1, 11], - [ 3, 4, 12], - [ 6, 7, 13], - [ 0, 0, 1]]) + assert_array_equal(adapt_affine(aff_3d, 2), [[0, 1, 11], [3, 4, 12], [6, 7, 13], [0, 0, 1]]) # For 4x4 affine, 1D image, 2 dropped columns - assert_array_equal(adapt_affine(aff_3d, 1), - [[ 0, 11], - [ 3, 12], - [ 
6, 13], - [ 0, 1]]) + assert_array_equal(adapt_affine(aff_3d, 1), [[0, 11], [3, 12], [6, 13], [0, 1]]) # For 3x3 affine, 2D image, identity aff_2d = from_matvec(np.arange(4).reshape((2, 2)), [11, 12]) assert_array_equal(adapt_affine(aff_2d, 2), aff_2d) @@ -111,8 +111,7 @@ def test_resample_from_to(caplog): ax_flip_ornt = flip_ornt.copy() ax_flip_ornt[axis, 1] = -1 aff_flip_i = inv_ornt_aff(ax_flip_ornt, (2, 3, 4)) - flipped_img = Nifti1Image(np.flip(data, axis), - np.dot(affine, aff_flip_i)) + flipped_img = Nifti1Image(np.flip(data, axis), np.dot(affine, aff_flip_i)) out = resample_from_to(flipped_img, ((2, 3, 4), affine)) assert_almost_equal(img.dataobj, out.dataobj) assert_array_equal(img.affine, out.affine) @@ -255,13 +254,10 @@ def test_resample_to_output(caplog): # Subsample voxels out_img = resample_to_output(Nifti1Image(data, np.diag([4, 5, 6, 1]))) with pytest.warns(UserWarning): # Suppress scipy warning - exp_out = spnd.affine_transform(data, - [1/4, 1/5, 1/6], - output_shape = (5, 11, 19)) + exp_out = spnd.affine_transform(data, [1 / 4, 1 / 5, 1 / 6], output_shape=(5, 11, 19)) assert_array_equal(out_img.dataobj, exp_out) # Unsubsample with voxel sizes - out_img = resample_to_output(Nifti1Image(data, np.diag([4, 5, 6, 1])), - [4, 5, 6]) + out_img = resample_to_output(Nifti1Image(data, np.diag([4, 5, 6, 1])), [4, 5, 6]) assert_array_equal(out_img.dataobj, data) # A rotation to test nearest, order, cval rot_3 = from_matvec(euler2mat(np.pi / 4), [0, 0, 0]) @@ -269,10 +265,9 @@ def test_resample_to_output(caplog): out_img = resample_to_output(rot_3_img) exp_shape = (4, 4, 4) assert out_img.shape == exp_shape - exp_aff = np.array([[1, 0, 0, -2 * np.cos(np.pi / 4)], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]) + exp_aff = np.array( + [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] + ) assert_almost_equal(out_img.affine, exp_aff) rzs, trans = to_matvec(np.dot(npl.inv(rot_3), exp_aff)) exp_out = spnd.affine_transform(data, rzs, trans, exp_shape) @@ -280,15 +275,18 @@ def test_resample_to_output(caplog): # Order assert_almost_equal( resample_to_output(rot_3_img, order=0).dataobj, - spnd.affine_transform(data, rzs, trans, exp_shape, order=0)) + spnd.affine_transform(data, rzs, trans, exp_shape, order=0), + ) # Cval assert_almost_equal( resample_to_output(rot_3_img, cval=99).dataobj, - spnd.affine_transform(data, rzs, trans, exp_shape, cval=99)) + spnd.affine_transform(data, rzs, trans, exp_shape, cval=99), + ) # Mode assert_almost_equal( resample_to_output(rot_3_img, mode='nearest').dataobj, - spnd.affine_transform(data, rzs, trans, exp_shape, mode='nearest')) + spnd.affine_transform(data, rzs, trans, exp_shape, mode='nearest'), + ) # out_class img_ni1 = Nifti2Image(data, np.eye(4)) img_ni2 = Nifti2Image(data, np.eye(4)) @@ -343,8 +341,7 @@ def test_smooth_image(caplog): exp_out = spnd.gaussian_filter(data, sd, mode='constant') assert_array_equal(smooth_image(img, 8, mode='constant').dataobj, exp_out) exp_out = spnd.gaussian_filter(data, sd, mode='constant', cval=99) - assert_array_equal(smooth_image(img, 8, mode='constant', cval=99).dataobj, - exp_out) + assert_array_equal(smooth_image(img, 8, mode='constant', cval=99).dataobj, exp_out) # out_class img_ni1 = Nifti1Image(data, np.eye(4)) img_ni2 = Nifti2Image(data, np.eye(4)) @@ -383,8 +380,7 @@ def test_spatial_axes_check(caplog): def assert_spm_resampling_close(from_img, our_resampled, spm_resampled): - """ Assert our resampling is close to SPM's, allowing for edge effects - """ + """Assert our resampling 
is close to SPM's, allowing for edge effects"""
     # To allow for differences in the way SPM and scipy.ndimage handle off-edge
     # interpolation, mask out voxels off edge
     to_img_shape = spm_resampled.shape
@@ -396,9 +392,9 @@ def assert_spm_resampling_close(from_img, our_resampled, spm_resampled):
     # Places where SPM may not return default value but scipy.ndimage will (SPM
     # does not return zeros <0.05 from image edges).
     # See: https://github.com/nipy/nibabel/pull/255#issuecomment-186774173
-    outside_vol = np.any((resamp_coords < 0) |
-                         (np.subtract(resamp_coords, from_img.shape) > -1),
-                         axis=-1)
+    outside_vol = np.any(
+        (resamp_coords < 0) | (np.subtract(resamp_coords, from_img.shape) > -1), axis=-1
+    )
     spm_res = np.where(outside_vol, np.nan, np.array(spm_resampled.dataobj))
     assert_allclose_safely(our_resampled.dataobj, spm_res)
     assert_almost_equal(our_resampled.affine, spm_resampled.affine, 5)
@@ -417,12 +413,8 @@ def test_against_spm_resample():
     func = nib.load(pjoin(DATA_DIR, 'functional.nii'))
     some_rotations = euler2mat(0.1, 0.2, 0.3)
     extra_affine = from_matvec(some_rotations, [3, 4, 5])
-    moved_anat = nib.Nifti1Image(anat.get_fdata(),
-                                 extra_affine.dot(anat.affine),
-                                 anat.header)
-    one_func = nib.Nifti1Image(func.dataobj[..., 0],
-                               func.affine,
-                               func.header)
+    moved_anat = nib.Nifti1Image(anat.get_fdata(), extra_affine.dot(anat.affine), anat.header)
+    one_func = nib.Nifti1Image(func.dataobj[..., 0], func.affine, func.header)
     moved2func = resample_from_to(moved_anat, one_func, order=1, cval=np.nan)
     spm_moved = nib.load(pjoin(DATA_DIR, 'resampled_anat_moved.nii'))
     assert_spm_resampling_close(moved_anat, moved2func, spm_moved)
@@ -431,7 +423,7 @@ def test_against_spm_resample():
     # John Ashburner).
     moved2output = resample_to_output(moved_anat, 4, order=1, cval=np.nan)
     spm2output = nib.load(pjoin(DATA_DIR, 'reoriented_anat_moved.nii'))
-    assert_spm_resampling_close(moved_anat, moved2output, spm2output);
+    assert_spm_resampling_close(moved_anat, moved2output, spm2output)


 @needs_scipy
@@ -448,8 +440,13 @@ def test_conform(caplog):

     # Test with non-default arguments.
     with caplog.at_level(logging.CRITICAL):  # Suppress logs when changing classes
-        c = conform(anat, out_shape=(100, 100, 200), voxel_size=(2, 2, 1.5),
-                    orientation="LPI", out_class=Nifti2Image)
+        c = conform(
+            anat,
+            out_shape=(100, 100, 200),
+            voxel_size=(2, 2, 1.5),
+            orientation='LPI',
+            out_class=Nifti2Image,
+        )
     assert c.shape == (100, 100, 200)
     assert c.header.get_zooms() == (2, 2, 1.5)
     assert c.dataobj.dtype.type == anat.dataobj.dtype.type
diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py
index 3b91709964..c2ca1ed27c 100644
--- a/nibabel/tests/test_proxy_api.py
+++ b/nibabel/tests/test_proxy_api.py
@@ -6,7 +6,7 @@
 # copyright and license terms.
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
-""" Validate image proxy API
+"""Validate image proxy API

 Minimum array proxy API is:

@@ -77,15 +77,13 @@ def _some_slicers(shape):
         slicers[i, i] = 0
     # Add a newaxis to keep us on our toes
     no_pos = ndim // 2
-    slicers = np.hstack((slicers[:, :no_pos],
-                         np.empty((ndim, 1)),
-                         slicers[:, no_pos:]))
+    slicers = np.hstack((slicers[:, :no_pos], np.empty((ndim, 1)), slicers[:, no_pos:]))
     slicers[:, no_pos] = None
     return [tuple(s) for s in slicers]


 class _TestProxyAPI(ValidateAPI):
-    """ Base class for testing proxy APIs
+    """Base class for testing proxy APIs

     Assumes that real classes will provide an `obj_params` method which is a
     generator returning 2 tuples of (<proxy_creator>, <proxy_params>).
@@ -97,6 +95,7 @@ class _TestProxyAPI(ValidateAPI):
     The <header>
above should support at least "get_data_dtype", "set_data_dtype", "get_data_shape", "set_data_shape" """ + # Flag True if offset can be set into header of image settable_offset = False @@ -203,11 +202,12 @@ def validate_proxy_slicing(self, pmaker, params): class TestAnalyzeProxyAPI(_TestProxyAPI): - """ Specific Analyze-type array proxy API test + """Specific Analyze-type array proxy API test The analyze proxy extends the general API by adding read-only attributes ``slope, inter, offset`` """ + proxy_class = ArrayProxy header_class = AnalyzeHeader shapes = ((2,), (2, 3), (2, 3, 4), (2, 3, 4, 5)) @@ -221,7 +221,7 @@ class TestAnalyzeProxyAPI(_TestProxyAPI): data_endian = '=' def obj_params(self): - """ Iterator returning (``proxy_creator``, ``proxy_params``) pairs + """Iterator returning (``proxy_creator``, ``proxy_params``) pairs Each pair will be tested separately. @@ -240,13 +240,11 @@ def obj_params(self): offsets = (0, 16) # For non-integral parameters, cast to float32 value can be losslessly cast # later, enabling exact checks, then back to float for consistency - slopes = (1., 2., float(np.float32(3.1416))) if self.has_slope else (1.,) - inters = (0., 10., float(np.float32(2.7183))) if self.has_inter else (0.,) - for shape, dtype, offset, slope, inter in product(self.shapes, - self.data_dtypes, - offsets, - slopes, - inters): + slopes = (1.0, 2.0, float(np.float32(3.1416))) if self.has_slope else (1.0,) + inters = (0.0, 10.0, float(np.float32(2.7183))) if self.has_inter else (0.0,) + for shape, dtype, offset, slope, inter in product( + self.shapes, self.data_dtypes, offsets, slopes, inters + ): n_els = np.prod(shape) dtype = np.dtype(dtype).newbyteorder(self.data_endian) arr = np.arange(n_els, dtype=dtype).reshape(shape) @@ -264,9 +262,7 @@ def obj_params(self): # and datatypes of slope, inter hdr.set_slope_inter(slope, inter) s, i = hdr.get_slope_inter() - tmp = apply_read_scaling(arr, - 1. if s is None else s, - 0. if i is None else i) + tmp = apply_read_scaling(arr, 1.0 if s is None else s, 0.0 if i is None else i) dtype_out = tmp.dtype.type def sio_func(): @@ -277,9 +273,7 @@ def sio_func(): # Use a copy of the header to avoid changing # global header in test functions. new_hdr = hdr.copy() - return (self.proxy_class(fio, new_hdr), - fio, - new_hdr) + return (self.proxy_class(fio, new_hdr), fio, new_hdr) params = dict( dtype=dtype, @@ -289,7 +283,8 @@ def sio_func(): shape=shape, offset=offset, slope=slope, - inter=inter) + inter=inter, + ) yield sio_func, params # Same with filenames with InTemporaryDirectory(): @@ -302,9 +297,8 @@ def fname_func(): # Use a copy of the header to avoid changing # global header in test functions. 
new_hdr = hdr.copy() - return (self.proxy_class(fname, new_hdr), - fname, - new_hdr) + return (self.proxy_class(fname, new_hdr), fname, new_hdr) + params = params.copy() yield fname_func, params @@ -339,8 +333,20 @@ class TestSpm2AnalyzeProxyAPI(TestSpm99AnalyzeProxyAPI): class TestNifti1ProxyAPI(TestSpm99AnalyzeProxyAPI): header_class = Nifti1Header has_inter = True - data_dtypes = (np.uint8, np.int16, np.int32, np.float32, np.complex64, np.float64, - np.int8, np.uint16, np.uint32, np.int64, np.uint64, np.complex128) + data_dtypes = ( + np.uint8, + np.int16, + np.int32, + np.float32, + np.complex64, + np.float64, + np.int8, + np.uint16, + np.uint32, + np.int64, + np.uint64, + np.complex128, + ) if have_binary128(): data_dtypes += (np.float128, np.complex256) @@ -366,7 +372,7 @@ def opener(f): return netcdf_file(f, mode='r') def obj_params(self): - """ Iterator returning (``proxy_creator``, ``proxy_params``) pairs + """Iterator returning (``proxy_creator``, ``proxy_params``) pairs Each pair will be tested separately. @@ -378,8 +384,7 @@ def obj_params(self): having an effect on the later tests in the same function. """ eg_path = pjoin(DATA_PATH, self.eg_fname) - arr_out = self.file_class( - self.opener(eg_path)).get_scaled_data() + arr_out = self.file_class(self.opener(eg_path)).get_scaled_data() def eg_func(): mf = self.file_class(self.opener(eg_path)) @@ -387,13 +392,12 @@ def eg_func(): img = self.module.load(eg_path) fobj = open(eg_path, 'rb') return prox, fobj, img.header - yield (eg_func, - dict(shape=self.eg_shape, - dtype_out=np.float64, - arr_out=arr_out)) + + yield (eg_func, dict(shape=self.eg_shape, dtype_out=np.float64, arr_out=arr_out)) if have_h5py: + class TestMinc2API(TestMinc1API): module = minc2 file_class = minc2.Minc2File @@ -420,32 +424,25 @@ def eg_func(): prox = ecat.EcatImageArrayProxy(sh) fobj = open(eg_path, 'rb') return prox, fobj, sh - yield (eg_func, - dict(shape=self.eg_shape, - dtype_out=np.float64, - arr_out=arr_out)) + + yield (eg_func, dict(shape=self.eg_shape, dtype_out=np.float64, arr_out=arr_out)) def validate_header_isolated(self, pmaker, params): raise unittest.SkipTest('ECAT header does not support dtype get') class TestPARRECAPI(_TestProxyAPI): - def _func_dict(self, rec_name): img = parrec.load(rec_name) arr_out = img.get_fdata() def eg_func(): img = parrec.load(rec_name) - prox = parrec.PARRECArrayProxy(rec_name, - img.header, - scaling='dv') + prox = parrec.PARRECArrayProxy(rec_name, img.header, scaling='dv') fobj = open(rec_name, 'rb') return prox, fobj, img.header - return (eg_func, - dict(shape=img.shape, - dtype_out=np.float64, - arr_out=arr_out)) + + return (eg_func, dict(shape=img.shape, dtype_out=np.float64, arr_out=arr_out)) def obj_params(self): yield self._func_dict(EG_REC) diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index fe50fc0199..3dc681f517 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test quaternion calculations """ +"""Test quaternion calculations""" import numpy as np from numpy import pi @@ -99,7 +99,7 @@ def test_inverse_0(): assert iq.dtype.kind == 'f' -@pytest.mark.parametrize("M, q", eg_pairs) +@pytest.mark.parametrize('M, q', eg_pairs) def test_inverse_1(M, q): iq = nq.inverse(q) iqM = nq.quat2mat(iq) @@ -122,15 +122,15 @@ def test_norm(): assert not nq.isunit(qi) -@pytest.mark.parametrize("M1, q1", eg_pairs[0::4]) -@pytest.mark.parametrize("M2, q2", eg_pairs[1::4]) +@pytest.mark.parametrize('M1, q1', eg_pairs[0::4]) +@pytest.mark.parametrize('M2, q2', eg_pairs[1::4]) def test_mult(M1, q1, M2, q2): # Test that quaternion * same as matrix * q21 = nq.mult(q2, q1) assert_array_almost_equal, np.dot(M2, M1), nq.quat2mat(q21) -@pytest.mark.parametrize("M, q", eg_pairs) +@pytest.mark.parametrize('M, q', eg_pairs) def test_inverse(M, q): iq = nq.inverse(q) iqM = nq.quat2mat(iq) @@ -144,15 +144,15 @@ def test_eye(): assert np.allclose(nq.quat2mat(qi), np.eye(3)) -@pytest.mark.parametrize("vec", np.eye(3)) -@pytest.mark.parametrize("M, q", eg_pairs) +@pytest.mark.parametrize('vec', np.eye(3)) +@pytest.mark.parametrize('M, q', eg_pairs) def test_qrotate(vec, M, q): vdash = nq.rotate_vector(vec, q) vM = np.dot(M, vec) assert_array_almost_equal(vdash, vM) -@pytest.mark.parametrize("q", unit_quats) +@pytest.mark.parametrize('q', unit_quats) def test_quaternion_reconstruction(q): # Test reconstruction of arbitrary unit quaternions M = nq.quat2mat(q) @@ -160,7 +160,7 @@ def test_quaternion_reconstruction(q): # Accept positive or negative match posm = np.allclose(q, qt) negm = np.allclose(q, -qt) - assert (posm or negm) + assert posm or negm def test_angle_axis2quat(): diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 127a7b0704..1d903d6f9f 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests recoder class """ +"""Tests recoder class""" import numpy as np @@ -24,6 +24,7 @@ def test_recoder_1(): with pytest.raises(KeyError): rc.code[3] + def test_recoder_2(): # with explicit name for code codes = ((1,), (2,)) @@ -49,6 +50,7 @@ def test_recoder_3(): with pytest.raises(AttributeError): rc.label + def test_recoder_4(): # with explicit column names codes = ((1, 'one'), (2, 'two')) @@ -86,7 +88,6 @@ def test_recoder_6(): def test_custom_dicter(): # Allow custom dict-like object in constructor class MyDict: - def __init__(self): self._keys = [] @@ -103,6 +104,7 @@ def keys(self): def values(self): return ['funny', 'list'] + # code, label, aliases codes = ((1, 'one', '1', 'first'), (2, 'two')) rc = Recoder(codes, map_maker=MyDict) diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index c54a069e55..9300dfa207 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -4,77 +4,106 @@ import pytest MODULE_SCHEDULE = [ - ("5.0.0", ["nibabel.keywordonly", "nibabel.py3k"]), - ("4.0.0", ["nibabel.trackvis"]), - ("3.0.0", ["nibabel.minc", "nibabel.checkwarns"]), + ('5.0.0', ['nibabel.keywordonly', 'nibabel.py3k']), + ('4.0.0', ['nibabel.trackvis']), + ('3.0.0', ['nibabel.minc', 'nibabel.checkwarns']), # Verify that the test will be quiet if the schedule outlives the modules - ("1.0.0", ["nibabel.nosuchmod"]), + ('1.0.0', ['nibabel.nosuchmod']), ] OBJECT_SCHEDULE = [ - ("7.0.0", [("nibabel.gifti.gifti", "GiftiNVPairs"), - ]), - ("6.0.0", [("nibabel.loadsave", "guessed_image_type"), - ("nibabel.loadsave", "read_img_data"), - ("nibabel.orientations", "flip_axis"), - ("nibabel.pydicom_compat", "dicom_test"), - ("nibabel.onetime", "setattr_on_read"), - ]), - ("5.0.0", [("nibabel.gifti.gifti", "data_tag"), - ("nibabel.gifti.giftiio", "read"), - ("nibabel.gifti.giftiio", "write"), - ("nibabel.gifti.parse_gifti_fast", "Outputter"), - ("nibabel.gifti.parse_gifti_fast", "parse_gifti_file"), - ("nibabel.imageclasses", "ext_map"), - ("nibabel.imageclasses", "class_map"), - ("nibabel.loadsave", "which_analyze_type"), - ("nibabel.volumeutils", "BinOpener"), - ("nibabel.volumeutils", "allopen"), - ("nibabel.orientations", "orientation_affine"), - ("nibabel.spatialimages", "Header"), - ]), - ("4.0.0", [("nibabel.minc1", "MincFile"), - ("nibabel.minc1", "MincImage")]), - ("3.0.0", [("nibabel.testing", "catch_warn_reset")]), + ( + '7.0.0', + [ + ('nibabel.gifti.gifti', 'GiftiNVPairs'), + ], + ), + ( + '6.0.0', + [ + ('nibabel.loadsave', 'guessed_image_type'), + ('nibabel.loadsave', 'read_img_data'), + ('nibabel.orientations', 'flip_axis'), + ('nibabel.pydicom_compat', 'dicom_test'), + ('nibabel.onetime', 'setattr_on_read'), + ], + ), + ( + '5.0.0', + [ + ('nibabel.gifti.gifti', 'data_tag'), + ('nibabel.gifti.giftiio', 'read'), + ('nibabel.gifti.giftiio', 'write'), + ('nibabel.gifti.parse_gifti_fast', 'Outputter'), + ('nibabel.gifti.parse_gifti_fast', 'parse_gifti_file'), + ('nibabel.imageclasses', 'ext_map'), + ('nibabel.imageclasses', 'class_map'), + ('nibabel.loadsave', 'which_analyze_type'), + ('nibabel.volumeutils', 'BinOpener'), + ('nibabel.volumeutils', 'allopen'), + ('nibabel.orientations', 'orientation_affine'), + ('nibabel.spatialimages', 'Header'), + ], + ), + ('4.0.0', [('nibabel.minc1', 'MincFile'), ('nibabel.minc1', 'MincImage')]), + ('3.0.0', [('nibabel.testing', 'catch_warn_reset')]), # Verify that the test will be quiet if the 
schedule outlives the modules - ("1.0.0", [("nibabel.nosuchmod", "anyobj"), ("nibabel.nifti1", "nosuchobj")]), + ('1.0.0', [('nibabel.nosuchmod', 'anyobj'), ('nibabel.nifti1', 'nosuchobj')]), ] ATTRIBUTE_SCHEDULE = [ - ("7.0.0", [("nibabel.gifti.gifti", "GiftiMetaData", "from_dict"), - ("nibabel.gifti.gifti", "GiftiMetaData", "metadata"), - ("nibabel.gifti.gifti", "GiftiMetaData", "data"), - ]), - ("5.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_data"), - ("nibabel.freesurfer.mghformat", "MGHHeader", "_header_data"), - ("nibabel.gifti.gifti", "GiftiDataArray", "from_array"), - ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_open"), - ("nibabel.gifti.gifti", "GiftiDataArray", "to_xml_close"), - ("nibabel.gifti.gifti", "GiftiDataArray", "get_metadata"), - ("nibabel.gifti.gifti", "GiftiImage", "get_labeltable"), - ("nibabel.gifti.gifti", "GiftiImage", "set_labeltable"), - ("nibabel.gifti.gifti", "GiftiImage", "get_metadata"), - ("nibabel.gifti.gifti", "GiftiImage", "set_metadata"), - ("nibabel.gifti.gifti", "GiftiImage", "getArraysFromIntent"), - ("nibabel.gifti.gifti", "GiftiMetaData", "get_metadata"), - ("nibabel.gifti.gifti", "GiftiLabel", "get_rgba"), - ("nibabel.nicom.dicomwrappers", "Wrapper", "get_affine"), - ("nibabel.streamlines.array_sequence", "ArraySequence", "data"), - ("nibabel.ecat", "EcatImage", "from_filespec"), - ("nibabel.filebasedimages", "FileBasedImage", "get_header"), - ("nibabel.spatialimages", "SpatialImage", "get_affine"), - ("nibabel.arraywriters", "ArrayWriter", "_check_nan2zero"), - ]), - ("4.0.0", [("nibabel.dataobj_images", "DataobjImage", "get_shape"), - ("nibabel.filebasedimages", "FileBasedImage", "filespec_to_files"), - ("nibabel.filebasedimages", "FileBasedImage", "to_filespec"), - ("nibabel.filebasedimages", "FileBasedImage", "to_files"), - ("nibabel.filebasedimages", "FileBasedImage", "from_files"), - ("nibabel.arrayproxy", "ArrayProxy", "header")]), + ( + '7.0.0', + [ + ('nibabel.gifti.gifti', 'GiftiMetaData', 'from_dict'), + ('nibabel.gifti.gifti', 'GiftiMetaData', 'metadata'), + ('nibabel.gifti.gifti', 'GiftiMetaData', 'data'), + ], + ), + ( + '5.0.0', + [ + ('nibabel.dataobj_images', 'DataobjImage', 'get_data'), + ('nibabel.freesurfer.mghformat', 'MGHHeader', '_header_data'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'from_array'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'to_xml_open'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'to_xml_close'), + ('nibabel.gifti.gifti', 'GiftiDataArray', 'get_metadata'), + ('nibabel.gifti.gifti', 'GiftiImage', 'get_labeltable'), + ('nibabel.gifti.gifti', 'GiftiImage', 'set_labeltable'), + ('nibabel.gifti.gifti', 'GiftiImage', 'get_metadata'), + ('nibabel.gifti.gifti', 'GiftiImage', 'set_metadata'), + ('nibabel.gifti.gifti', 'GiftiImage', 'getArraysFromIntent'), + ('nibabel.gifti.gifti', 'GiftiMetaData', 'get_metadata'), + ('nibabel.gifti.gifti', 'GiftiLabel', 'get_rgba'), + ('nibabel.nicom.dicomwrappers', 'Wrapper', 'get_affine'), + ('nibabel.streamlines.array_sequence', 'ArraySequence', 'data'), + ('nibabel.ecat', 'EcatImage', 'from_filespec'), + ('nibabel.filebasedimages', 'FileBasedImage', 'get_header'), + ('nibabel.spatialimages', 'SpatialImage', 'get_affine'), + ('nibabel.arraywriters', 'ArrayWriter', '_check_nan2zero'), + ], + ), + ( + '4.0.0', + [ + ('nibabel.dataobj_images', 'DataobjImage', 'get_shape'), + ('nibabel.filebasedimages', 'FileBasedImage', 'filespec_to_files'), + ('nibabel.filebasedimages', 'FileBasedImage', 'to_filespec'), + ('nibabel.filebasedimages', 'FileBasedImage', 
'to_files'), + ('nibabel.filebasedimages', 'FileBasedImage', 'from_files'), + ('nibabel.arrayproxy', 'ArrayProxy', 'header'), + ], + ), # Verify that the test will be quiet if the schedule outlives the modules - ("1.0.0", [("nibabel.nosuchmod", "anyobj", "anyattr"), - ("nibabel.nifti1", "nosuchobj", "anyattr"), - ("nibabel.nifti1", "Nifti1Image", "nosuchattr")]), + ( + '1.0.0', + [ + ('nibabel.nosuchmod', 'anyobj', 'anyattr'), + ('nibabel.nifti1', 'nosuchobj', 'anyattr'), + ('nibabel.nifti1', 'Nifti1Image', 'nosuchattr'), + ], + ), ] @@ -86,7 +115,7 @@ def test_module_removal(): for module in _filter(MODULE_SCHEDULE): with pytest.raises(ImportError): __import__(module) - assert False, f"Time to remove {module}" + assert False, f'Time to remove {module}' def test_object_removal(): @@ -95,7 +124,7 @@ def test_object_removal(): module = __import__(module_name) except ImportError: continue - assert not hasattr(module, obj), f"Time to remove {module_name}.{obj}" + assert not hasattr(module, obj), f'Time to remove {module_name}.{obj}' def test_attribute_removal(): @@ -108,29 +137,29 @@ def test_attribute_removal(): klass = getattr(module, cls) except AttributeError: continue - assert not hasattr(klass, attr), f"Time to remove {module_name}.{cls}.{attr}" + assert not hasattr(klass, attr), f'Time to remove {module_name}.{cls}.{attr}' # # Test the tests, making sure that we will get errors when the time comes # -_sched = "nibabel.tests.test_removalschedule.{}_SCHEDULE".format +_sched = 'nibabel.tests.test_removalschedule.{}_SCHEDULE'.format -@mock.patch(_sched("MODULE"), [("3.0.0", ["nibabel.nifti1"])]) +@mock.patch(_sched('MODULE'), [('3.0.0', ['nibabel.nifti1'])]) def test_unremoved_module(): with pytest.raises(AssertionError): test_module_removal() -@mock.patch(_sched("OBJECT"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image")])]) +@mock.patch(_sched('OBJECT'), [('3.0.0', [('nibabel.nifti1', 'Nifti1Image')])]) def test_unremoved_object(): with pytest.raises(AssertionError): test_object_removal() -@mock.patch(_sched("ATTRIBUTE"), [("3.0.0", [("nibabel.nifti1", "Nifti1Image", "affine")])]) +@mock.patch(_sched('ATTRIBUTE'), [('3.0.0', [('nibabel.nifti1', 'Nifti1Image', 'affine')])]) def test_unremoved_attr(): with pytest.raises(AssertionError): test_attribute_removal() diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index dfc53a2bdb..54ab79a928 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -1,4 +1,4 @@ -""" Test numerical errors introduced by writing then reading images +"""Test numerical errors introduced by writing then reading images Test arrays with a range of numerical values, integer and floating point. 
""" @@ -43,7 +43,7 @@ def check_params(in_arr, in_type, out_type): def big_bad_ulp(arr): - """ Return array of ulp values for values in `arr` + """Return array of ulp values for values in `arr` I haven't thought about whether the vectorized log2 here could lead to incorrect rounding; this only needs to be ballpark @@ -70,7 +70,7 @@ def big_bad_ulp(arr): nzs = working_arr > 0 fl2[nzs] = np.floor(np.log(working_arr[nzs]) / LOGe2) fl2 = np.clip(fl2, info['minexp'], np.inf) - return 2**(fl2 - info['nmant']) + return 2 ** (fl2 - info['nmant']) def test_big_bad_ulp(): @@ -80,8 +80,17 @@ def test_big_bad_ulp(): min_ulp = 2 ** (ti['minexp'] - ti['nmant']) in_arr = np.zeros((10,), dtype=ftype) in_arr = np.array([0, 0, 1, 2, 4, 5, -5, -np.inf, np.inf], dtype=ftype) - out_arr = [min_ulp, min_ulp, fi.eps, fi.eps * 2, fi.eps * 4, - fi.eps * 4, fi.eps * 4, np.inf, np.inf] + out_arr = [ + min_ulp, + min_ulp, + fi.eps, + fi.eps * 2, + fi.eps * 4, + fi.eps * 4, + fi.eps * 4, + np.inf, + np.inf, + ] assert_array_equal(big_bad_ulp(in_arr).astype(ftype), out_arr) @@ -158,8 +167,7 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): with np.errstate(over='ignore'): Ai = arr - scaling_type(inter) Ais = Ai / scaling_type(slope) - exp_abs_err = inting_err + inter_err + ( - big_bad_ulp(Ai) + big_bad_ulp(Ais)) + exp_abs_err = inting_err + inter_err + (big_bad_ulp(Ai) + big_bad_ulp(Ais)) # Relative scaling error from calculation of slope # This threshold needs to be 2 x larger on windows 32 bit and PPC for # some reason @@ -167,8 +175,8 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): test_vals = (abs_err <= exp_abs_err) | (rel_err <= rel_thresh) this_test = np.all(test_vals) if DEBUG: - abs_fails = (abs_err > exp_abs_err) - rel_fails = (rel_err > rel_thresh) + abs_fails = abs_err > exp_abs_err + rel_fails = rel_err > rel_thresh all_fails = abs_fails & rel_fails if np.any(rel_fails): abs_mx_e = abs_err[rel_fails].max() @@ -180,14 +188,19 @@ def check_arr(test_id, V_in, in_type, out_type, scaling_type): rel_mx_e = rel_err[abs_fails].max() else: rel_mx_e = None - print((test_id, - np.dtype(in_type).str, - np.dtype(out_type).str, - exp_abs_mx_e, - abs_mx_e, - rel_thresh, - rel_mx_e, - slope, inter)) + print( + ( + test_id, + np.dtype(in_type).str, + np.dtype(out_type).str, + exp_abs_mx_e, + abs_mx_e, + rel_thresh, + rel_mx_e, + slope, + inter, + ) + ) # To help debugging failures with --pdb-failure np.nonzero(all_fails) assert this_test diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 4fb83d3170..55a0aace7c 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -1,4 +1,4 @@ -""" Test printable table +"""Test printable table """ import numpy as np @@ -12,40 +12,47 @@ def test_rst_table(): # Tests for printable table function R, C = 3, 4 cell_values = np.arange(R * C).reshape((R, C)) - assert (rst_table(cell_values) == - """+--------+--------+--------+--------+--------+ + assert ( + rst_table(cell_values) + == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+--------+--------+--------+--------+""") - assert (rst_table(cell_values, ['a', 'b', 'c']) == - """+---+--------+--------+--------+--------+ ++--------+--------+--------+--------+--------+""" + ) + assert ( + rst_table(cell_values, ['a', 'b', 'c']) + == 
"""+---+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +===+========+========+========+========+ | a | 0.00 | 1.00 | 2.00 | 3.00 | | b | 4.00 | 5.00 | 6.00 | 7.00 | | c | 8.00 | 9.00 | 10.00 | 11.00 | -+---+--------+--------+--------+--------+""") ++---+--------+--------+--------+--------+""" + ) with pytest.raises(ValueError): rst_table(cell_values, ['a', 'b']) with pytest.raises(ValueError): rst_table(cell_values, ['a', 'b', 'c', 'd']) - assert (rst_table(cell_values, None, ['1', '2', '3', '4']) == - """+--------+-------+-------+-------+-------+ + assert ( + rst_table(cell_values, None, ['1', '2', '3', '4']) + == """+--------+-------+-------+-------+-------+ | | 1 | 2 | 3 | 4 | +========+=======+=======+=======+=======+ | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+-------+-------+-------+-------+""") ++--------+-------+-------+-------+-------+""" + ) with pytest.raises(ValueError): rst_table(cell_values, None, ['1', '2', '3']) with pytest.raises(ValueError): rst_table(cell_values, None, list('12345')) - assert (rst_table(cell_values, title='A title') == - """******* + assert ( + rst_table(cell_values, title='A title') + == """******* A title ******* @@ -55,35 +62,36 @@ def test_rst_table(): | row[0] | 0.00 | 1.00 | 2.00 | 3.00 | | row[1] | 4.00 | 5.00 | 6.00 | 7.00 | | row[2] | 8.00 | 9.00 | 10.00 | 11.00 | -+--------+--------+--------+--------+--------+""") - assert (rst_table(cell_values, val_fmt='{0}') == - """+--------+--------+--------+--------+--------+ ++--------+--------+--------+--------+--------+""" + ) + assert ( + rst_table(cell_values, val_fmt='{0}') + == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0 | 1 | 2 | 3 | | row[1] | 4 | 5 | 6 | 7 | | row[2] | 8 | 9 | 10 | 11 | -+--------+--------+--------+--------+--------+""") ++--------+--------+--------+--------+--------+""" + ) # Doing a fancy cell format cell_values_back = np.arange(R * C)[::-1].reshape((R, C)) cell_3d = np.dstack((cell_values, cell_values_back)) - assert (rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}') == - """+--------+--------+--------+--------+--------+ + assert ( + rst_table(cell_3d, val_fmt='{0[0]}-{0[1]}') + == """+--------+--------+--------+--------+--------+ | | col[0] | col[1] | col[2] | col[3] | +========+========+========+========+========+ | row[0] | 0-11 | 1-10 | 2-9 | 3-8 | | row[1] | 4-7 | 5-6 | 6-5 | 7-4 | | row[2] | 8-3 | 9-2 | 10-1 | 11-0 | -+--------+--------+--------+--------+--------+""") ++--------+--------+--------+--------+--------+""" + ) # Test formatting characters - formats = dict( - down='!', - along='_', - thick_long='~', - cross='%', - title_heading='#') - assert (rst_table(cell_values, title='A title', format_chars=formats) == - """####### + formats = dict(down='!', along='_', thick_long='~', cross='%', title_heading='#') + assert ( + rst_table(cell_values, title='A title', format_chars=formats) + == """####### A title ####### @@ -93,7 +101,8 @@ def test_rst_table(): ! row[0] ! 0.00 ! 1.00 ! 2.00 ! 3.00 ! ! row[1] ! 4.00 ! 5.00 ! 6.00 ! 7.00 ! ! row[2] ! 8.00 ! 9.00 ! 10.00 ! 11.00 ! -%________%________%________%________%________%""") +%________%________%________%________%________%""" + ) formats['funny_value'] = '!' 
with pytest.raises(ValueError): rst_table(cell_values, title='A title', format_chars=formats) diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index b1a00c0570..e705a96c83 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for scaling / rounding in volumeutils module """ +"""Test for scaling / rounding in volumeutils module""" import numpy as np import warnings @@ -18,7 +18,7 @@ from .test_volumeutils import _calculate_scale -from numpy.testing import (assert_array_almost_equal, assert_array_equal) +from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest @@ -26,30 +26,33 @@ DEBUG = True -@pytest.mark.parametrize("in_arr, res", [ - ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), - (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), - ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices - (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), - ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices - (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), - ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case - ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), - ([np.nan], (np.inf, -np.inf)), - ([np.inf], (np.inf, -np.inf)), - ([-np.inf], (np.inf, -np.inf)), - ([np.inf, 1], (1, 1)), # only look at finite values - ([-np.inf, 1], (1, 1)), - ([[], []], (np.inf, -np.inf)), # empty array - (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)), - (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), - ([0., 1, 2, 3], (0, 3)), - # Complex comparison works as if they are floats - ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), - ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), -]) +@pytest.mark.parametrize( + 'in_arr, res', + [ + ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)), + (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)), + ([[np.nan], [np.nan]], (np.inf, -np.inf)), # all nans slices + (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)), + ([[-np.inf], [np.inf]], (np.inf, -np.inf)), # all infs slices + (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)), + ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)), + ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), + ([[-np.inf, 2], [np.nan, 1]], (1, 2)), # good max case + ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)), + ([np.nan], (np.inf, -np.inf)), + ([np.inf], (np.inf, -np.inf)), + ([-np.inf], (np.inf, -np.inf)), + ([np.inf, 1], (1, 1)), # only look at finite values + ([-np.inf, 1], (1, 1)), + ([[], []], (np.inf, -np.inf)), # empty array + (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)), + (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)), + ([0.0, 1, 2, 3], (0, 3)), + # Complex comparison works as if they are floats + ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)), + ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)), + ], +) def test_finite_range(in_arr, res): # Finite range utility function assert finite_range(in_arr) == res @@ -71,12 +74,12 @@ def test_finite_range(in_arr, res): def test_finite_range_err(): # Test error cases - a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')]) + a = np.array([[1.0, 0, 1], [2, 3, 4]]).view([('f1', 'f')]) with 
pytest.raises(TypeError): finite_range(a) -@pytest.mark.parametrize("out_type", [np.int16, np.float32]) +@pytest.mark.parametrize('out_type', [np.int16, np.float32]) def test_a2f_mn_mx(out_type): # Test array to file mn, mx handling str_io = BytesIO() @@ -111,7 +114,7 @@ def test_a2f_mn_mx(out_type): def test_a2f_nan2zero(): # Test conditions under which nans written to zero - arr = np.array([np.nan, 99.], dtype=np.float32) + arr = np.array([np.nan, 99.0], dtype=np.float32) str_io = BytesIO() array_to_file(arr, str_io) data_back = array_from_file(arr.shape, np.float32, str_io) @@ -132,14 +135,17 @@ def test_a2f_nan2zero(): assert_array_equal(data_back, [np.array(np.nan).astype(np.int32), 99]) -@pytest.mark.parametrize("in_type, out_type", [ - (np.int16, np.int16), - (np.int16, np.int8), - (np.uint16, np.uint8), - (np.int32, np.int8), - (np.float32, np.uint8), - (np.float32, np.int16) -]) +@pytest.mark.parametrize( + 'in_type, out_type', + [ + (np.int16, np.int16), + (np.int16, np.int8), + (np.uint16, np.uint8), + (np.int32, np.int8), + (np.float32, np.uint8), + (np.float32, np.int16), + ], +) def test_array_file_scales(in_type, out_type): # Test scaling works for max, min when going from larger to smaller type, # and from float to integer. @@ -154,21 +160,24 @@ def test_array_file_scales(in_type, out_type): arr2 = array_from_file(arr.shape, out_dtype, bio) arr3 = apply_read_scaling(arr2, slope, inter) # Max rounding error for integer type - max_miss = slope / 2. + max_miss = slope / 2.0 assert np.all(np.abs(arr - arr3) <= max_miss) -@pytest.mark.parametrize("category0, category1, overflow",[ - # Confirm that, for all ints and uints as input, and all possible outputs, - # for any simple way of doing the calculation, the result is near enough - ('int', 'int', False), - ('uint', 'int', False), - # Converting floats to integer - ('float', 'int', True), - ('float', 'uint', True), - ('complex', 'int', True), - ('complex', 'uint', True), -]) +@pytest.mark.parametrize( + 'category0, category1, overflow', + [ + # Confirm that, for all ints and uints as input, and all possible outputs, + # for any simple way of doing the calculation, the result is near enough + ('int', 'int', False), + ('uint', 'int', False), + # Converting floats to integer + ('float', 'int', True), + ('float', 'uint', True), + ('complex', 'int', True), + ('complex', 'uint', True), + ], +) def test_scaling_in_abstract(category0, category1, overflow): for in_type in np.sctypes[category0]: for out_type in np.sctypes[category1]: diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index 61a41f54ad..e4006788c1 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -1,6 +1,6 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -""" Test scripts +"""Test scripts Test running scripts """ @@ -8,8 +8,7 @@ import sys import os import shutil -from os.path import (dirname, join as pjoin, abspath, splitext, basename, - exists) +from os.path import dirname, join as pjoin, abspath, splitext, basename, exists import csv from glob import glob @@ -27,8 +26,7 @@ from .scriptrunner import ScriptRunner from .nibabel_data import needs_nibabel_data from ..testing import assert_dt_equal, assert_re_in -from .test_parrec import (DTI_PAR_BVECS, DTI_PAR_BVALS, - EXAMPLE_IMAGES as PARREC_EXAMPLES) +from .test_parrec import DTI_PAR_BVECS, DTI_PAR_BVALS, EXAMPLE_IMAGES as PARREC_EXAMPLES from .test_parrec_data import BALLS, AFF_OFF from ..testing 
import assert_data_similar @@ -39,9 +37,8 @@ def _proc_stdout(stdout): runner = ScriptRunner( - script_sdir='bin', - debug_print_var='NIPY_DEBUG_PRINT', - output_processor=_proc_stdout) + script_sdir='bin', debug_print_var='NIPY_DEBUG_PRINT', output_processor=_proc_stdout +) run_command = runner.run_command @@ -49,6 +46,8 @@ def script_test(func): # Decorator to label test as a script_test func.script_test = True return func + + script_test.__test__ = False # It's not a test DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) @@ -62,59 +61,91 @@ def load_small_file(): return False -def check_nib_ls_example4d(opts=[], hdrs_str="", other_str=""): +def check_nib_ls_example4d(opts=[], hdrs_str='', other_str=''): # test nib-ls script fname = pjoin(DATA_PATH, 'example4d.nii.gz') - expected_re = (" (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 " - f"#exts: 2{hdrs_str} sform{other_str}$") + expected_re = ( + ' (int16|[<>]i2) \\[128, 96, 24, 2\\] 2.00x2.00x2.20x2000.00 ' + f'#exts: 2{hdrs_str} sform{other_str}$' + ) cmd = ['nib-ls'] + opts + [fname] code, stdout, stderr = run_command(cmd) - assert fname == stdout[:len(fname)] - assert_re_in(expected_re, stdout[len(fname):]) + assert fname == stdout[: len(fname)] + assert_re_in(expected_re, stdout[len(fname) :]) def check_nib_diff_examples(): - fnames = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'example4d.nii.gz')] + fnames = [pjoin(DATA_PATH, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames, check_code=False) - checked_fields = ["Field/File", "regular", "dim_info", "dim", "datatype", "bitpix", "pixdim", "slice_end", - "xyzt_units", "cal_max", "descrip", "qform_code", "sform_code", "quatern_b", - "quatern_c", "quatern_d", "qoffset_x", "qoffset_y", "qoffset_z", "srow_x", - "srow_y", "srow_z", "DATA(md5)", "DATA(diff 1:)"] + checked_fields = [ + 'Field/File', + 'regular', + 'dim_info', + 'dim', + 'datatype', + 'bitpix', + 'pixdim', + 'slice_end', + 'xyzt_units', + 'cal_max', + 'descrip', + 'qform_code', + 'sform_code', + 'quatern_b', + 'quatern_c', + 'quatern_d', + 'qoffset_x', + 'qoffset_y', + 'qoffset_z', + 'srow_x', + 'srow_y', + 'srow_z', + 'DATA(md5)', + 'DATA(diff 1:)', + ] for item in checked_fields: assert item in stdout - fnames2 = [pjoin(DATA_PATH, f) - for f in ('example4d.nii.gz', 'example4d.nii.gz')] + fnames2 = [pjoin(DATA_PATH, f) for f in ('example4d.nii.gz', 'example4d.nii.gz')] code, stdout, stderr = run_command(['nib-diff'] + fnames2, check_code=False) - assert stdout == "These files are identical." + assert stdout == 'These files are identical.' - fnames3 = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz')] + fnames3 = [ + pjoin(DATA_PATH, f) + for f in ('standard.nii.gz', 'example4d.nii.gz', 'example_nifti2.nii.gz') + ] code, stdout, stderr = run_command(['nib-diff'] + fnames3, check_code=False) for item in checked_fields: assert item in stdout - fnames4 = [pjoin(DATA_PATH, f) - for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz')] + fnames4 = [ + pjoin(DATA_PATH, f) for f in ('standard.nii.gz', 'standard.nii.gz', 'standard.nii.gz') + ] code, stdout, stderr = run_command(['nib-diff'] + fnames4, check_code=False) - assert stdout == "These files are identical." + assert stdout == 'These files are identical.' 
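check_nib_diff_examples above drives the nib-diff console script through the test suite's ScriptRunner helper; outside the tests the same comparison can be reproduced with the standard library. A sketch, using file names from the test data referenced above:

    import subprocess

    # Field-by-field header/data comparison of two images; identical
    # inputs print 'These files are identical.' instead.
    result = subprocess.run(
        ['nib-diff', 'standard.nii.gz', 'example4d.nii.gz'],
        capture_output=True,
        text=True,
    )
    print(result.stdout)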
code, stdout, stderr = run_command(['nib-diff', '--dt', 'float64'] + fnames, check_code=False) for item in checked_fields: assert item in stdout -@pytest.mark.parametrize("args", [ - [], - [['-H', 'dim,bitpix'], r" \[ 4 128 96 24 2 1 1 1\] 16"], - [['-c'], "", " !1030 uniques. Use --all-counts"], - [['-c', '--all-counts'], "", " 2:3 3:2 4:1 5:1.*"], - # both stats and counts - [['-c', '-s', '--all-counts'], "", r" \[229725\] \[2, 1.2e\+03\] 2:3 3:2 4:1 5:1.*"], - # and must not error out if we allow for zeros - [['-c', '-s', '-z', '--all-counts'], "", r" \[589824\] \[0, 1.2e\+03\] 0:360099 2:3 3:2 4:1 5:1.*"], -]) +@pytest.mark.parametrize( + 'args', + [ + [], + [['-H', 'dim,bitpix'], r' \[ 4 128 96 24 2 1 1 1\] 16'], + [['-c'], '', ' !1030 uniques. Use --all-counts'], + [['-c', '--all-counts'], '', ' 2:3 3:2 4:1 5:1.*'], + # both stats and counts + [['-c', '-s', '--all-counts'], '', r' \[229725\] \[2, 1.2e\+03\] 2:3 3:2 4:1 5:1.*'], + # and must not error out if we allow for zeros + [ + ['-c', '-s', '-z', '--all-counts'], + '', + r' \[589824\] \[0, 1.2e\+03\] 0:360099 2:3 3:2 4:1 5:1.*', + ], + ], +) @script_test def test_nib_ls(args): check_nib_ls_example4d(*args) @@ -126,8 +157,7 @@ def test_nib_ls_multiple(): # verify that correctly lists/formats for multiple files fnames = [ pjoin(DATA_PATH, f) - for f in ('example4d.nii.gz', 'example_nifti2.nii.gz', - 'small.mnc', 'nifti2.hdr') + for f in ('example4d.nii.gz', 'example_nifti2.nii.gz', 'small.mnc', 'nifti2.hdr') ] code, stdout, stderr = run_command(['nib-ls'] + fnames) stdout_lines = stdout.split('\n') @@ -136,30 +166,27 @@ def test_nib_ls_multiple(): # they should be indented correctly. Since all files are int type - ln = max(len(f) for f in fnames) i_str = ' i' if sys.byteorder == 'little' else ' -ve offset - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]), + ( + (2, 3, 4), + np.diag([-1, 1, 1, 1]), + None, + (2, 3, 4), + [ + [1, 0, 0, -1], # axis reversed -> -ve offset + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], + ), # zooms for affine > 1 -> larger grid with default 1mm output voxels - ((2, 3, 4), np.diag([4, 5, 6, 1]), None, - (5, 11, 19), np.eye(4)), + ((2, 3, 4), np.diag([4, 5, 6, 1]), None, (5, 11, 19), np.eye(4)), # set output voxels to be same size as input. 
back to original shape - ((2, 3, 4), np.diag([4, 5, 6, 1]), (4, 5, 6), - (2, 3, 4), np.diag([4, 5, 6, 1])), + ((2, 3, 4), np.diag([4, 5, 6, 1]), (4, 5, 6), (2, 3, 4), np.diag([4, 5, 6, 1])), # Translation preserved in output - ((2, 3, 4), trans_123, None, - (2, 3, 4), trans_123), - ((2, 3, 4), trans_m123, None, - (2, 3, 4), trans_m123), + ((2, 3, 4), trans_123, None, (2, 3, 4), trans_123), + ((2, 3, 4), trans_m123, None, (2, 3, 4), trans_m123), # rotation around 3rd axis - ((2, 3, 4), rot_3, None, - # x diff, y diff now 3 cos pi / 4 == 2.12, ceil to 3, add 1 - # most negative x now 2 cos pi / 4 - (4, 4, 4), [[1, 0, 0, -2 * np.cos(np.pi / 4)], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1]]), + ( + (2, 3, 4), + rot_3, + None, + # x diff, y diff now 3 cos pi / 4 == 2.12, ceil to 3, add 1 + # most negative x now 2 cos pi / 4 + (4, 4, 4), + [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + ), # Less than 3 axes - ((2, 3), np.eye(4), None, - (2, 3), np.eye(4)), - ((2,), np.eye(4), None, - (2,), np.eye(4)), + ((2, 3), np.eye(4), None, (2, 3), np.eye(4)), + ((2,), np.eye(4), None, (2,), np.eye(4)), # Number of voxel sizes matches length - ((2, 3), np.diag([4, 5, 6, 1]), (4, 5), - (2, 3), np.diag([4, 5, 1, 1])), + ((2, 3), np.diag([4, 5, 6, 1]), (4, 5), (2, 3), np.diag([4, 5, 1, 1])), ) @@ -105,21 +107,21 @@ def test_vox2out_vox(): def test_slice2volume(): # Get affine expressing selection of single slice from volume - for axis, def_aff in zip((0, 1, 2), ( + for axis, def_aff in zip( + (0, 1, 2), + ( [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]], [[1, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 1]], - [[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]])): + [[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]], + ), + ): for val in (0, 5, 10): exp_aff = np.array(def_aff) exp_aff[axis, -1] = val assert (slice2volume(val, axis) == exp_aff).all() -@pytest.mark.parametrize("index, axis", [ - [-1, 0], - [0, -1], - [0, 3] -]) +@pytest.mark.parametrize('index, axis', [[-1, 0], [0, -1], [0, 3]]) def test_slice2volume_exception(index, axis): with pytest.raises(ValueError): - slice2volume(index, axis) \ No newline at end of file + slice2volume(index, axis) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 52eff4be72..cdbe8dc9f2 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -6,8 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Testing spatialimages - +"""Testing spatialimages """ import warnings @@ -33,6 +32,7 @@ from ..tmpdirs import InTemporaryDirectory from .. 
import load as top_load + def test_header_init(): # test the basic header hdr = SpatialHeader() @@ -70,12 +70,15 @@ def test_from_header(): assert hdr is not copy class C: + def get_data_dtype(self): + return np.dtype('u2') - def get_data_dtype(self): return np.dtype('u2') + def get_data_shape(self): + return (5, 4, 3) - def get_data_shape(self): return (5, 4, 3) + def get_zooms(self): + return (10.0, 9.0, 8.0) - def get_zooms(self): return (10.0, 9.0, 8.0) converted = SpatialHeader.from_header(C()) assert isinstance(converted, SpatialHeader) assert converted.get_data_dtype() == np.dtype('u2') @@ -151,23 +154,20 @@ def test_data_dtype(): def test_affine(): hdr = SpatialHeader(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) - assert_array_almost_equal(hdr.get_best_affine(), - [[-3.0, 0, 0, 0], - [0, 2, 0, -1], - [0, 0, 1, -1], - [0, 0, 0, 1]]) + assert_array_almost_equal( + hdr.get_best_affine(), [[-3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + ) hdr.default_x_flip = False - assert_array_almost_equal(hdr.get_best_affine(), - [[3.0, 0, 0, 0], - [0, 2, 0, -1], - [0, 0, 1, -1], - [0, 0, 0, 1]]) + assert_array_almost_equal( + hdr.get_best_affine(), [[3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + ) assert np.array_equal(hdr.get_base_affine(), hdr.get_best_affine()) def test_read_data(): class CHeader(SpatialHeader): data_layout = 'C' + for klass, order in ((SpatialHeader, 'F'), (CHeader, 'C')): hdr = klass(np.int32, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) fobj = BytesIO() @@ -359,7 +359,7 @@ def test_get_fdata(self): assert rt_img.get_fdata() is not out_data assert (rt_img.get_fdata() == in_data).all() - @expires("5.0.0") + @expires('5.0.0') def test_get_data(self): # Test array image and proxy image interface img_klass = self.image_class @@ -399,8 +399,7 @@ def test_slicer(self): in_data_template = np.arange(240, dtype=np.int16) base_affine = np.eye(4) t_axis = None - for dshape in ((4, 5, 6, 2), # Time series - (8, 5, 6)): # Volume + for dshape in ((4, 5, 6, 2), (8, 5, 6)): # Time series # Volume in_data = in_data_template.copy().reshape(dshape) img = img_klass(in_data, base_affine.copy()) @@ -408,11 +407,12 @@ def test_slicer(self): with pytest.raises(TypeError) as exception_manager: img[0, 0, 0] # Make sure the right message gets raised: - assert (str(exception_manager.value) == - "Cannot slice image objects; consider using " - "`img.slicer[slice]` to generate a sliced image (see " - "documentation for caveats) or slicing image array data " - "with `img.dataobj[slice]` or `img.get_fdata()[slice]`") + assert ( + str(exception_manager.value) == 'Cannot slice image objects; consider using ' + '`img.slicer[slice]` to generate a sliced image (see ' + 'documentation for caveats) or slicing image array data ' + 'with `img.dataobj[slice]` or `img.get_fdata()[slice]`' + ) if not spatial_axes_first(img): with pytest.raises(ValueError): @@ -425,14 +425,15 @@ def test_slicer(self): spatial_zooms = img.header.get_zooms()[:3] # Down-sample with [::2, ::2, ::2] along spatial dimensions - sliceobj = [slice(None, None, 2)] * 3 + \ - [slice(None)] * (len(dshape) - 3) + sliceobj = [slice(None, None, 2)] * 3 + [slice(None)] * (len(dshape) - 3) downsampled_img = img.slicer[tuple(sliceobj)] assert (downsampled_img.header.get_zooms()[:3] == np.array(spatial_zooms) * 2).all() - max4d = (hasattr(img.header, '_structarr') and - 'dims' in img.header._structarr.dtype.fields and - img.header._structarr['dims'].shape == (4,)) + max4d = ( + hasattr(img.header, '_structarr') + and 'dims' 
in img.header._structarr.dtype.fields + and img.header._structarr['dims'].shape == (4,) + ) # Check newaxis and single-slice errors with pytest.raises(IndexError): img.slicer[None] @@ -453,8 +454,7 @@ def test_slicer(self): img.slicer[:, :, :, None] else: # Reorder non-spatial axes - assert (img.slicer[:, :, :, None].shape - == img.shape[:3] + (1,) + img.shape[3:]) + assert img.slicer[:, :, :, None].shape == img.shape[:3] + (1,) + img.shape[3:] # 4D to 3D using ellipsis or slices assert img.slicer[..., 0].shape == img.shape[:-1] assert img.slicer[:, :, :, 0].shape == img.shape[:-1] @@ -510,8 +510,23 @@ def test_slicer(self): img.slicer[[0], [-1]] # Check data is consistent with slicing numpy arrays - slice_elems = np.array((None, Ellipsis, 0, 1, -1, [0], [1], [-1], - slice(None), slice(1), slice(-1), slice(1, -1)), dtype=object) + slice_elems = np.array( + ( + None, + Ellipsis, + 0, + 1, + -1, + [0], + [1], + [-1], + slice(None), + slice(1), + slice(-1), + slice(1, -1), + ), + dtype=object, + ) for n_elems in range(6): for _ in range(1 if n_elems == 0 else 10): sliceobj = tuple(np.random.choice(slice_elems, n_elems)) @@ -529,12 +544,13 @@ def test_slicer(self): class MmapImageMixin: - """ Mixin for testing images that may return memory maps """ + """Mixin for testing images that may return memory maps""" + #: whether to test mode of returned memory map check_mmap_mode = True def get_disk_image(self): - """ Return image, image filename, and flag for required scaling + """Return image, image filename, and flag for required scaling Subclasses can do anything to return an image, including loading a pre-existing image from disk. @@ -563,19 +579,22 @@ def test_load_mmap(self): with InTemporaryDirectory(): img, fname, has_scaling = self.get_disk_image() file_map = img.file_map.copy() - for func, param1 in ((img_klass.from_filename, fname), - (img_klass.load, fname), - (top_load, fname), - (img_klass.from_file_map, file_map)): + for func, param1 in ( + (img_klass.from_filename, fname), + (img_klass.load, fname), + (top_load, fname), + (img_klass.from_file_map, file_map), + ): for mmap, expected_mode in ( - # mmap value, expected memmap mode - # mmap=None -> no mmap value - # expected mode=None -> no memmap returned - (None, 'c'), - (True, 'c'), - ('c', 'c'), - ('r', 'r'), - (False, None)): + # mmap value, expected memmap mode + # mmap=None -> no mmap value + # expected mode=None -> no memmap returned + (None, 'c'), + (True, 'c'), + ('c', 'c'), + ('r', 'r'), + (False, None), + ): # If the image has scaling, then numpy 1.12 will not return # a memmap, regardless of the input flags. Previous # numpies returned a memmap object, even though the array @@ -589,7 +608,9 @@ def test_load_mmap(self): back_img = func(param1, **kwargs) back_data = np.asanyarray(back_img.dataobj) if expected_mode is None: - assert not isinstance(back_data, np.memmap), f'Should not be a {img_klass.__name__}' + assert not isinstance( + back_data, np.memmap + ), f'Should not be a {img_klass.__name__}' else: assert isinstance(back_data, np.memmap), f'Not a {img_klass.__name__}' if self.check_mmap_mode: diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py index 582f6b70bd..9881a23d07 100644 --- a/nibabel/tests/test_spm2analyze.py +++ b/nibabel/tests/test_spm2analyze.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Tests for SPM2 header stuff """ +"""Tests for SPM2 header stuff""" import numpy as np @@ -19,6 +19,7 @@ from . import test_spm99analyze + class TestSpm2AnalyzeHeader(test_spm99analyze.TestSpm99AnalyzeHeader): header_class = Spm2AnalyzeHeader @@ -26,20 +27,21 @@ def test_slope_inter(self): hdr = self.header_class() assert hdr.get_slope_inter() == (1.0, 0.0) for in_tup, exp_err, out_tup, raw_slope in ( - ((2.0,), None, (2.0, 0.), 2.), - ((None,), None, (None, None), np.nan), - ((1.0, None), None, (1.0, 0.), 1.), - # non zero intercept causes error - ((None, 1.1), HeaderTypeError, (None, None), np.nan), - ((2.0, 1.1), HeaderTypeError, (None, None), 2.), - # null scalings - ((0.0, None), HeaderDataError, (None, None), 0.), - ((np.nan, np.nan), None, (None, None), np.nan), - ((np.nan, None), None, (None, None), np.nan), - ((None, np.nan), None, (None, None), np.nan), - ((np.inf, None), HeaderDataError, (None, None), np.inf), - ((-np.inf, None), HeaderDataError, (None, None), -np.inf), - ((None, 0.0), None, (None, None), np.nan)): + ((2.0,), None, (2.0, 0.0), 2.0), + ((None,), None, (None, None), np.nan), + ((1.0, None), None, (1.0, 0.0), 1.0), + # non zero intercept causes error + ((None, 1.1), HeaderTypeError, (None, None), np.nan), + ((2.0, 1.1), HeaderTypeError, (None, None), 2.0), + # null scalings + ((0.0, None), HeaderDataError, (None, None), 0.0), + ((np.nan, np.nan), None, (None, None), np.nan), + ((np.nan, None), None, (None, None), np.nan), + ((None, np.nan), None, (None, None), np.nan), + ((np.inf, None), HeaderDataError, (None, None), np.inf), + ((-np.inf, None), HeaderDataError, (None, None), -np.inf), + ((None, 0.0), None, (None, None), np.nan), + ): hdr = self.header_class() if not exp_err is None: with pytest.raises(exp_err): diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index d2ba898fa6..9d04643d2a 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -17,14 +17,14 @@ import pytest from ..optpkg import optional_package + _, have_scipy, _ = optional_package('scipy') # Decorator to skip tests requiring save / load if scipy not available for mat # files needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available') -from ..spm99analyze import (Spm99AnalyzeHeader, Spm99AnalyzeImage, - HeaderTypeError) +from ..spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage, HeaderTypeError from ..casting import type_info, shared_range from ..volumeutils import apply_read_scaling, _dt_min_max from ..spatialimages import supported_np_types, HeaderDataError @@ -33,7 +33,7 @@ bytesio_round_trip, bytesio_filemap, assert_allclose_safely, - suppress_warnings + suppress_warnings, ) from . 
import test_analyze @@ -48,7 +48,7 @@ class HeaderScalingMixin: - """ Mixin to add scaling tests to header tests + """Mixin to add scaling tests to header tests Needs to be a mixin so nifti tests can use this method without inheriting directly from the SPM header tests @@ -82,8 +82,7 @@ def test_data_scaling(self): assert np.all(data == data_back) -class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, - HeaderScalingMixin): +class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, HeaderScalingMixin): header_class = Spm99AnalyzeHeader def test_empty(self): @@ -109,20 +108,21 @@ def test_slope_inter(self): hdr = self.header_class() assert hdr.get_slope_inter() == (1.0, None) for in_tup, exp_err, out_tup, raw_slope in ( - ((2.0,), None, (2.0, None), 2.), - ((None,), None, (None, None), np.nan), - ((1.0, None), None, (1.0, None), 1.), - # non zero intercept causes error - ((None, 1.1), HeaderTypeError, (None, None), np.nan), - ((2.0, 1.1), HeaderTypeError, (None, None), 2.), - # null scalings - ((0.0, None), HeaderDataError, (None, None), 0.), - ((np.nan, np.nan), None, (None, None), np.nan), - ((np.nan, None), None, (None, None), np.nan), - ((None, np.nan), None, (None, None), np.nan), - ((np.inf, None), HeaderDataError, (None, None), np.inf), - ((-np.inf, None), HeaderDataError, (None, None), -np.inf), - ((None, 0.0), None, (None, None), np.nan)): + ((2.0,), None, (2.0, None), 2.0), + ((None,), None, (None, None), np.nan), + ((1.0, None), None, (1.0, None), 1.0), + # non zero intercept causes error + ((None, 1.1), HeaderTypeError, (None, None), np.nan), + ((2.0, 1.1), HeaderTypeError, (None, None), 2.0), + # null scalings + ((0.0, None), HeaderDataError, (None, None), 0.0), + ((np.nan, np.nan), None, (None, None), np.nan), + ((np.nan, None), None, (None, None), np.nan), + ((None, np.nan), None, (None, None), np.nan), + ((np.inf, None), HeaderDataError, (None, None), np.inf), + ((-np.inf, None), HeaderDataError, (None, None), -np.inf), + ((None, 0.0), None, (None, None), np.nan), + ): hdr = self.header_class() if not exp_err is None: with pytest.raises(exp_err): @@ -146,9 +146,11 @@ def test_origin_checks(self): hdr['origin'][0] = 101 # severity 20 fhdr, message, raiser = self.log_chk(hdr, 20) assert fhdr == hdr - assert (message == 'very large origin values ' - 'relative to dims; leaving as set, ' - 'ignoring for affine') + assert ( + message == 'very large origin values ' + 'relative to dims; leaving as set, ' + 'ignoring for affine' + ) pytest.raises(*raiser) # diagnose binary block dxer = self.header_class.diagnose_binaryblock @@ -229,21 +231,13 @@ def test_header_scaling(self): if not hdr_class.has_data_intercept: return invalid_inters = (np.nan, np.inf, -np.inf) - invalid_pairs = tuple( - itertools.product(invalid_slopes, invalid_inters)) - bad_slopes_good_inter = tuple( - itertools.product(invalid_slopes, (0, 1))) - good_slope_bad_inters = tuple( - itertools.product((1, 2), invalid_inters)) - for slope, inter in (invalid_pairs + bad_slopes_good_inter + - good_slope_bad_inters): + invalid_pairs = tuple(itertools.product(invalid_slopes, invalid_inters)) + bad_slopes_good_inter = tuple(itertools.product(invalid_slopes, (0, 1))) + good_slope_bad_inters = tuple(itertools.product((1, 2), invalid_inters)) + for slope, inter in invalid_pairs + bad_slopes_good_inter + good_slope_bad_inters: self.assert_null_scaling(arr, slope, inter) - def _check_write_scaling(self, - slope, - inter, - effective_slope, - effective_inter): + def _check_write_scaling(self, slope, inter, 
effective_slope, effective_inter): # Test that explicit set of slope / inter forces write of data using # this slope, inter. We use this helper function for children of the # Analyze header @@ -275,16 +269,13 @@ def _check_write_scaling(self, assert_array_equal(img.get_fdata(), arr) # But the array scaled after round trip img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_fdata(), - apply_read_scaling(arr, - effective_slope, - effective_inter)) + assert_array_equal( + img_rt.get_fdata(), apply_read_scaling(arr, effective_slope, effective_inter) + ) # The scaling set into the array proxy do_slope, do_inter = img.header.get_slope_inter() - assert_array_equal(img_rt.dataobj.slope, - 1 if do_slope is None else do_slope) - assert_array_equal(img_rt.dataobj.inter, - 0 if do_inter is None else do_inter) + assert_array_equal(img_rt.dataobj.slope, 1 if do_slope is None else do_slope) + assert_array_equal(img_rt.dataobj.inter, 0 if do_inter is None else do_inter) # The new header scaling has been reset self.assert_scale_me_scaling(img_rt.header) # But the original is the same as it was when we set it @@ -293,20 +284,19 @@ def _check_write_scaling(self, img.header.set_data_dtype(np.uint8) with np.errstate(invalid='ignore'): img_rt = bytesio_round_trip(img) - assert_array_equal(img_rt.get_fdata(), - apply_read_scaling(np.round(arr), - effective_slope, - effective_inter)) + assert_array_equal( + img_rt.get_fdata(), apply_read_scaling(np.round(arr), effective_slope, effective_inter) + ) # But we have to clip too arr[-1, -1, -1] = 256 arr[-2, -1, -1] = -1 with np.errstate(invalid='ignore'): img_rt = bytesio_round_trip(img) exp_unscaled_arr = np.clip(np.round(arr), 0, 255) - assert_array_equal(img_rt.get_fdata(), - apply_read_scaling(exp_unscaled_arr, - effective_slope, - effective_inter)) + assert_array_equal( + img_rt.get_fdata(), + apply_read_scaling(exp_unscaled_arr, effective_slope, effective_inter), + ) def test_int_int_scaling(self): # Check int to int conversion without slope, inter @@ -328,9 +318,7 @@ def test_no_scaling(self): # Any old non-default slope and intercept slope = 2 inter = 10 if hdr.has_data_intercept else 0 - for in_dtype, out_dtype in itertools.product( - FLOAT_TYPES + IUINT_TYPES, - supported_types): + for in_dtype, out_dtype in itertools.product(FLOAT_TYPES + IUINT_TYPES, supported_types): # Need to check complex scaling mn_in, mx_in = _dt_min_max(in_dtype) arr = np.array([mn_in, -1, 0, 1, 10, mx_in], dtype=in_dtype) @@ -354,8 +342,7 @@ def test_no_scaling(self): exp_back = np.round(exp_back) if in_dtype in FLOAT_TYPES: # Clip to shared range of working precision - exp_back = np.clip(exp_back, - *shared_range(float, out_dtype)) + exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) else: # iu input and output type # No scaling, never gets converted to float. # Does get clipped to range of output type @@ -363,9 +350,7 @@ def test_no_scaling(self): if (mn_in, mx_in) != (mn_out, mx_out): # Use smaller of input, output range to avoid np.clip # upcasting the array because of large clip limits. 
- exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) + exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out)) if out_dtype in COMPLEX_TYPES: # always cast to real from complex exp_back = exp_back.astype(out_dtype) @@ -374,8 +359,7 @@ def test_no_scaling(self): exp_back = exp_back.astype(float) # Allow for small differences in large numbers with suppress_warnings(): # invalid value - assert_allclose_safely(back_arr, - exp_back * slope + inter) + assert_allclose_safely(back_arr, exp_back * slope + inter) def test_write_scaling(self): # Check writes with scaling set @@ -414,7 +398,9 @@ class TestSpm99AnalyzeImage(test_analyze.TestAnalyzeImage, ImageScalingMixin): test_header_updating = needs_scipy(test_analyze.TestAnalyzeImage.test_header_updating) test_offset_to_zero = needs_scipy(test_analyze.TestAnalyzeImage.test_offset_to_zero) test_big_offset_exts = needs_scipy(test_analyze.TestAnalyzeImage.test_big_offset_exts) - test_dtype_to_filename_arg = needs_scipy(test_analyze.TestAnalyzeImage.test_dtype_to_filename_arg) + test_dtype_to_filename_arg = needs_scipy( + test_analyze.TestAnalyzeImage.test_dtype_to_filename_arg + ) test_header_scaling = needs_scipy(ImageScalingMixin.test_header_scaling) test_int_int_scaling = needs_scipy(ImageScalingMixin.test_int_int_scaling) test_write_scaling = needs_scipy(ImageScalingMixin.test_write_scaling) @@ -441,6 +427,7 @@ def test_mat_read(self): # the saved mat file mat_fileobj = img.file_map['mat'].fileobj from scipy.io import loadmat, savemat + mat_fileobj.seek(0) mats = loadmat(mat_fileobj) assert 'M' in mats and 'mat' in mats @@ -458,21 +445,18 @@ def test_mat_read(self): flipper = np.diag([-1, 1, 1, 1]) assert_array_equal(mats['M'], np.dot(aff, np.dot(flipper, from_111))) mat_fileobj.seek(0) - savemat(mat_fileobj, - dict(M=np.diag([3, 4, 5, 1]), mat=np.diag([6, 7, 8, 1]))) + savemat(mat_fileobj, dict(M=np.diag([3, 4, 5, 1]), mat=np.diag([6, 7, 8, 1]))) # Check we are preferring the 'mat' matrix r_img = img_klass.from_file_map(fm) assert_array_equal(r_img.get_fdata(), arr) - assert_array_equal(r_img.affine, - np.dot(np.diag([6, 7, 8, 1]), to_111)) + assert_array_equal(r_img.affine, np.dot(np.diag([6, 7, 8, 1]), to_111)) # But will use M if present mat_fileobj.seek(0) mat_fileobj.truncate(0) savemat(mat_fileobj, dict(M=np.diag([3, 4, 5, 1]))) r_img = img_klass.from_file_map(fm) assert_array_equal(r_img.get_fdata(), arr) - assert_array_equal(r_img.affine, - np.dot(np.diag([3, 4, 5, 1]), np.dot(flipper, to_111))) + assert_array_equal(r_img.affine, np.dot(np.diag([3, 4, 5, 1]), np.dot(flipper, to_111))) def test_none_affine(self): # Allow for possibility of no affine resulting in nothing written into @@ -499,29 +483,41 @@ def test_origin_affine(): assert hdr.default_x_flip assert_array_almost_equal( hdr.get_origin_affine(), # from center of image - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr['origin'][:3] = [3, 4, 5] assert_array_almost_equal( hdr.get_origin_affine(), # using origin - [[-3., 0., 0., 6.], - [0., 2., 0., -6.], - [0., 0., 1., -4.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 6.0], + [0.0, 2.0, 0.0, -6.0], + [0.0, 0.0, 1.0, -4.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr['origin'] = 0 # unset origin hdr.set_data_shape((3, 5)) assert_array_almost_equal( hdr.get_origin_affine(), - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -0.], - [0., 0., 0., 1.]]) + 
[ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) hdr.set_data_shape((3, 5, 7)) assert_array_almost_equal( hdr.get_origin_affine(), # from center of image - [[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ], + ) diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py index 6b7a25ceb2..11a46bafdb 100644 --- a/nibabel/tests/test_testing.py +++ b/nibabel/tests/test_testing.py @@ -1,4 +1,4 @@ -""" Tests for warnings context managers +"""Tests for warnings context managers """ import sys @@ -7,11 +7,19 @@ import numpy as np -from ..testing import (error_warnings, suppress_warnings, - clear_and_catch_warnings, assert_allclose_safely, - get_fresh_mod, assert_re_in, test_data, data_path) +from ..testing import ( + error_warnings, + suppress_warnings, + clear_and_catch_warnings, + assert_allclose_safely, + get_fresh_mod, + assert_re_in, + test_data, + data_path, +) import pytest + def test_assert_allclose_safely(): # Test the safe version of allclose assert_allclose_safely([1, 1], [1, 1]) @@ -114,6 +122,7 @@ def test_warn_error(): def f(): with error_warnings(): raise ValueError('An error') + with pytest.raises(ValueError): f() @@ -133,33 +142,39 @@ def test_warn_ignore(): def f(): with suppress_warnings(): raise ValueError('An error') + with pytest.raises(ValueError): f() -@pytest.mark.parametrize("regex, entries", [ - [".*", ""], - [".*", ["any"]], - ["ab", "abc"], - # Sufficient to have one entry matching - ["ab", ["", "abc", "laskdjf"]], - # Tuples should be ok too - ["ab", ("", "abc", "laskdjf")], - # Should do match not search - pytest.param("ab", "cab", marks=pytest.mark.xfail), - pytest.param("ab$", "abc", marks=pytest.mark.xfail), - pytest.param("ab$", ["ddd", ""], marks=pytest.mark.xfail), - pytest.param("ab$", ("ddd", ""), marks=pytest.mark.xfail), - #Shouldn't "match" the empty list - pytest.param("", [], marks=pytest.mark.xfail) -]) + +@pytest.mark.parametrize( + 'regex, entries', + [ + ['.*', ''], + ['.*', ['any']], + ['ab', 'abc'], + # Sufficient to have one entry matching + ['ab', ['', 'abc', 'laskdjf']], + # Tuples should be ok too + ['ab', ('', 'abc', 'laskdjf')], + # Should do match not search + pytest.param('ab', 'cab', marks=pytest.mark.xfail), + pytest.param('ab$', 'abc', marks=pytest.mark.xfail), + pytest.param('ab$', ['ddd', ''], marks=pytest.mark.xfail), + pytest.param('ab$', ('ddd', ''), marks=pytest.mark.xfail), + # Shouldn't "match" the empty list + pytest.param('', [], marks=pytest.mark.xfail), + ], +) def test_assert_re_in(regex, entries): assert_re_in(regex, entries) def test_test_data(): assert test_data() == data_path - assert test_data() == os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', 'tests', 'data')) + assert test_data() == os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', 'tests', 'data') + ) for subdir in ('nicom', 'gifti', 'externals'): assert test_data(subdir) == os.path.join(data_path[:-10], subdir, 'tests', 'data') assert os.path.exists(test_data(subdir)) @@ -171,8 +186,10 @@ def test_test_data(): assert not os.path.exists(test_data(None, 'doesnotexist')) - for subdir, fname in [('gifti', 'ascii.gii'), - ('nicom', '0.dcm'), - ('externals', 'example_1.nc'), - (None, 'empty.tck')]: + for subdir, fname in [ + ('gifti', 'ascii.gii'), + ('nicom', '0.dcm'), + ('externals', 'example_1.nc'), + (None, 'empty.tck'), + 
]: assert os.path.exists(test_data(subdir, fname)) diff --git a/nibabel/tests/test_tmpdirs.py b/nibabel/tests/test_tmpdirs.py index c4d119b14f..2c0c5199ce 100644 --- a/nibabel/tests/test_tmpdirs.py +++ b/nibabel/tests/test_tmpdirs.py @@ -1,4 +1,4 @@ -""" Test tmpdirs module """ +"""Test tmpdirs module""" from os import getcwd from os.path import realpath, abspath, dirname, isfile diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py index 2ec3e06182..0efddbe8bb 100644 --- a/nibabel/tests/test_tripwire.py +++ b/nibabel/tests/test_tripwire.py @@ -1,10 +1,11 @@ -""" Testing tripwire module +"""Testing tripwire module """ from ..tripwire import TripWire, is_tripwire, TripWireError import pytest + def test_is_tripwire(): assert not is_tripwire(object()) assert is_tripwire(TripWire('some message')) @@ -21,4 +22,4 @@ def test_tripwire(): except TripWireError as err: assert isinstance(err, AttributeError) else: - raise RuntimeError("No error raised, but expected") + raise RuntimeError('No error raised, but expected') diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py index fd1109eaff..04e616fedd 100644 --- a/nibabel/tests/test_viewers.py +++ b/nibabel/tests/test_viewers.py @@ -33,9 +33,9 @@ def test_viewer(): # Test viewer plt = optional_package('matplotlib.pyplot')[0] a = np.sin(np.linspace(0, np.pi, 20)) - b = np.sin(np.linspace(0, np.pi*5, 30)) + b = np.sin(np.linspace(0, np.pi * 5, 30)) data = (np.outer(a, b)[..., np.newaxis] * a)[:, :, :, np.newaxis] - data = data * np.array([1., 2.]) # give it a # of volumes > 1 + data = data * np.array([1.0, 2.0]) # give it a # of volumes > 1 v = OrthoSlicer3D(data) assert_array_equal(v.position, (0, 0, 0)) assert 'OrthoSlicer3D' in repr(v) @@ -54,7 +54,7 @@ def test_viewer(): v.cmap = 'hot' v.clim = (0, 3) with pytest.raises(ValueError): - OrthoSlicer3D.clim.fset(v, (0.,)) # bad limits + OrthoSlicer3D.clim.fset(v, (0.0,)) # bad limits with pytest.raises( ( ValueError, # MPL3.5 and lower @@ -90,8 +90,7 @@ def test_viewer(): fig, axes = plt.subplots(1, 4) plt.close(fig) v1 = OrthoSlicer3D(data, axes=axes) - aff = np.array([[0, 1, 0, 3], [-1, 0, 0, 2], [0, 0, 2, 1], [0, 0, 0, 1]], - float) + aff = np.array([[0, 1, 0, 3], [-1, 0, 0, 2], [0, 0, 2, 1], [0, 0, 0, 1]], float) v2 = OrthoSlicer3D(data, affine=aff, axes=axes[:3]) # bad data (not 3+ dim) with pytest.raises(ValueError): diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index 3e6ba1bab4..c2104b5b59 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test for volumeutils module """ +"""Test for volumeutils module""" import os from os.path import exists @@ -26,38 +26,42 @@ from ..tmpdirs import InTemporaryDirectory from ..openers import ImageOpener -from ..volumeutils import (array_from_file, - _is_compressed_fobj, - array_to_file, - fname_ext_ul_case, - write_zeros, - seek_tell, - apply_read_scaling, - working_type, - best_write_scale_ftype, - better_float_of, - int_scinter_ftype, - make_dt_codes, - native_code, - shape_zoom_affine, - rec2dict, - _dt_min_max, - _write_data, - _ftype4scaled_finite, - ) +from ..volumeutils import ( + array_from_file, + _is_compressed_fobj, + array_to_file, + fname_ext_ul_case, + write_zeros, + seek_tell, + apply_read_scaling, + working_type, + best_write_scale_ftype, + better_float_of, + int_scinter_ftype, + make_dt_codes, + native_code, + shape_zoom_affine, + rec2dict, + _dt_min_max, + _write_data, + _ftype4scaled_finite, +) from ..openers import Opener, BZ2File -from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range) +from ..casting import floor_log2, type_info, OK_FLOATS, shared_range from ..optpkg import optional_package -from numpy.testing import (assert_array_almost_equal, - assert_array_equal) +from numpy.testing import assert_array_almost_equal, assert_array_equal import pytest -from nibabel.testing import (assert_dt_equal, assert_allclose_safely, - suppress_warnings, error_warnings) +from nibabel.testing import ( + assert_dt_equal, + assert_allclose_safely, + suppress_warnings, + error_warnings, +) -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') #: convenience variables for numpy types FLOAT_TYPES = np.sctypes['float'] @@ -73,9 +77,7 @@ def test__is_compressed_fobj(): # _is_compressed helper function with InTemporaryDirectory(): - file_openers = [('', open, False), - ('.gz', gzip.open, True), - ('.bz2', BZ2File, True)] + file_openers = [('', open, False), ('.gz', gzip.open, True), ('.bz2', BZ2File, True)] if HAVE_ZSTD: file_openers += [('.zst', pyzstd.ZstdFile, True)] for ext, opener, compressed in file_openers: @@ -102,9 +104,7 @@ def make_array(n, bytes): openers = [open, gzip.open, BZ2File] if HAVE_ZSTD: openers += [pyzstd.ZstdFile] - for n, opener in itertools.product( - (256, 1024, 2560, 25600), - openers): + for n, opener in itertools.product((256, 1024, 2560, 25600), openers): in_arr = np.arange(n, dtype=dtype) # Write array to file fobj_w = opener(fname, 'wb') @@ -218,18 +218,14 @@ def test_array_from_file_mmap(): def buf_chk(in_arr, out_buf, in_buf, offset): - """ Write contents of in_arr into fileobj, read back, check same """ + """Write contents of in_arr into fileobj, read back, check same""" instr = b' ' * offset + in_arr.tobytes(order='F') out_buf.write(instr) out_buf.flush() if in_buf is None: # we're using in_buf from out_buf out_buf.seek(0) in_buf = out_buf - arr = array_from_file( - in_arr.shape, - in_arr.dtype, - in_buf, - offset) + arr = array_from_file(in_arr.shape, in_arr.dtype, in_buf, offset) return np.allclose(in_arr, arr) @@ -242,8 +238,7 @@ def test_array_from_file_openers(): extensions = ['', '.gz', '.bz2'] if HAVE_ZSTD: extensions += ['.zst'] - for ext, offset in itertools.product(extensions, - (0, 5, 10)): + for ext, offset in itertools.product(extensions, (0, 5, 10)): fname = 'test.bin' + ext with Opener(fname, 'wb') as out_buf: if offset != 0: # avoid https://bugs.python.org/issue16828 @@ -267,10 +262,8 @@ def 
test_array_from_file_reread(): if HAVE_ZSTD: openers += [pyzstd.ZstdFile] for shape, opener, dtt, order in itertools.product( - ((64,), (64, 65), (64, 65, 66)), - openers, - (np.int16, np.float32), - ('F', 'C')): + ((64,), (64, 65), (64, 65, 66)), openers, (np.int16, np.float32), ('F', 'C') + ): n_els = np.prod(shape) in_arr = np.arange(n_els, dtype=dtt).reshape(shape) is_bio = hasattr(opener, 'getvalue') @@ -308,8 +301,7 @@ def test_array_to_file(): ndt = dt.newbyteorder(code) for allow_intercept in (True, False): scale, intercept, mn, mx = _calculate_scale(arr, ndt, allow_intercept) - data_back = write_return(arr, str_io, ndt, - 0, intercept, scale) + data_back = write_return(arr, str_io, ndt, 0, intercept, scale) assert_array_almost_equal(arr, data_back) # Test array-like str_io = BytesIO() @@ -340,8 +332,9 @@ def test_a2f_upscale(): inter = info['min'] str_io = BytesIO() # We need to provide mn, mx for function to be able to calculate upcasting - array_to_file(arr, str_io, np.uint8, intercept=inter, divslope=slope, - mn=info['min'], mx=info['max']) + array_to_file( + arr, str_io, np.uint8, intercept=inter, divslope=slope, mn=info['min'], mx=info['max'] + ) raw = array_from_file(arr.shape, np.uint8, str_io) back = apply_read_scaling(raw, slope, inter) top = back - arr @@ -429,13 +422,11 @@ def test_a2f_nan2zero_scaling(): # Array values including zero before scaling but not after bio = BytesIO() for in_dt, out_dt, zero_in, inter in itertools.product( - FLOAT_TYPES, - IUINT_TYPES, - (True, False), - (0, -100)): + FLOAT_TYPES, IUINT_TYPES, (True, False), (0, -100) + ): in_info = np.finfo(in_dt) out_info = np.iinfo(out_dt) - mx = min(in_info.max, out_info.max * 2., 2**32) + inter + mx = min(in_info.max, out_info.max * 2.0, 2**32) + inter mn = 0 if zero_in or inter else 100 vals = [np.nan] + [mn, mx] nan_arr = np.array(vals, dtype=in_dt) @@ -499,15 +490,21 @@ def test_a2f_big_scalers(): # We need nan2zero=False because we can't represent 0 in the input, given # the scaling and the output range. with suppress_warnings(): # overflow - array_to_file(arr, str_io, np.int8, intercept=np.float32(2**120), - nan2zero=False) + array_to_file(arr, str_io, np.int8, intercept=np.float32(2**120), nan2zero=False) data_back = array_from_file(arr.shape, np.int8, str_io) assert_array_equal(data_back, [-128, -128, 127]) # Scales also if mx, mn specified? Same notes and complaints as for the test # above. str_io.seek(0) - array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'], - intercept=np.float32(2**120), nan2zero=False) + array_to_file( + arr, + str_io, + np.int8, + mn=info['min'], + mx=info['max'], + intercept=np.float32(2**120), + nan2zero=False, + ) data_back = array_from_file(arr.shape, np.int8, str_io) assert_array_equal(data_back, [-128, -128, 127]) # And if slope causes overflow? @@ -518,8 +515,7 @@ def test_a2f_big_scalers(): assert_array_equal(data_back, [-128, 0, 127]) # with mn, mx specified? 
str_io.seek(0) - array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'], - divslope=np.float32(0.5)) + array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'], divslope=np.float32(0.5)) data_back = array_from_file(arr.shape, np.int8, str_io) assert_array_equal(data_back, [-128, 0, 127]) @@ -529,13 +525,13 @@ def test_a2f_int_scaling(): arr = np.array([0, 1, 128, 255], dtype=np.uint8) fobj = BytesIO() back_arr = write_return(arr, fobj, np.uint8, intercept=1) - assert_array_equal(back_arr, np.clip(arr - 1., 0, 255)) + assert_array_equal(back_arr, np.clip(arr - 1.0, 0, 255)) back_arr = write_return(arr, fobj, np.uint8, divslope=2) - assert_array_equal(back_arr, np.round(np.clip(arr / 2., 0, 255))) + assert_array_equal(back_arr, np.round(np.clip(arr / 2.0, 0, 255))) back_arr = write_return(arr, fobj, np.uint8, intercept=1, divslope=2) - assert_array_equal(back_arr, np.round(np.clip((arr - 1.) / 2., 0, 255))) + assert_array_equal(back_arr, np.round(np.clip((arr - 1.0) / 2.0, 0, 255))) back_arr = write_return(arr, fobj, np.int16, intercept=1, divslope=2) - assert_array_equal(back_arr, np.round((arr - 1.) / 2.)) + assert_array_equal(back_arr, np.round((arr - 1.0) / 2.0)) def test_a2f_scaled_unscaled(): @@ -543,10 +539,8 @@ def test_a2f_scaled_unscaled(): # without scaling fobj = BytesIO() for in_dtype, out_dtype, intercept, divslope in itertools.product( - NUMERIC_TYPES, - NUMERIC_TYPES, - (0, 0.5, -1, 1), - (1, 0.5, 2)): + NUMERIC_TYPES, NUMERIC_TYPES, (0, 0.5, -1, 1), (1, 0.5, 2) + ): mn_in, mx_in = _dt_min_max(in_dtype) nan_val = np.nan if in_dtype in CFLOAT_TYPES else 10 arr = np.array([mn_in, -1, 0, 1, mx_in, nan_val], dtype=in_dtype) @@ -556,31 +550,28 @@ def test_a2f_scaled_unscaled(): if out_dtype in IUINT_TYPES: nan_fill = np.round(nan_fill) # nan2zero will check whether 0 in scaled to a valid value in output - if (in_dtype in CFLOAT_TYPES and not mn_out <= nan_fill <= mx_out): + if in_dtype in CFLOAT_TYPES and not mn_out <= nan_fill <= mx_out: with pytest.raises(ValueError): - array_to_file(arr, - fobj, - out_dtype=out_dtype, - divslope=divslope, - intercept=intercept) + array_to_file( + arr, fobj, out_dtype=out_dtype, divslope=divslope, intercept=intercept + ) continue with suppress_warnings(): - back_arr = write_return(arr, fobj, - out_dtype=out_dtype, - divslope=divslope, - intercept=intercept) + back_arr = write_return( + arr, fobj, out_dtype=out_dtype, divslope=divslope, intercept=intercept + ) exp_back = arr.copy() - if (in_dtype in IUINT_TYPES and - out_dtype in IUINT_TYPES and - (intercept, divslope) == (0, 1)): + if ( + in_dtype in IUINT_TYPES + and out_dtype in IUINT_TYPES + and (intercept, divslope) == (0, 1) + ): # Direct iu to iu casting. # Need to clip if ranges not the same. # Use smaller of input, output range to avoid np.clip upcasting # the array because of large clip limits. 
if (mn_in, mx_in) != (mn_out, mx_out): - exp_back = np.clip(exp_back, - max(mn_in, mn_out), - min(mx_in, mx_out)) + exp_back = np.clip(exp_back, max(mn_in, mn_out), min(mx_in, mx_out)) else: # Need to deal with nans, casting to float, clipping if in_dtype in CFLOAT_TYPES and out_dtype in IUINT_TYPES: exp_back[np.isnan(exp_back)] = 0 @@ -590,8 +581,7 @@ def test_a2f_scaled_unscaled(): exp_back -= intercept if divslope != 1: exp_back /= divslope - if (exp_back.dtype.type in CFLOAT_TYPES and - out_dtype in IUINT_TYPES): + if exp_back.dtype.type in CFLOAT_TYPES and out_dtype in IUINT_TYPES: exp_back = np.round(exp_back).astype(float) exp_back = np.clip(exp_back, *shared_range(float, out_dtype)) exp_back = exp_back.astype(out_dtype) @@ -611,40 +601,32 @@ def test_a2f_nanpos(): def test_a2f_bad_scaling(): # Test that pathological scalers raise an error - NUMERICAL_TYPES = sum([np.sctypes[key] for key in ['int', - 'uint', - 'float', - 'complex']], - []) + NUMERICAL_TYPES = sum([np.sctypes[key] for key in ['int', 'uint', 'float', 'complex']], []) for in_type, out_type, slope, inter in itertools.product( - NUMERICAL_TYPES, - NUMERICAL_TYPES, - (None, 1, 0, np.nan, -np.inf, np.inf), - (0, np.nan, -np.inf, np.inf)): + NUMERICAL_TYPES, + NUMERICAL_TYPES, + (None, 1, 0, np.nan, -np.inf, np.inf), + (0, np.nan, -np.inf, np.inf), + ): arr = np.ones((2,), dtype=in_type) fobj = BytesIO() cm = error_warnings() - if (np.issubdtype(in_type, np.complexfloating) and - not np.issubdtype(out_type, np.complexfloating)): + if np.issubdtype(in_type, np.complexfloating) and not np.issubdtype( + out_type, np.complexfloating + ): cm = pytest.warns(np.ComplexWarning) if (slope, inter) == (1, 0): with cm: - assert_array_equal(arr, - write_return(arr, fobj, out_type, - intercept=inter, - divslope=slope)) + assert_array_equal( + arr, write_return(arr, fobj, out_type, intercept=inter, divslope=slope) + ) elif (slope, inter) == (None, 0): - assert_array_equal(0, - write_return(arr, fobj, out_type, - intercept=inter, - divslope=slope)) + assert_array_equal( + 0, write_return(arr, fobj, out_type, intercept=inter, divslope=slope) + ) else: with pytest.raises(ValueError): - array_to_file(arr, - fobj, - np.int8, - intercept=inter, - divslope=slope) + array_to_file(arr, fobj, np.int8, intercept=inter, divslope=slope) def test_a2f_nan2zero_range(): @@ -664,8 +646,9 @@ def test_a2f_nan2zero_range(): # Pushing zero outside the output data range does not generate error back_arr = write_return(arr_no_nan, fobj, np.int8, intercept=129, nan2zero=True) assert_array_equal([-128, -128, -128, -127], back_arr) - back_arr = write_return(arr_no_nan, fobj, np.int8, - intercept=257.1, divslope=2, nan2zero=True) + back_arr = write_return( + arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=True + ) assert_array_equal([-128, -128, -128, -128], back_arr) for dt in CFLOAT_TYPES: arr = np.array([-1, 0, 1, np.nan], dtype=dt) @@ -678,12 +661,10 @@ def test_a2f_nan2zero_range(): # No errors from explicit thresholding # mn thresholding excluding zero with pytest.warns(complex_warn) if complex_warn else error_warnings(): - assert_array_equal([1, 1, 1, 0], - write_return(arr, fobj, np.int8, mn=1)) + assert_array_equal([1, 1, 1, 0], write_return(arr, fobj, np.int8, mn=1)) # mx thresholding excluding zero with pytest.warns(complex_warn) if complex_warn else error_warnings(): - assert_array_equal([-1, -1, -1, 0], - write_return(arr, fobj, np.int8, mx=-1)) + assert_array_equal([-1, -1, -1, 0], write_return(arr, fobj, np.int8, mx=-1)) # Errors from 
datatype threshold after scaling with pytest.warns(complex_warn) if complex_warn else error_warnings(): back_arr = write_return(arr, fobj, np.int8, intercept=128) @@ -708,8 +689,9 @@ def test_a2f_nan2zero_range(): write_return(arr_no_nan, fobj, np.int8, intercept=257.1, divslope=2) # OK with nan2zero false with pytest.warns(c_and_n_warn) if c_and_n_warn else error_warnings(): - back_arr = write_return(arr, fobj, np.int8, - intercept=257.1, divslope=2, nan2zero=False) + back_arr = write_return( + arr, fobj, np.int8, intercept=257.1, divslope=2, nan2zero=False + ) assert_array_equal([-128, -128, -128, nan_cast], back_arr) @@ -769,7 +751,7 @@ def test_apply_scaling(): assert (i16_arr * big).dtype == np.float32 # An equivalent case is a little hard to find for the intercept nmant_32 = type_info(np.float32)['nmant'] - big_delta = np.float32(2**(floor_log2(big) - nmant_32)) + big_delta = np.float32(2 ** (floor_log2(big) - nmant_32)) assert (i16_arr * big_delta + big).dtype == np.float32 # Upcasting does occur with this routine assert apply_read_scaling(i16_arr, big).dtype == np.float64 @@ -783,10 +765,8 @@ def test_apply_scaling(): assert apply_read_scaling(np.int8(0), f32(-1e38), f32(0.0)).dtype == np.float64 # Non-zero intercept still generates floats assert_dt_equal(apply_read_scaling(i16_arr, 1.0, 1.0).dtype, float) - assert_dt_equal(apply_read_scaling( - np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, float) - assert_dt_equal(apply_read_scaling( - np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, float) + assert_dt_equal(apply_read_scaling(np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, float) + assert_dt_equal(apply_read_scaling(np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, float) def test_apply_read_scaling_ints(): @@ -799,7 +779,7 @@ def test_apply_read_scaling_ints(): def test_apply_read_scaling_nones(): # Check that we can pass None as slope and inter to apply read scaling - arr=np.arange(10, dtype=np.int16) + arr = np.arange(10, dtype=np.int16) assert_array_equal(apply_read_scaling(arr, None, None), arr) assert_array_equal(apply_read_scaling(arr, 2, None), arr * 2) assert_array_equal(apply_read_scaling(arr, None, 1), arr + 1) @@ -819,6 +799,7 @@ def test_working_type(): # need this because of the very confusing np.int32 != np.intp (on 32 bit). 
def wt(*args, **kwargs): return np.dtype(working_type(*args, **kwargs)).str + d1 = np.atleast_1d for in_type in NUMERIC_TYPES: in_ts = np.dtype(in_type).str @@ -851,6 +832,7 @@ def test_better_float(): # Better float function def check_against(f1, f2): return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2 + for first in FLOAT_TYPES: for other in IUINT_TYPES + np.sctypes['complex']: assert better_float_of(first, other) == first @@ -884,7 +866,7 @@ def test_best_write_scale_ftype(): L_info = type_info(lower_t) t_max = L_info['max'] nmant = L_info['nmant'] # number of significand digits - big_delta = lower_t(2**(floor_log2(t_max) - nmant)) # delta below max + big_delta = lower_t(2 ** (floor_log2(t_max) - nmant)) # delta below max # Even large values that don't overflow don't change output arr = np.array([0, t_max], dtype=lower_t) assert best_write_scale_ftype(arr, 1, 0) == lower_t @@ -995,9 +977,9 @@ def test_seek_tell_logic(): assert bio.tell() == 10 class BabyBio(BytesIO): - def seek(self, *args): raise OSError() + bio = BabyBio() # Fresh fileobj, position 0, can't seek - error with pytest.raises(OSError): @@ -1044,22 +1026,19 @@ def test_shape_zoom_affine(): shape = (3, 5, 7) zooms = (3, 2, 1) res = shape_zoom_affine(shape, zooms) - exp = np.array([[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + exp = np.array( + [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + ) assert_array_almost_equal(res, exp) res = shape_zoom_affine((3, 5), (3, 2)) - exp = np.array([[-3., 0., 0., 3.], - [0., 2., 0., -4.], - [0., 0., 1., -0.], - [0., 0., 0., 1.]]) + exp = np.array( + [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -0.0], [0.0, 0.0, 0.0, 1.0]] + ) assert_array_almost_equal(res, exp) res = shape_zoom_affine(shape, zooms, False) - exp = np.array([[3., 0., 0., -3.], - [0., 2., 0., -4.], - [0., 0., 1., -3.], - [0., 0., 0., 1.]]) + exp = np.array( + [[3.0, 0.0, 0.0, -3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + ) assert_array_almost_equal(res, exp) @@ -1096,12 +1075,10 @@ def test_dtypes(): dtr = make_dt_codes(dt_defs) assert dtr[np.dtype('f4').newbyteorder('S')] == 16 assert dtr.value_set() == set((16,)) - assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', - 'sw_dtype') + assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', 'sw_dtype') assert dtr.niistring[16] == 'ASTRING' # And that unequal elements raises error - dt_defs = ((16, 'float32', np.float32, 'ASTRING'), - (16, 'float32', np.float32)) + dt_defs = ((16, 'float32', np.float32, 'ASTRING'), (16, 'float32', np.float32)) with pytest.raises(ValueError): make_dt_codes(dt_defs) # And that 2 or 5 elements raises error @@ -1117,16 +1094,18 @@ def test__write_data(): # Test private utility function for writing data itp = itertools.product - def assert_rt(data, - shape, - out_dtype, - order='F', - in_cast=None, - pre_clips=None, - inter=0., - slope=1., - post_clips=None, - nan_fill=None): + def assert_rt( + data, + shape, + out_dtype, + order='F', + in_cast=None, + pre_clips=None, + inter=0.0, + slope=1.0, + post_clips=None, + nan_fill=None, + ): sio = BytesIO() to_write = data.reshape(shape) # to check that we didn't modify in-place @@ -1134,11 +1113,11 @@ def assert_rt(data, nan_positions = np.isnan(to_write) have_nans = np.any(nan_positions) if have_nans and nan_fill is None and not out_dtype.type == 'f': - raise ValueError("Cannot handle this case") - _write_data(to_write, sio, 
out_dtype, order, in_cast, pre_clips, inter, - slope, post_clips, nan_fill) - arr = np.ndarray(shape, out_dtype, buffer=sio.getvalue(), - order=order) + raise ValueError('Cannot handle this case') + _write_data( + to_write, sio, out_dtype, order, in_cast, pre_clips, inter, slope, post_clips, nan_fill + ) + arr = np.ndarray(shape, out_dtype, buffer=sio.getvalue(), order=order) expected = to_write.copy() if have_nans and not nan_fill is None: expected[nan_positions] = nan_fill * slope + inter @@ -1147,37 +1126,51 @@ def assert_rt(data, # check shape writing for shape, order in itp( - ((24,), (24, 1), (24, 1, 1), (1, 24), (1, 1, 24), (2, 3, 4), - (6, 1, 4), (1, 6, 4), (6, 4, 1)), - 'FC'): + ( + (24,), + (24, 1), + (24, 1, 1), + (1, 24), + (1, 1, 24), + (2, 3, 4), + (6, 1, 4), + (1, 6, 4), + (6, 4, 1), + ), + 'FC', + ): assert_rt(np.arange(24), shape, np.int16, order=order) # check defense against modifying data in-place for in_cast, pre_clips, inter, slope, post_clips, nan_fill in itp( - (None, np.float32), - (None, (-1, 25)), - (0., 1.), - (1., 0.5), - (None, (-2, 49)), - (None, 1)): + (None, np.float32), (None, (-1, 25)), (0.0, 1.0), (1.0, 0.5), (None, (-2, 49)), (None, 1) + ): data = np.arange(24).astype(np.float32) - assert_rt(data, shape, np.int16, - in_cast=in_cast, - pre_clips=pre_clips, - inter=inter, - slope=slope, - post_clips=post_clips, - nan_fill=nan_fill) + assert_rt( + data, + shape, + np.int16, + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=post_clips, + nan_fill=nan_fill, + ) # Check defense against in-place modification with nans present if not nan_fill is None: data[1] = np.nan - assert_rt(data, shape, np.int16, - in_cast=in_cast, - pre_clips=pre_clips, - inter=inter, - slope=slope, - post_clips=post_clips, - nan_fill=nan_fill) + assert_rt( + data, + shape, + np.int16, + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=post_clips, + nan_fill=nan_fill, + ) def test_array_from_file_overflow(): @@ -1185,18 +1178,20 @@ def test_array_from_file_overflow(): shape = (1500,) * 6 class NoStringIO: # Null file-like for forcing error - def seek(self, n_bytes): pass def read(self, n_bytes): return b'' + try: array_from_file(shape, np.int8, NoStringIO()) except OSError as err: message = str(err) - assert message == ("Expected 11390625000000000000 bytes, got 0 " - "bytes from object\n - could the file be damaged?") + assert message == ( + 'Expected 11390625000000000000 bytes, got 0 ' + 'bytes from object\n - could the file be damaged?' + ) def test__ftype4scaled_finite_warningfilters(): @@ -1249,7 +1244,7 @@ def run(self): def _calculate_scale(data, out_dtype, allow_intercept): - """ Calculate scaling and optional intercept for data + """Calculate scaling and optional intercept for data Copy of the deprecated volumeutils.calculate_scale, to preserve tests @@ -1280,6 +1275,7 @@ def _calculate_scale(data, out_dtype, allow_intercept): if np.can_cast(in_dtype, out_dtype): return 1.0, 0.0, None, None from ..arraywriters import make_array_writer, WriterError, get_slope_inter + try: writer = make_array_writer(data, out_dtype, True, allow_intercept) except WriterError as e: diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index a360804f5a..2e4ea6a788 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Test binary header objects +"""Test binary header objects This is a root testing class, used in the Analyze and other tests as a framework for all the tests common to the Analyze types @@ -42,8 +42,9 @@ INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint'] + def log_chk(hdr, level): - """ Utility method to check header checking / logging + """Utility method to check header checking / logging Asserts that log entry appears during ``hdr.check_fix`` for logging level below `level`. @@ -99,18 +100,16 @@ def log_chk(hdr, level): logger.removeHandler(handler) # When error level == level, check_fix should raise an error hdrc2 = hdr.copy() - raiser = (HeaderDataError, - hdrc2.check_fix, - logger, - level) + raiser = (HeaderDataError, hdrc2.check_fix, logger, level) return hdrc, message, raiser class _TestWrapStructBase(BaseTestCase): - """ Class implements base tests for binary headers + """Class implements base tests for binary headers It serves as a base class for other binary header tests """ + header_class = None def get_bad_bb(self): @@ -190,10 +189,9 @@ def test_mappingness(self): assert hdr.get(keys[0]) == falsyval assert hdr.get(keys[0], -1) == falsyval - def test_endianness_ro(self): # endianness is a read only property - """ Its use in initialization tested in the init tests. + """Its use in initialization tested in the init tests. Endianness gives endian interpretation of binary data. It is read only because the only common use case is to set the endianness on initialization (or occasionally byteswapping the @@ -237,8 +235,7 @@ def log_chk(self, hdr, level): return log_chk(hdr, level) def assert_no_log_err(self, hdr): - """ Assert that no logging or errors result from this `hdr` - """ + """Assert that no logging or errors result from this `hdr`""" fhdr, message, raiser = self.log_chk(hdr, 0) assert (fhdr, message) == (hdr, '') @@ -286,9 +283,9 @@ def test_as_byteswapped(self): # Note that contents is not rechecked on swap / copy class DC(self.header_class): - def check_fix(self, *args, **kwargs): raise Exception + # Assumes check=True default with pytest.raises(Exception): DC(hdr.binaryblock) @@ -313,15 +310,15 @@ def test_str(self): assert len(s1) > 0 - class _TestLabeledWrapStruct(_TestWrapStructBase): - """ Test a wrapstruct with value labeling """ + """Test a wrapstruct with value labeling""" def test_get_value_label(self): # Test get value label method # Make a new class to avoid overwriting recoders of original class MyHdr(self.header_class): _field_recoders = {} + hdr = MyHdr() # Key not existing raises error with pytest.raises(ValueError): @@ -351,7 +348,8 @@ class MyHdr(self.header_class): class MyWrapStruct(WrapStruct): - """ An example wrapped struct class """ + """An example wrapped struct class""" + template_dtype = np.dtype([('an_integer', 'i2'), ('a_str', 'S10')]) @classmethod @@ -369,11 +367,11 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ - return (klass._chk_integer, - klass._chk_string) + """Return sequence of check functions for this class""" + return (klass._chk_integer, klass._chk_string) """ Check functions in format expected by BatteryRunner class """ + @staticmethod def _chk_integer(hdr, fix=False): rep = Report(HeaderDataError) @@ -405,7 +403,8 @@ class MyLabeledWrapStruct(LabeledWrapStruct, MyWrapStruct): class TestMyWrapStruct(_TestWrapStructBase): - """ Test fake binary 
header defined at top of module """ + """Test fake binary header defined at top of module""" + header_class = MyWrapStruct def get_bad_bb(self): @@ -515,6 +514,7 @@ def test_str(self): # Make sure not to overwrite class dictionary class MyHdr(self.header_class): _field_recoders = {} + hdr = MyHdr() s1 = str(hdr) assert len(s1) > 0 diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index 10b5ee78f5..c175940ff7 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Contexts for *with* statement providing temporary directories +"""Contexts for *with* statement providing temporary directories """ import os import shutil @@ -31,7 +31,7 @@ class TemporaryDirectory: False """ - def __init__(self, suffix="", prefix=template, dir=None): + def __init__(self, suffix='', prefix=template, dir=None): self.name = mkdtemp(suffix, prefix, dir) self._closed = False @@ -49,7 +49,7 @@ def __exit__(self, exc, value, tb): class InTemporaryDirectory(TemporaryDirectory): - """ Create, return, and change directory to a temporary directory + """Create, return, and change directory to a temporary directory Notes ------ @@ -82,7 +82,7 @@ def __exit__(self, exc, value, tb): class InGivenDirectory: - """ Change directory to given directory for duration of ``with`` block + """Change directory to given directory for duration of ``with`` block Useful when you want to use `InTemporaryDirectory` for the final test, but you are still debugging. For example, you may want to do this in the end: @@ -106,7 +106,7 @@ class InGivenDirectory: """ def __init__(self, path=None): - """ Initialize directory context manager + """Initialize directory context manager Parameters ---------- diff --git a/nibabel/tripwire.py b/nibabel/tripwire.py index db659df337..3b6ecfbb40 100644 --- a/nibabel/tripwire.py +++ b/nibabel/tripwire.py @@ -1,9 +1,10 @@ -""" Class to raise error for missing modules or other misfortunes +"""Class to raise error for missing modules or other misfortunes """ class TripWireError(AttributeError): - """ Exception if trying to use TripWire object """ + """Exception if trying to use TripWire object""" + # Has to be subclass of AttributeError, to work round Python 3.5 inspection # for doctests. Python 3.5 looks for a ``__wrapped__`` attribute during # initialization of doctests, and only allows AttributeError as signal this @@ -11,7 +12,7 @@ class TripWireError(AttributeError): def is_tripwire(obj): - """ Returns True if `obj` appears to be a TripWire object + """Returns True if `obj` appears to be a TripWire object Examples -------- @@ -30,7 +31,7 @@ def is_tripwire(obj): class TripWire: - """ Class raising error if used + """Class raising error if used Standard use is to proxy modules that we could not import @@ -47,5 +48,5 @@ def __init__(self, msg): self._msg = msg def __getattr__(self, attr_name): - """ Raise informative error accessing attributes """ + """Raise informative error accessing attributes""" raise TripWireError(self._msg) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index 65e813ef0f..c3720d474b 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -1,4 +1,4 @@ -""" Utilities for viewing images +"""Utilities for viewing images Includes version of OrthoSlicer3D code originally written by our own Paul Ivanov. @@ -13,7 +13,7 @@ class OrthoSlicer3D: - """ Orthogonal-plane slice viewer. + """Orthogonal-plane slice viewer. 
OrthoSlicer3d expects 3- or 4-dimensional array data. It treats 4D data as a sequence of 3D spatial volumes, where a slice over the final @@ -39,6 +39,7 @@ class OrthoSlicer3D: >>> data = np.outer(a, b)[..., np.newaxis] * a >>> OrthoSlicer3D(data).show() # doctest: +SKIP """ + # Skip doctest above b/c not all systems have mpl installed def __init__(self, data, affine=None, axes=None, title=None): @@ -74,7 +75,7 @@ def __init__(self, data, affine=None, axes=None, title=None): if data.ndim < 3: raise ValueError('data must have at least 3 dimensions') if np.iscomplexobj(data): - raise TypeError("Complex data not supported") + raise TypeError('Complex data not supported') affine = np.array(affine, float) if affine is not None else np.eye(4) if affine.shape != (4, 4): raise ValueError('affine must be a 4x4 matrix') @@ -90,7 +91,7 @@ def __init__(self, data, affine=None, axes=None, title=None): self._volume_dims = data.shape[3:] self._current_vol_data = data[:, :, :, 0] if data.ndim > 3 else data self._data = data - self._clim = np.percentile(data, (1., 99.)) + self._clim = np.percentile(data, (1.0, 99.0)) del data if axes is None: # make the axes @@ -130,36 +131,53 @@ def __init__(self, data, affine=None, axes=None, title=None): # set up axis crosshairs self._crosshairs = [None] * 3 - r = [self._scalers[self._order[2]] / self._scalers[self._order[1]], - self._scalers[self._order[2]] / self._scalers[self._order[0]], - self._scalers[self._order[1]] / self._scalers[self._order[0]]] + r = [ + self._scalers[self._order[2]] / self._scalers[self._order[1]], + self._scalers[self._order[2]] / self._scalers[self._order[0]], + self._scalers[self._order[1]] / self._scalers[self._order[0]], + ] self._sizes = [self._data.shape[order] for order in self._order] - for ii, xax, yax, ratio, label in zip([0, 1, 2], [1, 0, 0], [2, 2, 1], - r, ('SAIP', 'SRIL', 'ARPL')): + for ii, xax, yax, ratio, label in zip( + [0, 1, 2], [1, 0, 0], [2, 2, 1], r, ('SAIP', 'SRIL', 'ARPL') + ): ax = self._axes[ii] d = np.zeros((self._sizes[yax], self._sizes[xax])) im = self._axes[ii].imshow( - d, vmin=self._clim[0], vmax=self._clim[1], aspect=1, - cmap='gray', interpolation='nearest', origin='lower') + d, + vmin=self._clim[0], + vmax=self._clim[1], + aspect=1, + cmap='gray', + interpolation='nearest', + origin='lower', + ) self._ims.append(im) - vert = ax.plot([0] * 2, [-0.5, self._sizes[yax] - 0.5], - color=(0, 1, 0), linestyle='-')[0] - horiz = ax.plot([-0.5, self._sizes[xax] - 0.5], [0] * 2, - color=(0, 1, 0), linestyle='-')[0] + vert = ax.plot( + [0] * 2, [-0.5, self._sizes[yax] - 0.5], color=(0, 1, 0), linestyle='-' + )[0] + horiz = ax.plot( + [-0.5, self._sizes[xax] - 0.5], [0] * 2, color=(0, 1, 0), linestyle='-' + )[0] self._crosshairs[ii] = dict(vert=vert, horiz=horiz) # add text labels (top, right, bottom, left) lims = [0, self._sizes[xax], 0, self._sizes[yax]] bump = 0.01 - poss = [[lims[1] / 2., lims[3]], - [(1 + bump) * lims[1], lims[3] / 2.], - [lims[1] / 2., 0], - [lims[0] - bump * lims[1], lims[3] / 2.]] - anchors = [['center', 'bottom'], ['left', 'center'], - ['center', 'top'], ['right', 'center']] + poss = [ + [lims[1] / 2.0, lims[3]], + [(1 + bump) * lims[1], lims[3] / 2.0], + [lims[1] / 2.0, 0], + [lims[0] - bump * lims[1], lims[3] / 2.0], + ] + anchors = [ + ['center', 'bottom'], + ['left', 'center'], + ['center', 'top'], + ['right', 'center'], + ] for pos, anchor, lab in zip(poss, anchors, label): - ax.text(pos[0], pos[1], lab, - horizontalalignment=anchor[0], - verticalalignment=anchor[1]) + ax.text( + pos[0], 
pos[1], lab, horizontalalignment=anchor[0], verticalalignment=anchor[1] + ) ax.axis(lims) ax.set_aspect(ratio) ax.patch.set_visible(False) @@ -180,14 +198,19 @@ def __init__(self, data, affine=None, axes=None, title=None): y = np.zeros(self.n_volumes + 1) x = np.arange(self.n_volumes + 1) - 0.5 step = ax.step(x, y, where='post', color='y')[0] - ax.set_xticks(np.unique(np.linspace(0, self.n_volumes - 1, - 5).astype(int))) + ax.set_xticks(np.unique(np.linspace(0, self.n_volumes - 1, 5).astype(int))) ax.set_xlim(x[0], x[-1]) yl = [self._data.min(), self._data.max()] yl = [lim + s * np.diff(lims)[0] for lim, s in zip(yl, [-1.01, 1.01])] - patch = mpl_patch.Rectangle([-0.5, yl[0]], 1., np.diff(yl)[0], - fill=True, facecolor=(0, 1, 0), - edgecolor=(0, 1, 0), alpha=0.25) + patch = mpl_patch.Rectangle( + [-0.5, yl[0]], + 1.0, + np.diff(yl)[0], + fill=True, + facecolor=(0, 1, 0), + edgecolor=(0, 1, 0), + alpha=0.25, + ) ax.add_patch(patch) ax.set_ylim(yl) self._volume_ax_objs = dict(step=step, patch=patch) @@ -202,32 +225,32 @@ def __init__(self, data, affine=None, axes=None, title=None): # actually set data meaningfully self._position = np.zeros(4) - self._position[3] = 1. # convenience for affine multiplication + self._position[3] = 1.0 # convenience for affine multiplication self._changing = False # keep track of status to avoid loops self._links = [] # other viewers this one is linked to self._plt.draw() for fig in self._figs: fig.canvas.draw() self._set_volume_index(0, update_slices=False) - self._set_position(0., 0., 0.) + self._set_position(0.0, 0.0, 0.0) self._draw() def __repr__(self): title = '' if self._title is None else f'{self._title} ' vol = '' if self.n_volumes <= 1 else f', {self.n_volumes}' - r = (f'<{self.__class__.__name__}: {title}({self._sizes[0]}, ' - f'{self._sizes[1]}, {self._sizes[2]}{vol})>') + r = ( + f'<{self.__class__.__name__}: {title}({self._sizes[0]}, ' + f'{self._sizes[1]}, {self._sizes[2]}{vol})>' + ) return r # User-level functions ################################################### def show(self): - """Show the slicer in blocking mode; convenience for ``plt.show()`` - """ + """Show the slicer in blocking mode; convenience for ``plt.show()``""" self._plt.show() def close(self): - """Close the viewer figures - """ + """Close the viewer figures""" self._cleanup() for f in self._figs: self._plt.close(f) @@ -294,8 +317,9 @@ def link_to(self, other): Other viewer to use to link movements. 
""" if not isinstance(other, self.__class__): - raise TypeError('other must be an instance of ' - f'{self.__class__.__name__}, not {type(other)}') + raise TypeError( + 'other must be an instance of ' f'{self.__class__.__name__}, not {type(other)}' + ) self._link(other, is_primary=True) def _link(self, other, is_primary): @@ -355,8 +379,7 @@ def _set_volume_index(self, v, update_slices=True): self._data_idx[3] = max(min(int(round(v)), max_ - 1), 0) idx = (slice(None), slice(None), slice(None)) if self._data.ndim > 3: - idx = idx + tuple(np.unravel_index(self._data_idx[3], - self._volume_dims)) + idx = idx + tuple(np.unravel_index(self._data_idx[3], self._volume_dims)) self._current_vol_data = self._data[idx] # update all of our slice plots if update_slices: @@ -381,8 +404,7 @@ def _set_position(self, x, y, z, notify=True): # sagittal: get to S/A # coronal: get to S/L # axial: get to A/L - data = np.rollaxis(self._current_vol_data, - axis=self._order[ii])[self._data_idx[ii]] + data = np.rollaxis(self._current_vol_data, axis=self._order[ii])[self._data_idx[ii]] xax = [1, 0, 0][ii] yax = [2, 2, 1][ii] if self._order[xax] < self._order[yax]: @@ -440,14 +462,14 @@ def _on_scroll(self, event): return ii = 3 # shift: change volume in any axis assert ii in range(4) - dv = 10. if event.key is not None and 'control' in event.key else 1. - dv *= 1. if event.button == 'up' else -1. + dv = 10.0 if event.key is not None and 'control' in event.key else 1.0 + dv *= 1.0 if event.button == 'up' else -1.0 dv *= -1 if self._flips[ii] else 1 val = self._data_idx[ii] + dv if ii == 3: self._set_volume_index(val) else: - coords = [self._data_idx[k] for k in range(3)] + [1.] + coords = [self._data_idx[k] for k in range(3)] + [1.0] coords[ii] = val self._set_position(*np.dot(self._affine, coords)[:3]) self._draw() @@ -468,7 +490,7 @@ def _on_mouse(self, event): x, y = event.xdata, event.ydata x = self._sizes[xax] - x if self._flips[xax] else x y = self._sizes[yax] - y if self._flips[yax] else y - idxs = [None, None, None, 1.] + idxs = [None, None, None, 1.0] idxs[xax] = x idxs[yax] = y idxs[ii] = self._data_idx[ii] @@ -479,7 +501,7 @@ def _on_keypress(self, event): """Handle mpl keypress events""" if event.key is not None and 'escape' in event.key: self.close() - elif event.key in ["=", '+']: + elif event.key in ['=', '+']: # increment volume index new_idx = min(self._data_idx[3] + 1, self.n_volumes) self._set_volume_index(new_idx, update_slices=True) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index 7f18c20f3f..f026750e95 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -6,7 +6,7 @@ # copyright and license terms. 
# ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Utility functions for analyze-like formats """ +"""Utility functions for analyze-like formats""" import sys import warnings @@ -23,7 +23,7 @@ from .externals.oset import OrderedSet from .optpkg import optional_package -pyzstd, HAVE_ZSTD, _ = optional_package("pyzstd") +pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') sys_is_le = sys.byteorder == 'little' native_code = sys_is_le and '<' or '>' @@ -33,7 +33,8 @@ ('<', 'little', 'l', 'le', 'L', 'LE'), ('>', 'big', 'BIG', 'b', 'be', 'B', 'BE'), (native_code, 'native', 'n', 'N', '=', '|', 'i', 'I'), - (swapped_code, 'swapped', 's', 'S', '!')) + (swapped_code, 'swapped', 's', 'S', '!'), +) # We'll put these into the Recoder class after we define it #: default compression level when writing gz and bz2 files @@ -48,7 +49,7 @@ class Recoder: - """ class to return canonical code(s) from code or aliases + """class to return canonical code(s) from code or aliases The concept is a lot easier to read in the implementation and tests than it is to explain, so... @@ -82,7 +83,7 @@ class Recoder: """ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): - """ Create recoder object + """Create recoder object ``codes`` give a sequence of code, alias sequences ``fields`` are names by which the entries in these sequences can be @@ -120,7 +121,7 @@ def __init__(self, codes, fields=('code',), map_maker=OrderedDict): self.add_codes(codes) def add_codes(self, code_syn_seqs): - """ Add codes to object + """Add codes to object Parameters ---------- @@ -154,7 +155,7 @@ def add_codes(self, code_syn_seqs): self.__dict__[field_name][alias] = code_syns[field_ind] def __getitem__(self, key): - """ Return value from field1 dictionary (first column of values) + """Return value from field1 dictionary (first column of values) Returns same value as ``obj.field1[key]`` and, with the default initializing ``fields`` argument of fields=('code',), @@ -167,8 +168,7 @@ def __getitem__(self, key): return self.field1[key] def __contains__(self, key): - """ True if field1 in recoder contains `key` - """ + """True if field1 in recoder contains `key`""" try: self.field1[key] except KeyError: @@ -176,7 +176,7 @@ def __contains__(self, key): return True def keys(self): - """ Return all available code and alias values + """Return all available code and alias values Returns same value as ``obj.field1.keys()`` and, with the default initializing ``fields`` argument of fields=('code',), @@ -190,7 +190,7 @@ def keys(self): return self.field1.keys() def value_set(self, name=None): - """ Return OrderedSet of possible returned values for column + """Return OrderedSet of possible returned values for column By default, the column is the first column. @@ -224,7 +224,7 @@ def value_set(self, name=None): class DtypeMapper: - """ Specialized mapper for numpy dtypes + """Specialized mapper for numpy dtypes We pass this mapper into the Recoder class to deal with numpy dtype hashing. 
@@ -252,7 +252,7 @@ def values(self): return self._dict.values() def __setitem__(self, key, value): - """ Set item into mapping, checking for dtype keys + """Set item into mapping, checking for dtype keys Cache dtype keys for comparison test in __getitem__ """ @@ -261,7 +261,7 @@ def __setitem__(self, key, value): self._dtype_keys.append(key) def __getitem__(self, key): - """ Get item from mapping, checking for dtype keys + """Get item from mapping, checking for dtype keys First do simple hash lookup, then check for a dtype key that has failed the hash lookup. Look then for any known dtype keys that compare equal @@ -279,7 +279,7 @@ def __getitem__(self, key): def pretty_mapping(mapping, getterfunc=None): - """ Make pretty string from mapping + """Make pretty string from mapping Adjusts text column to print values on basis of longest key. Probably only sensible if keys are mainly strings. @@ -339,7 +339,7 @@ def pretty_mapping(mapping, getterfunc=None): def make_dt_codes(codes_seqs): - """ Create full dt codes Recoder instance from datatype codes + """Create full dt codes Recoder instance from datatype codes Include created numpy dtype (from numpy type) and opposite endian numpy dtype @@ -379,13 +379,12 @@ def make_dt_codes(codes_seqs): def _is_compressed_fobj(fobj): - """ Return True if fobj represents a compressed data file-like object - """ + """Return True if fobj represents a compressed data file-like object""" return isinstance(fobj, COMPRESSED_FILE_LIKES) def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): - """ Get array from file with specified shape, dtype and file offset + """Get array from file with specified shape, dtype and file offset Parameters ---------- @@ -428,8 +427,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): True """ if mmap not in (True, False, 'c', 'r', 'r+'): - raise ValueError("mmap value should be one of True, False, 'c', " - "'r', 'r+'") + raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'") if mmap is True: mmap = 'c' in_dtype = np.dtype(in_dtype) @@ -437,12 +435,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): infile = getattr(infile, 'fobj', infile) if mmap and not _is_compressed_fobj(infile): try: # Try memmapping file on disk - return np.memmap(infile, - in_dtype, - mode=mmap, - shape=shape, - order=order, - offset=offset) + return np.memmap(infile, in_dtype, mode=mmap, shape=shape, order=order, offset=offset) # The error raised by memmap, for different file types, has # changed in different incarnations of the numpy routine except (AttributeError, TypeError, ValueError): @@ -464,8 +457,10 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): n_read = len(data_bytes) needs_copy = True if n_bytes != n_read: - raise OSError(f"Expected {n_bytes} bytes, got {n_read} bytes from " - f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?") + raise OSError( + f'Expected {n_bytes} bytes, got {n_read} bytes from ' + f"{getattr(infile, 'name', 'object')}\n - could the file be damaged?" 
+ ) arr = np.ndarray(shape, in_dtype, buffer=data_bytes, order=order) if needs_copy: return arr.copy() @@ -473,10 +468,19 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True): return arr -def array_to_file(data, fileobj, out_dtype=None, offset=0, - intercept=0.0, divslope=1.0, - mn=None, mx=None, order='F', nan2zero=True): - """ Helper function for writing arrays to file objects +def array_to_file( + data, + fileobj, + out_dtype=None, + offset=0, + intercept=0.0, + divslope=1.0, + mn=None, + mx=None, + order='F', + nan2zero=True, +): + """Helper function for writing arrays to file objects Writes arrays as scaled by `intercept` and `divslope`, and clipped at (prescaling) `mn` minimum, and `mx` maximum. @@ -558,8 +562,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, """ # Shield special case div_none = divslope is None - if not np.all( - np.isfinite((intercept, 1.0 if div_none else divslope))): + if not np.all(np.isfinite((intercept, 1.0 if div_none else divslope))): raise ValueError('divslope and intercept must be finite') if divslope == 0: raise ValueError('divslope cannot be zero') @@ -571,15 +574,14 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, out_dtype = np.dtype(out_dtype) if offset is not None: seek_tell(fileobj, offset) - if (div_none or (mn, mx) == (0, 0) or - ((mn is not None and mx is not None) and mx < mn)): + if div_none or (mn, mx) == (0, 0) or ((mn is not None and mx is not None) and mx < mn): write_zeros(fileobj, data.size * out_dtype.itemsize) return if order not in 'FC': raise ValueError('Order should be one of F or C') # Simple cases pre_clips = None if (mn is None and mx is None) else (mn, mx) - null_scaling = (intercept == 0 and divslope == 1) + null_scaling = intercept == 0 and divslope == 1 if in_dtype.type == np.void: if not null_scaling: raise ValueError('Cannot scale non-numeric types') @@ -589,8 +591,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, if pre_clips is not None: pre_clips = _dt_min_max(in_dtype, *pre_clips) if null_scaling and np.can_cast(in_dtype, out_dtype): - return _write_data(data, fileobj, out_dtype, order, - pre_clips=pre_clips) + return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # Force upcasting for floats by making atleast_1d. slope, inter = [np.atleast_1d(v) for v in (divslope, intercept)] # Default working point type for applying slope / inter @@ -601,10 +602,9 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, in_kind = in_dtype.kind out_kind = out_dtype.kind if out_kind in 'fc': - return _write_data(data, fileobj, out_dtype, order, - slope=slope, - inter=inter, - pre_clips=pre_clips) + return _write_data( + data, fileobj, out_dtype, order, slope=slope, inter=inter, pre_clips=pre_clips + ) assert out_kind in 'iu' if in_kind in 'iu': if null_scaling: @@ -613,8 +613,7 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, mn, mx = _dt_min_max(in_dtype, mn, mx) mn_out, mx_out = _dt_min_max(out_dtype) pre_clips = max(mn, mn_out), min(mx, mx_out) - return _write_data(data, fileobj, out_dtype, order, - pre_clips=pre_clips) + return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # In any case, we do not want to check for nans because we've already # disallowed scaling that generates nans nan2zero = False @@ -677,38 +676,48 @@ def array_to_file(data, fileobj, out_dtype=None, offset=0, # slope). Assume errors are for working float type. 
Round for integer # rounding est_err = np.round(2 * np.finfo(w_type).eps * abs(inter / slope)) - if ((nan_fill < both_mn and abs(nan_fill - both_mn) < est_err) or - (nan_fill > both_mx and abs(nan_fill - both_mx) < est_err)): + if (nan_fill < both_mn and abs(nan_fill - both_mn) < est_err) or ( + nan_fill > both_mx and abs(nan_fill - both_mx) < est_err + ): # nan_fill can be (just) outside clip range nan_fill = np.clip(nan_fill, both_mn, both_mx) else: - raise ValueError(f"nan_fill == {nan_fill}, outside safe int range " - f"({int(both_mn)}-{int(both_mx)}); " - "change scaling or set nan2zero=False?") + raise ValueError( + f'nan_fill == {nan_fill}, outside safe int range ' + f'({int(both_mn)}-{int(both_mx)}); ' + 'change scaling or set nan2zero=False?' + ) # Make sure non-nan output clipped to shared range post_mn = np.max([post_mn, both_mn]) post_mx = np.min([post_mx, both_mx]) in_cast = None if cast_in_dtype == in_dtype else cast_in_dtype - return _write_data(data, fileobj, out_dtype, order, - in_cast=in_cast, - pre_clips=pre_clips, - inter=inter, - slope=slope, - post_clips=(post_mn, post_mx), - nan_fill=nan_fill if nan2zero else None) - - -def _write_data(data, - fileobj, - out_dtype, - order, - in_cast=None, - pre_clips=None, - inter=0., - slope=1., - post_clips=None, - nan_fill=None): - """ Write array `data` to `fileobj` as `out_dtype` type, layout `order` + return _write_data( + data, + fileobj, + out_dtype, + order, + in_cast=in_cast, + pre_clips=pre_clips, + inter=inter, + slope=slope, + post_clips=(post_mn, post_mx), + nan_fill=nan_fill if nan2zero else None, + ) + + +def _write_data( + data, + fileobj, + out_dtype, + order, + in_cast=None, + pre_clips=None, + inter=0.0, + slope=1.0, + post_clips=None, + nan_fill=None, +): + """Write array `data` to `fileobj` as `out_dtype` type, layout `order` Does not modify `data` in-place. 
@@ -741,8 +750,7 @@ def _write_data(data, data = np.atleast_2d(data) elif order == 'F': data = data.T - nan_need_copy = ((pre_clips, in_cast, inter, slope, post_clips) == - (None, None, 0, 1, None)) + nan_need_copy = (pre_clips, in_cast, inter, slope, post_clips) == (None, None, 0, 1, None) for dslice in data: # cycle over first dimension to save memory if pre_clips is not None: dslice = np.clip(dslice, *pre_clips) @@ -773,20 +781,15 @@ def _dt_min_max(dtype_like, mn=None, mx=None): info = np.iinfo(dt) dt_mn, dt_mx = (info.min, info.max) else: - raise ValueError("unknown dtype") + raise ValueError('unknown dtype') return dt_mn if mn is None else mn, dt_mx if mx is None else mx -_CSIZE2FLOAT = { - 8: np.float32, - 16: np.float64, - 24: np.longdouble, - 32: np.longdouble} +_CSIZE2FLOAT = {8: np.float32, 16: np.float64, 24: np.longdouble, 32: np.longdouble} def _matching_float(np_type): - """ Return floating point type matching `np_type` - """ + """Return floating point type matching `np_type`""" dtype = np.dtype(np_type) if dtype.kind not in 'cf': raise ValueError('Expecting float or complex type as input') @@ -796,7 +799,7 @@ def _matching_float(np_type): def write_zeros(fileobj, count, block_size=8194): - """ Write `count` zero bytes to `fileobj` + """Write `count` zero bytes to `fileobj` Parameters ---------- @@ -816,7 +819,7 @@ def write_zeros(fileobj, count, block_size=8194): def seek_tell(fileobj, offset, write0=False): - """ Seek in `fileobj` or check we're in the right place already + """Seek in `fileobj` or check we're in the right place already Parameters ---------- @@ -846,7 +849,7 @@ def seek_tell(fileobj, offset, write0=False): def apply_read_scaling(arr, slope=None, inter=None): - """ Apply scaling in `slope` and `inter` to array `arr` + """Apply scaling in `slope` and `inter` to array `arr` This is for loading the array from a file (as opposed to the reverse scaling when saving an array to file) @@ -889,7 +892,7 @@ def apply_read_scaling(arr, slope=None, inter=None): # int to float; get enough precision to avoid infs # Find floating point type for which scaling does not overflow, # starting at given type - default = (slope.dtype.type if slope.dtype.kind == 'f' else np.float64) + default = slope.dtype.type if slope.dtype.kind == 'f' else np.float64 ftype = int_scinter_ftype(arr.dtype, slope, inter, default) slope = slope.astype(ftype) inter = inter.astype(ftype) @@ -901,7 +904,7 @@ def apply_read_scaling(arr, slope=None, inter=None): def working_type(in_type, slope=1.0, inter=0.0): - """ Return array type from applying `slope`, `inter` to array of `in_type` + """Return array type from applying `slope`, `inter` to array of `in_type` Numpy type that results from an array of type `in_type` being combined with `slope` and `inter`. 
It returns something like the dtype type of @@ -944,7 +947,7 @@ def working_type(in_type, slope=1.0, inter=0.0): def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): - """ float type containing int type `ifmt` * `slope` + `inter` + """float type containing int type `ifmt` * `slope` + `inter` Return float type that can represent the max and the min of the `ifmt` type after multiplication with `slope` and addition of `inter` with something @@ -996,7 +999,7 @@ def int_scinter_ftype(ifmt, slope=1.0, inter=0.0, default=np.float32): def best_write_scale_ftype(arr, slope=1.0, inter=0.0, default=np.float32): - """ Smallest float type to contain range of ``arr`` after scaling + """Smallest float type to contain range of ``arr`` after scaling Scaling that will be applied to ``arr`` is ``(arr - inter) / slope``. @@ -1060,7 +1063,7 @@ def best_write_scale_ftype(arr, slope=1.0, inter=0.0, default=np.float32): def better_float_of(first, second, default=np.float32): - """ Return more capable float type of `first` and `second` + """Return more capable float type of `first` and `second` Return `default` if neither of `first` or `second` is a float @@ -1105,10 +1108,8 @@ def better_float_of(first, second, default=np.float32): return second.type -def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', - default=np.float32): - """ Smallest float type for scaling of `tst_arr` that does not overflow - """ +def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', default=np.float32): + """Smallest float type for scaling of `tst_arr` that does not overflow""" assert direction in ('read', 'write') if default not in OK_FLOATS and default is np.longdouble: # Omitted longdouble @@ -1146,7 +1147,7 @@ def _ftype4scaled_finite(tst_arr, slope, inter, direction='read', def finite_range(arr, check_nan=False): - """ Get range (min, max) or range and flag (min, max, has_nan) from `arr` + """Get range (min, max) or range and flag (min, max, has_nan) from `arr` Parameters ---------- @@ -1242,7 +1243,7 @@ def finite_range(arr, check_nan=False): def shape_zoom_affine(shape, zooms, x_flip=True): - """ Get affine implied by given shape and zooms + """Get affine implied by given shape and zooms We get the translations from the center of the image (implied by `shape`). @@ -1304,7 +1305,7 @@ def shape_zoom_affine(shape, zooms, x_flip=True): def rec2dict(rec): - """ Convert recarray to dictionary + """Convert recarray to dictionary Also converts scalar values to scalars @@ -1337,7 +1338,7 @@ def rec2dict(rec): def fname_ext_ul_case(fname): - """ `fname` with ext changed to upper / lower case if file exists + """`fname` with ext changed to upper / lower case if file exists Check for existence of `fname`. If it does exist, return unmodified. If it doesn't, check for existence of `fname` with case changed from lower to diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py index b933892565..cdc2957dab 100644 --- a/nibabel/wrapstruct.py +++ b/nibabel/wrapstruct.py @@ -6,7 +6,7 @@ # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -""" Class to wrap numpy structured array +"""Class to wrap numpy structured array ============ wrapstruct @@ -111,8 +111,7 @@ """ import numpy as np -from .volumeutils import (pretty_mapping, endian_codes, native_code, - swapped_code) +from .volumeutils import pretty_mapping, endian_codes, native_code, swapped_code from . 
import imageglobals as imageglobals from .batteryrunners import BatteryRunner @@ -125,11 +124,8 @@ class WrapStruct: # placeholder datatype template_dtype = np.dtype([('integer', 'i2')]) - def __init__(self, - binaryblock=None, - endianness=None, - check=True): - """ Initialize WrapStruct from binary data block + def __init__(self, binaryblock=None, endianness=None, check=True): + """Initialize WrapStruct from binary data block Parameters ---------- @@ -160,8 +156,7 @@ def __init__(self, # check size if len(binaryblock) != self.template_dtype.itemsize: raise WrapStructError('Binary block is wrong size') - wstr = np.ndarray(shape=(), dtype=self.template_dtype, - buffer=binaryblock) + wstr = np.ndarray(shape=(), dtype=self.template_dtype, buffer=binaryblock) if endianness is None: endianness = self.__class__.guessed_endian(wstr) else: @@ -175,7 +170,7 @@ def __init__(self, @classmethod def from_fileobj(klass, fileobj, endianness=None, check=True): - """ Return read structure with given or guessed endiancode + """Return read structure with given or guessed endiancode Parameters ---------- @@ -194,7 +189,7 @@ def from_fileobj(klass, fileobj, endianness=None, check=True): @property def binaryblock(self): - """ binary block of data as string + """binary block of data as string Returns ------- @@ -211,7 +206,7 @@ def binaryblock(self): return self._structarr.tobytes() def write_to(self, fileobj): - """ Write structure to fileobj + """Write structure to fileobj Write starts at fileobj current file position. @@ -237,7 +232,7 @@ def write_to(self, fileobj): @property def endianness(self): - """ endian code of binary data + """endian code of binary data The endianness code gives the current byte order interpretation of the binary data. @@ -261,7 +256,7 @@ def endianness(self): return swapped_code def copy(self): - """ Return copy of structure + """Return copy of structure >>> wstr = WrapStruct() >>> wstr['integer'] = 3 @@ -274,7 +269,7 @@ def copy(self): return self.__class__(self.binaryblock, self.endianness, check=False) def __eq__(self, other): - """ equality between two structures defined by binaryblock + """equality between two structures defined by binaryblock Examples -------- @@ -302,7 +297,7 @@ def __ne__(self, other): return not self == other def __getitem__(self, item): - """ Return values from structure data + """Return values from structure data Examples -------- @@ -313,7 +308,7 @@ def __getitem__(self, item): return self._structarr[item] def __setitem__(self, item, value): - """ Set values in structured data + """Set values in structured data Examples -------- @@ -328,24 +323,24 @@ def __iter__(self): return iter(self.keys()) def keys(self): - """ Return keys from structured data""" + """Return keys from structured data""" return list(self.template_dtype.names) def values(self): - """ Return values from structured data""" + """Return values from structured data""" data = self._structarr return [data[key] for key in self.template_dtype.names] def items(self): - """ Return items from structured data""" + """Return items from structured data""" return zip(self.keys(), self.values()) def get(self, k, d=None): - """ Return value for the key k if present or d otherwise""" + """Return value for the key k if present or d otherwise""" return self._structarr[k] if k in self.keys() else d def check_fix(self, logger=None, error_level=None): - """ Check structured data with checks + """Check structured data with checks Parameters ---------- @@ -365,16 +360,15 @@ def check_fix(self, logger=None, 
error_level=None): @classmethod def diagnose_binaryblock(klass, binaryblock, endianness=None): - """ Run checks over binary data, return string """ + """Run checks over binary data, return string""" wstr = klass(binaryblock, endianness=endianness, check=False) battrun = BatteryRunner(klass._get_checks()) reports = battrun.check_only(wstr) - return '\n'.join([report.message - for report in reports if report.message]) + return '\n'.join([report.message for report in reports if report.message]) @classmethod def guessed_endian(self, mapping): - """ Guess intended endianness from mapping-like ``mapping`` + """Guess intended endianness from mapping-like ``mapping`` Parameters ---------- @@ -391,8 +385,7 @@ def guessed_endian(self, mapping): @classmethod def default_structarr(klass, endianness=None): - """ Return structured array for default structure with given endianness - """ + """Return structured array for default structure with given endianness""" dt = klass.template_dtype if endianness is not None: endianness = endian_codes[endianness] @@ -401,7 +394,7 @@ def default_structarr(klass, endianness=None): @property def structarr(self): - """ Structured data, with data fields + """Structured data, with data fields Examples -------- @@ -415,12 +408,12 @@ def structarr(self): return self._structarr def __str__(self): - """ Return string representation for printing """ + """Return string representation for printing""" summary = f"{self.__class__} object, endian='{self.endianness}'" return '\n'.join([summary, pretty_mapping(self)]) def as_byteswapped(self, endianness=None): - """ return new byteswapped object with given ``endianness`` + """return new byteswapped object with given ``endianness`` Guaranteed to make a copy even if endianness is the same as the current endianness. @@ -482,17 +475,17 @@ def as_byteswapped(self, endianness=None): @classmethod def _get_checks(klass): - """ Return sequence of check functions for this class """ + """Return sequence of check functions for this class""" return () class LabeledWrapStruct(WrapStruct): - """ A WrapStruct with some fields having value labels for printing etc - """ + """A WrapStruct with some fields having value labels for printing etc""" + _field_recoders = {} # for recoding values for str def get_value_label(self, fieldname): - """ Returns label for coded field + """Returns label for coded field A coded field is an int field containing codes that stand for discrete values that also have string labels. @@ -535,7 +528,7 @@ def get_value_label(self, fieldname): return f'' def __str__(self): - """ Return string representation for printing """ + """Return string representation for printing""" summary = f"{self.__class__} object, endian='{self.endianness}'" def _getter(obj, key): diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index d907f95e10..67e10cd152 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -18,25 +18,25 @@ class XmlSerializable: - """ Basic interface for serializing an object to xml""" + """Basic interface for serializing an object to xml""" def _to_xml_element(self): - """ Output should be a xml.etree.ElementTree.Element""" + """Output should be a xml.etree.ElementTree.Element""" raise NotImplementedError() def to_xml(self, enc='utf-8'): - """ Output should be an xml string with the given encoding. + """Output should be an xml string with the given encoding. 
(default: utf-8)""" ele = self._to_xml_element() return '' if ele is None else tostring(ele, enc) class XmlBasedHeader(FileBasedHeader, XmlSerializable): - """ Basic wrapper around FileBasedHeader and XmlSerializable.""" + """Basic wrapper around FileBasedHeader and XmlSerializable.""" class XmlParser: - """ Base class for defining how to parse xml-based image snippets. + """Base class for defining how to parse xml-based image snippets. Image-specific parsers should define: StartElementHandler @@ -44,9 +44,7 @@ class XmlParser: CharacterDataHandler """ - HANDLER_NAMES = ['StartElementHandler', - 'EndElementHandler', - 'CharacterDataHandler'] + HANDLER_NAMES = ['StartElementHandler', 'EndElementHandler', 'CharacterDataHandler'] def __init__(self, encoding='utf-8', buffer_size=35000000, verbose=0): """ From 5d0481d33a5691bf82538dff1da541d2f29a0e14 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 21:55:26 -0500 Subject: [PATCH 06/12] STY: Reorder imports and guard against oversorting --- nibabel/__init__.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index ad14fc52dc..a816937dd2 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -43,6 +43,11 @@ from . import spm2analyze as spm2 from . import nifti1 as ni1 from . import ecat +from . import mriutils +from . import streamlines +from . import viewers + +# isort: split # object imports from .fileholders import FileHolder, FileHolderError @@ -67,9 +72,8 @@ aff2axcodes, ) from .imageclasses import all_image_classes -from . import mriutils -from . import streamlines -from . import viewers + +# isort: split from .pkg_info import get_pkg_info as _get_pkg_info From 0ab2856cac4d4baae7ab3e2f6d58421db55d807f Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 21:32:00 -0500 Subject: [PATCH 07/12] STY: isort [git-blame-ignore-rev] --- nibabel/__init__.py | 40 ++++++----- nibabel/affines.py | 4 +- nibabel/analyze.py | 20 +++--- nibabel/arrayproxy.py | 9 ++- nibabel/arraywriters.py | 10 +-- nibabel/benchmarks/bench_array_to_file.py | 5 +- .../benchmarks/bench_arrayproxy_slicing.py | 9 +-- nibabel/benchmarks/bench_fileslice.py | 6 +- nibabel/benchmarks/bench_finite_range.py | 6 +- nibabel/benchmarks/bench_load_save.py | 7 +- nibabel/brikhead.py | 4 +- nibabel/casting.py | 2 +- nibabel/cifti2/__init__.py | 18 ++--- nibabel/cifti2/cifti2.py | 12 ++-- nibabel/cifti2/cifti2_axes.py | 6 +- nibabel/cifti2/parse_cifti2.py | 26 ++++---- nibabel/cifti2/tests/test_axes.py | 6 +- nibabel/cifti2/tests/test_cifti2.py | 8 +-- nibabel/cifti2/tests/test_cifti2io_axes.py | 10 +-- nibabel/cifti2/tests/test_cifti2io_header.py | 13 ++-- nibabel/cifti2/tests/test_new_cifti2.py | 4 +- nibabel/cmdline/convert.py | 2 +- nibabel/cmdline/dicomfs.py | 12 ++-- nibabel/cmdline/diff.py | 6 +- nibabel/cmdline/ls.py | 4 +- nibabel/cmdline/parrec2nii.py | 20 +++--- nibabel/cmdline/roi.py | 5 +- nibabel/cmdline/stats.py | 3 +- nibabel/cmdline/tck2trk.py | 5 +- nibabel/cmdline/tests/test_conform.py | 2 +- nibabel/cmdline/tests/test_convert.py | 5 +- nibabel/cmdline/tests/test_parrec2nii.py | 9 +-- nibabel/cmdline/tests/test_roi.py | 11 ++-- nibabel/cmdline/tests/test_stats.py | 7 +- nibabel/cmdline/tests/test_utils.py | 11 ++-- nibabel/cmdline/trk2tck.py | 2 +- nibabel/data.py | 10 +-- nibabel/dataobj_images.py | 2 +- nibabel/deprecator.py | 2 +- nibabel/dft.py | 12 ++-- nibabel/ecat.py | 6 +- nibabel/eulerangles.py | 2 - nibabel/filebasedimages.py | 3 +- 
nibabel/fileslice.py | 6 +- nibabel/freesurfer/__init__.py | 10 +-- nibabel/freesurfer/io.py | 8 +-- nibabel/freesurfer/mghformat.py | 13 ++-- nibabel/freesurfer/tests/test_io.py | 29 ++++---- nibabel/freesurfer/tests/test_mghformat.py | 25 +++---- nibabel/funcs.py | 2 +- nibabel/gifti/__init__.py | 8 +-- nibabel/gifti/gifti.py | 11 ++-- nibabel/gifti/parse_gifti_fast.py | 12 ++-- nibabel/gifti/tests/test_gifti.py | 19 +++--- nibabel/gifti/tests/test_parse_gifti_fast.py | 20 +++--- nibabel/imageclasses.py | 6 +- nibabel/imagestats.py | 1 + nibabel/loadsave.py | 9 +-- nibabel/minc1.py | 3 +- nibabel/minc2.py | 2 +- nibabel/nicom/ascconv.py | 3 +- nibabel/nicom/dicomreaders.py | 2 +- nibabel/nicom/dicomwrappers.py | 5 +- nibabel/nicom/tests/__init__.py | 1 + nibabel/nicom/tests/test_ascconv.py | 6 +- nibabel/nicom/tests/test_csareader.py | 11 ++-- nibabel/nicom/tests/test_dicomreaders.py | 8 +-- nibabel/nicom/tests/test_dicomwrappers.py | 21 +++--- nibabel/nicom/tests/test_dwiparams.py | 7 +- nibabel/nicom/tests/test_structreader.py | 2 +- nibabel/nicom/tests/test_utils.py | 3 +- nibabel/nifti1.py | 12 ++-- nibabel/nifti2.py | 2 +- nibabel/openers.py | 3 +- nibabel/optpkg.py | 1 + nibabel/parrec.py | 15 +++-- nibabel/pkg_info.py | 2 + nibabel/processing.py | 6 +- nibabel/pydicom_compat.py | 5 +- nibabel/quaternions.py | 1 + nibabel/spatialimages.py | 6 +- nibabel/spm99analyze.py | 7 +- nibabel/streamlines/__init__.py | 7 +- nibabel/streamlines/array_sequence.py | 2 +- nibabel/streamlines/tck.py | 6 +- .../streamlines/tests/test_array_sequence.py | 11 ++-- nibabel/streamlines/tests/test_streamlines.py | 21 +++--- nibabel/streamlines/tests/test_tck.py | 19 +++--- nibabel/streamlines/tests/test_tractogram.py | 23 ++++--- .../streamlines/tests/test_tractogram_file.py | 4 +- nibabel/streamlines/tests/test_trk.py | 18 +++-- nibabel/streamlines/tests/test_utils.py | 8 +-- nibabel/streamlines/tractogram.py | 5 +- nibabel/streamlines/trk.py | 11 ++-- nibabel/testing/__init__.py | 18 +++-- nibabel/tests/data/check_parrec_reslice.py | 1 + nibabel/tests/data/gen_standard.py | 2 +- nibabel/tests/data/make_moved_anat.py | 2 +- nibabel/tests/nibabel_data.py | 7 +- nibabel/tests/scriptrunner.py | 10 +-- nibabel/tests/test_affines.py | 16 ++--- nibabel/tests/test_analyze.py | 33 +++++----- nibabel/tests/test_api_validators.py | 1 + nibabel/tests/test_arrayproxy.py | 24 +++---- nibabel/tests/test_arraywriters.py | 23 ++++--- nibabel/tests/test_batteryrunners.py | 4 +- nibabel/tests/test_brikhead.py | 7 +- nibabel/tests/test_casting.py | 22 +++---- nibabel/tests/test_data.py | 26 ++++---- nibabel/tests/test_dataobj_images.py | 5 +- nibabel/tests/test_deprecated.py | 5 +- nibabel/tests/test_deprecator.py | 10 +-- nibabel/tests/test_dft.py | 10 ++- nibabel/tests/test_diff.py | 7 +- nibabel/tests/test_ecat.py | 16 ++--- nibabel/tests/test_ecat_data.py | 5 +- nibabel/tests/test_environment.py | 6 +- nibabel/tests/test_euler.py | 6 +- nibabel/tests/test_filebasedimages.py | 3 +- nibabel/tests/test_filehandles.py | 4 +- nibabel/tests/test_fileholders.py | 1 - nibabel/tests/test_filename_parser.py | 4 +- nibabel/tests/test_files_interface.py | 10 +-- nibabel/tests/test_fileslice.py | 29 ++++---- nibabel/tests/test_fileutils.py | 3 +- nibabel/tests/test_floating.py | 22 +++---- nibabel/tests/test_funcs.py | 10 ++- nibabel/tests/test_image_api.py | 56 ++++++++-------- nibabel/tests/test_image_load_save.py | 39 ++++++----- nibabel/tests/test_image_types.py | 18 ++--- nibabel/tests/test_imageclasses.py | 10 ++- 
nibabel/tests/test_imagestats.py | 3 +- nibabel/tests/test_init.py | 8 ++- nibabel/tests/test_loadsave.py | 23 ++++--- nibabel/tests/test_minc1.py | 22 +++---- nibabel/tests/test_minc2.py | 2 - nibabel/tests/test_minc2_data.py | 8 +-- nibabel/tests/test_mriutils.py | 4 +- nibabel/tests/test_nibabel_data.py | 5 +- nibabel/tests/test_nifti1.py | 40 +++++------ nibabel/tests/test_nifti2.py | 11 ++-- nibabel/tests/test_onetime.py | 1 + nibabel/tests/test_openers.py | 26 +++----- nibabel/tests/test_optpkg.py | 9 ++- nibabel/tests/test_orientations.py | 21 +++--- nibabel/tests/test_parrec.py | 31 ++++----- nibabel/tests/test_parrec_data.py | 14 ++-- nibabel/tests/test_pkg_info.py | 4 +- nibabel/tests/test_processing.py | 31 ++++----- nibabel/tests/test_proxy_api.py | 37 +++++------ nibabel/tests/test_quaternions.py | 6 +- nibabel/tests/test_recoder.py | 5 +- nibabel/tests/test_removalschedule.py | 4 +- nibabel/tests/test_round_trip.py | 12 ++-- nibabel/tests/test_rstutils.py | 3 +- nibabel/tests/test_scaling.py | 14 ++-- nibabel/tests/test_scripts.py | 29 ++++---- nibabel/tests/test_spaces.py | 9 ++- nibabel/tests/test_spatialimages.py | 17 ++--- nibabel/tests/test_spm2analyze.py | 7 +- nibabel/tests/test_spm99analyze.py | 21 +++--- nibabel/tests/test_testing.py | 14 ++-- nibabel/tests/test_tmpdirs.py | 3 +- nibabel/tests/test_tripwire.py | 4 +- nibabel/tests/test_viewers.py | 8 +-- nibabel/tests/test_volumeutils.py | 66 +++++++++---------- nibabel/tests/test_wrapstruct.py | 17 ++--- nibabel/tmpdirs.py | 2 +- nibabel/viewers.py | 3 +- nibabel/volumeutils.py | 10 +-- nibabel/wrapstruct.py | 2 +- 171 files changed, 874 insertions(+), 971 deletions(-) diff --git a/nibabel/__init__.py b/nibabel/__init__.py index a816937dd2..4311e3d7bf 100644 --- a/nibabel/__init__.py +++ b/nibabel/__init__.py @@ -9,8 +9,8 @@ import os -from .pkg_info import __version__ from .info import long_description as __doc__ +from .pkg_info import __version__ __doc__ += """ Quickstart @@ -39,39 +39,37 @@ # module imports from . import analyze as ana -from . import spm99analyze as spm99 -from . import spm2analyze as spm2 +from . import ecat, mriutils from . import nifti1 as ni1 -from . import ecat -from . import mriutils -from . import streamlines -from . import viewers +from . import spm2analyze as spm2 +from . import spm99analyze as spm99 +from . 
import streamlines, viewers # isort: split # object imports +from .analyze import AnalyzeHeader, AnalyzeImage +from .arrayproxy import is_proxy +from .cifti2 import Cifti2Header, Cifti2Image from .fileholders import FileHolder, FileHolderError +from .freesurfer import MGHImage +from .funcs import as_closest_canonical, concat_images, four_to_three, squeeze_image +from .gifti import GiftiImage +from .imageclasses import all_image_classes from .loadsave import load, save -from .arrayproxy import is_proxy -from .analyze import AnalyzeHeader, AnalyzeImage -from .spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage -from .spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage -from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair -from .nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair from .minc1 import Minc1Image from .minc2 import Minc2Image -from .cifti2 import Cifti2Header, Cifti2Image -from .gifti import GiftiImage -from .freesurfer import MGHImage -from .funcs import squeeze_image, concat_images, four_to_three, as_closest_canonical +from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair +from .nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair from .orientations import ( - io_orientation, - flip_axis, OrientationError, - apply_orientation, aff2axcodes, + apply_orientation, + flip_axis, + io_orientation, ) -from .imageclasses import all_image_classes +from .spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage +from .spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage # isort: split diff --git a/nibabel/affines.py b/nibabel/affines.py index c8bc586aa7..59b52e768e 100644 --- a/nibabel/affines.py +++ b/nibabel/affines.py @@ -2,10 +2,10 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Utility routines for working with points and affine transforms """ -import numpy as np - from functools import reduce +import numpy as np + class AffineError(ValueError): """Errors in calculating or using affines""" diff --git a/nibabel/analyze.py b/nibabel/analyze.py index 648c75d68a..4a76350d59 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -84,21 +84,21 @@ import numpy as np +from .arrayproxy import ArrayProxy +from .arraywriters import ArrayWriter, WriterError, get_slope_inter, make_array_writer +from .batteryrunners import Report +from .fileholders import copy_file_map +from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage from .volumeutils import ( - native_code, - swapped_code, - make_dt_codes, - shape_zoom_affine, + apply_read_scaling, array_from_file, + make_dt_codes, + native_code, seek_tell, - apply_read_scaling, + shape_zoom_affine, + swapped_code, ) -from .arraywriters import make_array_writer, get_slope_inter, WriterError, ArrayWriter from .wrapstruct import LabeledWrapStruct -from .spatialimages import HeaderDataError, HeaderTypeError, SpatialImage -from .fileholders import copy_file_map -from .batteryrunners import Report -from .arrayproxy import ArrayProxy # Sub-parts of standard analyze header from # Mayo dbh.h file diff --git a/nibabel/arrayproxy.py b/nibabel/arrayproxy.py index bb97b8efb0..5a2bae02c0 100644 --- a/nibabel/arrayproxy.py +++ b/nibabel/arrayproxy.py @@ -25,16 +25,15 @@ See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks. """ +import warnings from contextlib import contextmanager from threading import RLock -import warnings import numpy as np -from .volumeutils import array_from_file, apply_read_scaling -from .fileslice import fileslice, canonical_slicers from . 
import openers - +from .fileslice import canonical_slicers, fileslice +from .volumeutils import apply_read_scaling, array_from_file """This flag controls whether a new file handle is created every time an image is accessed through an ``ArrayProxy``, or a single file handle is created and @@ -413,8 +412,8 @@ def reshape(self, shape): size = np.prod(self._shape) # Calculate new shape if not fully specified - from operator import mul from functools import reduce + from operator import mul n_unknowns = len([e for e in shape if e == -1]) if n_unknowns > 1: diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 1a80bcfa98..59e55b314c 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -32,15 +32,15 @@ def __init__(self, array, out_dtype=None) import numpy as np from .casting import ( - int_to_float, as_int, - int_abs, - type_info, - floor_exact, best_float, + floor_exact, + int_abs, + int_to_float, shared_range, + type_info, ) -from .volumeutils import finite_range, array_to_file +from .volumeutils import array_to_file, finite_range class WriterError(Exception): diff --git a/nibabel/benchmarks/bench_array_to_file.py b/nibabel/benchmarks/bench_array_to_file.py index 7b59fbcaec..c2bab7e95e 100644 --- a/nibabel/benchmarks/bench_array_to_file.py +++ b/nibabel/benchmarks/bench_array_to_file.py @@ -14,13 +14,12 @@ from io import BytesIO # NOQA import numpy as np - -from .butils import print_git_title - from numpy.testing import measure from nibabel.volumeutils import array_to_file # NOQA +from .butils import print_git_title + def bench_array_to_file(): rng = np.random.RandomState(20111001) diff --git a/nibabel/benchmarks/bench_arrayproxy_slicing.py b/nibabel/benchmarks/bench_arrayproxy_slicing.py index 71ea801756..d313a7db5e 100644 --- a/nibabel/benchmarks/bench_arrayproxy_slicing.py +++ b/nibabel/benchmarks/bench_arrayproxy_slicing.py @@ -10,18 +10,19 @@ pytest -c /benchmarks/pytest.benchmark.ini /benchmarks/bench_arrayproxy_slicing.py """ -from timeit import timeit import gc import itertools as it -import numpy as np +from timeit import timeit from unittest import mock +import numpy as np + import nibabel as nib -from nibabel.tmpdirs import InTemporaryDirectory from nibabel.openers import HAVE_INDEXED_GZIP +from nibabel.tmpdirs import InTemporaryDirectory -from .butils import print_git_title from ..rstutils import rst_table +from .butils import print_git_title # if memory_profiler is installed, we get memory usage results try: diff --git a/nibabel/benchmarks/bench_fileslice.py b/nibabel/benchmarks/bench_fileslice.py index 59b6aa9314..cc3d837c2d 100644 --- a/nibabel/benchmarks/bench_fileslice.py +++ b/nibabel/benchmarks/bench_fileslice.py @@ -9,16 +9,16 @@ """ import sys +from io import BytesIO from timeit import timeit import numpy as np -from io import BytesIO -from ..openers import ImageOpener from ..fileslice import fileslice +from ..openers import ImageOpener +from ..optpkg import optional_package from ..rstutils import rst_table from ..tmpdirs import InTemporaryDirectory -from ..optpkg import optional_package SHAPE = (64, 64, 32, 100) ROW_NAMES = [f'axis {i}, len {dim}' for i, dim in enumerate(SHAPE)] diff --git a/nibabel/benchmarks/bench_finite_range.py b/nibabel/benchmarks/bench_finite_range.py index 0a6ff576fa..edd839ce61 100644 --- a/nibabel/benchmarks/bench_finite_range.py +++ b/nibabel/benchmarks/bench_finite_range.py @@ -13,14 +13,12 @@ import sys import numpy as np - - -from .butils import print_git_title - from numpy.testing import measure from 
nibabel.volumeutils import finite_range # NOQA +from .butils import print_git_title + def bench_finite_range(): rng = np.random.RandomState(20111001) diff --git a/nibabel/benchmarks/bench_load_save.py b/nibabel/benchmarks/bench_load_save.py index d9c6461959..007753ce51 100644 --- a/nibabel/benchmarks/bench_load_save.py +++ b/nibabel/benchmarks/bench_load_save.py @@ -11,17 +11,14 @@ """ import sys +from io import BytesIO import numpy as np - -from io import BytesIO +from numpy.testing import measure from .. import Nifti1Image - from .butils import print_git_title -from numpy.testing import measure - def bench_load_save(): rng = np.random.RandomState(20111001) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 4a330893b3..0559671217 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -28,15 +28,15 @@ programs (e.g., example4d+orig'[0]'). """ -from copy import deepcopy import os import re +from copy import deepcopy import numpy as np from .arrayproxy import ArrayProxy from .fileslice import strided_scalar -from .spatialimages import SpatialImage, SpatialHeader, HeaderDataError, ImageDataError +from .spatialimages import HeaderDataError, ImageDataError, SpatialHeader, SpatialImage from .volumeutils import Recoder # used for doc-tests diff --git a/nibabel/casting.py b/nibabel/casting.py index c2bceeaf0f..ce58915fe9 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -6,7 +6,7 @@ import warnings from numbers import Integral -from platform import processor, machine +from platform import machine, processor import numpy as np diff --git a/nibabel/cifti2/__init__.py b/nibabel/cifti2/__init__.py index e7c999b6cd..9c6805f818 100644 --- a/nibabel/cifti2/__init__.py +++ b/nibabel/cifti2/__init__.py @@ -17,28 +17,28 @@ cifti2_axes """ -from .parse_cifti2 import Cifti2Extension from .cifti2 import ( - Cifti2MetaData, + CIFTI_BRAIN_STRUCTURES, + CIFTI_MODEL_TYPES, + Cifti2BrainModel, Cifti2Header, + Cifti2HeaderError, Cifti2Image, Cifti2Label, Cifti2LabelTable, - Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, - Cifti2BrainModel, Cifti2Matrix, Cifti2MatrixIndicesMap, + Cifti2MetaData, Cifti2NamedMap, Cifti2Parcel, Cifti2Surface, Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2VertexIndices, Cifti2Vertices, Cifti2Volume, - CIFTI_BRAIN_STRUCTURES, - Cifti2HeaderError, - CIFTI_MODEL_TYPES, + Cifti2VoxelIndicesIJK, load, save, ) -from .cifti2_axes import Axis, BrainModelAxis, ParcelsAxis, SeriesAxis, LabelAxis, ScalarAxis +from .cifti2_axes import Axis, BrainModelAxis, LabelAxis, ParcelsAxis, ScalarAxis, SeriesAxis +from .parse_cifti2 import Cifti2Extension diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 4b6fd3df25..497b796dca 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -17,19 +17,19 @@ http://www.nitrc.org/projects/cifti """ import re -from collections.abc import MutableSequence, MutableMapping, Iterable from collections import OrderedDict +from collections.abc import Iterable, MutableMapping, MutableSequence from warnings import warn import numpy as np from .. 
import xmlutils as xml -from ..filebasedimages import FileBasedHeader, SerializableImage -from ..dataobj_images import DataobjImage -from ..nifti1 import Nifti1Extensions -from ..nifti2 import Nifti2Image, Nifti2Header from ..arrayproxy import reshape_dataobj from ..caret import CaretMetaData +from ..dataobj_images import DataobjImage +from ..filebasedimages import FileBasedHeader, SerializableImage +from ..nifti1 import Nifti1Extensions +from ..nifti2 import Nifti2Header, Nifti2Image from ..volumeutils import make_dt_codes @@ -1473,7 +1473,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): img : Cifti2Image Returns a Cifti2Image """ - from .parse_cifti2 import _Cifti2AsNiftiImage, Cifti2Extension + from .parse_cifti2 import Cifti2Extension, _Cifti2AsNiftiImage nifti_img = _Cifti2AsNiftiImage.from_file_map( file_map, mmap=mmap, keep_file_open=keep_file_open diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 31e4ab55ab..3d88fca1e3 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -118,10 +118,12 @@ ... bm_cortex))) """ +import abc +from operator import xor + import numpy as np + from . import cifti2 -from operator import xor -import abc def from_index_mapping(mim): diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 36db0fa290..550d8e30bd 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -7,37 +7,37 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import numpy as np from io import BytesIO +import numpy as np from packaging.version import Version, parse +from .. import xmlutils as xml +from ..batteryrunners import Report +from ..nifti1 import Nifti1Extension, extension_codes, intent_codes +from ..nifti2 import Nifti2Header, Nifti2Image +from ..spatialimages import HeaderDataError from .cifti2 import ( - Cifti2MetaData, + CIFTI_BRAIN_STRUCTURES, + CIFTI_MODEL_TYPES, + Cifti2BrainModel, Cifti2Header, + Cifti2HeaderError, Cifti2Label, Cifti2LabelTable, - Cifti2VertexIndices, - Cifti2VoxelIndicesIJK, - Cifti2BrainModel, Cifti2Matrix, Cifti2MatrixIndicesMap, + Cifti2MetaData, Cifti2NamedMap, Cifti2Parcel, Cifti2Surface, Cifti2TransformationMatrixVoxelIndicesIJKtoXYZ, + Cifti2VertexIndices, Cifti2Vertices, Cifti2Volume, - CIFTI_BRAIN_STRUCTURES, - CIFTI_MODEL_TYPES, + Cifti2VoxelIndicesIJK, _underscore, - Cifti2HeaderError, ) -from .. 
import xmlutils as xml -from ..spatialimages import HeaderDataError -from ..batteryrunners import Report -from ..nifti1 import Nifti1Extension, extension_codes, intent_codes -from ..nifti2 import Nifti2Header, Nifti2Image class Cifti2Extension(Nifti1Extension): diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index ecb6be272b..b8940433af 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -1,9 +1,11 @@ +from copy import deepcopy + import numpy as np import pytest -from .test_cifti2io_axes import check_rewrite + import nibabel.cifti2.cifti2_axes as axes -from copy import deepcopy +from .test_cifti2io_axes import check_rewrite rand_affine = np.random.randn(4, 4) vol_shape = (5, 10, 3) diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index be10f8b0e0..98d97e34e2 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -4,15 +4,13 @@ from xml.etree import ElementTree import numpy as np +import pytest from nibabel import cifti2 as ci +from nibabel.cifti2.cifti2 import Cifti2HeaderError, _float_01, _value_if_klass from nibabel.nifti2 import Nifti2Header -from nibabel.cifti2.cifti2 import _float_01, _value_if_klass, Cifti2HeaderError - -import pytest - from nibabel.tests.test_dataobj_images import TestDataobjAPI as _TDA -from nibabel.tests.test_image_api import SerializeMixin, DtypeOverrideMixin +from nibabel.tests.test_image_api import DtypeOverrideMixin, SerializeMixin def compare_xml_leaf(str1, str2): diff --git a/nibabel/cifti2/tests/test_cifti2io_axes.py b/nibabel/cifti2/tests/test_cifti2io_axes.py index 756b0f6c9f..2f5e781e44 100644 --- a/nibabel/cifti2/tests/test_cifti2io_axes.py +++ b/nibabel/cifti2/tests/test_cifti2io_axes.py @@ -1,10 +1,12 @@ -from nibabel.cifti2 import cifti2_axes, cifti2 -from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data -import nibabel as nib import os -import numpy as np import tempfile +import numpy as np + +import nibabel as nib +from nibabel.cifti2 import cifti2, cifti2_axes +from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data + test_directory = os.path.join(get_nibabel_data(), 'nitest-cifti2') hcp_labels = [ diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 3497ec413f..7315a0d1f2 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -7,21 +7,20 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from os.path import join as pjoin, dirname import io +from os.path import dirname +from os.path import join as pjoin +import pytest +from numpy.testing import assert_array_almost_equal from packaging.version import Version import nibabel as nib from nibabel import cifti2 as ci from nibabel.cifti2.parse_cifti2 import _Cifti2AsNiftiHeader - -from nibabel.tmpdirs import InTemporaryDirectory -from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data from nibabel.tests import test_nifti2 as tn2 - -from numpy.testing import assert_array_almost_equal -import pytest +from nibabel.tests.nibabel_data import get_nibabel_data, needs_nibabel_data +from nibabel.tmpdirs import InTemporaryDirectory NIBABEL_TEST_DATA = pjoin(dirname(nib.__file__), 'tests', 'data') NIFTI2_DATA = pjoin(NIBABEL_TEST_DATA, 'example_nifti2.nii.gz') diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 
15c6c110b9..84f1376f1f 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -7,17 +7,17 @@ scratch. """ import numpy as np +import pytest import nibabel as nib from nibabel import cifti2 as ci from nibabel.tmpdirs import InTemporaryDirectory -import pytest from ...testing import ( + assert_array_equal, clear_and_catch_warnings, error_warnings, suppress_warnings, - assert_array_equal, ) affine = [[-1.5, 0, 0, 90], [0, 1.5, 0, -85], [0, 0, 1.5, -71], [0, 0, 0, 1.0]] diff --git a/nibabel/cmdline/convert.py b/nibabel/cmdline/convert.py index ce80d8c709..c0bc8f212e 100644 --- a/nibabel/cmdline/convert.py +++ b/nibabel/cmdline/convert.py @@ -12,8 +12,8 @@ """ import argparse -from pathlib import Path import warnings +from pathlib import Path import nibabel as nib diff --git a/nibabel/cmdline/dicomfs.py b/nibabel/cmdline/dicomfs.py index efba4809c7..8de1438544 100644 --- a/nibabel/cmdline/dicomfs.py +++ b/nibabel/cmdline/dicomfs.py @@ -9,13 +9,13 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # Copyright (C) 2011 Christian Haselgrove -import sys -import os -import stat import errno -import time import locale import logging +import os +import stat +import sys +import time class dummy_fuse: @@ -32,11 +32,11 @@ class dummy_fuse: except ImportError: fuse = dummy_fuse +from optparse import Option, OptionParser + import nibabel as nib import nibabel.dft as dft -from optparse import OptionParser, Option - encoding = locale.getdefaultlocale()[1] fuse.fuse_python_api = (0, 2) diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 5ec5f425ee..5ca691ad64 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -16,17 +16,17 @@ with native endianness used in data files. 
""" +import hashlib +import os import re import sys from collections import OrderedDict -from optparse import OptionParser, Option +from optparse import Option, OptionParser import numpy as np import nibabel as nib import nibabel.cmdline.utils -import hashlib -import os def get_opt_parser(): diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index 1bb9396bb3..c78c0910bf 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -12,13 +12,13 @@ """ import sys -from optparse import OptionParser, Option +from optparse import Option, OptionParser import numpy as np import nibabel as nib import nibabel.cmdline.utils -from nibabel.cmdline.utils import _err, verbose, table2string, ap, safe_get +from nibabel.cmdline.utils import _err, ap, safe_get, table2string, verbose __copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' 'and NiBabel contributors' __license__ = 'MIT' diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index f0d5b207f7..d6d3d6afe7 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -1,21 +1,23 @@ """Code for PAR/REC to NIfTI converter command """ -from optparse import OptionParser, Option +import csv +import os +import sys +from optparse import Option, OptionParser + import numpy as np import numpy.linalg as npl -import sys -import os -import csv + import nibabel -import nibabel.parrec as pr -from nibabel.parrec import one_line -from nibabel.mriutils import calculate_dwell_time, MRIError import nibabel.nifti1 as nifti1 +import nibabel.parrec as pr +from nibabel.affines import apply_affine, from_matvec, to_matvec from nibabel.filename_parser import splitext_addext +from nibabel.mriutils import MRIError, calculate_dwell_time +from nibabel.orientations import apply_orientation, inv_ornt_aff, io_orientation +from nibabel.parrec import one_line from nibabel.volumeutils import fname_ext_ul_case -from nibabel.orientations import io_orientation, inv_ornt_aff, apply_orientation -from nibabel.affines import apply_affine, from_matvec, to_matvec def get_opt_parser(): diff --git a/nibabel/cmdline/roi.py b/nibabel/cmdline/roi.py index 690bb0b646..36f00a033a 100644 --- a/nibabel/cmdline/roi.py +++ b/nibabel/cmdline/roi.py @@ -1,6 +1,7 @@ -import sys -import os import argparse +import os +import sys + import nibabel as nb diff --git a/nibabel/cmdline/stats.py b/nibabel/cmdline/stats.py index 5c5d58f93c..0a6fc14aeb 100644 --- a/nibabel/cmdline/stats.py +++ b/nibabel/cmdline/stats.py @@ -12,8 +12,9 @@ """ import argparse + +from nibabel.imagestats import count_nonzero_voxels, mask_volume from nibabel.loadsave import load -from nibabel.imagestats import mask_volume, count_nonzero_voxels def _get_parser(): diff --git a/nibabel/cmdline/tck2trk.py b/nibabel/cmdline/tck2trk.py index f50801c714..d5d29ba430 100644 --- a/nibabel/cmdline/tck2trk.py +++ b/nibabel/cmdline/tck2trk.py @@ -1,13 +1,12 @@ """ Convert tractograms (TCK -> TRK). 
""" -import os import argparse +import os import nibabel as nib - -from nibabel.streamlines import Field from nibabel.orientations import aff2axcodes +from nibabel.streamlines import Field def parse_args(): diff --git a/nibabel/cmdline/tests/test_conform.py b/nibabel/cmdline/tests/test_conform.py index 8e203b68f9..524e81fc79 100644 --- a/nibabel/cmdline/tests/test_conform.py +++ b/nibabel/cmdline/tests/test_conform.py @@ -13,9 +13,9 @@ import pytest import nibabel as nib -from nibabel.testing import test_data from nibabel.cmdline.conform import main from nibabel.optpkg import optional_package +from nibabel.testing import test_data _, have_scipy, _ = optional_package('scipy.ndimage') needs_scipy = unittest.skipUnless(have_scipy, 'These tests need scipy') diff --git a/nibabel/cmdline/tests/test_convert.py b/nibabel/cmdline/tests/test_convert.py index 00f00602af..411726a9ea 100644 --- a/nibabel/cmdline/tests/test_convert.py +++ b/nibabel/cmdline/tests/test_convert.py @@ -8,13 +8,12 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -import pytest - import numpy as np +import pytest import nibabel as nib -from nibabel.testing import test_data from nibabel.cmdline import convert +from nibabel.testing import test_data def test_convert_noop(tmp_path): diff --git a/nibabel/cmdline/tests/test_parrec2nii.py b/nibabel/cmdline/tests/test_parrec2nii.py index 2100f3f478..017df9813a 100644 --- a/nibabel/cmdline/tests/test_parrec2nii.py +++ b/nibabel/cmdline/tests/test_parrec2nii.py @@ -1,20 +1,17 @@ """Tests for the parrec2nii exe code """ -from os.path import join, isfile, basename +from os.path import basename, isfile, join +from unittest.mock import MagicMock, Mock, patch import numpy from numpy import array as npa +from numpy.testing import assert_almost_equal, assert_array_equal import nibabel from nibabel.cmdline import parrec2nii - -from unittest.mock import Mock, MagicMock, patch -from numpy.testing import assert_almost_equal, assert_array_equal - from nibabel.tests.test_parrec import EG_PAR, VARY_PAR from nibabel.tmpdirs import InTemporaryDirectory - AN_OLD_AFFINE = numpy.array( [ [-3.64994708, 0.0, 1.83564171, 123.66276611], diff --git a/nibabel/cmdline/tests/test_roi.py b/nibabel/cmdline/tests/test_roi.py index 6a1229f72e..ea3852b4da 100644 --- a/nibabel/cmdline/tests/test_roi.py +++ b/nibabel/cmdline/tests/test_roi.py @@ -1,13 +1,14 @@ import os -import numpy as np -import nibabel as nb -from nibabel.cmdline.roi import lossless_slice, parse_slice, main -from nibabel.testing import data_path - import unittest from unittest import mock + +import numpy as np import pytest +import nibabel as nb +from nibabel.cmdline.roi import lossless_slice, main, parse_slice +from nibabel.testing import data_path + def test_parse_slice(): assert parse_slice(None) == slice(None) diff --git a/nibabel/cmdline/tests/test_stats.py b/nibabel/cmdline/tests/test_stats.py index ced289cebb..576a408bce 100644 --- a/nibabel/cmdline/tests/test_stats.py +++ b/nibabel/cmdline/tests/test_stats.py @@ -8,13 +8,14 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from io import StringIO import sys +from io import StringIO + import numpy as np -from nibabel.loadsave import save -from nibabel.cmdline.stats import main from nibabel import Nifti1Image +from nibabel.cmdline.stats import main +from nibabel.loadsave import save def test_volume(tmpdir, capsys): diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 
58cab3ba42..5f531769a9 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,16 +5,17 @@ Test running scripts """ +from collections import OrderedDict +from io import StringIO +from os.path import join as pjoin + +import numpy as np import pytest import nibabel as nib -import numpy as np -from nibabel.cmdline.utils import * from nibabel.cmdline.diff import * -from os.path import join as pjoin +from nibabel.cmdline.utils import * from nibabel.testing import data_path -from collections import OrderedDict -from io import StringIO def test_table2string(): diff --git a/nibabel/cmdline/trk2tck.py b/nibabel/cmdline/trk2tck.py index cc364af06d..6bfc2c8c3a 100644 --- a/nibabel/cmdline/trk2tck.py +++ b/nibabel/cmdline/trk2tck.py @@ -2,8 +2,8 @@ Convert tractograms (TRK -> TCK). """ -import os import argparse +import os import nibabel as nib diff --git a/nibabel/data.py b/nibabel/data.py index b29476a2d2..eaa6e77acf 100644 --- a/nibabel/data.py +++ b/nibabel/data.py @@ -3,15 +3,15 @@ """ Utilities to find files from NIPY data packages """ -import os -from os.path import join as pjoin +import configparser import glob +import os import sys -import configparser -from packaging.version import Version +from os.path import join as pjoin -from .environment import get_nipy_user_dir, get_nipy_system_dir +from packaging.version import Version +from .environment import get_nipy_system_dir, get_nipy_user_dir DEFAULT_INSTALL_HINT = ( 'If you have the package, have you set the ' 'path to the package correctly?' diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index f8df06157b..054bba5272 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -10,8 +10,8 @@ import numpy as np -from .filebasedimages import FileBasedImage from .deprecated import deprecate_with_version +from .filebasedimages import FileBasedImage class DataobjImage(FileBasedImage): diff --git a/nibabel/deprecator.py b/nibabel/deprecator.py index 7b4ef5221f..251e10d64c 100644 --- a/nibabel/deprecator.py +++ b/nibabel/deprecator.py @@ -2,8 +2,8 @@ """ import functools -import warnings import re +import warnings _LEADING_WHITE = re.compile(r'^(\s*)') diff --git a/nibabel/dft.py b/nibabel/dft.py index fd944a2556..c805128951 100644 --- a/nibabel/dft.py +++ b/nibabel/dft.py @@ -12,20 +12,20 @@ import contextlib -import os -from os.path import join as pjoin -import tempfile import getpass import logging -import warnings +import os import sqlite3 +import tempfile +import warnings +from io import BytesIO +from os.path import join as pjoin import numpy -from io import BytesIO +from nibabel.optpkg import optional_package from .nifti1 import Nifti1Header -from nibabel.optpkg import optional_package pydicom = optional_package('pydicom')[0] diff --git a/nibabel/ecat.py b/nibabel/ecat.py index f72a81d5a4..03d3f26a74 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -48,11 +48,11 @@ import numpy as np -from .volumeutils import native_code, swapped_code, make_dt_codes, array_from_file -from .spatialimages import SpatialImage from .arraywriters import make_array_writer -from .wrapstruct import WrapStruct from .fileslice import canonical_slicers, predict_shape, slice2outax +from .spatialimages import SpatialImage +from .volumeutils import array_from_file, make_dt_codes, native_code, swapped_code +from .wrapstruct import WrapStruct BLOCK_SIZE = 512 diff --git a/nibabel/eulerangles.py b/nibabel/eulerangles.py index bb75b54b1e..b1d187e8c1 100644 --- a/nibabel/eulerangles.py +++ 
b/nibabel/eulerangles.py @@ -84,12 +84,10 @@ """ import math - from functools import reduce import numpy as np - _FLOAT_EPS_4 = np.finfo(float).eps * 4.0 diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py index f74c7b56eb..eee822566b 100644 --- a/nibabel/filebasedimages.py +++ b/nibabel/filebasedimages.py @@ -11,8 +11,9 @@ import io from copy import deepcopy from urllib import request + from .fileholders import FileHolder -from .filename_parser import types_filenames, TypesFilenamesError, splitext_addext +from .filename_parser import TypesFilenamesError, splitext_addext, types_filenames from .openers import ImageOpener diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py index 8df199d0d2..75da3ff85f 100644 --- a/nibabel/fileslice.py +++ b/nibabel/fileslice.py @@ -2,14 +2,12 @@ """ import operator -from numbers import Integral -from mmap import mmap - from functools import reduce +from mmap import mmap +from numbers import Integral import numpy as np - # Threshold for memory gap above which we always skip, to save memory # This value came from trying various values and looking at the timing with # ``bench_fileslice`` diff --git a/nibabel/freesurfer/__init__.py b/nibabel/freesurfer/__init__.py index 83c12f8682..806d19a272 100644 --- a/nibabel/freesurfer/__init__.py +++ b/nibabel/freesurfer/__init__.py @@ -2,12 +2,12 @@ """ from .io import ( - read_geometry, - read_morph_data, - write_morph_data, read_annot, + read_geometry, read_label, - write_geometry, + read_morph_data, write_annot, + write_geometry, + write_morph_data, ) -from .mghformat import load, save, MGHImage +from .mghformat import MGHImage, load, save diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 36013c3af2..b6f003b984 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -1,14 +1,14 @@ """Read / write FreeSurfer geometry, morphometry, label, annotation formats """ -import warnings -import numpy as np import getpass import time - +import warnings from collections import OrderedDict -from ..openers import Opener +import numpy as np + +from ..openers import Opener _ANNOT_DT = '>i4' """Data type for Freesurfer `.annot` files. 
diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 45881ba313..6358a6af81 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -11,17 +11,18 @@ Author: Krish Subramaniam """ from os.path import splitext + import numpy as np -from ..affines import voxel_sizes, from_matvec -from ..volumeutils import array_to_file, array_from_file, endian_codes, Recoder +from ..affines import from_matvec, voxel_sizes +from ..arrayproxy import ArrayProxy, reshape_dataobj +from ..batteryrunners import BatteryRunner, Report from ..filebasedimages import SerializableImage -from ..filename_parser import _stringify_path -from ..spatialimages import HeaderDataError, SpatialImage from ..fileholders import FileHolder -from ..arrayproxy import ArrayProxy, reshape_dataobj +from ..filename_parser import _stringify_path from ..openers import ImageOpener -from ..batteryrunners import BatteryRunner, Report +from ..spatialimages import HeaderDataError, SpatialImage +from ..volumeutils import Recoder, array_from_file, array_to_file, endian_codes from ..wrapstruct import LabeledWrapStruct # mgh header diff --git a/nibabel/freesurfer/tests/test_io.py b/nibabel/freesurfer/tests/test_io.py index 3c47f82031..2406679d73 100644 --- a/nibabel/freesurfer/tests/test_io.py +++ b/nibabel/freesurfer/tests/test_io.py @@ -1,33 +1,32 @@ -import os -from os.path import join as pjoin, isdir import getpass -import time -import struct import hashlib +import os +import struct +import time +import unittest import warnings +from os.path import isdir +from os.path import join as pjoin -from ...tmpdirs import InTemporaryDirectory - -import unittest -import pytest import numpy as np +import pytest from numpy.testing import assert_allclose, assert_array_equal +from ...fileslice import strided_scalar +from ...testing import clear_and_catch_warnings +from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data +from ...tmpdirs import InTemporaryDirectory from .. import ( - read_geometry, - read_morph_data, read_annot, + read_geometry, read_label, + read_morph_data, + write_annot, write_geometry, write_morph_data, - write_annot, ) from ..io import _pack_rgb -from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data -from ...fileslice import strided_scalar -from ...testing import clear_and_catch_warnings - DATA_SDIR = 'fsaverage' have_freesurfer = False diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 29f1687c29..ee0ed50fec 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -8,30 +8,25 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for mghformat reading writing""" -import os import io +import os import numpy as np +import pytest +from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal -from .. import load, save -from ...openers import ImageOpener -from ..mghformat import MGHHeader, MGHError, MGHImage -from ...tmpdirs import InTemporaryDirectory +from ... import imageglobals from ...fileholders import FileHolder +from ...openers import ImageOpener from ...spatialimages import HeaderDataError -from ...volumeutils import sys_is_le -from ...wrapstruct import WrapStructError -from ... 
import imageglobals - - -import pytest - -from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal - from ...testing import data_path - from ...tests import test_spatialimages as tsi from ...tests import test_wrapstruct as tws +from ...tmpdirs import InTemporaryDirectory +from ...volumeutils import sys_is_le +from ...wrapstruct import WrapStructError +from .. import load, save +from ..mghformat import MGHError, MGHHeader, MGHImage MGZ_FNAME = os.path.join(data_path, 'test.mgz') diff --git a/nibabel/funcs.py b/nibabel/funcs.py index 02b9e3ecd7..f83ed68709 100644 --- a/nibabel/funcs.py +++ b/nibabel/funcs.py @@ -9,8 +9,8 @@ """Processor functions for images""" import numpy as np -from .orientations import io_orientation, OrientationError from .loadsave import load +from .orientations import OrientationError, io_orientation def squeeze_image(img): diff --git a/nibabel/gifti/__init__.py b/nibabel/gifti/__init__.py index 2faaf5ab57..824c968afc 100644 --- a/nibabel/gifti/__init__.py +++ b/nibabel/gifti/__init__.py @@ -18,11 +18,11 @@ """ from .gifti import ( - GiftiMetaData, - GiftiNVPairs, - GiftiLabelTable, - GiftiLabel, GiftiCoordSystem, GiftiDataArray, GiftiImage, + GiftiLabel, + GiftiLabelTable, + GiftiMetaData, + GiftiNVPairs, ) diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 8f5efa8ad8..7313f984f2 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -12,17 +12,18 @@ from http://www.nitrc.org/projects/gifti/ """ -import sys -import numpy as np import base64 +import sys import warnings +import numpy as np + from .. import xmlutils as xml -from ..filebasedimages import SerializableImage -from ..nifti1 import data_type_codes, xform_codes, intent_codes from ..caret import CaretMetaData -from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes, KIND2FMT from ..deprecated import deprecate_with_version +from ..filebasedimages import SerializableImage +from ..nifti1 import data_type_codes, intent_codes, xform_codes +from .util import KIND2FMT, array_index_order_codes, gifti_encoding_codes, gifti_endian_codes class _GiftiMDList(list): diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 5de4c2e22c..88c63b5600 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -8,26 +8,26 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import base64 +import os.path as op import sys import warnings import zlib -import os.path as op from io import StringIO from xml.parsers.expat import ExpatError import numpy as np +from ..nifti1 import data_type_codes, intent_codes, xform_codes +from ..xmlutils import XmlParser from .gifti import ( - GiftiMetaData, + GiftiCoordSystem, + GiftiDataArray, GiftiImage, GiftiLabel, GiftiLabelTable, - GiftiDataArray, - GiftiCoordSystem, + GiftiMetaData, ) from .util import array_index_order_codes, gifti_encoding_codes, gifti_endian_codes -from ..nifti1 import data_type_codes, xform_codes, intent_codes -from ..xmlutils import XmlParser class GiftiParseError(ExpatError): diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 73ae9ed95d..8858de589f 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -1,29 +1,29 @@ """Testing gifti objects """ -import warnings +import itertools import sys +import warnings from io import BytesIO import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, 
assert_array_equal from nibabel.tmpdirs import InTemporaryDirectory from ... import load +from ...fileholders import FileHolder +from ...nifti1 import data_type_codes +from ...testing import test_data from .. import ( - GiftiImage, + GiftiCoordSystem, GiftiDataArray, + GiftiImage, GiftiLabel, GiftiLabelTable, GiftiMetaData, GiftiNVPairs, - GiftiCoordSystem, ) -from ...nifti1 import data_type_codes -from ...fileholders import FileHolder - -from numpy.testing import assert_array_almost_equal, assert_array_equal -import pytest -from ...testing import test_data from .test_parse_gifti_fast import ( DATA_FILE1, DATA_FILE2, @@ -32,7 +32,6 @@ DATA_FILE5, DATA_FILE6, ) -import itertools def test_agg_data(): diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index d1f61d3c22..c7a958a5f8 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -7,26 +7,24 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from os.path import join as pjoin, dirname, basename +import shutil import sys import warnings -import shutil +from os.path import basename, dirname +from os.path import join as pjoin from unittest import mock import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal -from .. import gifti as gi -from ..util import gifti_endian_codes -from ..parse_gifti_fast import GiftiParseError, GiftiImageParser from ...loadsave import load, save from ...nifti1 import xform_codes -from ...tmpdirs import InTemporaryDirectory - -from numpy.testing import assert_array_almost_equal - -import pytest from ...testing import clear_and_catch_warnings, suppress_warnings - +from ...tmpdirs import InTemporaryDirectory +from .. import gifti as gi +from ..parse_gifti_fast import GiftiImageParser, GiftiParseError +from ..util import gifti_endian_codes IO_DATA_PATH = pjoin(dirname(__file__), 'data') diff --git a/nibabel/imageclasses.py b/nibabel/imageclasses.py index 614692daac..ac27a6ecac 100644 --- a/nibabel/imageclasses.py +++ b/nibabel/imageclasses.py @@ -15,11 +15,11 @@ from .gifti import GiftiImage from .minc1 import Minc1Image from .minc2 import Minc2Image -from .nifti1 import Nifti1Pair, Nifti1Image -from .nifti2 import Nifti2Pair, Nifti2Image +from .nifti1 import Nifti1Image, Nifti1Pair +from .nifti2 import Nifti2Image, Nifti2Pair from .parrec import PARRECImage -from .spm99analyze import Spm99AnalyzeImage from .spm2analyze import Spm2AnalyzeImage +from .spm99analyze import Spm99AnalyzeImage # Ordered by the load/save priority. 
all_image_classes = [ diff --git a/nibabel/imagestats.py b/nibabel/imagestats.py index f507365e93..6f1b68178b 100644 --- a/nibabel/imagestats.py +++ b/nibabel/imagestats.py @@ -11,6 +11,7 @@ """ import numpy as np + from nibabel.imageclasses import spatial_axes_first diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py index 187644a8e1..f64f3e8230 100644 --- a/nibabel/loadsave.py +++ b/nibabel/loadsave.py @@ -10,14 +10,15 @@ """Utilities to load and save image objects""" import os + import numpy as np -from .filename_parser import splitext_addext, _stringify_path -from .openers import ImageOpener -from .filebasedimages import ImageFileError -from .imageclasses import all_image_classes from .arrayproxy import is_proxy from .deprecated import deprecate_with_version +from .filebasedimages import ImageFileError +from .filename_parser import _stringify_path, splitext_addext +from .imageclasses import all_image_classes +from .openers import ImageOpener _compressed_suffixes = ('.gz', '.bz2', '.zst') diff --git a/nibabel/minc1.py b/nibabel/minc1.py index 56b8747fb4..d6d2d3081b 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -13,9 +13,8 @@ import numpy as np from .externals.netcdf import netcdf_file - -from .spatialimages import SpatialHeader, SpatialImage from .fileslice import canonical_slicers +from .spatialimages import SpatialHeader, SpatialImage _dt_dict = { ('b', 'unsigned'): np.uint8, diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 275a7799c8..9638ced5ee 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -27,7 +27,7 @@ """ import numpy as np -from .minc1 import Minc1File, MincHeader, Minc1Image, MincError +from .minc1 import Minc1File, Minc1Image, MincError, MincHeader class Hdf5Bunch: diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index 10471e586a..d03845f900 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -3,11 +3,10 @@ """ Parse the "ASCCONV" meta data format found in a variety of Siemens MR files. """ -import re import ast +import re from collections import OrderedDict - ASCCONV_RE = re.compile( r'### ASCCONV BEGIN((?:\s*[^=\s]+=[^=\s]+)*) ###\n(.*?)\n### ASCCONV END ###', flags=re.M | re.S, diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index 3f5293dcc3..a3c49d7f10 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -1,5 +1,5 @@ -from os.path import join as pjoin import glob +from os.path import join as pjoin import numpy as np diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 3c7268dbe0..be070e8608 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -18,10 +18,11 @@ import numpy as np from nibabel.optpkg import optional_package + +from ..onetime import auto_attr as one_time +from ..openers import ImageOpener from . 
import csareader as csar from .dwiparams import B2q, nearest_pos_semi_def, q2bg -from ..openers import ImageOpener -from ..onetime import auto_attr as one_time pydicom = optional_package('pydicom')[0] diff --git a/nibabel/nicom/tests/__init__.py b/nibabel/nicom/tests/__init__.py index 4a7ea3b284..ec2c5b2f38 100644 --- a/nibabel/nicom/tests/__init__.py +++ b/nibabel/nicom/tests/__init__.py @@ -1,4 +1,5 @@ import unittest + from nibabel.optpkg import optional_package pydicom, have_dicom, _ = optional_package('pydicom') diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index 6415c2725e..4737d3615d 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -1,15 +1,15 @@ """Testing Siemens "ASCCONV" parser """ -from os.path import join as pjoin, dirname from collections import OrderedDict +from os.path import dirname +from os.path import join as pjoin import numpy as np +from numpy.testing import assert_array_almost_equal, assert_array_equal from .. import ascconv -from numpy.testing import assert_array_equal, assert_array_almost_equal - DATA_PATH = pjoin(dirname(__file__), 'data') ASCCONV_INPUT = pjoin(DATA_PATH, 'ascconv_sample.txt') diff --git a/nibabel/nicom/tests/test_csareader.py b/nibabel/nicom/tests/test_csareader.py index 1dfe348c4b..0fc559c7fc 100644 --- a/nibabel/nicom/tests/test_csareader.py +++ b/nibabel/nicom/tests/test_csareader.py @@ -1,18 +1,17 @@ """Testing Siemens CSA header reader """ +import gzip import sys -from os.path import join as pjoin from copy import deepcopy -import gzip +from os.path import join as pjoin import numpy as np +import pytest from .. import csareader as csa from .. import dwiparams as dwp - -import pytest -from . import pydicom, dicom_test -from .test_dicomwrappers import IO_DATA_PATH, DATA +from . import dicom_test, pydicom +from .test_dicomwrappers import DATA, IO_DATA_PATH CSA2_B0 = open(pjoin(IO_DATA_PATH, 'csa2_b0.bin'), 'rb').read() CSA2_B1000 = open(pjoin(IO_DATA_PATH, 'csa2_b1000.bin'), 'rb').read() diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py index dba29b6503..b7a60dfc3b 100644 --- a/nibabel/nicom/tests/test_dicomreaders.py +++ b/nibabel/nicom/tests/test_dicomreaders.py @@ -4,13 +4,13 @@ from os.path import join as pjoin import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal from nibabel.optpkg import optional_package -from .. import dicomreaders as didr -from .test_dicomwrappers import EXPECTED_AFFINE, EXPECTED_PARAMS, IO_DATA_PATH, DATA -import pytest -from numpy.testing import assert_array_equal, assert_array_almost_equal +from .. import dicomreaders as didr +from .test_dicomwrappers import DATA, EXPECTED_AFFINE, EXPECTED_PARAMS, IO_DATA_PATH pydicom, _, setup_module = optional_package('pydicom') diff --git a/nibabel/nicom/tests/test_dicomwrappers.py b/nibabel/nicom/tests/test_dicomwrappers.py index 3dd1665c3f..3efa7f3aab 100755 --- a/nibabel/nicom/tests/test_dicomwrappers.py +++ b/nibabel/nicom/tests/test_dicomwrappers.py @@ -1,24 +1,23 @@ """Testing DICOM wrappers """ -from os.path import join as pjoin, dirname import gzip -from hashlib import sha1 -from decimal import Decimal from copy import copy +from decimal import Decimal +from hashlib import sha1 +from os.path import dirname +from os.path import join as pjoin +from unittest import TestCase import numpy as np - -from . import pydicom, have_dicom, dicom_test -from .. import dicomwrappers as didw -from .. 
import dicomreaders as didr -from ...volumeutils import endian_codes - import pytest -from unittest import TestCase +from numpy.testing import assert_array_almost_equal, assert_array_equal -from numpy.testing import assert_array_equal, assert_array_almost_equal from ...tests.nibabel_data import get_nibabel_data, needs_nibabel_data +from ...volumeutils import endian_codes +from .. import dicomreaders as didr +from .. import dicomwrappers as didw +from . import dicom_test, have_dicom, pydicom IO_DATA_PATH = pjoin(dirname(__file__), 'data') DATA_FILE = pjoin(IO_DATA_PATH, 'siemens_dwi_1000.dcm.gz') diff --git a/nibabel/nicom/tests/test_dwiparams.py b/nibabel/nicom/tests/test_dwiparams.py index 8a869c01db..6e98b4af61 100644 --- a/nibabel/nicom/tests/test_dwiparams.py +++ b/nibabel/nicom/tests/test_dwiparams.py @@ -2,12 +2,11 @@ """ import numpy as np - -from ..dwiparams import B2q, q2bg - import pytest +from numpy.testing import assert_array_almost_equal +from numpy.testing import assert_equal as np_assert_equal -from numpy.testing import assert_array_almost_equal, assert_equal as np_assert_equal +from ..dwiparams import B2q, q2bg def test_b2q(): diff --git a/nibabel/nicom/tests/test_structreader.py b/nibabel/nicom/tests/test_structreader.py index c7815cd6fb..2d37bbc3ed 100644 --- a/nibabel/nicom/tests/test_structreader.py +++ b/nibabel/nicom/tests/test_structreader.py @@ -1,7 +1,7 @@ """Testing Siemens CSA header reader """ -import sys import struct +import sys from ..structreader import Unpacker diff --git a/nibabel/nicom/tests/test_utils.py b/nibabel/nicom/tests/test_utils.py index edd20f9973..37dbcd7d19 100644 --- a/nibabel/nicom/tests/test_utils.py +++ b/nibabel/nicom/tests/test_utils.py @@ -3,8 +3,9 @@ import re from nibabel.optpkg import optional_package -from .test_dicomwrappers import DATA, DATA_PHILIPS + from ..utils import find_private_section +from .test_dicomwrappers import DATA, DATA_PHILIPS pydicom, _, setup_module = optional_package('pydicom') diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 625fe6baa9..0d28298313 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -17,16 +17,16 @@ import numpy.linalg as npl from numpy.compat.py3k import asstr +from . import analyze # module import from .arrayproxy import get_obj_dtype -from .optpkg import optional_package +from .batteryrunners import Report +from .casting import have_binary128 from .filebasedimages import SerializableImage -from .volumeutils import Recoder, make_dt_codes, endian_codes +from .optpkg import optional_package +from .quaternions import fillpositive, mat2quat, quat2mat from .spatialimages import HeaderDataError, ImageFileError -from .batteryrunners import Report -from .quaternions import fillpositive, quat2mat, mat2quat -from . 
import analyze # module import from .spm99analyze import SpmAnalyzeHeader -from .casting import have_binary128 +from .volumeutils import Recoder, endian_codes, make_dt_codes pdcm, have_dicom, _ = optional_package('pydicom') diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 9e8e597772..193e458c6b 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -17,8 +17,8 @@ from .analyze import AnalyzeHeader from .batteryrunners import Report +from .nifti1 import Nifti1Header, Nifti1Image, Nifti1Pair from .spatialimages import HeaderDataError, ImageFileError -from .nifti1 import Nifti1Header, Nifti1Pair, Nifti1Image r""" Header struct from : https://www.nitrc.org/forum/message.php?msg_id=3738 diff --git a/nibabel/openers.py b/nibabel/openers.py index 6338711cd7..4a1b911c95 100644 --- a/nibabel/openers.py +++ b/nibabel/openers.py @@ -9,10 +9,11 @@ """Context manager openers for various fileobject types """ -from bz2 import BZ2File import gzip import warnings +from bz2 import BZ2File from os.path import splitext + from packaging.version import Version from nibabel.optpkg import optional_package diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index 090a73c366..c91ad0f1e8 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -1,5 +1,6 @@ """Routines to support optional packages""" from packaging.version import Version + from .tripwire import TripWire diff --git a/nibabel/parrec.py b/nibabel/parrec.py index c7d7a55617..04184117dc 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -122,21 +122,22 @@ to a CSV file by adding the option "--volume-info". """ +import re import warnings -import numpy as np +from collections import OrderedDict from copy import deepcopy -import re from io import StringIO from locale import getpreferredencoding -from collections import OrderedDict -from .spatialimages import SpatialHeader, SpatialImage +import numpy as np + +from .affines import apply_affine, dot_reduce, from_matvec from .eulerangles import euler2mat -from .volumeutils import Recoder, array_from_file -from .affines import from_matvec, dot_reduce, apply_affine -from .nifti1 import unit_codes from .fileslice import fileslice, strided_scalar +from .nifti1 import unit_codes from .openers import ImageOpener +from .spatialimages import SpatialHeader, SpatialImage +from .volumeutils import Recoder, array_from_file # PSL to RAS affine PSL_TO_RAS = np.array( diff --git a/nibabel/pkg_info.py b/nibabel/pkg_info.py index 4d0257f4d6..010e4107ac 100644 --- a/nibabel/pkg_info.py +++ b/nibabel/pkg_info.py @@ -1,5 +1,7 @@ import sys + from packaging.version import Version + from . 
import _version __version__ = _version.get_versions()['version'] diff --git a/nibabel/processing.py b/nibabel/processing.py index 336e9b40f1..669b416fb6 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -22,11 +22,11 @@ spnd, _, _ = optional_package('scipy.ndimage') -from .affines import AffineError, to_matvec, from_matvec, append_diag, rescale_affine -from .spaces import vox2out_vox +from .affines import AffineError, append_diag, from_matvec, rescale_affine, to_matvec +from .imageclasses import spatial_axes_first from .nifti1 import Nifti1Image from .orientations import axcodes2ornt, io_orientation, ornt_transform -from .imageclasses import spatial_axes_first +from .spaces import vox2out_vox SIGMA2FWHM = np.sqrt(8 * np.log(2)) diff --git a/nibabel/pydicom_compat.py b/nibabel/pydicom_compat.py index a58c2fdba9..9ee2553c5a 100644 --- a/nibabel/pydicom_compat.py +++ b/nibabel/pydicom_compat.py @@ -33,11 +33,10 @@ except ImportError: have_dicom = False else: # pydicom module available - from pydicom.dicomio import read_file - from pydicom.sequence import Sequence - # Values not imported by default import pydicom.values + from pydicom.dicomio import read_file + from pydicom.sequence import Sequence if have_dicom: tag_for_keyword = pydicom.datadict.tag_for_keyword diff --git a/nibabel/quaternions.py b/nibabel/quaternions.py index 7ae9a3c63a..7965029f3b 100644 --- a/nibabel/quaternions.py +++ b/nibabel/quaternions.py @@ -26,6 +26,7 @@ """ import math + import numpy as np MAX_FLOAT = np.maximum_sctype(float) diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index 7977943ffd..1adf63fe42 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -131,13 +131,13 @@ import numpy as np -from .filebasedimages import FileBasedHeader from .dataobj_images import DataobjImage from .filebasedimages import ImageFileError # noqa -from .viewers import OrthoSlicer3D -from .volumeutils import shape_zoom_affine +from .filebasedimages import FileBasedHeader from .fileslice import canonical_slicers from .orientations import apply_orientation, inv_ornt_aff +from .viewers import OrthoSlicer3D +from .volumeutils import shape_zoom_affine class HeaderDataError(Exception): diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 1f9d7a3589..cad77c4d09 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -8,15 +8,14 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Read / write access to SPM99 version of analyze image format""" import warnings -import numpy as np - from io import BytesIO -from .spatialimages import HeaderDataError, HeaderTypeError +import numpy as np -from .batteryrunners import Report from . 
import analyze # module import +from .batteryrunners import Report from .optpkg import optional_package +from .spatialimages import HeaderDataError, HeaderTypeError have_scipy = optional_package('scipy')[1] diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py index 5e8d87b671..604c32b1e5 100644 --- a/nibabel/streamlines/__init__.py +++ b/nibabel/streamlines/__init__.py @@ -3,13 +3,12 @@ import os import warnings -from .header import Field from .array_sequence import ArraySequence -from .tractogram import Tractogram, LazyTractogram +from .header import Field +from .tck import TckFile +from .tractogram import LazyTractogram, Tractogram from .tractogram_file import ExtensionWarning - from .trk import TrkFile -from .tck import TckFile # List of all supported formats FORMATS = {'.trk': TrkFile, '.tck': TckFile} diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py index bb03e6bfd0..f9e9af90e3 100644 --- a/nibabel/streamlines/array_sequence.py +++ b/nibabel/streamlines/array_sequence.py @@ -1,6 +1,6 @@ import numbers -from operator import mul from functools import reduce +from operator import mul import numpy as np diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 7fb5cde8b3..e08afb48ea 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -12,11 +12,9 @@ from nibabel.openers import Opener from .array_sequence import ArraySequence -from .tractogram_file import TractogramFile -from .tractogram_file import HeaderWarning, DataWarning -from .tractogram_file import HeaderError, DataError -from .tractogram import TractogramItem, Tractogram, LazyTractogram from .header import Field +from .tractogram import LazyTractogram, Tractogram, TractogramItem +from .tractogram_file import DataError, DataWarning, HeaderError, HeaderWarning, TractogramFile from .utils import peek_next MEGABYTE = 1024 * 1024 diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index a673c5ce9d..a3faa6a58b 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -1,16 +1,15 @@ +import itertools import os import sys -import unittest import tempfile -import itertools -import numpy as np +import unittest +import numpy as np import pytest -from ...testing import assert_arrays_equal from numpy.testing import assert_array_equal -from ..array_sequence import ArraySequence, is_array_sequence, concatenate - +from ...testing import assert_arrays_equal +from ..array_sequence import ArraySequence, concatenate, is_array_sequence SEQ_DATA = {} diff --git a/nibabel/streamlines/tests/test_streamlines.py b/nibabel/streamlines/tests/test_streamlines.py index 09824b6ee9..dfb74042a3 100644 --- a/nibabel/streamlines/tests/test_streamlines.py +++ b/nibabel/streamlines/tests/test_streamlines.py @@ -1,25 +1,22 @@ import os -import unittest import tempfile -import numpy as np +import unittest import warnings +from io import BytesIO +from os.path import join as pjoin +import numpy as np import pytest - -from os.path import join as pjoin +from numpy.compat.py3k import asbytes import nibabel as nib -from io import BytesIO +from nibabel.testing import clear_and_catch_warnings, data_path, error_warnings from nibabel.tmpdirs import InTemporaryDirectory -from numpy.compat.py3k import asbytes - -from nibabel.testing import data_path, error_warnings, clear_and_catch_warnings +from .. 
import FORMATS, trk +from ..tractogram import LazyTractogram, Tractogram +from ..tractogram_file import ExtensionWarning, TractogramFile from .test_tractogram import assert_tractogram_equal -from ..tractogram import Tractogram, LazyTractogram -from ..tractogram_file import TractogramFile, ExtensionWarning -from .. import FORMATS -from .. import trk DATA = {} diff --git a/nibabel/streamlines/tests/test_tck.py b/nibabel/streamlines/tests/test_tck.py index 7f6e7307ba..f514d3f3df 100644 --- a/nibabel/streamlines/tests/test_tck.py +++ b/nibabel/streamlines/tests/test_tck.py @@ -1,21 +1,18 @@ import os import unittest -import numpy as np -from os.path import join as pjoin - from io import BytesIO +from os.path import join as pjoin -from ..array_sequence import ArraySequence -from ..tractogram import Tractogram -from ..tractogram_file import HeaderWarning, HeaderError -from ..tractogram_file import DataError - -from .. import tck as tck_module -from ..tck import TckFile - +import numpy as np import pytest from numpy.testing import assert_array_equal + from ...testing import data_path, error_warnings +from .. import tck as tck_module +from ..array_sequence import ArraySequence +from ..tck import TckFile +from ..tractogram import Tractogram +from ..tractogram_file import DataError, HeaderError, HeaderWarning from .test_tractogram import assert_tractogram_equal DATA = {} diff --git a/nibabel/streamlines/tests/test_tractogram.py b/nibabel/streamlines/tests/test_tractogram.py index c698f10e44..30294be438 100644 --- a/nibabel/streamlines/tests/test_tractogram.py +++ b/nibabel/streamlines/tests/test_tractogram.py @@ -1,19 +1,26 @@ -import sys import copy +import operator +import sys import unittest -import numpy as np import warnings -import operator from collections import defaultdict +import numpy as np import pytest -from ...testing import assert_arrays_equal, clear_and_catch_warnings -from numpy.testing import assert_array_equal, assert_array_almost_equal +from numpy.testing import assert_array_almost_equal, assert_array_equal +from ...testing import assert_arrays_equal, clear_and_catch_warnings from .. 
import tractogram as module_tractogram -from ..tractogram import is_data_dict, is_lazy_dict -from ..tractogram import TractogramItem, Tractogram, LazyTractogram -from ..tractogram import PerArrayDict, PerArraySequenceDict, LazyDict +from ..tractogram import ( + LazyDict, + LazyTractogram, + PerArrayDict, + PerArraySequenceDict, + Tractogram, + TractogramItem, + is_data_dict, + is_lazy_dict, +) DATA = {} diff --git a/nibabel/streamlines/tests/test_tractogram_file.py b/nibabel/streamlines/tests/test_tractogram_file.py index a1d89ccec6..53a7fb662b 100644 --- a/nibabel/streamlines/tests/test_tractogram_file.py +++ b/nibabel/streamlines/tests/test_tractogram_file.py @@ -1,11 +1,11 @@ """Test tractogramFile base class """ +import pytest + from ..tractogram import Tractogram from ..tractogram_file import TractogramFile -import pytest - def test_subclassing_tractogram_file(): diff --git a/nibabel/streamlines/tests/test_trk.py b/nibabel/streamlines/tests/test_trk.py index e23efc8d5d..b8ff43620b 100644 --- a/nibabel/streamlines/tests/test_trk.py +++ b/nibabel/streamlines/tests/test_trk.py @@ -1,28 +1,26 @@ +import copy import os import sys -import copy import unittest -import numpy as np -from os.path import join as pjoin - from io import BytesIO +from os.path import join as pjoin +import numpy as np import pytest -from ...testing import data_path, clear_and_catch_warnings, assert_arr_dict_equal, error_warnings from numpy.testing import assert_array_equal -from .test_tractogram import assert_tractogram_equal +from ...testing import assert_arr_dict_equal, clear_and_catch_warnings, data_path, error_warnings +from .. import trk as trk_module +from ..header import Field from ..tractogram import Tractogram from ..tractogram_file import HeaderError, HeaderWarning - -from .. 
import trk as trk_module from ..trk import ( TrkFile, - encode_value_in_name, decode_value_from_name, + encode_value_in_name, get_affine_trackvis_to_rasmm, ) -from ..header import Field +from .test_tractogram import assert_tractogram_equal DATA = {} diff --git a/nibabel/streamlines/tests/test_utils.py b/nibabel/streamlines/tests/test_utils.py index bcdde6d013..7836d45eb5 100644 --- a/nibabel/streamlines/tests/test_utils.py +++ b/nibabel/streamlines/tests/test_utils.py @@ -1,11 +1,11 @@ import os -import numpy as np -import nibabel as nib -from nibabel.testing import data_path +import numpy as np +import pytest from numpy.testing import assert_array_equal -import pytest +import nibabel as nib +from nibabel.testing import data_path from ..utils import get_affine_from_reference diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index cf9a099fe4..ded937ab11 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -1,8 +1,9 @@ import copy import numbers -import numpy as np -from warnings import warn from collections.abc import MutableMapping +from warnings import warn + +import numpy as np from nibabel.affines import apply_affine diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index eb382af4d0..bbf156ee08 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -2,27 +2,24 @@ # http://www.trackvis.org/docs/?subsect=fileformat import os -import struct import string +import struct import warnings import numpy as np from numpy.compat.py3k import asstr import nibabel as nib - from nibabel.openers import Opener -from nibabel.volumeutils import native_code, swapped_code, endian_codes from nibabel.orientations import aff2axcodes, axcodes2ornt +from nibabel.volumeutils import endian_codes, native_code, swapped_code from .array_sequence import create_arraysequences_from_generator -from .tractogram_file import TractogramFile -from .tractogram_file import DataError, HeaderError, HeaderWarning -from .tractogram import TractogramItem, Tractogram, LazyTractogram from .header import Field +from .tractogram import LazyTractogram, Tractogram, TractogramItem +from .tractogram_file import DataError, HeaderError, HeaderWarning, TractogramFile from .utils import peek_next - MAX_NB_NAMED_SCALARS_PER_POINT = 10 MAX_NB_NAMED_PROPERTIES_PER_STREAMLINE = 10 diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 44cc82890b..4600782d4b 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -8,24 +8,21 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Utilities for testing""" -import re import os +import re import sys -import warnings -from pkg_resources import resource_filename - import unittest +import warnings +from contextlib import nullcontext +from itertools import zip_longest -import pytest import numpy as np +import pytest from numpy.testing import assert_array_equal +from pkg_resources import resource_filename +from .helpers import assert_data_similar, bytesio_filemap, bytesio_round_trip from .np_features import memmap_after_ufunc -from .helpers import bytesio_filemap, bytesio_round_trip, assert_data_similar - -from itertools import zip_longest - -from contextlib import nullcontext def test_data(subdir=None, fname=None): @@ -229,6 +226,7 @@ def setUp(self): def expires(version): """Decorator to mark a test as xfail with ExpiredDeprecationError after version""" from packaging.version import Version + from nibabel import __version__ 
as nbver from nibabel.deprecator import ExpiredDeprecationError diff --git a/nibabel/tests/data/check_parrec_reslice.py b/nibabel/tests/data/check_parrec_reslice.py index 17b36bd6dd..8ade7f539c 100644 --- a/nibabel/tests/data/check_parrec_reslice.py +++ b/nibabel/tests/data/check_parrec_reslice.py @@ -22,6 +22,7 @@ of the field of view. """ import glob + import numpy as np import numpy.linalg as npl diff --git a/nibabel/tests/data/gen_standard.py b/nibabel/tests/data/gen_standard.py index 2d736fb445..598726fe74 100644 --- a/nibabel/tests/data/gen_standard.py +++ b/nibabel/tests/data/gen_standard.py @@ -6,8 +6,8 @@ * standard.trk """ import numpy as np -import nibabel as nib +import nibabel as nib from nibabel.streamlines import FORMATS from nibabel.streamlines.header import Field diff --git a/nibabel/tests/data/make_moved_anat.py b/nibabel/tests/data/make_moved_anat.py index aee20eda97..678b5dfdeb 100644 --- a/nibabel/tests/data/make_moved_anat.py +++ b/nibabel/tests/data/make_moved_anat.py @@ -9,8 +9,8 @@ import numpy as np import nibabel as nib -from nibabel.eulerangles import euler2mat from nibabel.affines import from_matvec +from nibabel.eulerangles import euler2mat if __name__ == '__main__': img = nib.load('anatomical.nii') diff --git a/nibabel/tests/nibabel_data.py b/nibabel/tests/nibabel_data.py index 06e5540674..8d4652d79f 100644 --- a/nibabel/tests/nibabel_data.py +++ b/nibabel/tests/nibabel_data.py @@ -1,10 +1,11 @@ """Functions / decorators for finding / requiring nibabel-data directory """ -from os import environ, listdir -from os.path import dirname, realpath, join as pjoin, isdir, exists - import unittest +from os import environ, listdir +from os.path import dirname, exists, isdir +from os.path import join as pjoin +from os.path import realpath def get_nibabel_data(): diff --git a/nibabel/tests/scriptrunner.py b/nibabel/tests/scriptrunner.py index 474eeceb2c..1ec2fcb486 100644 --- a/nibabel/tests/scriptrunner.py +++ b/nibabel/tests/scriptrunner.py @@ -12,12 +12,12 @@ assert_equal(code, 0) assert_equal(stdout, b'This script ran OK') """ -import sys import os -from os.path import dirname, join as pjoin, isfile, isdir, realpath, pathsep - -from subprocess import Popen, PIPE - +import sys +from os.path import dirname, isdir, isfile +from os.path import join as pjoin +from os.path import pathsep, realpath +from subprocess import PIPE, Popen MY_PACKAGE = __package__ diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 08166df6e8..08ae5f4bda 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -4,26 +4,24 @@ from itertools import product import numpy as np +import pytest +from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal -from ..eulerangles import euler2mat from ..affines import ( AffineError, - apply_affine, append_diag, - to_matvec, - from_matvec, + apply_affine, dot_reduce, - voxel_sizes, + from_matvec, obliquity, rescale_affine, + to_matvec, + voxel_sizes, ) +from ..eulerangles import euler2mat from ..orientations import aff2axcodes -import pytest -from numpy.testing import assert_array_equal, assert_almost_equal, assert_array_almost_equal - - def validated_apply_affine(T, xyz): # This was the original apply_affine implementation that we've stashed here # to test against diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 2cea69413f..1f80addc30 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -12,38 +12,35 @@ header """ 
-import os -import re +import itertools import logging +import os import pickle -import itertools +import re +from io import BytesIO, StringIO import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from io import BytesIO, StringIO -from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types -from ..analyze import AnalyzeHeader, AnalyzeImage -from ..nifti1 import Nifti1Header -from ..loadsave import read_img_data from .. import imageglobals -from ..casting import as_int -from ..tmpdirs import InTemporaryDirectory +from ..analyze import AnalyzeHeader, AnalyzeImage from ..arraywriters import WriterError +from ..casting import as_int +from ..loadsave import read_img_data +from ..nifti1 import Nifti1Header from ..optpkg import optional_package - -import pytest -from numpy.testing import assert_array_equal, assert_array_almost_equal - +from ..spatialimages import HeaderDataError, HeaderTypeError, supported_np_types from ..testing import ( - data_path, - suppress_warnings, assert_dt_equal, bytesio_filemap, bytesio_round_trip, + data_path, + suppress_warnings, ) - -from . import test_wrapstruct as tws +from ..tmpdirs import InTemporaryDirectory from . import test_spatialimages as tsi +from . import test_wrapstruct as tws HAVE_ZSTD = optional_package('pyzstd')[1] diff --git a/nibabel/tests/test_api_validators.py b/nibabel/tests/test_api_validators.py index 2382847da4..1d21092eef 100644 --- a/nibabel/tests/test_api_validators.py +++ b/nibabel/tests/test_api_validators.py @@ -1,6 +1,7 @@ """Metaclass and class for validating instance APIs """ import os + import pytest diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index e4d16e7dd8..5018e95e1f 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -9,29 +9,25 @@ """Tests for arrayproxy module """ -import warnings -import gzip import contextlib - +import gzip import pickle +import warnings from io import BytesIO -from packaging.version import Version -from ..tmpdirs import InTemporaryDirectory +from unittest import mock import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from packaging.version import Version from .. import __version__ -from ..arrayproxy import ArrayProxy, is_proxy, reshape_dataobj, get_obj_dtype -from ..openers import ImageOpener -from ..nifti1 import Nifti1Header +from ..arrayproxy import ArrayProxy, get_obj_dtype, is_proxy, reshape_dataobj from ..deprecator import ExpiredDeprecationError - -from unittest import mock - -from numpy.testing import assert_array_equal, assert_array_almost_equal -import pytest +from ..nifti1 import Nifti1Header +from ..openers import ImageOpener from ..testing import memmap_after_ufunc - +from ..tmpdirs import InTemporaryDirectory from .test_fileslice import slicer_samples from .test_openers import patch_indexed_gzip diff --git a/nibabel/tests/test_arraywriters.py b/nibabel/tests/test_arraywriters.py index 1fbaa38916..e77c2fd11f 100644 --- a/nibabel/tests/test_arraywriters.py +++ b/nibabel/tests/test_arraywriters.py @@ -3,27 +3,26 @@ See docstring of :mod:`nibabel.arraywriters` for API. 
""" -from platform import python_compiler, machine import itertools +from io import BytesIO +from platform import machine, python_compiler + import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from io import BytesIO from ..arraywriters import ( - SlopeInterArrayWriter, + ArrayWriter, + ScalingError, SlopeArrayWriter, + SlopeInterArrayWriter, WriterError, - ScalingError, - ArrayWriter, - make_array_writer, get_slope_inter, + make_array_writer, ) -from ..casting import int_abs, type_info, shared_range, on_powerpc -from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max - -from numpy.testing import assert_array_almost_equal, assert_array_equal -import pytest +from ..casting import int_abs, on_powerpc, shared_range, type_info from ..testing import assert_allclose_safely, suppress_warnings - +from ..volumeutils import _dt_min_max, apply_read_scaling, array_from_file FLOAT_TYPES = np.sctypes['float'] COMPLEX_TYPES = np.sctypes['complex'] diff --git a/nibabel/tests/test_batteryrunners.py b/nibabel/tests/test_batteryrunners.py index d260d2db76..84590452ea 100644 --- a/nibabel/tests/test_batteryrunners.py +++ b/nibabel/tests/test_batteryrunners.py @@ -9,13 +9,13 @@ """Tests for BatteryRunner and Report objects """ +import logging from io import StringIO -import logging +import pytest from ..batteryrunners import BatteryRunner, Report -import pytest # define some trivial functions as checks def chk1(obj, fix=False): diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index ff9e91520e..b2c1f1257c 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -10,14 +10,11 @@ from os.path import join as pjoin import numpy as np - -from .. import load, Nifti1Image -from .. import brikhead - import pytest from numpy.testing import assert_array_equal -from ..testing import data_path, assert_data_similar +from .. import Nifti1Image, brikhead, load +from ..testing import assert_data_similar, data_path from .test_fileslice import slicer_samples EXAMPLE_IMAGES = [ diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index d16541b352..8c4cad7bbb 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -1,29 +1,27 @@ """Test casting utilities """ import os - from platform import machine + import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal from ..casting import ( - float_to_int, - shared_range, CastingError, - int_to_float, - as_int, - int_abs, - floor_log2, able_int_type, + as_int, best_float, - ulp, + float_to_int, + floor_log2, + int_abs, + int_to_float, longdouble_precision_improved, + shared_range, + ulp, ) from ..testing import suppress_warnings -from numpy.testing import assert_array_almost_equal, assert_array_equal - -import pytest - def test_shared_range(): for ft in np.sctypes['float']: diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index 0fbadc6af0..ece2e1c6cd 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -2,31 +2,27 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """Tests for data module""" import os -from os.path import join as pjoin -from os import environ as env import sys import tempfile +from os import environ as env +from os.path import join as pjoin + +import pytest +from .. 
import data as nibd from ..data import ( - get_data_path, - find_data_dir, + Bomber, DataError, - _cfg_value, - make_datasource, Datasource, VersionedDatasource, - Bomber, + _cfg_value, datasource_or_bomber, + find_data_dir, + get_data_path, + make_datasource, ) - from ..tmpdirs import TemporaryDirectory - -from .. import data as nibd - - -import pytest - -from .test_environment import with_environment, DATA_KEY, USER_KEY +from .test_environment import DATA_KEY, USER_KEY, with_environment @pytest.fixture diff --git a/nibabel/tests/test_dataobj_images.py b/nibabel/tests/test_dataobj_images.py index dfbb0fe4cb..a1d2dbc9f1 100644 --- a/nibabel/tests/test_dataobj_images.py +++ b/nibabel/tests/test_dataobj_images.py @@ -3,11 +3,10 @@ import numpy as np -from nibabel.filebasedimages import FileBasedHeader from nibabel.dataobj_images import DataobjImage - -from nibabel.tests.test_image_api import DataInterfaceMixin +from nibabel.filebasedimages import FileBasedHeader from nibabel.tests.test_filebasedimages import TestFBImageAPI as _TFI +from nibabel.tests.test_image_api import DataInterfaceMixin class DoNumpyImage(DataobjImage): diff --git a/nibabel/tests/test_deprecated.py b/nibabel/tests/test_deprecated.py index cd56f507f9..962f9c0827 100644 --- a/nibabel/tests/test_deprecated.py +++ b/nibabel/tests/test_deprecated.py @@ -2,12 +2,11 @@ """ import warnings + import pytest from nibabel import pkg_info -from nibabel.deprecated import ModuleProxy, FutureWarningMixin, deprecate_with_version - - +from nibabel.deprecated import FutureWarningMixin, ModuleProxy, deprecate_with_version from nibabel.tests.test_deprecator import TestDeprecatorFunc as _TestDF diff --git a/nibabel/tests/test_deprecator.py b/nibabel/tests/test_deprecator.py index 31b61f5153..833908af94 100644 --- a/nibabel/tests/test_deprecator.py +++ b/nibabel/tests/test_deprecator.py @@ -9,12 +9,12 @@ import pytest from nibabel.deprecator import ( - _ensure_cr, - _add_dep_doc, - ExpiredDeprecationError, - Deprecator, - TESTSETUP, TESTCLEANUP, + TESTSETUP, + Deprecator, + ExpiredDeprecationError, + _add_dep_doc, + _ensure_cr, ) from ..testing import clear_and_catch_warnings diff --git a/nibabel/tests/test_dft.py b/nibabel/tests/test_dft.py index b43b2762f7..f756600fd3 100644 --- a/nibabel/tests/test_dft.py +++ b/nibabel/tests/test_dft.py @@ -2,18 +2,22 @@ """ import os -from os.path import join as pjoin, dirname +import sqlite3 from io import BytesIO +from os.path import dirname +from os.path import join as pjoin + from ..testing import suppress_warnings -import sqlite3 with suppress_warnings(): from .. import dft -from .. import nifti1 import unittest + import pytest +from .. 
import nifti1 + # Shield optional package imports from ..optpkg import optional_package diff --git a/nibabel/tests/test_diff.py b/nibabel/tests/test_diff.py index b1f05177bb..fee71d628b 100644 --- a/nibabel/tests/test_diff.py +++ b/nibabel/tests/test_diff.py @@ -3,9 +3,10 @@ """Test diff """ -from os.path import dirname, join as pjoin, abspath -import numpy as np +from os.path import abspath, dirname +from os.path import join as pjoin +import numpy as np DATA_PATH = abspath(pjoin(dirname(__file__), 'data')) @@ -41,7 +42,7 @@ def test_diff_values_mixed(): def test_diff_values_array(): - from numpy import nan, array, inf + from numpy import array, inf, nan a_int = array([1, 2]) a_float = a_int.astype(float) diff --git a/nibabel/tests/test_ecat.py b/nibabel/tests/test_ecat.py index 875e06c0a7..9cb9f91e1a 100644 --- a/nibabel/tests/test_ecat.py +++ b/nibabel/tests/test_ecat.py @@ -9,27 +9,23 @@ import os import warnings +from unittest import TestCase import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from ..openers import Opener from ..ecat import ( EcatHeader, - EcatSubHeader, EcatImage, - read_mlist, + EcatSubHeader, get_frame_order, get_series_framenumbers, + read_mlist, ) - -from unittest import TestCase -import pytest - -from numpy.testing import assert_array_equal, assert_array_almost_equal - +from ..openers import Opener from ..testing import data_path, suppress_warnings from ..tmpdirs import InTemporaryDirectory - from . import test_wrapstruct as tws from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_ecat_data.py b/nibabel/tests/test_ecat_data.py index de4164cd3c..b7dbe4750a 100644 --- a/nibabel/tests/test_ecat_data.py +++ b/nibabel/tests/test_ecat_data.py @@ -13,11 +13,10 @@ from os.path import join as pjoin import numpy as np +from numpy.testing import assert_almost_equal, assert_array_equal -from .nibabel_data import get_nibabel_data, needs_nibabel_data from ..ecat import load - -from numpy.testing import assert_array_equal, assert_almost_equal +from .nibabel_data import get_nibabel_data, needs_nibabel_data ECAT_TEST_PATH = pjoin(get_nibabel_data(), 'nipy-ecattest') diff --git a/nibabel/tests/test_environment.py b/nibabel/tests/test_environment.py index 5742edef43..afb6d36f84 100644 --- a/nibabel/tests/test_environment.py +++ b/nibabel/tests/test_environment.py @@ -3,13 +3,13 @@ import os from os import environ as env -from os.path import join as pjoin, abspath +from os.path import abspath +from os.path import join as pjoin +import pytest from .. import environment as nibe -import pytest - DATA_KEY = 'NIPY_DATA_PATH' USER_KEY = 'NIPY_USER_DIR' diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 25e4c776d2..8b0fb932d5 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -9,15 +9,15 @@ """Tests for Euler angles""" import math + import numpy as np +import pytest from numpy import pi +from numpy.testing import assert_array_almost_equal, assert_array_equal from .. import eulerangles as nea from .. 
import quaternions as nq -import pytest -from numpy.testing import assert_array_equal, assert_array_almost_equal - FLOAT_EPS = np.finfo(np.float64).eps # Example rotations """ diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index aee02f5a68..aa48a3e747 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -1,14 +1,13 @@ """Testing filebasedimages module """ -from itertools import product import warnings +from itertools import product import numpy as np import pytest from ..filebasedimages import FileBasedHeader, FileBasedImage, SerializableImage - from .test_image_api import GenericImageAPI, SerializeMixin diff --git a/nibabel/tests/test_filehandles.py b/nibabel/tests/test_filehandles.py index 73698b23ac..506a623758 100644 --- a/nibabel/tests/test_filehandles.py +++ b/nibabel/tests/test_filehandles.py @@ -2,10 +2,10 @@ Check that loading an image does not use up filehandles. """ -from os.path import join as pjoin import shutil -from tempfile import mkdtemp import unittest +from os.path import join as pjoin +from tempfile import mkdtemp import numpy as np diff --git a/nibabel/tests/test_fileholders.py b/nibabel/tests/test_fileholders.py index a0e50e4133..33b3f76e6f 100644 --- a/nibabel/tests/test_fileholders.py +++ b/nibabel/tests/test_fileholders.py @@ -3,7 +3,6 @@ from io import BytesIO - from ..fileholders import FileHolder diff --git a/nibabel/tests/test_filename_parser.py b/nibabel/tests/test_filename_parser.py index b4a816a137..29da7b6f61 100644 --- a/nibabel/tests/test_filename_parser.py +++ b/nibabel/tests/test_filename_parser.py @@ -8,10 +8,10 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for filename container""" -from ..filename_parser import types_filenames, TypesFilenamesError, parse_filename, splitext_addext - import pytest +from ..filename_parser import TypesFilenamesError, parse_filename, splitext_addext, types_filenames + def test_filenames(): types_exts = (('image', '.img'), ('header', '.hdr')) diff --git a/nibabel/tests/test_files_interface.py b/nibabel/tests/test_files_interface.py index 80c4a0ab92..52557d353d 100644 --- a/nibabel/tests/test_files_interface.py +++ b/nibabel/tests/test_files_interface.py @@ -9,16 +9,16 @@ """Testing filesets - a draft """ +from io import BytesIO + import numpy as np +import pytest +from numpy.testing import assert_array_equal -from .. import Nifti1Image, Nifti1Pair, MGHImage, all_image_classes -from io import BytesIO +from .. 
import MGHImage, Nifti1Image, Nifti1Pair, all_image_classes from ..fileholders import FileHolderError from ..spatialimages import SpatialImage -from numpy.testing import assert_array_equal -import pytest - def test_files_spatialimages(): # test files creation in image classes diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index e98fd473a0..781f17d716 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -1,36 +1,35 @@ """Test slicing of file-like objects""" +import time +from functools import partial from io import BytesIO from itertools import product -from functools import partial -from threading import Thread, Lock -import time +from threading import Lock, Thread import numpy as np +import pytest +from numpy.testing import assert_array_equal from ..fileslice import ( - is_fancy, + _positive_slice, + _simple_fileslice, + calc_slicedefs, canonical_slicers, fileslice, + fill_slicer, + is_fancy, + optimize_read_slicers, + optimize_slicer, predict_shape, read_segments, - _positive_slice, - threshold_heuristic, - optimize_slicer, slice2len, - fill_slicer, - optimize_read_slicers, - slicers2segments, - calc_slicedefs, - _simple_fileslice, slice2outax, + slicers2segments, strided_scalar, + threshold_heuristic, ) -import pytest -from numpy.testing import assert_array_equal - def _check_slice(sliceobj): # Fancy indexing always returns a copy, basic indexing returns a view diff --git a/nibabel/tests/test_fileutils.py b/nibabel/tests/test_fileutils.py index 3544b88977..21c7676fce 100644 --- a/nibabel/tests/test_fileutils.py +++ b/nibabel/tests/test_fileutils.py @@ -10,10 +10,9 @@ """ -from ..fileutils import read_zt_byte_strings - import pytest +from ..fileutils import read_zt_byte_strings from ..tmpdirs import InTemporaryDirectory diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 62df671aca..321eb1b961 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -2,28 +2,26 @@ """ import sys - import numpy as np +import pytest from ..casting import ( - floor_exact, - ceil_exact, - as_int, FloatingError, - int_to_float, - floor_log2, - type_info, - _check_nmant, _check_maxexp, - ok_floats, - on_powerpc, + _check_nmant, + as_int, + ceil_exact, + floor_exact, + floor_log2, have_binary128, + int_to_float, longdouble_precision_improved, + ok_floats, + on_powerpc, + type_info, ) from ..testing import suppress_warnings -import pytest - IEEE_floats = [np.float16, np.float32, np.float64] LD_INFO = type_info(np.longdouble) diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index e1a7ec9264..752aed0b52 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -9,17 +9,15 @@ """Test for image funcs""" import numpy as np +import pytest +from numpy.testing import assert_array_equal -from ..funcs import concat_images, as_closest_canonical, OrientationError from ..analyze import AnalyzeImage -from ..nifti1 import Nifti1Image +from ..funcs import OrientationError, as_closest_canonical, concat_images from ..loadsave import save - +from ..nifti1 import Nifti1Image from ..tmpdirs import InTemporaryDirectory -from numpy.testing import assert_array_equal -import pytest - _counter = 0 diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 57a0322cab..af82c304ac 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -23,11 +23,11 @@ cached, but False otherwise. 
""" +import io +import pathlib import warnings from functools import partial from itertools import product -import io -import pathlib import numpy as np @@ -36,45 +36,47 @@ _, have_scipy, _ = optional_package('scipy') _, have_h5py, _ = optional_package('h5py') +import unittest + +import pytest +from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal, assert_warns + +from nibabel.arraywriters import WriterError +from nibabel.testing import ( + assert_data_similar, + bytesio_filemap, + bytesio_round_trip, + clear_and_catch_warnings, + expires, + nullcontext, +) + from .. import ( AnalyzeImage, - Spm99AnalyzeImage, - Spm2AnalyzeImage, - Nifti1Pair, - Nifti1Image, - Nifti2Pair, - Nifti2Image, GiftiImage, MGHImage, Minc1Image, Minc2Image, + Nifti1Image, + Nifti1Pair, + Nifti2Image, + Nifti2Pair, + Spm2AnalyzeImage, + Spm99AnalyzeImage, + brikhead, is_proxy, + minc1, + minc2, + parrec, ) -from ..spatialimages import SpatialImage -from .. import minc1, minc2, parrec, brikhead from ..deprecator import ExpiredDeprecationError - -import unittest -import pytest - -from numpy.testing import assert_almost_equal, assert_array_equal, assert_warns, assert_allclose -from nibabel.testing import ( - bytesio_round_trip, - bytesio_filemap, - assert_data_similar, - clear_and_catch_warnings, - nullcontext, - expires, -) +from ..spatialimages import SpatialImage from ..tmpdirs import InTemporaryDirectory - from .test_api_validators import ValidateAPI +from .test_brikhead import EXAMPLE_IMAGES as AFNI_EXAMPLE_IMAGES from .test_minc1 import EXAMPLE_IMAGES as MINC1_EXAMPLE_IMAGES from .test_minc2 import EXAMPLE_IMAGES as MINC2_EXAMPLE_IMAGES from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLE_IMAGES -from .test_brikhead import EXAMPLE_IMAGES as AFNI_EXAMPLE_IMAGES - -from nibabel.arraywriters import WriterError def maybe_deprecated(meth_name): diff --git a/nibabel/tests/test_image_load_save.py b/nibabel/tests/test_image_load_save.py index 13c403285c..962a2433bf 100644 --- a/nibabel/tests/test_image_load_save.py +++ b/nibabel/tests/test_image_load_save.py @@ -7,43 +7,42 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for loader function""" -from io import BytesIO - +import logging +import pathlib import shutil -from os.path import dirname, join as pjoin +from io import BytesIO +from os.path import dirname +from os.path import join as pjoin from tempfile import mkdtemp -import pathlib -import logging import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal -from .. import analyze as ana -from .. import spm99analyze as spm99 -from .. import spm2analyze as spm2 -from .. import nifti1 as ni1 -from .. import loadsave as nils from .. import ( - Nifti1Image, + AnalyzeImage, + MGHImage, + Minc1Image, + Minc2Image, Nifti1Header, + Nifti1Image, Nifti1Pair, Nifti2Image, Nifti2Pair, - Minc1Image, - Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - AnalyzeImage, - MGHImage, all_image_classes, ) -from ..tmpdirs import InTemporaryDirectory -from ..volumeutils import native_code, swapped_code +from .. import analyze as ana +from .. import loadsave as nils +from .. import nifti1 as ni1 +from .. import spm2analyze as spm2 +from .. 
import spm99analyze as spm99 from ..optpkg import optional_package from ..spatialimages import SpatialImage from ..testing import expires - -from numpy.testing import assert_array_equal, assert_array_almost_equal -import pytest +from ..tmpdirs import InTemporaryDirectory +from ..volumeutils import native_code, swapped_code _, have_scipy, _ = optional_package('scipy') # No scipy=>no SPM-format writing DATA_PATH = pjoin(dirname(__file__), 'data') diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index fd9927eb00..f8186f4147 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -9,28 +9,28 @@ """Tests for is_image / may_contain_header functions""" import copy -from os.path import dirname, basename, join as pjoin +from os.path import basename, dirname +from os.path import join as pjoin import numpy as np from .. import ( - Nifti1Image, + AnalyzeHeader, + AnalyzeImage, + MGHImage, + Minc1Image, + Minc2Image, Nifti1Header, + Nifti1Image, Nifti1Pair, - Nifti2Image, Nifti2Header, + Nifti2Image, Nifti2Pair, - AnalyzeImage, - AnalyzeHeader, - Minc1Image, - Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, - MGHImage, all_image_classes, ) - DATA_PATH = pjoin(dirname(__file__), 'data') diff --git a/nibabel/tests/test_imageclasses.py b/nibabel/tests/test_imageclasses.py index 472e1c5d63..74f05dc6e3 100644 --- a/nibabel/tests/test_imageclasses.py +++ b/nibabel/tests/test_imageclasses.py @@ -1,21 +1,19 @@ """Testing imageclasses module """ -from os.path import dirname, join as pjoin import warnings +from os.path import dirname +from os.path import join as pjoin import numpy as np - import pytest import nibabel as nib +from nibabel import imageclasses from nibabel.analyze import AnalyzeImage +from nibabel.imageclasses import spatial_axes_first from nibabel.nifti1 import Nifti1Image from nibabel.nifti2 import Nifti2Image - -from nibabel import imageclasses -from nibabel.imageclasses import spatial_axes_first - from nibabel.optpkg import optional_package have_h5py = optional_package('h5py')[1] diff --git a/nibabel/tests/test_imagestats.py b/nibabel/tests/test_imagestats.py index 47dd2ecbd5..8adfc910a8 100644 --- a/nibabel/tests/test_imagestats.py +++ b/nibabel/tests/test_imagestats.py @@ -10,8 +10,7 @@ import numpy as np -from .. import imagestats -from .. import Nifti1Image +from .. import Nifti1Image, imagestats def test_mask_volume(): diff --git a/nibabel/tests/test_init.py b/nibabel/tests/test_init.py index c227889e59..ff4dc082f6 100644 --- a/nibabel/tests/test_init.py +++ b/nibabel/tests/test_init.py @@ -1,8 +1,10 @@ -import nibabel as nib -from pkg_resources import resource_filename -import pytest from unittest import mock +import pytest +from pkg_resources import resource_filename + +import nibabel as nib + @pytest.mark.parametrize( 'verbose, v_args', [(-2, ['-qq']), (-1, ['-q']), (0, []), (1, ['-v']), (2, ['-vv'])] diff --git a/nibabel/tests/test_loadsave.py b/nibabel/tests/test_loadsave.py index f8cc168cfd..3b58772b6a 100644 --- a/nibabel/tests/test_loadsave.py +++ b/nibabel/tests/test_loadsave.py @@ -1,34 +1,33 @@ """Testing loadsave module """ -from os.path import dirname, join as pjoin -import shutil import pathlib +import shutil +from os.path import dirname +from os.path import join as pjoin import numpy as np from .. 
import ( - Spm99AnalyzeImage, - Spm2AnalyzeImage, - Nifti1Pair, Nifti1Image, - Nifti2Pair, + Nifti1Pair, Nifti2Image, + Nifti2Pair, + Spm2AnalyzeImage, + Spm99AnalyzeImage, ) -from ..loadsave import load, read_img_data, _signature_matches_extension from ..filebasedimages import ImageFileError -from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory +from ..loadsave import _signature_matches_extension, load, read_img_data from ..openers import Opener -from ..testing import expires - from ..optpkg import optional_package +from ..testing import expires +from ..tmpdirs import InTemporaryDirectory, TemporaryDirectory _, have_scipy, _ = optional_package('scipy') _, have_pyzstd, _ = optional_package('pyzstd') -from numpy.testing import assert_almost_equal, assert_array_equal - import pytest +from numpy.testing import assert_almost_equal, assert_array_equal data_path = pjoin(dirname(__file__), 'data') diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 4556f76787..3eeefaa84b 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -7,29 +7,25 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## -from os.path import join as pjoin - -import gzip import bz2 -import warnings +import gzip import types +import warnings from io import BytesIO +from os.path import join as pjoin import numpy as np +import pytest +from numpy.testing import assert_array_equal -from .. import load, Nifti1Image -from ..externals.netcdf import netcdf_file +from .. import Nifti1Image, load, minc1 from ..deprecated import ModuleProxy -from .. import minc1 +from ..deprecator import ExpiredDeprecationError +from ..externals.netcdf import netcdf_file from ..minc1 import Minc1File, Minc1Image, MincHeader from ..optpkg import optional_package - +from ..testing import assert_data_similar, clear_and_catch_warnings, data_path from ..tmpdirs import InTemporaryDirectory -from ..deprecator import ExpiredDeprecationError -from ..testing import assert_data_similar, data_path, clear_and_catch_warnings -from numpy.testing import assert_array_equal -import pytest - from . import test_spatialimages as tsi from .test_fileslice import slicer_samples diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index 3e220ef2d1..bd06456c33 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -13,10 +13,8 @@ from .. import minc2 from ..minc2 import Minc2File, Minc2Image - from ..optpkg import optional_package from ..testing import data_path - from . import test_minc1 as tm2 h5py, have_h5py, setup_module = optional_package('h5py') diff --git a/nibabel/tests/test_minc2_data.py b/nibabel/tests/test_minc2_data.py index 03fb93cbea..e96e716699 100644 --- a/nibabel/tests/test_minc2_data.py +++ b/nibabel/tests/test_minc2_data.py @@ -13,12 +13,12 @@ from os.path import join as pjoin import numpy as np +from numpy.testing import assert_almost_equal, assert_array_equal -from .nibabel_data import get_nibabel_data, needs_nibabel_data -from .. import load as top_load, Nifti1Image +from .. import Nifti1Image +from .. 
import load as top_load from ..optpkg import optional_package - -from numpy.testing import assert_array_equal, assert_almost_equal +from .nibabel_data import get_nibabel_data, needs_nibabel_data h5py, have_h5py, setup_module = optional_package('h5py') diff --git a/nibabel/tests/test_mriutils.py b/nibabel/tests/test_mriutils.py index 082d053805..848579cee6 100644 --- a/nibabel/tests/test_mriutils.py +++ b/nibabel/tests/test_mriutils.py @@ -10,10 +10,10 @@ """ -from numpy.testing import assert_almost_equal import pytest +from numpy.testing import assert_almost_equal -from ..mriutils import calculate_dwell_time, MRIError +from ..mriutils import MRIError, calculate_dwell_time def test_calculate_dwell_time(): diff --git a/nibabel/tests/test_nibabel_data.py b/nibabel/tests/test_nibabel_data.py index ec97108e35..1687589549 100644 --- a/nibabel/tests/test_nibabel_data.py +++ b/nibabel/tests/test_nibabel_data.py @@ -2,11 +2,12 @@ """ import os -from os.path import dirname, realpath, join as pjoin, isdir +from os.path import dirname, isdir +from os.path import join as pjoin +from os.path import realpath from . import nibabel_data as nibd - MY_DIR = dirname(__file__) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 0018dfe842..59bf214eda 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -8,55 +8,51 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Tests for nifti reading package""" import os -import warnings import struct +import unittest +import warnings +from io import BytesIO import numpy as np +import pytest +from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_array_equal from nibabel import nifti1 as nifti1 from nibabel.affines import from_matvec -from nibabel.casting import type_info, have_binary128 +from nibabel.casting import have_binary128, type_info from nibabel.eulerangles import euler2mat -from io import BytesIO from nibabel.nifti1 import ( - load, + Nifti1DicomExtension, + Nifti1Extension, + Nifti1Extensions, Nifti1Header, - Nifti1PairHeader, Nifti1Image, Nifti1Pair, - Nifti1Extension, - Nifti1DicomExtension, - Nifti1Extensions, + Nifti1PairHeader, data_type_codes, extension_codes, + load, slice_order_codes, ) +from nibabel.optpkg import optional_package from nibabel.spatialimages import HeaderDataError from nibabel.tmpdirs import InTemporaryDirectory -from nibabel.optpkg import optional_package + from ..freesurfer import load as mghload from ..orientations import aff2axcodes - -from .test_arraywriters import rt_err_estimate, IUINT_TYPES -from .test_orientations import ALL_ORNTS -from .nibabel_data import get_nibabel_data, needs_nibabel_data - -from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_almost_equal - from ..testing import ( + bytesio_filemap, + bytesio_round_trip, clear_and_catch_warnings, data_path, runif_extra_has, suppress_warnings, - bytesio_filemap, - bytesio_round_trip, ) - -import unittest -import pytest - from . import test_analyze as tana from . 
import test_spm99analyze as tspm +from .nibabel_data import get_nibabel_data, needs_nibabel_data +from .test_arraywriters import IUINT_TYPES, rt_err_estimate +from .test_orientations import ALL_ORNTS header_file = os.path.join(data_path, 'nifti1.hdr') image_file = os.path.join(data_path, 'example4d.nii.gz') diff --git a/nibabel/tests/test_nifti2.py b/nibabel/tests/test_nifti2.py index 57a97a1322..742ef148bf 100644 --- a/nibabel/tests/test_nifti2.py +++ b/nibabel/tests/test_nifti2.py @@ -10,16 +10,13 @@ import os import numpy as np - -from .. import nifti2 -from ..nifti1 import Nifti1Header, Nifti1PairHeader, Nifti1Extension, Nifti1Extensions -from ..nifti2 import Nifti2Header, Nifti2PairHeader, Nifti2Image, Nifti2Pair - -from . import test_nifti1 as tn1 - from numpy.testing import assert_array_equal +from .. import nifti2 +from ..nifti1 import Nifti1Extension, Nifti1Extensions, Nifti1Header, Nifti1PairHeader +from ..nifti2 import Nifti2Header, Nifti2Image, Nifti2Pair, Nifti2PairHeader from ..testing import data_path +from . import test_nifti1 as tn1 header_file = os.path.join(data_path, 'nifti2.hdr') image_file = os.path.join(data_path, 'example_nifti2.nii.gz') diff --git a/nibabel/tests/test_onetime.py b/nibabel/tests/test_onetime.py index 2659b7fbbc..426702fa43 100644 --- a/nibabel/tests/test_onetime.py +++ b/nibabel/tests/test_onetime.py @@ -1,4 +1,5 @@ import pytest + from nibabel.onetime import auto_attr, setattr_on_read from nibabel.testing import expires diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 2a306079f4..5219cb27ac 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -7,29 +7,23 @@ # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for openers module""" -import os import contextlib -from gzip import GzipFile -from io import BytesIO, UnsupportedOperation -from packaging.version import Version import hashlib +import os import time - -from numpy.compat.py3k import asstr, asbytes -from ..openers import ( - Opener, - ImageOpener, - HAVE_INDEXED_GZIP, - BZ2File, - DeterministicGzipFile, -) -from ..tmpdirs import InTemporaryDirectory -from ..optpkg import optional_package - import unittest +from gzip import GzipFile +from io import BytesIO, UnsupportedOperation from unittest import mock + import pytest +from numpy.compat.py3k import asbytes, asstr +from packaging.version import Version + from ..deprecator import ExpiredDeprecationError +from ..openers import HAVE_INDEXED_GZIP, BZ2File, DeterministicGzipFile, ImageOpener, Opener +from ..optpkg import optional_package +from ..tmpdirs import InTemporaryDirectory pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') diff --git a/nibabel/tests/test_optpkg.py b/nibabel/tests/test_optpkg.py index 875c32bbdf..7ffaa2f851 100644 --- a/nibabel/tests/test_optpkg.py +++ b/nibabel/tests/test_optpkg.py @@ -1,14 +1,13 @@ """Testing optpkg module """ -from unittest import mock -import types -import sys import builtins -from packaging.version import Version +import sys +import types +from unittest import SkipTest, mock -from unittest import SkipTest import pytest +from packaging.version import Version from nibabel.optpkg import optional_package from nibabel.tripwire import TripWire, TripWireError diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 5d786c0eac..16f7f5ce46 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -8,29 +8,26 @@ ### ### ### ### ### ### ### ### ### ### 
### ### ### ### ### ### ### ### ### ## """Testing for orientations module""" -import numpy as np import warnings +import numpy as np import pytest - from numpy.testing import assert_array_equal +from ..affines import from_matvec, to_matvec from ..orientations import ( - io_orientation, - ornt_transform, - inv_ornt_aff, - flip_axis, - apply_orientation, OrientationError, - ornt2axcodes, - axcodes2ornt, aff2axcodes, + apply_orientation, + axcodes2ornt, + flip_axis, + inv_ornt_aff, + io_orientation, + ornt2axcodes, + ornt_transform, ) - -from ..affines import from_matvec, to_matvec from ..testing import expires - IN_ARRS = [ np.eye(4), [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index 0eca2fdca4..e50b609da4 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -1,38 +1,35 @@ """Testing parrec module """ -from os.path import join as pjoin, dirname, basename from glob import glob +from os.path import basename, dirname +from os.path import join as pjoin from warnings import simplefilter import numpy as np +import pytest from numpy import array as npa +from numpy.testing import assert_almost_equal, assert_array_equal from .. import load as top_load -from ..nifti1 import Nifti1Image, Nifti1Extension, Nifti1Header from .. import parrec +from ..fileholders import FileHolder +from ..nifti1 import Nifti1Extension, Nifti1Header, Nifti1Image +from ..openers import ImageOpener from ..parrec import ( - parse_PAR_header, - PARRECHeader, + PARRECArrayProxy, PARRECError, - vol_numbers, - vol_is_full, + PARRECHeader, PARRECImage, - PARRECArrayProxy, exts2pars, + parse_PAR_header, + vol_is_full, + vol_numbers, ) -from ..openers import ImageOpener -from ..fileholders import FileHolder +from ..testing import assert_arr_dict_equal, clear_and_catch_warnings, suppress_warnings from ..volumeutils import array_from_file - -from numpy.testing import assert_almost_equal, assert_array_equal - -import pytest -from ..testing import clear_and_catch_warnings, suppress_warnings, assert_arr_dict_equal - -from .test_arrayproxy import check_mmap from . import test_spatialimages as tsi - +from .test_arrayproxy import check_mmap DATA_PATH = pjoin(dirname(__file__), 'data') EG_PAR = pjoin(DATA_PATH, 'phantom_EPI_asc_CLEAR_2_1.PAR') diff --git a/nibabel/tests/test_parrec_data.py b/nibabel/tests/test_parrec_data.py index 1179a21264..a437fafeda 100644 --- a/nibabel/tests/test_parrec_data.py +++ b/nibabel/tests/test_parrec_data.py @@ -1,21 +1,21 @@ """Test we can correctly import example PARREC files """ +import unittest from glob import glob -from os.path import join as pjoin, basename, splitext, exists +from os.path import basename, exists +from os.path import join as pjoin +from os.path import splitext import numpy as np +import pytest +from numpy.testing import assert_almost_equal from .. 
import load as top_load -from ..parrec import load from ..affines import voxel_sizes - +from ..parrec import load from .nibabel_data import get_nibabel_data, needs_nibabel_data -import unittest -import pytest -from numpy.testing import assert_almost_equal - BALLS = pjoin(get_nibabel_data(), 'nitest-balls1') OBLIQUE = pjoin(get_nibabel_data(), 'parrec_oblique') diff --git a/nibabel/tests/test_pkg_info.py b/nibabel/tests/test_pkg_info.py index 0583add021..32059c68d8 100644 --- a/nibabel/tests/test_pkg_info.py +++ b/nibabel/tests/test_pkg_info.py @@ -1,13 +1,13 @@ """Testing package info """ +import pytest from packaging.version import Version import nibabel as nib from nibabel.pkg_info import cmp_pkg_version -from ..info import VERSION -import pytest +from ..info import VERSION def test_pkg_info(): diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index cd7c1830ea..dc877d3802 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -9,8 +9,9 @@ """Testing processing module """ -from os.path import dirname, join as pjoin import logging +from os.path import dirname +from os.path import join as pjoin import numpy as np import numpy.linalg as npl @@ -19,28 +20,28 @@ spnd, have_scipy, _ = optional_package('scipy.ndimage') +import unittest + +import pytest +from numpy.testing import assert_almost_equal, assert_array_equal + import nibabel as nib +from nibabel.affines import AffineError, apply_affine, from_matvec, to_matvec, voxel_sizes +from nibabel.eulerangles import euler2mat +from nibabel.nifti1 import Nifti1Image +from nibabel.nifti2 import Nifti2Image +from nibabel.orientations import aff2axcodes, inv_ornt_aff from nibabel.processing import ( - sigma2fwhm, - fwhm2sigma, adapt_affine, + conform, + fwhm2sigma, resample_from_to, resample_to_output, + sigma2fwhm, smooth_image, - conform, ) -from nibabel.nifti1 import Nifti1Image -from nibabel.nifti2 import Nifti2Image -from nibabel.orientations import aff2axcodes, inv_ornt_aff -from nibabel.affines import AffineError, from_matvec, to_matvec, apply_affine, voxel_sizes -from nibabel.eulerangles import euler2mat - -from numpy.testing import assert_almost_equal, assert_array_equal -import unittest -import pytest - -from nibabel.tests.test_spaces import assert_all_in, get_outspace_params from nibabel.testing import assert_allclose_safely +from nibabel.tests.test_spaces import assert_all_in, get_outspace_params needs_scipy = unittest.skipUnless(have_scipy, 'These tests need scipy') diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index c2ca1ed27c..dfac167690 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -28,38 +28,31 @@ These last are to allow the proxy to be re-used with different images. """ -from os.path import join as pjoin +import unittest import warnings -from itertools import product from io import BytesIO +from itertools import product +from os.path import join as pjoin import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_almost_equal, assert_array_equal -from ..volumeutils import apply_read_scaling +from .. import ecat, minc1, minc2, parrec from ..analyze import AnalyzeHeader -from ..spm99analyze import Spm99AnalyzeHeader -from ..spm2analyze import Spm2AnalyzeHeader -from ..nifti1 import Nifti1Header -from ..freesurfer.mghformat import MGHHeader -from .. import minc1 -from ..externals.netcdf import netcdf_file -from .. import minc2 -from .. import ecat -from .. 
import parrec -from ..casting import have_binary128 - from ..arrayproxy import ArrayProxy, is_proxy - -import unittest -import pytest -from numpy.testing import assert_almost_equal, assert_array_equal, assert_allclose - -from ..testing import data_path as DATA_PATH, assert_dt_equal, clear_and_catch_warnings +from ..casting import have_binary128 from ..deprecator import ExpiredDeprecationError +from ..externals.netcdf import netcdf_file +from ..freesurfer.mghformat import MGHHeader +from ..nifti1 import Nifti1Header from ..optpkg import optional_package - +from ..spm2analyze import Spm2AnalyzeHeader +from ..spm99analyze import Spm99AnalyzeHeader +from ..testing import assert_dt_equal, clear_and_catch_warnings +from ..testing import data_path as DATA_PATH from ..tmpdirs import InTemporaryDirectory - +from ..volumeutils import apply_read_scaling from .test_api_validators import ValidateAPI from .test_parrec import EG_REC, VARY_REC diff --git a/nibabel/tests/test_quaternions.py b/nibabel/tests/test_quaternions.py index 3dc681f517..a3e63dd851 100644 --- a/nibabel/tests/test_quaternions.py +++ b/nibabel/tests/test_quaternions.py @@ -9,14 +9,12 @@ """Test quaternion calculations""" import numpy as np -from numpy import pi - import pytest - +from numpy import pi from numpy.testing import assert_array_almost_equal, assert_array_equal -from .. import quaternions as nq from .. import eulerangles as nea +from .. import quaternions as nq # Example rotations eg_rots = [] diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 1d903d6f9f..49a9898ce2 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -9,11 +9,10 @@ """Tests recoder class""" import numpy as np - -from ..volumeutils import Recoder, DtypeMapper, native_code, swapped_code - import pytest +from ..volumeutils import DtypeMapper, Recoder, native_code, swapped_code + def test_recoder_1(): # simplest case, no aliases diff --git a/nibabel/tests/test_removalschedule.py b/nibabel/tests/test_removalschedule.py index 9300dfa207..939895abbd 100644 --- a/nibabel/tests/test_removalschedule.py +++ b/nibabel/tests/test_removalschedule.py @@ -1,8 +1,10 @@ -from ..pkg_info import cmp_pkg_version import unittest from unittest import mock + import pytest +from ..pkg_info import cmp_pkg_version + MODULE_SCHEDULE = [ ('5.0.0', ['nibabel.keywordonly', 'nibabel.py3k']), ('4.0.0', ['nibabel.trackvis']), diff --git a/nibabel/tests/test_round_trip.py b/nibabel/tests/test_round_trip.py index 54ab79a928..cb754d0b54 100644 --- a/nibabel/tests/test_round_trip.py +++ b/nibabel/tests/test_round_trip.py @@ -3,16 +3,16 @@ Test arrays with a range of numerical values, integer and floating point. """ -import numpy as np - from io import BytesIO -from .. import Nifti1Image, Nifti1Header -from ..spatialimages import HeaderDataError, supported_np_types -from ..arraywriters import ScalingError -from ..casting import best_float, ulp, type_info +import numpy as np from numpy.testing import assert_array_equal +from .. 
import Nifti1Header, Nifti1Image +from ..arraywriters import ScalingError +from ..casting import best_float, type_info, ulp +from ..spatialimages import HeaderDataError, supported_np_types + DEBUG = False diff --git a/nibabel/tests/test_rstutils.py b/nibabel/tests/test_rstutils.py index 55a0aace7c..847b7a4eee 100644 --- a/nibabel/tests/test_rstutils.py +++ b/nibabel/tests/test_rstutils.py @@ -2,11 +2,10 @@ """ import numpy as np +import pytest from ..rstutils import rst_table -import pytest - def test_rst_table(): # Tests for printable table function diff --git a/nibabel/tests/test_scaling.py b/nibabel/tests/test_scaling.py index e705a96c83..2fbe88a1a7 100644 --- a/nibabel/tests/test_scaling.py +++ b/nibabel/tests/test_scaling.py @@ -8,19 +8,17 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for scaling / rounding in volumeutils module""" -import numpy as np import warnings - from io import BytesIO -from ..volumeutils import finite_range, apply_read_scaling, array_to_file, array_from_file -from ..casting import type_info -from ..testing import suppress_warnings - -from .test_volumeutils import _calculate_scale -from numpy.testing import assert_array_almost_equal, assert_array_equal +import numpy as np import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from ..casting import type_info +from ..testing import suppress_warnings +from ..volumeutils import apply_read_scaling, array_from_file, array_to_file, finite_range +from .test_volumeutils import _calculate_scale # Debug print statements DEBUG = True diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index e4006788c1..a089fb7eef 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -5,30 +5,31 @@ Test running scripts """ -import sys +import csv import os import shutil -from os.path import dirname, join as pjoin, abspath, splitext, basename, exists -import csv +import sys +import unittest from glob import glob +from os.path import abspath, basename, dirname, exists +from os.path import join as pjoin +from os.path import splitext import numpy as np +import pytest +from numpy.testing import assert_almost_equal import nibabel as nib -from ..tmpdirs import InTemporaryDirectory + from ..loadsave import load from ..orientations import aff2axcodes, inv_ornt_aff - -import unittest -import pytest -from numpy.testing import assert_almost_equal - -from .scriptrunner import ScriptRunner +from ..testing import assert_data_similar, assert_dt_equal, assert_re_in +from ..tmpdirs import InTemporaryDirectory from .nibabel_data import needs_nibabel_data -from ..testing import assert_dt_equal, assert_re_in -from .test_parrec import DTI_PAR_BVECS, DTI_PAR_BVALS, EXAMPLE_IMAGES as PARREC_EXAMPLES -from .test_parrec_data import BALLS, AFF_OFF -from ..testing import assert_data_similar +from .scriptrunner import ScriptRunner +from .test_parrec import DTI_PAR_BVALS, DTI_PAR_BVECS +from .test_parrec import EXAMPLE_IMAGES as PARREC_EXAMPLES +from .test_parrec_data import AFF_OFF, BALLS def _proc_stdout(stdout): diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index 3e3f2ab0a8..83dec9256c 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -3,14 +3,13 @@ import numpy as np import numpy.linalg as npl +import pytest +from numpy.testing import assert_almost_equal -from ..spaces import vox2out_vox, slice2volume from ..affines import apply_affine, from_matvec -from ..nifti1 import Nifti1Image 
 from ..eulerangles import euler2mat
-
-import pytest
-from numpy.testing import assert_almost_equal
+from ..nifti1 import Nifti1Image
+from ..spaces import slice2volume, vox2out_vox


 def assert_all_in(in_shape, in_affine, out_shape, out_affine):
diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py
index cdbe8dc9f2..2a1da21bdd 100644
--- a/nibabel/tests/test_spatialimages.py
+++ b/nibabel/tests/test_spatialimages.py
@@ -10,27 +10,24 @@
 """

 import warnings
-
-import numpy as np
-
 from io import BytesIO
+from unittest import TestCase

-from ..spatialimages import SpatialHeader, SpatialImage, HeaderDataError
-from ..imageclasses import spatial_axes_first
+import numpy as np
 import pytest
-from unittest import TestCase
 from numpy.testing import assert_array_almost_equal

+from .. import load as top_load
+from ..imageclasses import spatial_axes_first
+from ..spatialimages import HeaderDataError, SpatialHeader, SpatialImage
 from ..testing import (
     bytesio_round_trip,
     clear_and_catch_warnings,
-    suppress_warnings,
-    memmap_after_ufunc,
     expires,
+    memmap_after_ufunc,
+    suppress_warnings,
 )
-
 from ..tmpdirs import InTemporaryDirectory
-from .. import load as top_load


 def test_header_init():
diff --git a/nibabel/tests/test_spm2analyze.py b/nibabel/tests/test_spm2analyze.py
index 9881a23d07..7e3d048de5 100644
--- a/nibabel/tests/test_spm2analyze.py
+++ b/nibabel/tests/test_spm2analyze.py
@@ -9,14 +9,11 @@
 """Tests for SPM2 header stuff"""

 import numpy as np
-
-from ..spatialimages import HeaderTypeError, HeaderDataError
-from ..spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage
-
 import pytest
 from numpy.testing import assert_array_equal

-
+from ..spatialimages import HeaderDataError, HeaderTypeError
+from ..spm2analyze import Spm2AnalyzeHeader, Spm2AnalyzeImage
 from . import test_spm99analyze
diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py
index 9d04643d2a..e5eb969388 100644
--- a/nibabel/tests/test_spm99analyze.py
+++ b/nibabel/tests/test_spm99analyze.py
@@ -7,14 +7,13 @@
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##

-import numpy as np
 import itertools
-
+import unittest
 from io import BytesIO

-from numpy.testing import assert_array_equal, assert_array_almost_equal
-import unittest
+import numpy as np
 import pytest
+from numpy.testing import assert_array_almost_equal, assert_array_equal

 from ..optpkg import optional_package

@@ -24,18 +23,16 @@
 # files
 needs_scipy = unittest.skipUnless(have_scipy, 'scipy not available')

-from ..spm99analyze import Spm99AnalyzeHeader, Spm99AnalyzeImage, HeaderTypeError
-from ..casting import type_info, shared_range
-from ..volumeutils import apply_read_scaling, _dt_min_max
-from ..spatialimages import supported_np_types, HeaderDataError
-
+from ..casting import shared_range, type_info
+from ..spatialimages import HeaderDataError, supported_np_types
+from ..spm99analyze import HeaderTypeError, Spm99AnalyzeHeader, Spm99AnalyzeImage
 from ..testing import (
-    bytesio_round_trip,
-    bytesio_filemap,
     assert_allclose_safely,
+    bytesio_filemap,
+    bytesio_round_trip,
     suppress_warnings,
 )
-
+from ..volumeutils import _dt_min_max, apply_read_scaling
 from . import test_analyze

 FLOAT_TYPES = np.sctypes['float']
diff --git a/nibabel/tests/test_testing.py b/nibabel/tests/test_testing.py
index 11a46bafdb..38c815d4c8 100644
--- a/nibabel/tests/test_testing.py
+++ b/nibabel/tests/test_testing.py
@@ -1,23 +1,23 @@
 """Tests for warnings context managers
 """

-import sys
 import os
+import sys
 import warnings

 import numpy as np
+import pytest

 from ..testing import (
-    error_warnings,
-    suppress_warnings,
-    clear_and_catch_warnings,
     assert_allclose_safely,
-    get_fresh_mod,
     assert_re_in,
-    test_data,
+    clear_and_catch_warnings,
     data_path,
+    error_warnings,
+    get_fresh_mod,
+    suppress_warnings,
+    test_data,
 )
-import pytest


 def test_assert_allclose_safely():
diff --git a/nibabel/tests/test_tmpdirs.py b/nibabel/tests/test_tmpdirs.py
index 2c0c5199ce..3b2e5d5466 100644
--- a/nibabel/tests/test_tmpdirs.py
+++ b/nibabel/tests/test_tmpdirs.py
@@ -1,11 +1,10 @@
 """Test tmpdirs module"""

 from os import getcwd
-from os.path import realpath, abspath, dirname, isfile
+from os.path import abspath, dirname, isfile, realpath

 from ..tmpdirs import InGivenDirectory

-
 MY_PATH = abspath(__file__)
 MY_DIR = dirname(MY_PATH)
diff --git a/nibabel/tests/test_tripwire.py b/nibabel/tests/test_tripwire.py
index 0efddbe8bb..f172d5c579 100644
--- a/nibabel/tests/test_tripwire.py
+++ b/nibabel/tests/test_tripwire.py
@@ -1,10 +1,10 @@
 """Testing tripwire module
 """

-from ..tripwire import TripWire, is_tripwire, TripWireError
-
 import pytest

+from ..tripwire import TripWire, TripWireError, is_tripwire
+

 def test_is_tripwire():
     assert not is_tripwire(object())
diff --git a/nibabel/tests/test_viewers.py b/nibabel/tests/test_viewers.py
index 04e616fedd..1649ba62da 100644
--- a/nibabel/tests/test_viewers.py
+++ b/nibabel/tests/test_viewers.py
@@ -7,18 +7,16 @@
 # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##

+import unittest
 from collections import namedtuple as nt

 import numpy as np
+import pytest
+from numpy.testing import assert_array_equal, assert_equal

 from ..optpkg import optional_package
 from ..viewers import OrthoSlicer3D

-from numpy.testing import assert_array_equal, assert_equal
-
-import unittest
-import pytest
-
 # Need at least MPL 1.3 for viewer tests.
# 2020.02.11 - 1.3 wheels are no longer distributed, so the minimum we test with is 1.5 matplotlib, has_mpl, _ = optional_package('matplotlib', min_version='1.5') diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index c2104b5b59..d8821d308b 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -8,57 +8,53 @@ ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Test for volumeutils module""" -import os -from os.path import exists - -from io import BytesIO -import tempfile -import warnings +import bz2 import functools -import itertools import gzip -import bz2 +import itertools +import os +import tempfile import threading import time -from packaging.version import Version +import warnings +from io import BytesIO +from os.path import exists import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from packaging.version import Version +from nibabel.testing import ( + assert_allclose_safely, + assert_dt_equal, + error_warnings, + suppress_warnings, +) + +from ..casting import OK_FLOATS, floor_log2, shared_range, type_info +from ..openers import BZ2File, ImageOpener, Opener +from ..optpkg import optional_package from ..tmpdirs import InTemporaryDirectory -from ..openers import ImageOpener from ..volumeutils import ( - array_from_file, + _dt_min_max, + _ftype4scaled_finite, _is_compressed_fobj, - array_to_file, - fname_ext_ul_case, - write_zeros, - seek_tell, + _write_data, apply_read_scaling, - working_type, + array_from_file, + array_to_file, best_write_scale_ftype, better_float_of, + fname_ext_ul_case, int_scinter_ftype, make_dt_codes, native_code, - shape_zoom_affine, rec2dict, - _dt_min_max, - _write_data, - _ftype4scaled_finite, -) -from ..openers import Opener, BZ2File -from ..casting import floor_log2, type_info, OK_FLOATS, shared_range - -from ..optpkg import optional_package - -from numpy.testing import assert_array_almost_equal, assert_array_equal -import pytest - -from nibabel.testing import ( - assert_dt_equal, - assert_allclose_safely, - suppress_warnings, - error_warnings, + seek_tell, + shape_zoom_affine, + working_type, + write_zeros, ) pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd') @@ -1274,7 +1270,7 @@ def _calculate_scale(data, out_dtype, allow_intercept): out_dtype = np.dtype(out_dtype) if np.can_cast(in_dtype, out_dtype): return 1.0, 0.0, None, None - from ..arraywriters import make_array_writer, WriterError, get_slope_inter + from ..arraywriters import WriterError, get_slope_inter, make_array_writer try: writer = make_array_writer(data, out_dtype, True, allow_intercept) diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 2e4ea6a788..718700768e 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -24,21 +24,18 @@ _field_recoders -> field_recoders """ import logging +from io import BytesIO, StringIO + import numpy as np +import pytest +from numpy.testing import assert_array_equal -from io import BytesIO, StringIO -from ..wrapstruct import WrapStructError, WrapStruct, LabeledWrapStruct +from .. import imageglobals from ..batteryrunners import Report - -from ..volumeutils import swapped_code, native_code, Recoder from ..spatialimages import HeaderDataError -from .. 
import imageglobals
-
 from ..testing import BaseTestCase
-
-from numpy.testing import assert_array_equal
-import pytest
-
+from ..volumeutils import Recoder, native_code, swapped_code
+from ..wrapstruct import LabeledWrapStruct, WrapStruct, WrapStructError

 INTEGER_TYPES = np.sctypes['int'] + np.sctypes['uint']
diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py
index c175940ff7..e8fba870c1 100644
--- a/nibabel/tmpdirs.py
+++ b/nibabel/tmpdirs.py
@@ -10,7 +10,7 @@
 """
 import os
 import shutil
-from tempfile import template, mkdtemp
+from tempfile import mkdtemp, template


 class TemporaryDirectory:
diff --git a/nibabel/viewers.py b/nibabel/viewers.py
index c3720d474b..d1c13dfeee 100644
--- a/nibabel/viewers.py
+++ b/nibabel/viewers.py
@@ -4,9 +4,10 @@
 Paul Ivanov.
 """

-import numpy as np
 import weakref

+import numpy as np
+
 from .affines import voxel_sizes
 from .optpkg import optional_package
 from .orientations import aff2axcodes, axcodes2ornt
diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py
index f026750e95..d31d91ea01 100644
--- a/nibabel/volumeutils.py
+++ b/nibabel/volumeutils.py
@@ -8,19 +8,19 @@
 ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
 """Utility functions for analyze-like formats"""

+import gzip
 import sys
 import warnings
-import gzip
 from collections import OrderedDict
-from os.path import exists, splitext
-from operator import mul
 from functools import reduce
+from operator import mul
+from os.path import exists, splitext

 import numpy as np

-from .casting import shared_range, OK_FLOATS
-from .openers import BZ2File, IndexedGzipFile
+from .casting import OK_FLOATS, shared_range
 from .externals.oset import OrderedSet
+from .openers import BZ2File, IndexedGzipFile
 from .optpkg import optional_package

 pyzstd, HAVE_ZSTD, _ = optional_package('pyzstd')
diff --git a/nibabel/wrapstruct.py b/nibabel/wrapstruct.py
index cdc2957dab..bf29e0828a 100644
--- a/nibabel/wrapstruct.py
+++ b/nibabel/wrapstruct.py
@@ -111,9 +111,9 @@
 """
 import numpy as np

-from .volumeutils import pretty_mapping, endian_codes, native_code, swapped_code
 from . import imageglobals as imageglobals
 from .batteryrunners import BatteryRunner
+from .volumeutils import endian_codes, native_code, pretty_mapping, swapped_code


 class WrapStructError(Exception):

From 263fca9bf6d4ca314a5a322b4824d6f53d0589df Mon Sep 17 00:00:00 2001
From: "Christopher J. Markiewicz"
Date: Thu, 29 Dec 2022 21:46:13 -0500
Subject: [PATCH 08/12] STY: Manual, blue-compatible touchups

[git-blame-ignore-rev]
---
 nibabel/analyze.py                           |   6 +-
 nibabel/arraywriters.py                      |   2 +-
 nibabel/brikhead.py                          |  22 +-
 nibabel/casting.py                           |  16 +-
 nibabel/cifti2/cifti2.py                     |  23 +-
 nibabel/cifti2/cifti2_axes.py                |   4 +-
 nibabel/cifti2/parse_cifti2.py               |   2 +-
 nibabel/cifti2/tests/test_axes.py            |  47 +-
 nibabel/cifti2/tests/test_cifti2.py          |  17 +-
 nibabel/cifti2/tests/test_cifti2io_header.py |  16 +-
 nibabel/cifti2/tests/test_new_cifti2.py      |  55 +-
 nibabel/cmdline/diff.py                      |   4 +-
 nibabel/cmdline/ls.py                        |   2 +-
 nibabel/cmdline/nifti_dx.py                  |   2 +-
 nibabel/cmdline/parrec2nii.py                |  28 +-
 nibabel/cmdline/tests/test_utils.py          | 503 ++++++++-----------
 nibabel/cmdline/utils.py                     |   2 +-
 nibabel/data.py                              |   6 +-
 nibabel/dataobj_images.py                    |   6 +-
 nibabel/ecat.py                              |   6 +-
 nibabel/filebasedimages.py                   |   2 +-
 nibabel/filename_parser.py                   |   2 +-
 nibabel/fileslice.py                         |   2 +-
 nibabel/freesurfer/mghformat.py              |  55 +-
 nibabel/freesurfer/tests/test_mghformat.py   |  23 +-
 nibabel/gifti/gifti.py                       |  11 +-
 nibabel/gifti/parse_gifti_fast.py            |   4 +-
 nibabel/gifti/util.py                        |   6 +-
 nibabel/loadsave.py                          |   4 +-
 nibabel/minc1.py                             |   6 +-
 nibabel/minc2.py                             |   2 +-
 nibabel/nicom/csareader.py                   |   6 +-
 nibabel/nicom/dicomreaders.py                |   4 +-
 nibabel/nicom/dicomwrappers.py               |  24 +-
 nibabel/nicom/tests/test_dicomreaders.py     |   2 +-
 nibabel/nifti1.py                            |  22 +-
 nibabel/nifti2.py                            |   2 +-
 nibabel/optpkg.py                            |   2 +-
 nibabel/orientations.py                      |  10 +-
 nibabel/parrec.py                            | 156 +++---
 nibabel/processing.py                        |  28 +-
 nibabel/rstutils.py                          |  20 +-
 nibabel/spatialimages.py                     |  23 +-
 nibabel/spm99analyze.py                      |   4 +-
 nibabel/streamlines/__init__.py              |   5 +-
 nibabel/streamlines/array_sequence.py        |   2 +-
 nibabel/streamlines/tck.py                   |   6 +-
 nibabel/streamlines/trk.py                   |  23 +-
 nibabel/tests/test_affines.py                |  43 +-
 nibabel/tests/test_analyze.py                |   2 +-
 nibabel/tests/test_brikhead.py               |  14 +-
 nibabel/tests/test_euler.py                  |  24 +-
 nibabel/tests/test_fileslice.py              |  31 +-
 nibabel/tests/test_floating.py               |  25 +-
 nibabel/tests/test_image_types.py            |   2 +-
 nibabel/tests/test_minc1.py                  |  36 +-
 nibabel/tests/test_minc2.py                  |  36 +-
 nibabel/tests/test_openers.py                |   4 +-
 nibabel/tests/test_orientations.py           | 159 +++++-
 nibabel/tests/test_parrec.py                 |  44 +-
 nibabel/tests/test_processing.py             |  35 +-
 nibabel/tests/test_proxy_api.py              |  14 +-
 nibabel/tests/test_spaces.py                 |  16 +-
 nibabel/tests/test_spatialimages.py          |  16 +-
 nibabel/tests/test_volumeutils.py            |  32 +-
 nibabel/viewers.py                           |   2 +-
 66 files changed, 961 insertions(+), 799 deletions(-)

diff --git a/nibabel/analyze.py b/nibabel/analyze.py
index 4a76350d59..e128239865 100644
--- a/nibabel/analyze.py
+++ b/nibabel/analyze.py
@@ -394,8 +394,8 @@ def from_header(klass, header=None, check=True):
                 obj.set_data_dtype(orig_code)
             except HeaderDataError:
                 raise HeaderDataError(
-                    f'Input header {header.__class__} has '
-                    f"datatype {header.get_value_label('datatype')} "
+                    f'Input header {header.__class__} has datatype '
+                    f'{header.get_value_label("datatype")} '
                     f'but output header {klass} does not support it'
                 )
         obj.set_data_dtype(header.get_data_dtype())
@@ -785,7 +785,7 @@ def set_slope_inter(self, slope, inter=None):
         """
         if (slope in (None, 1) or np.isnan(slope)) and (inter in (None, 0) or np.isnan(inter)):
             return
-        raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 ' 'for Analyze headers')
+        raise HeaderTypeError('Cannot set slope != 1 or intercept != 0 for Analyze headers')

     @classmethod
     def _get_checks(klass):
diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py
index
59e55b314c..21fd6ba6ee 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -432,7 +432,7 @@ def _range_scale(self, in_min, in_max): if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: raise WriterError( - 'Cannot scale negative and positive ' 'numbers to uint without intercept' + 'Cannot scale negative and positive numbers to uint without intercept' ) if in_max <= 0: # All input numbers <= 0 self.slope = in_min / out_max diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 0559671217..72b09c4d75 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -58,7 +58,12 @@ } space_codes = Recoder( - ((0, 'unknown', ''), (1, 'scanner', 'ORIG'), (3, 'talairach', 'TLRC'), (4, 'mni', 'MNI')), + ( + (0, 'unknown', ''), + (1, 'scanner', 'ORIG'), + (3, 'talairach', 'TLRC'), + (4, 'mni', 'MNI'), + ), fields=('code', 'label', 'space'), ) @@ -104,9 +109,7 @@ def _unpack_var(var): TEMPLATE_SPACE ORIG """ - err_msg = ( - 'Please check HEAD file to ensure it is AFNI compliant. ' f'Offending attribute:\n{var}' - ) + err_msg = f'Please check HEAD file to ensure it is AFNI compliant. Offending attribute:\n{var}' atype, aname = TYPE_RE.findall(var), NAME_RE.findall(var) if len(atype) != 1: raise AFNIHeaderError(f'Invalid attribute type entry in HEAD file. {err_msg}') @@ -119,8 +122,7 @@ def _unpack_var(var): attr = [atype(f) for f in attr.split()] except ValueError: raise AFNIHeaderError( - 'Failed to read variable from HEAD file ' - f'due to improper type casting. {err_msg}' + f'Failed to read variable from HEAD file due to improper type casting. {err_msg}' ) else: # AFNI string attributes will always start with open single quote and @@ -354,13 +356,7 @@ def _calc_zooms(self): origin", and second giving "Time step (TR)". """ xyz_step = tuple(np.abs(self.info['DELTA'])) - t_step = self.info.get( - 'TAXIS_FLOATS', - ( - 0, - 0, - ), - ) + t_step = self.info.get('TAXIS_FLOATS', (0, 0)) if len(t_step) > 0: t_step = (t_step[1],) return xyz_step + t_step diff --git a/nibabel/casting.py b/nibabel/casting.py index ce58915fe9..a17a25a2c8 100644 --- a/nibabel/casting.py +++ b/nibabel/casting.py @@ -259,15 +259,15 @@ def type_info(np_type): if vals in ( (112, 15, 16), # binary128 (info_64.nmant, info_64.nexp, 8), # float64 - (63, 15, 12), - (63, 15, 16), - ): # Intel extended 80 + (63, 15, 12), # Intel extended 80 + (63, 15, 16), # Intel extended 80 + ): return ret # these are OK without modification # The remaining types are longdoubles with bad finfo values. Some we # correct, others we wait to hear of errors. # We start with float64 as basis ret = type_info(np.float64) - if vals in ((52, 15, 12), (52, 15, 16)): # windows float96 # windows float128? + if vals in ((52, 15, 12), (52, 15, 16)): # windows float96 / windows float128? # On windows 32 bit at least, float96 is Intel 80 storage but operating # at float64 precision. 
The finfo values give nexp == 15 (as for intel # 80) but in calculations nexp in fact appears to be 11 as for float64 @@ -298,7 +298,13 @@ def type_info(np_type): if np_type is np.longcomplex: max_val += 0j ret = dict( - min=-max_val, max=max_val, nmant=112, nexp=15, minexp=-16382, maxexp=16384, width=width + min=-max_val, + max=max_val, + nmant=112, + nexp=15, + minexp=-16382, + maxexp=16384, + width=width, ) else: # don't recognize the type raise FloatingError(f'We had not expected long double type {np_type} with info {info}') diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 497b796dca..6c141b44f1 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -70,10 +70,15 @@ class Cifti2HeaderError(Exception): CIFTI_MODEL_TYPES = ( 'CIFTI_MODEL_TYPE_SURFACE', # Modeled using surface vertices - 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. + 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. ) -CIFTI_SERIESUNIT_TYPES = ('SECOND', 'HERTZ', 'METER', 'RADIAN') +CIFTI_SERIESUNIT_TYPES = ( + 'SECOND', + 'HERTZ', + 'METER', + 'RADIAN', +) CIFTI_BRAIN_STRUCTURES = ( 'CIFTI_STRUCTURE_ACCUMBENS_LEFT', @@ -662,7 +667,7 @@ def __init__(self, name=None, voxel_indices_ijk=None, vertices=None): self.vertices = vertices if vertices is not None else [] for val in self.vertices: if not isinstance(val, Cifti2Vertices): - raise ValueError('Cifti2Parcel vertices must be instances of ' 'Cifti2Vertices') + raise ValueError('Cifti2Parcel vertices must be instances of Cifti2Vertices') @property def voxel_indices_ijk(self): @@ -1237,7 +1242,7 @@ def _validate_new_mim(self, value): a2md = self._get_indices_from_mim(value) if not set(self.mapped_indices).isdisjoint(a2md): raise Cifti2HeaderError( - 'Indices in this Cifti2MatrixIndicesMap ' 'already mapped in this matrix' + 'Indices in this Cifti2MatrixIndicesMap already mapped in this matrix' ) def __setitem__(self, key, value): @@ -1412,7 +1417,13 @@ class Cifti2Image(DataobjImage, SerializableImage): rw = True def __init__( - self, dataobj=None, header=None, nifti_header=None, extra=None, file_map=None, dtype=None + self, + dataobj=None, + header=None, + nifti_header=None, + extra=None, + file_map=None, + dtype=None, ): """Initialize image @@ -1485,7 +1496,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): cifti_header = item.get_content() break else: - raise ValueError('NIfTI2 header does not contain a CIFTI-2 ' 'extension') + raise ValueError('NIfTI2 header does not contain a CIFTI-2 extension') # Construct cifti image. 
# Use array proxy object where possible diff --git a/nibabel/cifti2/cifti2_axes.py b/nibabel/cifti2/cifti2_axes.py index 3d88fca1e3..3142c8362b 100644 --- a/nibabel/cifti2/cifti2_axes.py +++ b/nibabel/cifti2/cifti2_axes.py @@ -665,7 +665,7 @@ def __add__(self, other): not np.allclose(other.affine, affine) or other.volume_shape != shape ): raise ValueError( - 'Trying to concatenate two BrainModels defined ' 'in a different brain volume' + 'Trying to concatenate two BrainModels defined in a different brain volume' ) nvertices = dict(self.nvertices) @@ -1008,7 +1008,7 @@ def __add__(self, other): not np.allclose(other.affine, affine) or other.volume_shape != shape ): raise ValueError( - 'Trying to concatenate two ParcelsAxis defined ' 'in a different brain volume' + 'Trying to concatenate two ParcelsAxis defined in a different brain volume' ) nvertices = dict(self.nvertices) for name, value in other.nvertices.items(): diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index 550d8e30bd..e067144997 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -335,7 +335,7 @@ def StartElementHandler(self, name, attrs): raise Cifti2HeaderError( 'Volume element can only be a child of the CIFTI-2 MatrixIndicesMap element' ) - dimensions = tuple([int(val) for val in attrs['VolumeDimensions'].split(',')]) + dimensions = tuple(int(val) for val in attrs['VolumeDimensions'].split(',')) volume = Cifti2Volume(volume_dimensions=dimensions) mim.append(volume) self.fsm_state.append('Volume') diff --git a/nibabel/cifti2/tests/test_axes.py b/nibabel/cifti2/tests/test_axes.py index b8940433af..4cabd188b1 100644 --- a/nibabel/cifti2/tests/test_axes.py +++ b/nibabel/cifti2/tests/test_axes.py @@ -128,7 +128,9 @@ def test_brain_models(): assert (bml[4].vertex == [2, 9, 14]).all() for bm, label, is_surface in zip( - bml, ['ThalamusRight', 'Other', 'cortex_left', 'Other'], (False, False, True, True) + bml, + ['ThalamusRight', 'Other', 'cortex_left', 'Other'], + (False, False, True, True), ): assert np.all(bm.surface_mask == ~bm.volume_mask) structures = list(bm.iter_structures()) @@ -176,18 +178,27 @@ def test_brain_models(): # Test the constructor bm_vox = axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), ) assert np.all(bm_vox.name == ['CIFTI_STRUCTURE_THALAMUS_LEFT'] * 5) assert np.array_equal(bm_vox.vertex, np.full(5, -1)) assert np.array_equal(bm_vox.voxel, np.full((5, 3), 1)) with pytest.raises(ValueError): # no volume shape - axes.BrainModelAxis('thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4)) + axes.BrainModelAxis( + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + ) with pytest.raises(ValueError): # no affine axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + volume_shape=(2, 3, 4), ) with pytest.raises(ValueError): # incorrect name @@ -207,7 +218,11 @@ def test_brain_models(): ) with pytest.raises(ValueError): # no voxels or vertices - axes.BrainModelAxis('thalamus_left', affine=np.eye(4), volume_shape=(2, 3, 4)) + axes.BrainModelAxis( + 'thalamus_left', + affine=np.eye(4), + volume_shape=(2, 3, 4), + ) with pytest.raises(ValueError): # incorrect voxel shape axes.BrainModelAxis( @@ -218,7 +233,9 @@ def test_brain_models(): ) bm_vertex = 
axes.BrainModelAxis( - 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_left': 20} + 'cortex_left', + vertex=np.ones(5, dtype=int), + nvertices={'cortex_left': 20}, ) assert np.array_equal(bm_vertex.name, ['CIFTI_STRUCTURE_CORTEX_LEFT'] * 5) assert np.array_equal(bm_vertex.vertex, np.full(5, 1)) @@ -227,11 +244,15 @@ def test_brain_models(): axes.BrainModelAxis('cortex_left', vertex=np.ones(5, dtype=int)) with pytest.raises(ValueError): axes.BrainModelAxis( - 'cortex_left', vertex=np.ones(5, dtype=int), nvertices={'cortex_right': 20} + 'cortex_left', + vertex=np.ones(5, dtype=int), + nvertices={'cortex_right': 20}, ) with pytest.raises(ValueError): axes.BrainModelAxis( - 'cortex_left', vertex=-np.ones(5, dtype=int), nvertices={'cortex_left': 20} + 'cortex_left', + vertex=-np.ones(5, dtype=int), + nvertices={'cortex_left': 20}, ) # test from_mask errors @@ -244,7 +265,10 @@ def test_brain_models(): # tests error in adding together or combining as ParcelsAxis bm_vox = axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), ) bm_vox + bm_vox assert (bm_vertex + bm_vox)[: bm_vertex.size] == bm_vertex @@ -289,7 +313,10 @@ def test_brain_models(): # test equalities bm_vox = axes.BrainModelAxis( - 'thalamus_left', voxel=np.ones((5, 3), dtype=int), affine=np.eye(4), volume_shape=(2, 3, 4) + 'thalamus_left', + voxel=np.ones((5, 3), dtype=int), + affine=np.eye(4), + volume_shape=(2, 3, 4), ) bm_other = deepcopy(bm_vox) assert bm_vox == bm_other diff --git a/nibabel/cifti2/tests/test_cifti2.py b/nibabel/cifti2/tests/test_cifti2.py index 98d97e34e2..bf287b8e03 100644 --- a/nibabel/cifti2/tests/test_cifti2.py +++ b/nibabel/cifti2/tests/test_cifti2.py @@ -79,10 +79,7 @@ def test_cifti2_metadata(): with pytest.raises(KeyError): md.difference_update({'a': 'aval', 'd': 'dval'}) - assert ( - md.to_xml().decode('utf-8') - == 'bbval' - ) + assert md.to_xml() == b'bbval' def test__float_01(): @@ -195,8 +192,7 @@ def test_cifti2_parcel(): assert len(pl.vertices) == 0 assert ( - pl.to_xml().decode('utf-8') - == '1 2 3' + pl.to_xml() == b'1 2 3' ) @@ -207,7 +203,7 @@ def test_cifti2_vertices(): vs.brain_structure = 'CIFTI_STRUCTURE_OTHER' - assert vs.to_xml().decode('utf-8') == '' + assert vs.to_xml() == b'' assert len(vs) == 0 vs.extend(np.array([0, 1, 2])) @@ -217,10 +213,7 @@ def test_cifti2_vertices(): with pytest.raises(ValueError): vs.insert(1, 'a') - assert ( - vs.to_xml().decode('utf-8') - == '0 1 2' - ) + assert vs.to_xml() == b'0 1 2' vs[0] = 10 assert vs[0] == 10 @@ -254,7 +247,7 @@ def test_cifti2_vertexindices(): vi.to_xml() vi.extend(np.array([0, 1, 2])) assert len(vi) == 3 - assert vi.to_xml().decode('utf-8') == '0 1 2' + assert vi.to_xml() == b'0 1 2' with pytest.raises(ValueError): vi[0] = 'a' diff --git a/nibabel/cifti2/tests/test_cifti2io_header.py b/nibabel/cifti2/tests/test_cifti2io_header.py index 7315a0d1f2..8d393686dd 100644 --- a/nibabel/cifti2/tests/test_cifti2io_header.py +++ b/nibabel/cifti2/tests/test_cifti2io_header.py @@ -11,6 +11,7 @@ from os.path import dirname from os.path import join as pjoin +import numpy as np import pytest from numpy.testing import assert_array_almost_equal from packaging.version import Version @@ -249,12 +250,17 @@ def test_read_geometry(): assert from_file.voxel_indices_ijk[-1] == expected[3] assert current_index == img.shape[1] - expected_affine = [[-2, 0, 0, 90], [0, 2, 0, -126], [0, 
0, 2, -72], [0, 0, 0, 1]] + expected_affine = [ + [-2, 0, 0, 90], + [0, 2, 0, -126], + [0, 0, 2, -72], + [0, 0, 0, 1], + ] expected_dimensions = (91, 109, 91) - assert ( - geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix - == expected_affine - ).all() + assert np.array_equal( + geometry_mapping.volume.transformation_matrix_voxel_indices_ijk_to_xyz.matrix, + expected_affine, + ) assert geometry_mapping.volume.volume_dimensions == expected_dimensions diff --git a/nibabel/cifti2/tests/test_new_cifti2.py b/nibabel/cifti2/tests/test_new_cifti2.py index 84f1376f1f..0f90b822da 100644 --- a/nibabel/cifti2/tests/test_new_cifti2.py +++ b/nibabel/cifti2/tests/test_new_cifti2.py @@ -20,14 +20,27 @@ suppress_warnings, ) -affine = [[-1.5, 0, 0, 90], [0, 1.5, 0, -85], [0, 0, 1.5, -71], [0, 0, 0, 1.0]] +affine = [ + [-1.5, 0, 0, 90], + [0, 1.5, 0, -85], + [0, 0, 1.5, -71], + [0, 0, 0, 1.0], +] dimensions = (120, 83, 78) number_of_vertices = 30000 brain_models = [ - ('CIFTI_STRUCTURE_THALAMUS_LEFT', [[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]]), + ( + 'CIFTI_STRUCTURE_THALAMUS_LEFT', + [ + [60, 60, 60], + [61, 59, 60], + [61, 60, 59], + [80, 90, 92], + ], + ), ('CIFTI_STRUCTURE_CORTEX_LEFT', [0, 1000, 1301, 19972, 27312]), ('CIFTI_STRUCTURE_CORTEX_RIGHT', [207]), ] @@ -107,7 +120,17 @@ def check_geometry_map(mapping): parcels = [ - ('volume_parcel', ([[60, 60, 60], [61, 59, 60], [61, 60, 59], [80, 90, 92]],)), + ( + 'volume_parcel', + ( + [ + [60, 60, 60], + [61, 59, 60], + [61, 60, 59], + [80, 90, 92], + ], + ), + ), ( 'surface_parcel', ( @@ -117,7 +140,13 @@ def check_geometry_map(mapping): ), ( 'mixed_parcel', - ([[71, 81, 39], [53, 21, 91]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999])), + ( + [ + [71, 81, 39], + [53, 21, 91], + ], + ('CIFTI_STRUCTURE_CORTEX_LEFT', [71, 88, 999]), + ), ), ('single_element', ([[71, 81, 39]], ('CIFTI_STRUCTURE_CORTEX_LEFT', [40]))), ] @@ -196,9 +225,19 @@ def check_scalar_map(mapping): ( 'first_name', {'meta_key': 'some_metadata'}, - {0: ('label0', (0.1, 0.3, 0.2, 0.5)), 1: ('new_label', (0.5, 0.3, 0.1, 0.4))}, + { + 0: ('label0', (0.1, 0.3, 0.2, 0.5)), + 1: ('new_label', (0.5, 0.3, 0.1, 0.4)), + }, + ), + ( + 'another name', + {}, + { + 0: ('???', (0, 0, 0, 0)), + 1: ('great region', (0.4, 0.1, 0.23, 0.15)), + }, ), - ('another name', {}, {0: ('???', (0, 0, 0, 0)), 1: ('great region', (0.4, 0.1, 0.23, 0.15))}), ] @@ -463,7 +502,7 @@ def test_pconnseries(): hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 13) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SERIES') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SERIES') with InTemporaryDirectory(): ci.save(img, 'test.pconnseries.nii') @@ -486,7 +525,7 @@ def test_pconnscalar(): hdr = ci.Cifti2Header(matrix) data = np.random.randn(4, 4, 2) img = ci.Cifti2Image(data, hdr) - img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_' 'PARCELLATED_SCALAR') + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_PARCELLATED_PARCELLATED_SCALAR') with InTemporaryDirectory(): ci.save(img, 'test.pconnscalar.nii') diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 5ca691ad64..799e17f645 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -51,7 +51,7 @@ def get_opt_parser(): '--header-fields', dest='header_fields', default='all', - help='Header fields (comma separated) to be printed as well' ' (if present)', + help='Header fields 
(comma separated) to be printed as well (if present)', ), Option( '--ma', @@ -59,7 +59,7 @@ def get_opt_parser(): dest='data_max_abs_diff', type=float, default=0.0, - help='Maximal absolute difference in data between files' ' to tolerate.', + help='Maximal absolute difference in data between files to tolerate.', ), Option( '--mr', diff --git a/nibabel/cmdline/ls.py b/nibabel/cmdline/ls.py index c78c0910bf..4f504910a2 100755 --- a/nibabel/cmdline/ls.py +++ b/nibabel/cmdline/ls.py @@ -20,7 +20,7 @@ import nibabel.cmdline.utils from nibabel.cmdline.utils import _err, ap, safe_get, table2string, verbose -__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko ' 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Yaroslav Halchenko and NiBabel contributors' __license__ = 'MIT' diff --git a/nibabel/cmdline/nifti_dx.py b/nibabel/cmdline/nifti_dx.py index 64f02694ee..103bbf2640 100644 --- a/nibabel/cmdline/nifti_dx.py +++ b/nibabel/cmdline/nifti_dx.py @@ -15,7 +15,7 @@ import nibabel as nib __author__ = 'Matthew Brett' -__copyright__ = 'Copyright (c) 2011-18 Matthew Brett ' 'and NiBabel contributors' +__copyright__ = 'Copyright (c) 2011-18 Matthew Brett and NiBabel contributors' __license__ = 'MIT' diff --git a/nibabel/cmdline/parrec2nii.py b/nibabel/cmdline/parrec2nii.py index d6d3d6afe7..c04a6e0196 100644 --- a/nibabel/cmdline/parrec2nii.py +++ b/nibabel/cmdline/parrec2nii.py @@ -44,10 +44,7 @@ def get_opt_parser(): type='string', dest='outdir', default=None, - help=one_line( - """Destination directory for NIfTI files. - Default: current directory.""" - ), + help='Destination directory for NIfTI files. Default: current directory.', ) ) p.add_option( @@ -81,10 +78,7 @@ def get_opt_parser(): action='store_true', dest='bvs', default=False, - help=one_line( - """Output bvals/bvecs files in addition to NIFTI - image.""" - ), + help='Output bvals/bvecs files in addition to NIFTI image.', ) ) p.add_option( @@ -207,7 +201,7 @@ def get_opt_parser(): default=False, help=one_line( """Do not discard the diagnostic Philips DTI - trace volume, if it exists in the data.""" + trace volume, if it exists in the data.""" ), ) ) @@ -217,10 +211,7 @@ def get_opt_parser(): action='store_true', dest='overwrite', default=False, - help=one_line( - """Overwrite file if it exists. Default: - False""" - ), + help='Overwrite file if it exists. Default: False', ) ) p.add_option( @@ -300,7 +291,14 @@ def proc_file(infile, opts): out_dtype = np.float64 # Reorient data block to LAS+ if necessary ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine)) - if np.all(ornt == [[0, 1], [1, 1], [2, 1]]): # already in LAS+ + if np.array_equal( + ornt, + [ + [0, 1], + [1, 1], + [2, 1], + ], + ): # already in LAS+ t_aff = np.eye(4) else: # Not in LAS+ t_aff = inv_ornt_aff(ornt, pr_img.shape) @@ -431,6 +429,6 @@ def main(): errs.append(f'{infile}: {e}') if len(errs): - error('Caught %i exceptions. Dump follows:\n\n %s' % (len(errs), '\n'.join(errs)), 1) + error(f'Caught {len(errs)} exceptions. 
Dump follows:\n\n' + '\n'.join(errs), 1) else: verbose('Done') diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 5f531769a9..6d2e6953fb 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -5,7 +5,6 @@ Test running scripts """ -from collections import OrderedDict from io import StringIO from os.path import join as pjoin @@ -58,134 +57,87 @@ def get_test(self): def test_get_headers_diff(): fnames = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) - expected_difference = OrderedDict( - [ - ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), - ( - 'dim_info', - [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], - ), - ( - 'dim', - [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), - ], - ), - ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), - ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), - ( - 'pixdim', - [ - np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), - np.array( - [ - -1.00000000e00, - 2.00000000e00, - 2.00000000e00, - 2.19999909e00, - 2.00000000e03, - 1.00000000e00, - 1.00000000e00, - 1.00000000e00, - ] - ).astype(dtype='float32'), - ], - ), - ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), - ( - 'xyzt_units', - [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], - ), - ( - 'cal_max', - [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), - ], - ), - ( - 'descrip', - [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), - ], - ), - ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ( - 'quatern_b', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), - ], - ), - ( - 'quatern_c', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.9967085123062134).astype(dtype='float32'), - ], - ), - ( - 'quatern_d', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), - ], - ), - ( - 'qoffset_x', - [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), - ], - ), - ( - 'qoffset_y', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), - ], - ), - ( - 'qoffset_z', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), - ], - ), - ( - 'srow_x', - [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_y', - [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_z', + expected_difference = { + 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], + 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'dim': [ + 
np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], + 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'pixdim': [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array( - [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] - ).astype(dtype='float32'), - ], - ), - ] - ) + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], + 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'cal_max': [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + 'descrip': [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'quatern_b': [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + 'quatern_c': [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + 'quatern_d': [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.0810687392950058).astype(dtype='float32'), + ], + 'qoffset_x': [ + np.array(0.0).astype(dtype='float32'), + np.array(117.8551025390625).astype(dtype='float32'), + ], + 'qoffset_y': [ + np.array(0.0).astype(dtype='float32'), + np.array(-35.72294235229492).astype(dtype='float32'), + ], + 'qoffset_z': [ + np.array(0.0).astype(dtype='float32'), + np.array(-7.248798370361328).astype(dtype='float32'), + ], + 'srow_x': [ + np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( + dtype='float32' + ), + ], + 'srow_y': [ + np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01]).astype( + dtype='float32' + ), + ], + 'srow_z': [ + np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( + dtype='float32' + ), + ], + } np.testing.assert_equal(actual_difference, expected_difference) @@ -193,25 +145,22 @@ def test_get_headers_diff(): def test_display_diff(): bogus_names = ['hellokitty.nii.gz', 'privettovarish.nii.gz'] - dict_values = OrderedDict( - [ - ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), - ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), - ] - ) - - expected_output = ( - 'These files are different.\n' + 'Field/File 1:hellokitty.nii.gz' - ' ' - '2:privettovarish.nii.gz \n' - 'datatype ' - '2 ' - '4 \n' - 'bitpix ' - '8 16' - ' ' - '\n' - ) + dict_values = { + 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], + 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + } + + expected_output = """\ +These files are different. 
+Field/File \ +1:hellokitty.nii.gz \ +2:privettovarish.nii.gz \n\ +datatype \ +2 \ +4 \n\ +bitpix \ +8 \ +16 \n""" assert display_diff(bogus_names, dict_values) == expected_output @@ -229,43 +178,33 @@ def test_get_data_diff(): test_array_5 = np.arange(64).reshape(8, 8) # same shape, 2 files - assert get_data_diff([test_array, test_array_2]) == OrderedDict( - [('DATA(diff 1:)', [None, OrderedDict([('abs', 1), ('rel', 2.0)])])] - ) + assert get_data_diff([test_array, test_array_2]) == { + 'DATA(diff 1:)': [None, {'abs': 1, 'rel': 2.0}] + } # same shape, 3 files - assert get_data_diff([test_array, test_array_2, test_array_3]) == OrderedDict( - [ - ( - 'DATA(diff 1:)', - [ - None, - OrderedDict([('abs', 1), ('rel', 2.0)]), - OrderedDict([('abs', 2), ('rel', 2.0)]), - ], - ), - ( - 'DATA(diff 2:)', - [None, None, OrderedDict([('abs', 1), ('rel', 0.66666666666666663)])], - ), - ] - ) + assert get_data_diff([test_array, test_array_2, test_array_3]) == { + 'DATA(diff 1:)': [ + None, + {'abs': 1, 'rel': 2.0}, + {'abs': 2, 'rel': 2.0}, + ], + 'DATA(diff 2:)': [None, None, {'abs': 1, 'rel': 0.66666666666666663}], + } # same shape, 2 files, modified maximum abs/rel - assert get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2) == OrderedDict() + assert get_data_diff([test_array, test_array_2], max_abs=2, max_rel=2) == {} # different shape, 2 files - assert get_data_diff([test_array_2, test_array_4]) == OrderedDict( - [('DATA(diff 1:)', [None, {'CMP': 'incompat'}])] - ) + assert get_data_diff([test_array_2, test_array_4]) == { + 'DATA(diff 1:)': [None, {'CMP': 'incompat'}] + } # different shape, 3 files - assert get_data_diff([test_array_4, test_array_5, test_array_2]) == OrderedDict( - [ - ('DATA(diff 1:)', [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}]), - ('DATA(diff 2:)', [None, None, {'CMP': 'incompat'}]), - ] - ) + assert get_data_diff([test_array_4, test_array_5, test_array_2]) == { + 'DATA(diff 1:)': [None, {'CMP': 'incompat'}, {'CMP': 'incompat'}], + 'DATA(diff 2:)': [None, None, {'CMP': 'incompat'}], + } test_return = get_data_diff([test_array, test_array_2], dtype=np.float32) assert type(test_return['DATA(diff 1:)'][1]['abs']) is np.float32 @@ -280,138 +219,88 @@ def test_get_data_diff(): def test_main(): test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] - expected_difference = OrderedDict( - [ - ('regular', [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))]), - ( - 'dim_info', - [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], - ), - ( - 'dim', + expected_difference = { + 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], + 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'dim': [ + np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + ], + 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], + 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'pixdim': [ + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array( [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), - ], - ), - ('datatype', [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')]), - ('bitpix', [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')]), - ( - 'pixdim', - [ - np.array([1.0, 1.0, 3.0, 2.0, 
1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), - np.array( - [ - -1.00000000e00, - 2.00000000e00, - 2.00000000e00, - 2.19999909e00, - 2.00000000e03, - 1.00000000e00, - 1.00000000e00, - 1.00000000e00, - ] - ).astype(dtype='float32'), - ], - ), - ('slice_end', [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')]), - ( - 'xyzt_units', - [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], - ), - ( - 'cal_max', - [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), - ], - ), - ( - 'descrip', - [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), - ], - ), - ('qform_code', [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ('sform_code', [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')]), - ( - 'quatern_b', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), - ], - ), - ( - 'quatern_c', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.9967085123062134).astype(dtype='float32'), - ], - ), - ( - 'quatern_d', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), - ], - ), - ( - 'qoffset_x', - [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), - ], - ), - ( - 'qoffset_y', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), - ], - ), - ( - 'qoffset_z', - [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), - ], - ), - ( - 'srow_x', - [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_y', - [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array( - [-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01] - ).astype(dtype='float32'), - ], - ), - ( - 'srow_z', - [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array( - [8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00] - ).astype(dtype='float32'), - ], - ), - ( - 'DATA(md5)', - ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], - ), - ] - ) + -1.00000000e00, + 2.00000000e00, + 2.00000000e00, + 2.19999909e00, + 2.00000000e03, + 1.00000000e00, + 1.00000000e00, + 1.00000000e00, + ] + ).astype(dtype='float32'), + ], + 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], + 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'cal_max': [ + np.array(0.0).astype(dtype='float32'), + np.asarray(1162.0).astype(dtype='float32'), + ], + 'descrip': [ + np.array(''.encode('utf-8')).astype(dtype='S80'), + np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( + dtype='S80' + ), + ], + 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'quatern_b': [ + np.array(0.0).astype(dtype='float32'), + np.array(-1.9451068140294884e-26).astype(dtype='float32'), + ], + 'quatern_c': [ + np.array(0.0).astype(dtype='float32'), + np.array(-0.9967085123062134).astype(dtype='float32'), + ], + 'quatern_d': [ + np.array(0.0).astype(dtype='float32'), + 
np.array(-0.0810687392950058).astype(dtype='float32'),
+        ],
+        'qoffset_x': [
+            np.array(0.0).astype(dtype='float32'),
+            np.array(117.8551025390625).astype(dtype='float32'),
+        ],
+        'qoffset_y': [
+            np.array(0.0).astype(dtype='float32'),
+            np.array(-35.72294235229492).astype(dtype='float32'),
+        ],
+        'qoffset_z': [
+            np.array(0.0).astype(dtype='float32'),
+            np.array(-7.248798370361328).astype(dtype='float32'),
+        ],
+        'srow_x': [
+            np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'),
+            np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype(
+                dtype='float32'
+            ),
+        ],
+        'srow_y': [
+            np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'),
+            np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01]).astype(
+                dtype='float32'
+            ),
+        ],
+        'srow_z': [
+            np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'),
+            np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype(
+                dtype='float32'
+            ),
+        ],
+        'DATA(md5)': ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'],
+    }

     with pytest.raises(SystemExit):
         np.testing.assert_equal(main(test_names, StringIO()), expected_difference)
diff --git a/nibabel/cmdline/utils.py b/nibabel/cmdline/utils.py
index 41b10d6b31..8e9d45251e 100644
--- a/nibabel/cmdline/utils.py
+++ b/nibabel/cmdline/utils.py
@@ -64,7 +64,7 @@ def table2string(table, out=None):
     atable = np.asarray(table)
     # eat whole entry while computing width for @w (for wide)
     markup_strip = re.compile('^@([lrc]|w.*)')
-    col_width = [max([len(markup_strip.sub('', x)) for x in column]) for column in atable.T]
+    col_width = [max(len(markup_strip.sub('', x)) for x in column) for column in atable.T]
     string = ''
     for i, table_ in enumerate(table):
         string_ = ''
diff --git a/nibabel/data.py b/nibabel/data.py
index eaa6e77acf..42826d2f67 100644
--- a/nibabel/data.py
+++ b/nibabel/data.py
@@ -13,9 +13,7 @@

 from .environment import get_nipy_system_dir, get_nipy_user_dir

-DEFAULT_INSTALL_HINT = (
-    'If you have the package, have you set the ' 'path to the package correctly?'
-)
+DEFAULT_INSTALL_HINT = 'If you have the package, have you set the path to the package correctly?'


 class DataError(Exception):
@@ -135,7 +133,7 @@ def __init__(self, base_path, config_filename=None):
         version_parts = self.version.split('.')
         self.major_version = int(version_parts[0])
         self.minor_version = int(version_parts[1])
-        self.version_no = float('%d.%d' % (self.major_version, self.minor_version))
+        self.version_no = float(f'{self.major_version}.{self.minor_version}')


 def _cfg_value(fname, section='DATA', value='path'):
diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py
index 054bba5272..64ef906820 100644
--- a/nibabel/dataobj_images.py
+++ b/nibabel/dataobj_images.py
@@ -48,10 +48,8 @@ def dataobj(self):
         return self._dataobj

     @deprecate_with_version(
-        'get_data() is deprecated in favor of get_fdata(),'
-        ' which has a more predictable return type. To '
-        'obtain get_data() behavior going forward, use '
-        'numpy.asanyarray(img.dataobj).',
+        'get_data() is deprecated in favor of get_fdata(), which has a more predictable return '
+        'type. To obtain get_data() behavior going forward, use numpy.asanyarray(img.dataobj).',
         '3.0',
         '5.0',
     )
diff --git a/nibabel/ecat.py b/nibabel/ecat.py
index 03d3f26a74..d151465933 100644
--- a/nibabel/ecat.py
+++ b/nibabel/ecat.py
@@ -822,7 +822,7 @@ def __init__(self, dataobj, affine, header, subheader, mlist, extra=None, file_m
     def affine(self):
         if not self._subheader._check_affines():
             warnings.warn(
-                'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning
+                'Affines different across frames, loading affine from FIRST frame', UserWarning
             )
         return self._affine

@@ -893,7 +893,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None):
         # Get affine
         if not subheaders._check_affines():
             warnings.warn(
-                'Affines different across frames, loading affine ' 'from FIRST frame', UserWarning
+                'Affines different across frames, loading affine from FIRST frame', UserWarning
             )
         aff = subheaders.get_frame_affine()
         img = klass(data, aff, header, subheaders, mlist, extra=None, file_map=file_map)
@@ -1010,7 +1010,7 @@ def to_file_map(self, file_map=None):

     @classmethod
     def from_image(klass, img):
-        raise NotImplementedError('Ecat images can only be generated ' 'from file objects')
+        raise NotImplementedError('Ecat images can only be generated from file objects')

     @classmethod
     def load(klass, filespec):
diff --git a/nibabel/filebasedimages.py b/nibabel/filebasedimages.py
index eee822566b..e37a698f2f 100644
--- a/nibabel/filebasedimages.py
+++ b/nibabel/filebasedimages.py
@@ -35,7 +35,7 @@ def from_header(klass, header=None):
         if type(header) == klass:
             return header.copy()
         raise NotImplementedError(
-            'Header class requires a conversion ' f'from {klass} to {type(header)}'
+            f'Header class requires a conversion from {klass} to {type(header)}'
         )

     @classmethod
diff --git a/nibabel/filename_parser.py b/nibabel/filename_parser.py
index 42e89fa721..77949a6791 100644
--- a/nibabel/filename_parser.py
+++ b/nibabel/filename_parser.py
@@ -114,7 +114,7 @@ def types_filenames(
     """
     template_fname = _stringify_path(template_fname)
     if not isinstance(template_fname, str):
-        raise TypesFilenamesError('Need file name as input ' 'to set_filenames')
+        raise TypesFilenamesError('Need file name as input to set_filenames')
     if template_fname.endswith('.'):
         template_fname = template_fname[:-1]
     filename, found_ext, ignored, guessed_name = parse_filename(
diff --git a/nibabel/fileslice.py b/nibabel/fileslice.py
index 75da3ff85f..87cac05a4a 100644
--- a/nibabel/fileslice.py
+++ b/nibabel/fileslice.py
@@ -104,7 +104,7 @@ def canonical_slicers(sliceobj, shape, check_inds=True):
         if slicer == Ellipsis:
             remaining = sliceobj[i + 1 :]
             if Ellipsis in remaining:
-                raise ValueError('More than one Ellipsis in slicing ' 'expression')
+                raise ValueError('More than one Ellipsis in slicing expression')
             real_remaining = [r for r in remaining if r is not None]
             n_ellided = n_dim - n_real - len(real_remaining)
             can_slicers.extend((slice(None),) * n_ellided)
diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py
index 6358a6af81..1091bedbcb 100644
--- a/nibabel/freesurfer/mghformat.py
+++ b/nibabel/freesurfer/mghformat.py
@@ -55,46 +55,10 @@
 # caveat: Note that it's ambiguous to get the code given the bytespervoxel
 # caveat 2: Note that the bytespervox you get is in str ( not an int)
 _dtdefs = (  # code, conversion function, dtype, bytes per voxel
-    (
-        0,
-        'uint8',
-        '>u1',
-        '1',
-        'MRI_UCHAR',
-        np.uint8,
-        np.dtype(np.uint8),
-        np.dtype(np.uint8).newbyteorder('>'),
-    ),
-    (
-        4,
-        'int16',
-        '>i2',
- '2', - 'MRI_SHORT', - np.int16, - np.dtype(np.int16), - np.dtype(np.int16).newbyteorder('>'), - ), - ( - 1, - 'int32', - '>i4', - '4', - 'MRI_INT', - np.int32, - np.dtype(np.int32), - np.dtype(np.int32).newbyteorder('>'), - ), - ( - 3, - 'float', - '>f4', - '4', - 'MRI_FLOAT', - np.float32, - np.dtype(np.float32), - np.dtype(np.float32).newbyteorder('>'), - ), + (0, 'uint8', '>u1', '1', 'MRI_UCHAR', np.uint8, np.dtype('u1'), np.dtype('>u1')), + (4, 'int16', '>i2', '2', 'MRI_SHORT', np.int16, np.dtype('i2'), np.dtype('>i2')), + (1, 'int32', '>i4', '4', 'MRI_INT', np.int32, np.dtype('i4'), np.dtype('>i4')), + (3, 'float', '>f4', '4', 'MRI_FLOAT', np.float32, np.dtype('f4'), np.dtype('>f4')), ) # make full code alias bank, including dtype column @@ -233,7 +197,12 @@ def get_vox2ras_tkr(self): ds = self._structarr['delta'] ns = self._structarr['dims'][:3] * ds / 2.0 v2rtkr = np.array( - [[-ds[0], 0, 0, ns[0]], [0, 0, ds[2], -ns[2]], [0, -ds[1], 0, ns[1]], [0, 0, 0, 1]], + [ + [-ds[0], 0, 0, ns[0]], + [0, 0, ds[2], -ns[2]], + [0, -ds[1], 0, ns[1]], + [0, 0, 0, 1], + ], dtype=np.float32, ) return v2rtkr @@ -312,7 +281,7 @@ def set_zooms(self, zooms): raise HeaderDataError('Expecting %d zoom values' % ndims) if np.any(zooms[:3] <= 0): raise HeaderDataError( - 'Spatial (first three) zooms must be positive; got ' f'{tuple(zooms[:3])}' + f'Spatial (first three) zooms must be positive; got {tuple(zooms[:3])}' ) hdr['delta'] = zooms[:3] if len(zooms) == 4: @@ -474,7 +443,7 @@ def as_byteswapped(self, endianness=None): """ if endianness is None or endian_codes[endianness] != '>': - raise ValueError('Cannot byteswap MGHHeader - ' 'must always be big endian') + raise ValueError('Cannot byteswap MGHHeader - must always be big endian') return self.copy() @classmethod diff --git a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index ee0ed50fec..0a850488c2 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -32,11 +32,22 @@ # sample voxel to ras matrix (mri_info --vox2ras) v2r = np.array( - [[1, 2, 3, -13], [2, 3, 1, -11.5], [3, 1, 2, -11.5], [0, 0, 0, 1]], dtype=np.float32 + [ + [1, 2, 3, -13], + [2, 3, 1, -11.5], + [3, 1, 2, -11.5], + [0, 0, 0, 1], + ], + dtype=np.float32, ) # sample voxel to ras - tkr matrix (mri_info --vox2ras-tkr) v2rtkr = np.array( - [[-1.0, 0.0, 0.0, 1.5], [0.0, 0.0, 1.0, -2.5], [0.0, -1.0, 0.0, 2.0], [0.0, 0.0, 0.0, 1.0]], + [ + [-1.0, 0.0, 0.0, 1.5], + [0.0, 0.0, 1.0, -2.5], + [0.0, -1.0, 0.0, 2.0], + [0.0, 0.0, 0.0, 1.0], + ], dtype=np.float32, ) @@ -145,7 +156,13 @@ def test_set_zooms(): assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 2]) h.set_zooms([1, 1, 1, 3]) assert_array_almost_equal(h.get_zooms(), [1, 1, 1, 3]) - for zooms in ((-1, 1, 1, 1), (1, -1, 1, 1), (1, 1, -1, 1), (1, 1, 1, -1), (1, 1, 1, 1, 5)): + for zooms in ( + (-1, 1, 1, 1), + (1, -1, 1, 1), + (1, 1, -1, 1), + (1, 1, 1, -1), + (1, 1, 1, 1, 5), + ): with pytest.raises(HeaderDataError): h.set_zooms(zooms) # smoke test for tr=0 diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index 7313f984f2..dc205d8004 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -108,7 +108,7 @@ def _sanitize(args, kwargs): @property @deprecate_with_version( - 'The data attribute is deprecated. Use GiftiMetaData object ' 'directly as a dict.', + 'The data attribute is deprecated. 
Use GiftiMetaData object directly as a dict.',
         '4.0',
         '6.0',
     )
@@ -147,7 +147,7 @@ class GiftiNVPairs:
     """
 
     @deprecate_with_version(
-        'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object ' 'as a dict, instead.',
+        'GiftiNVPairs objects are deprecated. Use the GiftiMetaData object as a dict, instead.',
         '4.0',
         '6.0',
     )
@@ -834,11 +834,10 @@ def _to_xml_element(self):
 
     def to_xml(self, enc='utf-8'):
         """Return XML corresponding to image content"""
-        return b"""<?xml version="1.0" encoding="UTF-8"?>
+        header = b"""<?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE GIFTI SYSTEM "http://www.nitrc.org/frs/download.php/115/gifti.dtd">
-""" + xml.XmlSerializable.to_xml(
-            self, enc
-        )
+"""
+        return header + super().to_xml(enc)
 
     # Avoid the indirection of going through to_file_map
     to_bytes = to_xml
diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py
index 88c63b5600..68dfb00af8 100644
--- a/nibabel/gifti/parse_gifti_fast.py
+++ b/nibabel/gifti/parse_gifti_fast.py
@@ -64,7 +64,7 @@ def read_data_block(darray, fname, data, mmap):
       ``numpy.ndarray`` or ``numpy.memmap`` containing the parsed data
     """
     if mmap not in (True, False, 'c', 'r', 'r+'):
-        raise ValueError("mmap value should be one of True, False, 'c', " "'r', 'r+'")
+        raise ValueError("mmap value should be one of True, False, 'c', 'r', 'r+'")
     if mmap is True:
         mmap = 'c'
     enclabel = gifti_encoding_codes.label[darray.encoding]
@@ -85,7 +85,7 @@ def read_data_block(darray, fname, data, mmap):
     if enclabel == 'External':
         if fname is None:
             raise GiftiParseError(
-                'ExternalFileBinary is not supported ' 'when loading from in-memory XML'
+                'ExternalFileBinary is not supported when loading from in-memory XML'
             )
         ext_fname = op.join(op.dirname(fname), darray.ext_fname)
         if not op.exists(ext_fname):
diff --git a/nibabel/gifti/util.py b/nibabel/gifti/util.py
index 7659ee33cc..9393292013 100644
--- a/nibabel/gifti/util.py
+++ b/nibabel/gifti/util.py
@@ -13,7 +13,11 @@
 KIND2FMT = {'i': '%i', 'u': '%i', 'f': '%10.6f', 'c': '%10.6f', 'V': ''}
 
 array_index_order_codes = Recoder(
-    ((1, 'RowMajorOrder', 'C'), (2, 'ColumnMajorOrder', 'F')), fields=('code', 'label', 'npcode')
+    (
+        (1, 'RowMajorOrder', 'C'),
+        (2, 'ColumnMajorOrder', 'F'),
+    ),
+    fields=('code', 'label', 'npcode'),
 )
 
 gifti_encoding_codes = Recoder(
diff --git a/nibabel/loadsave.py b/nibabel/loadsave.py
index f64f3e8230..6c1981ca77 100644
--- a/nibabel/loadsave.py
+++ b/nibabel/loadsave.py
@@ -198,7 +198,9 @@ def save(img, filename, **kwargs):
 
 
 @deprecate_with_version(
-    'read_img_data deprecated. ' 'Please use ``img.dataobj.get_unscaled()`` instead.', '3.2', '5.0'
+    'read_img_data deprecated. 
Please use ``img.dataobj.get_unscaled()`` instead.', + '3.2', + '5.0', ) def read_img_data(img, prefer='scaled'): """Read data from image associated with files diff --git a/nibabel/minc1.py b/nibabel/minc1.py index d6d2d3081b..fb183277bc 100644 --- a/nibabel/minc1.py +++ b/nibabel/minc1.py @@ -87,7 +87,7 @@ def get_data_shape(self): def get_zooms(self): """Get real-world sizes of voxels""" # zooms must be positive; but steps in MINC can be negative - return tuple([abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 for dim in self._dims]) + return tuple(abs(float(dim.step)) if hasattr(dim, 'step') else 1.0 for dim in self._dims) def get_affine(self): nspatial = len(self._spatial_dims) @@ -127,7 +127,7 @@ def _get_valid_range(self): except AttributeError: valid_range = [info.min, info.max] if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' 'data type range') + raise ValueError('Valid range outside input data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): @@ -170,7 +170,7 @@ def _normalize(self, data, sliceobj=()): mx_dims = self._get_dimensions(image_max) mn_dims = self._get_dimensions(image_min) if mx_dims != mn_dims: - raise MincError('"image-max" and "image-min" do not have the same' 'dimensions') + raise MincError('"image-max" and "image-min" do not have the same dimensions') nscales = len(mx_dims) if nscales > 2: raise MincError('More than two scaling dimensions') diff --git a/nibabel/minc2.py b/nibabel/minc2.py index 9638ced5ee..1fffae0c86 100644 --- a/nibabel/minc2.py +++ b/nibabel/minc2.py @@ -97,7 +97,7 @@ def _get_valid_range(self): valid_range = [info.min, info.max] else: if valid_range[0] < info.min or valid_range[1] > info.max: - raise ValueError('Valid range outside input ' 'data type range') + raise ValueError('Valid range outside input data type range') return np.asarray(valid_range, dtype=np.float64) def _get_scalar(self, var): diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 376dcb5b5a..961e93ecbb 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -99,8 +99,8 @@ def read(csa_str): csa_dict['n_tags'], csa_dict['check'] = up_str.unpack('2I') if not 0 < csa_dict['n_tags'] <= MAX_CSA_ITEMS: raise CSAReadError( - 'Number of tags `t` should be ' - '0 < t <= %d. Instead found %d tags.' % (MAX_CSA_ITEMS, csa_dict['n_tags']) + f'Number of tags `t` should be 0 < t <= {MAX_CSA_ITEMS}. ' + f'Instead found {csa_dict["n_tags"]} tags.' 
) for tag_no in range(csa_dict['n_tags']): name, vm, vr, syngodt, n_items, last3 = up_str.unpack('64si4s3i') @@ -138,7 +138,7 @@ def read(csa_str): else: # CSA2 item_len = x1 if (ptr + item_len) > csa_len: - raise CSAReadError('Item is too long, ' 'aborting read') + raise CSAReadError('Item is too long, aborting read') if item_no >= n_values: assert item_len == 0 continue diff --git a/nibabel/nicom/dicomreaders.py b/nibabel/nicom/dicomreaders.py index a3c49d7f10..113af967cc 100644 --- a/nibabel/nicom/dicomreaders.py +++ b/nibabel/nicom/dicomreaders.py @@ -170,9 +170,9 @@ def _third_pass(wrappers): '- slices are probably unsortable' ) if None in inos: - raise DicomReadError(msg_fmt % 'some or all slices with ' 'missing InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with missing InstanceNumber') if len(set(inos)) < len(inos): - raise DicomReadError(msg_fmt % 'some or all slices with ' 'the same InstanceNumber') + raise DicomReadError(msg_fmt % 'some or all slices with the same InstanceNumber') # sort by instance number wrappers.sort(key=_instance_sorter) # start loop, in which we start a new volume, each time we see a z diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index be070e8608..9290d6c376 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -84,9 +84,8 @@ def wrapper_from_data(dcm_data): csa = csar.get_csa_header(dcm_data) except csar.CSAReadError as e: warnings.warn( - 'Error while attempting to read CSA header: ' - + str(e.args) - + '\n Ignoring Siemens private (CSA) header info.' + f'Error while attempting to read CSA header: {e.args}\n' + 'Ignoring Siemens private (CSA) header info.' ) csa = None if csa is None: @@ -193,7 +192,7 @@ def rotation_matrix(self): # motivated in ``doc/source/notebooks/ata_error.ipynb``, and from # discussion at https://github.com/nipy/nibabel/pull/156 if not np.allclose(np.eye(3), np.dot(R, R.T), atol=5e-5): - raise WrapperPrecisionError('Rotation matrix not nearly ' 'orthogonal') + raise WrapperPrecisionError('Rotation matrix not nearly orthogonal') return R @one_time @@ -537,7 +536,7 @@ def image_shape(self): stack_ids = set(frame.FrameContentSequence[0].StackID for frame in self.frames) if len(stack_ids) > 1: raise WrapperError( - 'File contains more than one StackID. ' 'Cannot handle multi-stack files' + 'File contains more than one StackID. Cannot handle multi-stack files' ) # Determine if one of the dimension indices refers to the stack id dim_seq = [dim.DimensionIndexPointer for dim in self.get('DimensionIndexSequence')] @@ -551,9 +550,7 @@ def image_shape(self): # derived volume is included derived_tag = pydicom.datadict.tag_for_keyword('DiffusionBValue') if derived_tag not in dim_seq: - raise WrapperError( - 'Missing information, cannot remove indices ' 'with confidence.' 
-            )
+            raise WrapperError('Missing information, cannot remove indices with confidence.')
         derived_dim_idx = dim_seq.index(derived_tag)
         frame_indices = np.delete(frame_indices, derived_dim_idx, axis=1)
     # account for the 2 additional dimensions (row and column) not included
@@ -568,7 +565,7 @@ def image_shape(self):
         shape = (rows, cols) + tuple(ns_unique)
         n_vols = np.prod(shape[3:])
         if n_frames != n_vols * shape[2]:
-            raise WrapperError('Calculated shape does not match number of ' 'frames.')
+            raise WrapperError('Calculated shape does not match number of frames.')
         return tuple(shape)
 
     @one_time
@@ -582,7 +579,7 @@ def image_orient_patient(self):
         try:
             iop = self.frames[0].PlaneOrientationSequence[0].ImageOrientationPatient
         except AttributeError:
-            raise WrapperError('Not enough information for ' 'image_orient_patient')
+            raise WrapperError('Not enough information for image_orient_patient')
         if iop is None:
             return None
         iop = np.array(list(map(float, iop)))
@@ -833,9 +830,7 @@ def __init__(self, dcm_data, csa_header=None, n_mosaic=None):
                 pass
         if n_mosaic is None or n_mosaic == 0:
             raise WrapperError(
-                'No valid mosaic number in CSA '
-                'header; is this really '
-                'Siemens mosiac data?'
+                'No valid mosaic number in CSA header; is this really Siemens mosaic data?'
             )
         self.n_mosaic = n_mosaic
         self.mosaic_size = int(np.ceil(np.sqrt(n_mosaic)))
@@ -848,8 +843,7 @@ def image_shape(self):
         cols = self.get('Columns')
         if None in (rows, cols):
             return None
-        mosaic_size = self.mosaic_size
-        return (int(rows / mosaic_size), int(cols / mosaic_size), self.n_mosaic)
+        return (rows // self.mosaic_size, cols // self.mosaic_size, self.n_mosaic)
 
     @one_time
     def image_position(self):
diff --git a/nibabel/nicom/tests/test_dicomreaders.py b/nibabel/nicom/tests/test_dicomreaders.py
index b7a60dfc3b..1e749aced1 100644
--- a/nibabel/nicom/tests/test_dicomreaders.py
+++ b/nibabel/nicom/tests/test_dicomreaders.py
@@ -54,7 +54,7 @@ def test_passing_kwds():
 
 
 def test_slices_to_series():
-    dicom_files = (pjoin(IO_DATA_PATH, '%d.dcm' % i) for i in range(2))
+    dicom_files = (pjoin(IO_DATA_PATH, f'{i}.dcm') for i in range(2))
     wrappers = [didr.wrapper_from_file(f) for f in dicom_files]
     series = didr.slices_to_series(wrappers)
     assert len(series) == 1
diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py
index 0d28298313..392bf5c2ad 100644
--- a/nibabel/nifti1.py
+++ b/nibabel/nifti1.py
@@ -815,7 +815,7 @@ def get_data_shape(self):
             vec_len = int(self._structarr['glmin'])
             if vec_len == 0:
                 raise HeaderDataError(
-                    '-1 in dim[1] but 0 in glmin; ' 'inconsistent freesurfer type header?'
+                    '-1 in dim[1] but 0 in glmin; inconsistent freesurfer type header?'
) return (vec_len, 1, 1) + shape[3:] # Apply freesurfer hack for ico7 surface @@ -1095,7 +1095,7 @@ def set_qform(self, affine, code=None, strip_shears=True): P, S, Qs = npl.svd(R) PR = np.dot(P, Qs) if not strip_shears and not np.allclose(PR, R): - raise HeaderDataError('Shears in affine and `strip_shears` is ' 'False') + raise HeaderDataError('Shears in affine and `strip_shears` is False') # Convert to quaternion quat = mat2quat(PR) # Set into header @@ -1498,7 +1498,7 @@ def get_slice_duration(self): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set for duration to be valid') return float(self._structarr['slice_duration']) def set_slice_duration(self, duration): @@ -1515,20 +1515,20 @@ def set_slice_duration(self, duration): """ _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension must be set ' 'for duration to be valid') + raise HeaderDataError('Slice dimension must be set for duration to be valid') self._structarr['slice_duration'] = duration def get_n_slices(self): """Return the number of slices""" _, _, slice_dim = self.get_dim_info() if slice_dim is None: - raise HeaderDataError('Slice dimension not set in header ' 'dim_info') + raise HeaderDataError('Slice dimension not set in header dim_info') shape = self.get_data_shape() try: slice_len = shape[slice_dim] except IndexError: raise HeaderDataError( - f'Slice dimension index ({slice_dim}) ' f'outside shape tuple ({shape})' + f'Slice dimension index ({slice_dim}) outside shape tuple ({shape})' ) return slice_len @@ -1561,7 +1561,7 @@ def get_slice_times(self): duration = self.get_slice_duration() slabel = self.get_value_label('slice_code') if slabel == 'unknown': - raise HeaderDataError('Cannot get slice times when ' 'Slice code is "unknown"') + raise HeaderDataError('Cannot get slice times when slice code is "unknown"') slice_start, slice_end = (int(hdr['slice_start']), int(hdr['slice_end'])) if slice_start < 0: raise HeaderDataError('slice_start should be >= 0') @@ -1602,7 +1602,7 @@ def set_slice_times(self, slice_times): hdr = self._structarr slice_len = self.get_n_slices() if slice_len != len(slice_times): - raise HeaderDataError('Number of slice times does not ' 'match number of slices') + raise HeaderDataError('Number of slice times does not match number of slices') # Extract Nones at beginning and end. 
Check for others for ind, time in enumerate(slice_times): if time is not None: @@ -1617,12 +1617,12 @@ def set_slice_times(self, slice_times): timed = slice_times[slice_start : slice_end + 1] for time in timed: if time is None: - raise HeaderDataError('Cannot have None in middle ' 'of slice time vector') + raise HeaderDataError('Cannot have None in middle of slice time vector') # Find slice duration, check times are compatible with single # duration tdiffs = np.diff(np.sort(timed)) if not np.allclose(np.diff(tdiffs), 0): - raise HeaderDataError('Slice times not compatible with ' 'single slice duration') + raise HeaderDataError('Slice times not compatible with single slice duration') duration = np.mean(tdiffs) # To slice time order st_order = np.round(np.array(timed) / duration) @@ -1752,7 +1752,7 @@ def _chk_offset(hdr, fix=False): return hdr, rep if magic == hdr.single_magic and offset < hdr.single_vox_offset: rep.problem_level = 40 - rep.problem_msg = 'vox offset %d too low for ' 'single file nifti1' % offset + rep.problem_msg = 'vox offset %d too low for single file nifti1' % offset if fix: hdr['vox_offset'] = hdr.single_vox_offset rep.fix_msg = f'setting to minimum value of {hdr.single_vox_offset}' diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 193e458c6b..01a918e445 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -211,7 +211,7 @@ def _chk_eol_check(hdr, fix=False): return hdr, rep rep.problem_level = 40 rep.problem_msg = ( - 'EOL check not 0 or 13, 10, 26, 10; data may be ' 'corrupted by EOL conversion' + 'EOL check not 0 or 13, 10, 26, 10; data may be corrupted by EOL conversion' ) if fix: hdr['eol_check'] = (13, 10, 26, 10) diff --git a/nibabel/optpkg.py b/nibabel/optpkg.py index c91ad0f1e8..d1eb9d17d5 100644 --- a/nibabel/optpkg.py +++ b/nibabel/optpkg.py @@ -105,7 +105,7 @@ def optional_package(name, trip_msg=None, min_version=None): trip_msg = f'These functions need {name} version >= {min_version}' if trip_msg is None: trip_msg = ( - f'We need package {name} for these functions, ' f'but ``import {name}`` raised {exc}' + f'We need package {name} for these functions, but ``import {name}`` raised {exc}' ) pkg = TripWire(trip_msg) diff --git a/nibabel/orientations.py b/nibabel/orientations.py index 0adf19ca78..f9e1ea028c 100644 --- a/nibabel/orientations.py +++ b/nibabel/orientations.py @@ -154,10 +154,10 @@ def apply_orientation(arr, ornt): ornt = np.asarray(ornt) n = ornt.shape[0] if t_arr.ndim < n: - raise OrientationError('Data array has fewer dimensions than ' 'orientation') + raise OrientationError('Data array has fewer dimensions than orientation') # no coordinates can be dropped for applying the orientations if np.any(np.isnan(ornt[:, 0])): - raise OrientationError('Cannot drop coordinates when ' 'applying orientation to data') + raise OrientationError('Cannot drop coordinates when applying orientation to data') # apply ornt transformations for ax, flip in enumerate(ornt[:, 1]): if flip == -1: @@ -225,7 +225,11 @@ def inv_ornt_aff(ornt, shape): return np.dot(undo_flip, undo_reorder) -@deprecate_with_version('flip_axis is deprecated. ' 'Please use numpy.flip instead.', '3.2', '5.0') +@deprecate_with_version( + 'flip_axis is deprecated. 
Please use numpy.flip instead.',
+    '3.2',
+    '5.0',
+)
 def flip_axis(arr, axis=0):
     """Flip contents of `axis` in array `arr`
diff --git a/nibabel/parrec.py b/nibabel/parrec.py
index 04184117dc..81e956f2b8 100644
--- a/nibabel/parrec.py
+++ b/nibabel/parrec.py
@@ -141,16 +141,35 @@
 
 # PSL to RAS affine
 PSL_TO_RAS = np.array(
-    [[0, 0, -1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]  # L -> R  # P -> A  # S -> S
+    [
+        [0, 0, -1, 0],  # L -> R
+        [-1, 0, 0, 0],  # P -> A
+        [0, 1, 0, 0],  # S -> S
+        [0, 0, 0, 1],
+    ]
 )
 
 # Acquisition (tra/sag/cor) to PSL axes
 # These come from looking at transverse, sagittal, coronal datasets where we
 # can see the LR, PA, SI orientation of the slice axes from the scanned object
 ACQ_TO_PSL = dict(
-    transverse=np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]),  # P  # S  # L
+    transverse=np.array(
+        [
+            [0, 1, 0, 0],  # P
+            [0, 0, 1, 0],  # S
+            [1, 0, 0, 0],  # L
+            [0, 0, 0, 1],
+        ]
+    ),
     sagittal=np.diag([1, -1, -1, 1]),
-    coronal=np.array([[0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]),  # P  # S  # L
+    coronal=np.array(
+        [
+            [0, 0, 1, 0],  # P
+            [0, -1, 0, 0],  # S
+            [1, 0, 0, 0],  # L
+            [0, 0, 0, 1],
+        ]
+    ),
 )
 
 DEG2RAD = np.pi / 180.0
@@ -212,121 +231,55 @@
 image_def_dtds = {}
 image_def_dtds['V4'] = [
     ('slice number', int),
-    (
-        'echo number',
-        int,
-    ),
-    (
-        'dynamic scan number',
-        int,
-    ),
-    (
-        'cardiac phase number',
-        int,
-    ),
-    (
-        'image_type_mr',
-        int,
-    ),
-    (
-        'scanning sequence',
-        int,
-    ),
-    (
-        'index in REC file',
-        int,
-    ),
-    (
-        'image pixel size',
-        int,
-    ),
-    (
-        'scan percentage',
-        int,
-    ),
+    ('echo number', int),
+    ('dynamic scan number', int),
+    ('cardiac phase number', int),
+    ('image_type_mr', int),
+    ('scanning sequence', int),
+    ('index in REC file', int),
+    ('image pixel size', int),
+    ('scan percentage', int),
     ('recon resolution', int, (2,)),
     ('rescale intercept', float),
     ('rescale slope', float),
     ('scale slope', float),
     # Window center, width recorded as integer but can be float
-    (
-        'window center',
-        float,
-    ),
-    (
-        'window width',
-        float,
-    ),
+    ('window center', float),
+    ('window width', float),
     ('image angulation', float, (3,)),
     ('image offcentre', float, (3,)),
     ('slice thickness', float),
     ('slice gap', float),
-    (
-        'image_display_orientation',
-        int,
-    ),
-    (
-        'slice orientation',
-        int,
-    ),
-    (
-        'fmri_status_indication',
-        int,
-    ),
-    (
-        'image_type_ed_es',
-        int,
-    ),
+    ('image_display_orientation', int),
+    ('slice orientation', int),
+    ('fmri_status_indication', int),
+    ('image_type_ed_es', int),
     ('pixel spacing', float, (2,)),
     ('echo_time', float),
     ('dyn_scan_begin_time', float),
     ('trigger_time', float),
     ('diffusion_b_factor', float),
-    (
-        'number of averages',
-        int,
-    ),
+    ('number of averages', int),
     ('image_flip_angle', float),
-    (
-        'cardiac frequency',
-        int,
-    ),
-    (
-        'minimum RR-interval',
-        int,
-    ),
-    (
-        'maximum RR-interval',
-        int,
-    ),
-    (
-        'TURBO factor',
-        int,
-    ),
+    ('cardiac frequency', int),
+    ('minimum RR-interval', int),
+    ('maximum RR-interval', int),
+    ('TURBO factor', int),
     ('Inversion delay', float),
 ]
 
 # Extra image def fields for 4.1 compared to 4
 image_def_dtds['V4.1'] = image_def_dtds['V4'] + [
-    (
-        'diffusion b value number',
-        int,
-    ),  # (imagekey!)
-    (
-        'gradient orientation number',
-        int,
-    ),  # (imagekey!)
-    ('contrast type', 'S30'),              # XXX might be too short?
-    ('diffusion anisotropy type', 'S30'),  # XXX might be too short?
+    ('diffusion b value number', int),  # (imagekey!)
+ ('gradient orientation number', int), # (imagekey!) + ('contrast type', 'S30'), # XXX might be too short? + ('diffusion anisotropy type', 'S30'), # XXX might be too short? ('diffusion', float, (3,)), ] # Extra image def fields for 4.2 compared to 4.1 image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ - ( - 'label type', - int, - ), # (imagekey!) + ('label type', int), # (imagekey!) ] #: PAR header versions we claim to understand @@ -337,7 +290,12 @@ #: slice orientation codes slice_orientation_codes = Recoder( - ((1, 'transverse'), (2, 'sagittal'), (3, 'coronal')), fields=('code', 'label') # code, label + ( # code, label + (1, 'transverse'), + (2, 'sagittal'), + (3, 'coronal'), + ), + fields=('code', 'label'), ) @@ -804,7 +762,7 @@ def from_header(klass, header=None): raise PARRECError('Cannot create PARRECHeader from air.') if type(header) == klass: return header.copy() - raise PARRECError('Cannot create PARREC header from ' 'non-PARREC header.') + raise PARRECError('Cannot create PARREC header from non-PARREC header.') @classmethod def from_fileobj(klass, fileobj, permit_truncated=False, strict_sort=False): @@ -830,9 +788,7 @@ def as_analyze_map(self): f"{self.general_info['patient_name']};" f"{self.general_info['exam_date'].replace(' ', '')};" f"{self.general_info['protocol_name']}" - )[ - :80 - ] # max len + )[:80] is_fmri = self.general_info['max_dynamics'] > 1 # PAR/REC uses msec, but in _calc_zooms we convert to sec t = 'sec' if is_fmri else 'unknown' @@ -930,7 +886,7 @@ def _get_unique_image_prop(self, name): props = self.image_defs[name] if np.any(np.diff(props, axis=0)): raise PARRECError( - f'Varying {name} in image sequence ' f'({props}). This is not supported.' + f'Varying {name} in image sequence ({props}). This is not supported.' ) return props[0] diff --git a/nibabel/processing.py b/nibabel/processing.py index 669b416fb6..e3c9ae8214 100644 --- a/nibabel/processing.py +++ b/nibabel/processing.py @@ -114,7 +114,12 @@ def adapt_affine(affine, n_dim): def resample_from_to( - from_img, to_vox_map, order=3, mode='constant', cval=0.0, out_class=Nifti1Image + from_img, + to_vox_map, + order=3, + mode='constant', + cval=0.0, + out_class=Nifti1Image, ): """Resample image `from_img` to mapped voxel space `to_vox_map` @@ -155,7 +160,7 @@ def resample_from_to( # This check requires `shape` attribute of image if not spatial_axes_first(from_img): raise ValueError( - 'Cannot predict position of spatial axes for Image ' 'type ' + str(type(from_img)) + f'Cannot predict position of spatial axes for Image type {type(from_img)}' ) try: to_shape, to_affine = to_vox_map.shape, to_vox_map.affine @@ -177,7 +182,12 @@ def resample_from_to( def resample_to_output( - in_img, voxel_sizes=None, order=3, mode='constant', cval=0.0, out_class=Nifti1Image + in_img, + voxel_sizes=None, + order=3, + mode='constant', + cval=0.0, + out_class=Nifti1Image, ): """Resample image `in_img` to output voxel axes (world space) @@ -235,7 +245,13 @@ def resample_to_output( return resample_from_to(in_img, out_vox_map, order, mode, cval, out_class) -def smooth_image(img, fwhm, mode='nearest', cval=0.0, out_class=Nifti1Image): +def smooth_image( + img, + fwhm, + mode='nearest', + cval=0.0, + out_class=Nifti1Image, +): """Smooth image `img` along voxel axes by FWHM `fwhm` millimeters Parameters @@ -275,9 +291,7 @@ def smooth_image(img, fwhm, mode='nearest', cval=0.0, out_class=Nifti1Image): """ # This check requires `shape` attribute of image if not spatial_axes_first(img): - raise ValueError( - 'Cannot predict position of 
spatial axes for Image ' 'type ' + str(type(img))
-        )
+        raise ValueError(f'Cannot predict position of spatial axes for Image type {type(img)}')
     if out_class is None:
         out_class = img.__class__
     n_dim = len(img.shape)
diff --git a/nibabel/rstutils.py b/nibabel/rstutils.py
index a63894cef8..cb40633e54 100644
--- a/nibabel/rstutils.py
+++ b/nibabel/rstutils.py
@@ -7,7 +7,12 @@
 
 
 def rst_table(
-    cell_values, row_names=None, col_names=None, title='', val_fmt='{0:5.2f}', format_chars=None
+    cell_values,
+    row_names=None,
+    col_names=None,
+    title='',
+    val_fmt='{0:5.2f}',
+    format_chars=None,
 ):
     """Return string for ReST table with entries `cell_values`
 
@@ -87,14 +92,23 @@ def rst_table(
     row_val_fmt = '{0:<' + str(col_len) + '}'
     table_strs = []
     if title != '':
-        table_strs += [title_heading * len(title), title, title_heading * len(title), '']
+        table_strs += [
+            title_heading * len(title),
+            title,
+            title_heading * len(title),
+            '',
+        ]
     along_headings = [along * len(h) for h in col_headings]
     crossed_line = cross_starter + cross_joiner.join(along_headings) + cross_ender
     thick_long_headings = [thick_long * len(h) for h in col_headings]
     crossed_thick_line = (
         cross_thick_starter + cross_thick_joiner.join(thick_long_headings) + cross_thick_ender
     )
-    table_strs += [crossed_line, down_starter + col_header + down_ender, crossed_thick_line]
+    table_strs += [
+        crossed_line,
+        down_starter + col_header + down_ender,
+        crossed_thick_line,
+    ]
     for row_no, row_name in enumerate(row_names):
         row_vals = [row_val_fmt.format(row_str) for row_str in row_str_list[row_no]]
         row_line = down_starter + down_joiner.join([row_name] + row_vals) + down_ender
diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py
index 1adf63fe42..c582ee149b 100644
--- a/nibabel/spatialimages.py
+++ b/nibabel/spatialimages.py
@@ -311,8 +311,7 @@ def __init__(self, img):
 
         if not spatial_axes_first(img):
             raise ValueError(
-                'Cannot predict position of spatial axes for '
-                'Image type ' + img.__class__.__name__
+                f'Cannot predict position of spatial axes for image type {img.__class__.__name__}'
             )
         self.img = img
 
@@ -356,7 +355,7 @@ def check_slicing(self, slicer, return_spatial=False):
                 raise IndexError('New axis not permitted in spatial dimensions')
             elif isinstance(subslicer, int):
                 raise IndexError(
-                    'Scalar indices disallowed in spatial dimensions; ' 'Use `[x]` or `x:x+1`.'
+                    'Scalar indices disallowed in spatial dimensions; Use `[x]` or `x:x+1`.'
                )
         return spatial_slices if return_spatial else slicer
 
@@ -495,16 +494,14 @@ def _affine2header(self):
     def __str__(self):
         shape = self.shape
         affine = self.affine
-        return '\n'.join(
-            (
-                str(self.__class__),
-                f'data shape {shape}',
-                'affine: ',
-                str(affine),
-                'metadata:',
-                str(self._header),
-            )
-        )
+        return f"""\
+{self.__class__}
+data shape {shape}
+affine:
+{affine}
+metadata:
+{self._header}\
+"""
 
     def get_data_dtype(self):
         return self._header.get_data_dtype()
diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py
index cad77c4d09..7a2f176318 100644
--- a/nibabel/spm99analyze.py
+++ b/nibabel/spm99analyze.py
@@ -88,7 +88,7 @@ def set_slope_inter(self, slope, inter=None):
             self._structarr['scl_slope'] = slope
         if inter in (None, 0) or np.isnan(inter):
             return
-        raise HeaderTypeError('Cannot set non-zero intercept ' 'for SPM headers')
+        raise HeaderTypeError('Cannot set non-zero intercept for SPM headers')
 
 
 class Spm99AnalyzeHeader(SpmAnalyzeHeader):
@@ -282,7 +282,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None):
             if 'mat' in mats:  # this overrides a 'M', and includes any flip
                 mat = mats['mat']
                 if mat.ndim > 2:
-                    warnings.warn('More than one affine in "mat" matrix, ' 'using first')
+                    warnings.warn('More than one affine in "mat" matrix, using first')
                     mat = mat[:, :, 0]
                 ret._affine = mat
         elif 'M' in mats:  # the 'M' matrix does not include flips
diff --git a/nibabel/streamlines/__init__.py b/nibabel/streamlines/__init__.py
index 604c32b1e5..f99f80e4e4 100644
--- a/nibabel/streamlines/__init__.py
+++ b/nibabel/streamlines/__init__.py
@@ -11,7 +11,10 @@
 from .trk import TrkFile
 
 # List of all supported formats
-FORMATS = {'.trk': TrkFile, '.tck': TckFile}
+FORMATS = {
+    '.trk': TrkFile,
+    '.tck': TckFile,
+}
 
 
 def is_supported(fileobj):
diff --git a/nibabel/streamlines/array_sequence.py b/nibabel/streamlines/array_sequence.py
index f9e9af90e3..faa5d2390d 100644
--- a/nibabel/streamlines/array_sequence.py
+++ b/nibabel/streamlines/array_sequence.py
@@ -527,7 +527,7 @@ def _op(self, op, value=None, inplace=False):
     def __iter__(self):
         if len(self._lengths) != len(self._offsets):
             raise ValueError(
-                'ArraySequence object corrupted:' ' len(self._lengths) != len(self._offsets)'
+                'ArraySequence object corrupted: len(self._lengths) != len(self._offsets)'
             )
 
         for offset, lengths in zip(self._offsets, self._lengths):
diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py
index e08afb48ea..7738a0e069 100644
--- a/nibabel/streamlines/tck.py
+++ b/nibabel/streamlines/tck.py
@@ -360,14 +360,14 @@ def _read_header(cls, fileobj):
 
         # Check integrity of TCK header.
         if 'datatype' not in hdr:
-            msg = "Missing 'datatype' attribute in TCK header." ' Assuming it is Float32LE.'
+            msg = "Missing 'datatype' attribute in TCK header. Assuming it is Float32LE."
             warnings.warn(msg, HeaderWarning)
             hdr['datatype'] = 'Float32LE'
 
         if not hdr['datatype'].startswith('Float32'):
             msg = (
-                "TCK only supports float32 dtype but 'datatype: "
-                f"{hdr['datatype']}' was specified in the header."
+                f"TCK only supports float32 dtype but 'datatype: {hdr['datatype']}' "
+                'was specified in the header.'
) raise HeaderError(msg) diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index bbf156ee08..b32e12d8b3 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -508,15 +508,7 @@ def save(self, fileobj): points = np.asarray(t.streamline) scalars = [np.asarray(t.data_for_points[k]) for k in data_for_points_keys] - scalars = np.concatenate( - [ - np.ndarray( - (len(points), 0), - ) - ] - + scalars, - axis=1, - ) + scalars = np.concatenate([np.ndarray((len(points), 0))] + scalars, axis=1) properties = [ np.asarray(t.data_for_streamline[k]) for k in data_for_streamline_keys ] @@ -543,7 +535,7 @@ def save(self, fileobj): raise DataError(msg) if nb_properties_per_streamline != int(nb_properties_per_streamline): - msg = 'Nb. of properties differs from one streamline to' ' another!' + msg = 'Nb. of properties differs from one streamline to another!' raise DataError(msg) header[Field.NB_STREAMLINES] = nb_streamlines @@ -599,14 +591,14 @@ def _read_header(fileobj): header_rec[Field.VOXEL_TO_RASMM] = np.zeros((4, 4)) elif header_rec['version'] == 3: warnings.warn( - 'Parsing a TRK v3 file as v2. Some features may not ' 'be handled correctly.', + 'Parsing a TRK v3 file as v2. Some features may not be handled correctly.', HeaderWarning, ) elif header_rec['version'] in (2, 3): pass # Nothing more to do. else: raise HeaderError( - 'NiBabel only supports versions 1 and 2 of ' 'the Trackvis file format' + 'NiBabel only supports versions 1 and 2 of the Trackvis file format' ) # Convert the first record of `header_rec` into a dictionary @@ -617,11 +609,8 @@ def _read_header(fileobj): if header[Field.VOXEL_TO_RASMM][3][3] == 0: header[Field.VOXEL_TO_RASMM] = np.eye(4, dtype=np.float32) warnings.warn( - ( - "Field 'vox_to_ras' in the TRK's header was" - " not recorded. Will continue assuming it's" - ' the identity.' - ), + "Field 'vox_to_ras' in the TRK's header was not recorded. 
" + "Will continue assuming it's the identity.", HeaderWarning, ) diff --git a/nibabel/tests/test_affines.py b/nibabel/tests/test_affines.py index 08ae5f4bda..28f405e566 100644 --- a/nibabel/tests/test_affines.py +++ b/nibabel/tests/test_affines.py @@ -109,10 +109,23 @@ def test_append_diag(): # Routine for appending diagonal elements assert_array_equal(append_diag(np.diag([2, 3, 1]), [1]), np.diag([2, 3, 1, 1])) assert_array_equal(append_diag(np.diag([2, 3, 1]), [1, 1]), np.diag([2, 3, 1, 1, 1])) - aff = np.array([[2, 0, 0], [0, 3, 0], [0, 0, 1], [0, 0, 1]]) + aff = np.array( + [ + [2, 0, 0], + [0, 3, 0], + [0, 0, 1], + [0, 0, 1], + ] + ) assert_array_equal( append_diag(aff, [5], [9]), - [[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1], [0, 0, 5, 9], [0, 0, 0, 1]], + [ + [2, 0, 0, 0], + [0, 3, 0, 0], + [0, 0, 0, 1], + [0, 0, 5, 9], + [0, 0, 0, 1], + ], ) assert_array_equal( append_diag(aff, [5, 6], [9, 10]), @@ -125,10 +138,21 @@ def test_append_diag(): [0, 0, 0, 0, 1], ], ) - aff = np.array([[2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 0, 1]]) + aff = np.array( + [ + [2, 0, 0, 0], + [0, 3, 0, 0], + [0, 0, 0, 1], + ] + ) assert_array_equal( append_diag(aff, [5], [9]), - [[2, 0, 0, 0, 0], [0, 3, 0, 0, 0], [0, 0, 0, 5, 9], [0, 0, 0, 0, 1]], + [ + [2, 0, 0, 0, 0], + [0, 3, 0, 0, 0], + [0, 0, 0, 5, 9], + [0, 0, 0, 0, 1], + ], ) # Length of starts has to match length of steps with pytest.raises(AffineError): @@ -150,15 +174,8 @@ def test_dot_reduce(): assert_array_equal(dot_reduce(vec, mat), np.dot(vec, mat)) assert_array_equal(dot_reduce(mat, vec), np.dot(mat, vec)) mat2 = np.arange(13, 22).reshape((3, 3)) - assert_array_equal(dot_reduce(mat2, vec, mat), np.dot(mat2, np.dot(vec, mat))) - assert_array_equal( - dot_reduce( - mat, - vec, - mat2, - ), - np.dot(mat, np.dot(vec, mat2)), - ) + assert_array_equal(dot_reduce(mat2, vec, mat), mat2 @ (vec @ mat)) + assert_array_equal(dot_reduce(mat, vec, mat2), mat @ (vec @ mat2)) def test_voxel_sizes(): diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 1f80addc30..5287bad4a9 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -238,7 +238,7 @@ def test_logger_error(self): imageglobals.logger = logger hdr.copy().check_fix() assert str_io.getvalue() == ( - 'bitpix does not match datatype; ' 'setting bitpix to match datatype\n' + 'bitpix does not match datatype; setting bitpix to match datatype\n' ) # Check that error_level in fact causes error to be raised imageglobals.error_level = 10 diff --git a/nibabel/tests/test_brikhead.py b/nibabel/tests/test_brikhead.py index b2c1f1257c..5bf6e79cb9 100644 --- a/nibabel/tests/test_brikhead.py +++ b/nibabel/tests/test_brikhead.py @@ -24,7 +24,12 @@ shape=(33, 41, 25, 3), dtype=np.int16, affine=np.array( - [[-3.0, 0, 0, 49.5], [0, -3.0, 0, 82.312], [0, 0, 3.0, -52.3511], [0, 0, 0, 1.0]] + [ + [-3.0, 0, 0, 49.5], + [0, -3.0, 0, 82.312], + [0, 0, 3.0, -52.3511], + [0, 0, 0, 1.0], + ] ), zooms=(3.0, 3.0, 3.0, 3.0), data_summary=dict(min=0, max=13722, mean=4266.76024636), @@ -39,7 +44,12 @@ shape=(47, 54, 43, 1.0), dtype=np.int16, affine=np.array( - [[3.0, 0, 0, -66.0], [0, 3.0, 0, -87.0], [0, 0, 3.0, -54.0], [0, 0, 0, 1.0]] + [ + [3.0, 0, 0, -66.0], + [0, 3.0, 0, -87.0], + [0, 0, 3.0, -54.0], + [0, 0, 0, 1.0], + ] ), zooms=(3.0, 3.0, 3.0, 0.0), data_summary=dict( diff --git a/nibabel/tests/test_euler.py b/nibabel/tests/test_euler.py index 8b0fb932d5..b0c965c399 100644 --- a/nibabel/tests/test_euler.py +++ b/nibabel/tests/test_euler.py @@ -32,19 +32,37 @@ def x_only(x): cosx = 
np.cos(x) sinx = np.sin(x) - return np.array([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]) + return np.array( + [ + [1, 0, 0], + [0, cosx, -sinx], + [0, sinx, cosx], + ] + ) def y_only(y): cosy = np.cos(y) siny = np.sin(y) - return np.array([[cosy, 0, siny], [0, 1, 0], [-siny, 0, cosy]]) + return np.array( + [ + [cosy, 0, siny], + [0, 1, 0], + [-siny, 0, cosy], + ] + ) def z_only(z): cosz = np.cos(z) sinz = np.sin(z) - return np.array([[cosz, -sinz, 0], [sinz, cosz, 0], [0, 0, 1]]) + return np.array( + [ + [cosz, -sinz, 0], + [sinz, cosz, 0], + [0, 0, 1], + ] + ) def sympy_euler(z, y, x): diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 781f17d716..9f42e67c0d 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -120,14 +120,7 @@ def test_canonical_slicers(): canonical_slicers((1, 10), shape, True) # Unless check_inds is False assert canonical_slicers((10,), shape, False) == (10, slice(None)) - assert canonical_slicers( - ( - 1, - 10, - ), - shape, - False, - ) == (1, 10) + assert canonical_slicers((1, 10), shape, False) == (1, 10) # Check negative -> positive assert canonical_slicers(-1, shape) == (9, slice(None)) assert canonical_slicers((slice(None), -1), shape) == (slice(None), 9) @@ -487,15 +480,10 @@ def test_optimize_read_slicers(): (slice(None), slice(None)), ) # optimizing - assert optimize_read_slicers( - ( - slice(None), - slice(0, 5, 2), - ), - (10, 6), - 4, - _always, - ) == ((slice(None), slice(0, 5, 1)), (slice(None), slice(None, None, 2))) + assert optimize_read_slicers((slice(None), slice(0, 5, 2)), (10, 6), 4, _always) == ( + (slice(None), slice(0, 5, 1)), + (slice(None), slice(None, None, 2)), + ) # Optimize does nothing for integer when last assert optimize_read_slicers((slice(None), 1), (10, 6), 4, _always) == ( (slice(None), 1), @@ -623,14 +611,7 @@ def test_predict_shape(): def test_strided_scalar(): # Utility to make numpy array of given shape from scalar using striding for shape, scalar in product( - ( - (2,), - ( - 2, - 3, - ), - (2, 3, 4), - ), + ((2,), (2, 3), (2, 3, 4)), (1, 2, np.int16(3)), ): expected = np.zeros(shape, dtype=np.array(scalar).dtype) + scalar diff --git a/nibabel/tests/test_floating.py b/nibabel/tests/test_floating.py index 321eb1b961..a06c180b84 100644 --- a/nibabel/tests/test_floating.py +++ b/nibabel/tests/test_floating.py @@ -46,17 +46,14 @@ def test_type_info(): for dtt in np.sctypes['int'] + np.sctypes['uint']: info = np.iinfo(dtt) infod = type_info(dtt) - assert ( - dict( - min=info.min, - max=info.max, - nexp=None, - nmant=None, - minexp=None, - maxexp=None, - width=np.dtype(dtt).itemsize, - ) - == infod + assert infod == dict( + min=info.min, + max=info.max, + nexp=None, + nmant=None, + minexp=None, + maxexp=None, + width=np.dtype(dtt).itemsize, ) assert infod['min'].dtype.type == dtt assert infod['max'].dtype.type == dtt @@ -74,11 +71,11 @@ def test_type_info(): # https://developer.apple.com/library/mac/#documentation/Darwin/Reference/Manpages/man3/float.3.html if vals in ( (52, 11, 8), # longdouble is same as double - (63, 15, 12), + (63, 15, 12), # intel 80 bit (63, 15, 16), # intel 80 bit (112, 15, 16), # real float128 - (106, 11, 16), - ): # PPC head, tail doubles, expected values + (106, 11, 16), # PPC head, tail doubles, expected values + ): pass elif vals == (105, 11, 16): # bust info for PPC head / tail longdoubles # min and max broken, copy from infod diff --git a/nibabel/tests/test_image_types.py b/nibabel/tests/test_image_types.py index 
f8186f4147..9fd48ee697 100644 --- a/nibabel/tests/test_image_types.py +++ b/nibabel/tests/test_image_types.py @@ -104,7 +104,7 @@ def check_img(img_path, img_klass, sniff_mode, sniff, expect_success, msg): # Reuse the sniff... but it will only change for some # sniff_mode values. - msg = f'{expected_img_klass.__name__}/ {sniff_mode}/ ' f'{expect_success}' + msg = f'{expected_img_klass.__name__}/ {sniff_mode}/ {expect_success}' sniff = check_img( img_path, klass, diff --git a/nibabel/tests/test_minc1.py b/nibabel/tests/test_minc1.py index 3eeefaa84b..be4f0deb07 100644 --- a/nibabel/tests/test_minc1.py +++ b/nibabel/tests/test_minc1.py @@ -40,7 +40,14 @@ fname=pjoin(data_path, 'tiny.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(2.0, 2.0, 2.0), # These values from SPM2 data_summary=dict(min=0.20784314, max=0.74901961, mean=0.60602819), @@ -50,7 +57,14 @@ fname=pjoin(data_path, 'minc1_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), @@ -60,7 +74,14 @@ fname=pjoin(data_path, 'minc1_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), @@ -70,7 +91,14 @@ fname=pjoin(data_path, 'minc1-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 1.0, 0], + [0, 1.0, 0, 0], + [1.0, 0, 0, 0], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), diff --git a/nibabel/tests/test_minc2.py b/nibabel/tests/test_minc2.py index bd06456c33..251393818a 100644 --- a/nibabel/tests/test_minc2.py +++ b/nibabel/tests/test_minc2.py @@ -26,7 +26,14 @@ fname=pjoin(data_path, 'small.mnc'), shape=(18, 28, 29), dtype=np.int16, - affine=np.array([[0, 0, 7.0, -98], [0, 8.0, 0, -134], [9.0, 0, 0, -72], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 7.0, -98], + [0, 8.0, 0, -134], + [9.0, 0, 0, -72], + [0, 0, 0, 1], + ] + ), zooms=(9.0, 8.0, 7.0), # These values from mincstats data_summary=dict(min=0.1185331417, max=92.87690699, mean=31.2127952), @@ -36,7 +43,14 @@ fname=pjoin(data_path, 'minc2_1_scale.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2082842439, max=0.2094327615, mean=0.2091292083), @@ -46,7 +60,14 @@ fname=pjoin(data_path, 'minc2_4d.mnc'), shape=(2, 10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 2.0, -20], [0, 2.0, 0, -20], [2.0, 0, 0, -10], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 2.0, -20], + [0, 
2.0, 0, -20], + [2.0, 0, 0, -10], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 2.0, 2.0, 2.0), # These values from mincstats data_summary=dict(min=0.2078431373, max=1.498039216, mean=0.9090422837), @@ -56,7 +77,14 @@ fname=pjoin(data_path, 'minc2-no-att.mnc'), shape=(10, 20, 20), dtype=np.uint8, - affine=np.array([[0, 0, 1.0, 0], [0, 1.0, 0, 0], [1.0, 0, 0, 0], [0, 0, 0, 1]]), + affine=np.array( + [ + [0, 0, 1.0, 0], + [0, 1.0, 0, 0], + [1.0, 0, 0, 0], + [0, 0, 0, 1], + ] + ), zooms=(1.0, 1.0, 1.0), # These values from SPM2/mincstats data_summary=dict(min=0.20784314, max=0.74901961, mean=0.6061103), diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index 5219cb27ac..f993e342e4 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -328,9 +328,7 @@ def test_iter(): blue ridged mountains of virginia -""".split( - '\n' - ) +""".splitlines() with InTemporaryDirectory(): sobj = BytesIO() files_to_test = [ diff --git a/nibabel/tests/test_orientations.py b/nibabel/tests/test_orientations.py index 16f7f5ce46..8821fac0e0 100644 --- a/nibabel/tests/test_orientations.py +++ b/nibabel/tests/test_orientations.py @@ -30,21 +30,61 @@ IN_ARRS = [ np.eye(4), - [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]], - [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]], - [[3, 1, 0, 0], [1, 3, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], - [[1, 3, 0, 0], [3, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + [ + [0, 0, 1, 0], + [0, 1, 0, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + ], + [ + [0, 1, 0, 0], + [0, 0, 1, 0], + [1, 0, 0, 0], + [0, 0, 0, 1], + ], + [ + [3, 1, 0, 0], + [1, 3, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], + [ + [1, 3, 0, 0], + [3, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], ] OUT_ORNTS = [ - [[0, 1], [1, 1], [2, 1]], - [[2, 1], [1, 1], [0, 1]], - [[2, 1], [0, 1], [1, 1]], - [[0, 1], [1, 1], [2, 1]], - [[1, 1], [0, 1], [2, 1]], + [ + [0, 1], + [1, 1], + [2, 1], + ], + [ + [2, 1], + [1, 1], + [0, 1], + ], + [ + [2, 1], + [0, 1], + [1, 1], + ], + [ + [0, 1], + [1, 1], + [2, 1], + ], + [ + [1, 1], + [0, 1], + [2, 1], + ], ] -IN_ARRS = IN_ARRS + [ +IN_ARRS.extend( [ [np.cos(np.pi / 6 + i * np.pi / 2), np.sin(np.pi / 6 + i * np.pi / 2), 0, 0], [-np.sin(np.pi / 6 + i * np.pi / 2), np.cos(np.pi / 6 + i * np.pi / 2), 0, 0], @@ -52,13 +92,29 @@ [0, 0, 0, 1], ] for i in range(4) -] +) -OUT_ORNTS = OUT_ORNTS + [ - [[0, 1], [1, 1], [2, 1]], - [[1, -1], [0, 1], [2, 1]], - [[0, -1], [1, -1], [2, 1]], - [[1, 1], [0, -1], [2, 1]], +OUT_ORNTS += [ + [ + [0, 1], + [1, 1], + [2, 1], + ], + [ + [1, -1], + [0, 1], + [2, 1], + ], + [ + [0, -1], + [1, -1], + [2, 1], + ], + [ + [1, 1], + [0, -1], + [2, 1], + ], ] @@ -159,12 +215,39 @@ def test_io_orientation(): ornt = io_orientation(arr) assert_array_equal( ornt, - [[0, 1], [1, 1], [2, 1], [3, 1], [np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], + [ + [0, 1], + [1, 1], + [2, 1], + [3, 1], + [np.nan, np.nan], + [np.nan, np.nan], + [np.nan, np.nan], + ], ) # Test behavior of thresholding - def_aff = np.array([[1.0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) - fail_tol = np.array([[0, 1], [np.nan, np.nan], [2, 1]]) - pass_tol = np.array([[0, 1], [1, 1], [2, 1]]) + def_aff = np.array( + [ + [1.0, 1, 0, 0], + [0, 0, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + ) + fail_tol = np.array( + [ + [0, 1], + [np.nan, np.nan], + [2, 1], + ] + ) + pass_tol = np.array( + [ + [0, 1], + [1, 1], + [2, 1], + ] + ) eps = np.finfo(float).eps # Test that a Y axis appears as we increase the difference between the # first two columns @@ 
-190,22 +273,40 @@ def test_io_orientation(): aff_extra_col[:3, -1] = vec assert_array_equal( io_orientation(aff_extra_col, tol=1e-5), - [[0, 1], [np.nan, np.nan], [2, 1], [np.nan, np.nan]], + [ + [0, 1], + [np.nan, np.nan], + [2, 1], + [np.nan, np.nan], + ], ) aff_extra_row = np.zeros((5, 4)) aff_extra_row[-1, -1] = 1 # Not strictly necessary, but for completeness aff_extra_row[:3, :3] = mat aff_extra_row[:3, -1] = vec - assert_array_equal(io_orientation(aff_extra_row, tol=1e-5), [[0, 1], [np.nan, np.nan], [2, 1]]) + assert_array_equal( + io_orientation(aff_extra_row, tol=1e-5), + [ + [0, 1], + [np.nan, np.nan], + [2, 1], + ], + ) def test_ornt_transform(): assert_array_equal( - ornt_transform([[0, 1], [1, 1], [2, -1]], [[1, 1], [0, 1], [2, 1]]), + ornt_transform( + [[0, 1], [1, 1], [2, -1]], + [[1, 1], [0, 1], [2, 1]], + ), [[1, 1], [0, 1], [2, -1]], ) assert_array_equal( - ornt_transform([[0, 1], [1, 1], [2, 1]], [[2, 1], [0, -1], [1, 1]]), + ornt_transform( + [[0, 1], [1, 1], [2, 1]], + [[2, 1], [0, -1], [1, 1]], + ), [[1, -1], [2, 1], [0, 1]], ) # Must have same shape @@ -214,11 +315,17 @@ def test_ornt_transform(): # Must be (N,2) in shape with pytest.raises(ValueError): - ornt_transform([[0, 1, 1], [1, 1, 1]], [[0, 1, 1], [1, 1, 1]]) + ornt_transform( + [[0, 1, 1], [1, 1, 1]], + [[0, 1, 1], [1, 1, 1]], + ) # Target axes must exist in source with pytest.raises(ValueError): - ornt_transform([[0, 1], [1, 1], [1, 1]], [[0, 1], [1, 1], [2, 1]]) + ornt_transform( + [[0, 1], [1, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]], + ) def test_ornt2axcodes(): diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index e50b609da4..f1d81cf96c 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -310,35 +310,9 @@ def test_get_sorted_slice_indices(): hdr = PARRECHeader(HDR_INFO, HDR_DEFS[::-1]) assert_array_equal( hdr.get_sorted_slice_indices(), - [ - 8, - 7, - 6, - 5, - 4, - 3, - 2, - 1, - 0, - 17, - 16, - 15, - 14, - 13, - 12, - 11, - 10, - 9, - 26, - 25, - 24, - 23, - 22, - 21, - 20, - 19, - 18, - ], + [8, 7, 6, 5, 4, 3, 2, 1, 0] + + [17, 16, 15, 14, 13, 12, 11, 10, 9] + + [26, 25, 24, 23, 22, 21, 20, 19, 18], ) # Omit last slice, only two volumes with clear_and_catch_warnings(modules=[parrec], record=True): @@ -378,12 +352,12 @@ def test_sorting_multiple_echos_and_contrasts(): # This .PAR file has 3 echos and 4 image types (real, imaginary, magnitude, # phase). # After sorting should be: - # Type 0, Echo 1, Slices 1-30 - # Type 0, Echo 2, Slices 1-30 - # Type 0, Echo 3, Slices 1-30 - # Type 1, Echo 1, Slices 1-30 - # ... - # Type 3, Echo 3, Slices 1-30 + # Type 0, Echo 1, Slices 1-30 + # Type 0, Echo 2, Slices 1-30 + # Type 0, Echo 3, Slices 1-30 + # Type 1, Echo 1, Slices 1-30 + # ... 
+ # Type 3, Echo 3, Slices 1-30 t1_par = pjoin(DATA_PATH, 'T1_3echo_mag_real_imag_phase.PAR') with open(t1_par, 'rt') as fobj: t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) diff --git a/nibabel/tests/test_processing.py b/nibabel/tests/test_processing.py index dc877d3802..ffd1fbff2b 100644 --- a/nibabel/tests/test_processing.py +++ b/nibabel/tests/test_processing.py @@ -81,15 +81,37 @@ def test_adapt_affine(): # For 4x4 affine, 4D image, add extra identity dimension assert_array_equal( adapt_affine(aff_3d, 4), - [[0, 1, 2, 0, 11], [3, 4, 5, 0, 12], [6, 7, 8, 0, 13], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]], + [ + [0, 1, 2, 0, 11], + [3, 4, 5, 0, 12], + [6, 7, 8, 0, 13], + [0, 0, 0, 1, 0], + [0, 0, 0, 0, 1], + ], ) # For 5x5 affine, 4D image, identity aff_4d = from_matvec(np.arange(16).reshape((4, 4)), [11, 12, 13, 14]) assert_array_equal(adapt_affine(aff_4d, 4), aff_4d) # For 4x4 affine, 2D image, dropped column - assert_array_equal(adapt_affine(aff_3d, 2), [[0, 1, 11], [3, 4, 12], [6, 7, 13], [0, 0, 1]]) + assert_array_equal( + adapt_affine(aff_3d, 2), + [ + [0, 1, 11], + [3, 4, 12], + [6, 7, 13], + [0, 0, 1], + ], + ) # For 4x4 affine, 1D image, 2 dropped columns - assert_array_equal(adapt_affine(aff_3d, 1), [[0, 11], [3, 12], [6, 13], [0, 1]]) + assert_array_equal( + adapt_affine(aff_3d, 1), + [ + [0, 11], + [3, 12], + [6, 13], + [0, 1], + ], + ) # For 3x3 affine, 2D image, identity aff_2d = from_matvec(np.arange(4).reshape((2, 2)), [11, 12]) assert_array_equal(adapt_affine(aff_2d, 2), aff_2d) @@ -267,7 +289,12 @@ def test_resample_to_output(caplog): exp_shape = (4, 4, 4) assert out_img.shape == exp_shape exp_aff = np.array( - [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] + [ + [1, 0, 0, -2 * np.cos(np.pi / 4)], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] ) assert_almost_equal(out_img.affine, exp_aff) rzs, trans = to_matvec(np.dot(npl.inv(rot_3), exp_aff)) diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index dfac167690..1bdd6c26e8 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -70,7 +70,13 @@ def _some_slicers(shape): slicers[i, i] = 0 # Add a newaxis to keep us on our toes no_pos = ndim // 2 - slicers = np.hstack((slicers[:, :no_pos], np.empty((ndim, 1)), slicers[:, no_pos:])) + slicers = np.hstack( + ( + slicers[:, :no_pos], + np.empty((ndim, 1)), + slicers[:, no_pos:], + ) + ) slicers[:, no_pos] = None return [tuple(s) for s in slicers] @@ -236,7 +242,11 @@ def obj_params(self): slopes = (1.0, 2.0, float(np.float32(3.1416))) if self.has_slope else (1.0,) inters = (0.0, 10.0, float(np.float32(2.7183))) if self.has_inter else (0.0,) for shape, dtype, offset, slope, inter in product( - self.shapes, self.data_dtypes, offsets, slopes, inters + self.shapes, + self.data_dtypes, + offsets, + slopes, + inters, ): n_els = np.prod(shape) dtype = np.dtype(dtype).newbyteorder(self.data_endian) diff --git a/nibabel/tests/test_spaces.py b/nibabel/tests/test_spaces.py index 83dec9256c..dbfe533890 100644 --- a/nibabel/tests/test_spaces.py +++ b/nibabel/tests/test_spaces.py @@ -67,7 +67,12 @@ def get_outspace_params(): # x diff, y diff now 3 cos pi / 4 == 2.12, ceil to 3, add 1 # most negative x now 2 cos pi / 4 (4, 4, 4), - [[1, 0, 0, -2 * np.cos(np.pi / 4)], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + [ + [1, 0, 0, -2 * np.cos(np.pi / 4)], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + ], ), # Less than 3 axes ((2, 3), np.eye(4), None, (2, 3), np.eye(4)), @@ -120,7 +125,14 @@ def 
test_slice2volume(): assert (slice2volume(val, axis) == exp_aff).all() -@pytest.mark.parametrize('index, axis', [[-1, 0], [0, -1], [0, 3]]) +@pytest.mark.parametrize( + 'index, axis', + [ + [-1, 0], + [0, -1], + [0, 3], + ], +) def test_slice2volume_exception(index, axis): with pytest.raises(ValueError): slice2volume(index, axis) diff --git a/nibabel/tests/test_spatialimages.py b/nibabel/tests/test_spatialimages.py index 2a1da21bdd..27305739aa 100644 --- a/nibabel/tests/test_spatialimages.py +++ b/nibabel/tests/test_spatialimages.py @@ -152,11 +152,23 @@ def test_data_dtype(): def test_affine(): hdr = SpatialHeader(np.float64, shape=(1, 2, 3), zooms=(3.0, 2.0, 1.0)) assert_array_almost_equal( - hdr.get_best_affine(), [[-3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + hdr.get_best_affine(), + [ + [-3.0, 0, 0, 0], + [0, 2, 0, -1], + [0, 0, 1, -1], + [0, 0, 0, 1], + ], ) hdr.default_x_flip = False assert_array_almost_equal( - hdr.get_best_affine(), [[3.0, 0, 0, 0], [0, 2, 0, -1], [0, 0, 1, -1], [0, 0, 0, 1]] + hdr.get_best_affine(), + [ + [3.0, 0, 0, 0], + [0, 2, 0, -1], + [0, 0, 1, -1], + [0, 0, 0, 1], + ], ) assert np.array_equal(hdr.get_base_affine(), hdr.get_best_affine()) diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index d8821d308b..b01195ff5f 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -1023,17 +1023,32 @@ def test_shape_zoom_affine(): zooms = (3, 2, 1) res = shape_zoom_affine(shape, zooms) exp = np.array( - [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ] ) assert_array_almost_equal(res, exp) res = shape_zoom_affine((3, 5), (3, 2)) exp = np.array( - [[-3.0, 0.0, 0.0, 3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -0.0], [0.0, 0.0, 0.0, 1.0]] + [ + [-3.0, 0.0, 0.0, 3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -0.0], + [0.0, 0.0, 0.0, 1.0], + ] ) assert_array_almost_equal(res, exp) res = shape_zoom_affine(shape, zooms, False) exp = np.array( - [[3.0, 0.0, 0.0, -3.0], [0.0, 2.0, 0.0, -4.0], [0.0, 0.0, 1.0, -3.0], [0.0, 0.0, 0.0, 1.0]] + [ + [3.0, 0.0, 0.0, -3.0], + [0.0, 2.0, 0.0, -4.0], + [0.0, 0.0, 1.0, -3.0], + [0.0, 0.0, 0.0, 1.0], + ] ) assert_array_almost_equal(res, exp) @@ -1139,7 +1154,12 @@ def assert_rt( # check defense against modifying data in-place for in_cast, pre_clips, inter, slope, post_clips, nan_fill in itp( - (None, np.float32), (None, (-1, 25)), (0.0, 1.0), (1.0, 0.5), (None, (-2, 49)), (None, 1) + (None, np.float32), + (None, (-1, 25)), + (0.0, 1.0), + (1.0, 0.5), + (None, (-2, 49)), + (None, 1), ): data = np.arange(24).astype(np.float32) assert_rt( @@ -1185,8 +1205,8 @@ def read(self, n_bytes): except OSError as err: message = str(err) assert message == ( - 'Expected 11390625000000000000 bytes, got 0 ' - 'bytes from object\n - could the file be damaged?' + 'Expected 11390625000000000000 bytes, got 0 bytes from object\n' + ' - could the file be damaged?' 
) diff --git a/nibabel/viewers.py b/nibabel/viewers.py index d1c13dfeee..bb9f612a7d 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -319,7 +319,7 @@ def link_to(self, other): """ if not isinstance(other, self.__class__): raise TypeError( - 'other must be an instance of ' f'{self.__class__.__name__}, not {type(other)}' + f'other must be an instance of {self.__class__.__name__}, not {type(other)}' ) self._link(other, is_primary=True) From 6b0ddd23b1da1df7ca9ae275673f82bfa20a754c Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 30 Dec 2022 11:01:19 -0500 Subject: [PATCH 09/12] STY: Run vanilla blue Add fmt off/on guards for tabular comments [git-blame-ignore-rev] --- nibabel/cifti2/cifti2.py | 2 +- nibabel/freesurfer/mghformat.py | 2 ++ nibabel/nicom/csareader.py | 10 +++++----- nibabel/nifti1.py | 4 +++- nibabel/nifti2.py | 2 ++ nibabel/parrec.py | 4 +++- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 6c141b44f1..713907cf66 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -70,7 +70,7 @@ class Cifti2HeaderError(Exception): CIFTI_MODEL_TYPES = ( 'CIFTI_MODEL_TYPE_SURFACE', # Modeled using surface vertices - 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. + 'CIFTI_MODEL_TYPE_VOXELS', # Modeled using voxels. ) CIFTI_SERIESUNIT_TYPES = ( diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index 1091bedbcb..b65c24f221 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -29,6 +29,7 @@ # See https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/MghFormat DATA_OFFSET = 284 # Note that mgh data is strictly big endian ( hence the > sign ) +# fmt: off header_dtd = [ ('version', '>i4'), # 0; must be 1 ('dims', '>i4', (4,)), # 4; width, height, depth, nframes @@ -47,6 +48,7 @@ ('ti', '>f4'), # 12; inversion time ('fov', '>f4'), # 16; field of view (unused) ] +# fmt: on header_dtype = np.dtype(header_dtd) footer_dtype = np.dtype(footer_dtd) diff --git a/nibabel/nicom/csareader.py b/nibabel/nicom/csareader.py index 961e93ecbb..40f3f852d9 100644 --- a/nibabel/nicom/csareader.py +++ b/nibabel/nicom/csareader.py @@ -10,11 +10,11 @@ 'FL': float, # float 'FD': float, # double 'DS': float, # decimal string - 'SS': int, # signed short - 'US': int, # unsigned short - 'SL': int, # signed long - 'UL': int, # unsigned long - 'IS': int, # integer string + 'SS': int, # signed short + 'US': int, # unsigned short + 'SL': int, # signed long + 'UL': int, # unsigned long + 'IS': int, # integer string } MAX_CSA_ITEMS = 1000 diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index 392bf5c2ad..a5079d3d89 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -32,6 +32,7 @@ # nifti1 flat header definition for Analyze-like first 348 bytes # first number in comments indicates offset in file header in bytes +# fmt: off header_dtd = [ ('sizeof_hdr', 'i4'), # 0; must be 348 ('data_type', 'S10'), # 4; unused @@ -75,8 +76,9 @@ ('srow_y', 'f4', (4,)), # 296; 2nd row affine transform ('srow_z', 'f4', (4,)), # 312; 3rd row affine transform ('intent_name', 'S16'), # 328; name or meaning of data - ('magic', 'S4'), # 344; must be 'ni1\0' or 'n+1\0' + ('magic', 'S4'), # 344; must be 'ni1\0' or 'n+1\0' ] +# fmt: on # Full header numpy dtype header_dtype = np.dtype(header_dtd) diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index 01a918e445..c0106ae29d 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -74,6 +74,7 @@ # nifti2 flat header definition for 
first 540 bytes # First number in comments indicates offset in file header in bytes +# fmt: off header_dtd = [ ('sizeof_hdr', 'i4'), # 0; must be 540 ('magic', 'S4'), # 4; must be 'ni2\0' or 'n+2\0' @@ -114,6 +115,7 @@ ('dim_info', 'u1'), # 524; MRI slice ordering code ('unused_str', 'S15'), # 525; unused, filled with \0 ] # total 540 +# fmt: on # Full header numpy dtype header_dtype = np.dtype(header_dtd) diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 81e956f2b8..1459f3460e 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -144,7 +144,7 @@ [ [0, 0, -1, 0], # L -> R [-1, 0, 0, 0], # P -> A - [0, 1, 0, 0], # S -> S + [0, 1, 0, 0], # S -> S [0, 0, 0, 1], ] ) @@ -269,6 +269,7 @@ ] # Extra image def fields for 4.1 compared to 4 +# fmt: off image_def_dtds['V4.1'] = image_def_dtds['V4'] + [ ('diffusion b value number', int), # (imagekey!) ('gradient orientation number', int), # (imagekey!) @@ -281,6 +282,7 @@ image_def_dtds['V4.2'] = image_def_dtds['V4.1'] + [ ('label type', int), # (imagekey!) ] +# fmt: on #: PAR header versions we claim to understand supported_versions = list(image_def_dtds.keys()) From 4481a4c2640bd4be6e9c468e550d01aae448ab99 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 22:32:46 -0500 Subject: [PATCH 10/12] STY: pyupgrade --py37-plus Mostly motivated by excessive use of arguments to super(). Also caught a number of `np.array(X).astype(Y)` to convert to `np.array(X, Y)`. [git-blame-ignore-rev] --- nibabel/analyze.py | 6 +- nibabel/arraywriters.py | 18 +- nibabel/brikhead.py | 12 +- nibabel/cifti2/cifti2.py | 2 +- nibabel/cifti2/parse_cifti2.py | 6 +- nibabel/cmdline/diff.py | 2 +- nibabel/cmdline/roi.py | 2 +- nibabel/cmdline/tests/test_utils.py | 162 ++++++++---------- nibabel/dataobj_images.py | 2 +- nibabel/deprecated.py | 2 +- nibabel/ecat.py | 4 +- nibabel/freesurfer/io.py | 10 +- nibabel/freesurfer/mghformat.py | 10 +- nibabel/gifti/gifti.py | 2 +- nibabel/gifti/parse_gifti_fast.py | 4 +- nibabel/gifti/tests/test_parse_gifti_fast.py | 2 +- nibabel/nicom/ascconv.py | 2 +- nibabel/nicom/dicomwrappers.py | 12 +- nibabel/nicom/tests/test_ascconv.py | 2 +- nibabel/nifti1.py | 20 +-- nibabel/nifti2.py | 4 +- nibabel/parrec.py | 4 +- nibabel/spatialimages.py | 2 +- nibabel/spm99analyze.py | 10 +- nibabel/streamlines/tck.py | 4 +- .../streamlines/tests/test_array_sequence.py | 4 +- nibabel/streamlines/tractogram.py | 6 +- nibabel/streamlines/tractogram_file.py | 2 +- nibabel/streamlines/trk.py | 10 +- nibabel/testing/__init__.py | 8 +- nibabel/tests/test_analyze.py | 6 +- nibabel/tests/test_arrayproxy.py | 2 +- nibabel/tests/test_casting.py | 4 +- nibabel/tests/test_data.py | 18 +- nibabel/tests/test_filebasedimages.py | 2 +- nibabel/tests/test_fileslice.py | 3 +- nibabel/tests/test_funcs.py | 4 +- nibabel/tests/test_image_api.py | 2 +- nibabel/tests/test_nifti1.py | 14 +- nibabel/tests/test_openers.py | 6 +- nibabel/tests/test_parrec.py | 38 ++-- nibabel/tests/test_recoder.py | 8 +- nibabel/tests/test_scripts.py | 4 +- nibabel/tests/test_spm99analyze.py | 2 +- nibabel/tests/test_volumeutils.py | 8 +- nibabel/tests/test_wrapstruct.py | 2 +- nibabel/tmpdirs.py | 4 +- nibabel/viewers.py | 2 +- nibabel/volumeutils.py | 8 +- nibabel/xmlutils.py | 2 +- 50 files changed, 218 insertions(+), 257 deletions(-) diff --git a/nibabel/analyze.py b/nibabel/analyze.py index e128239865..e165112259 100644 --- a/nibabel/analyze.py +++ b/nibabel/analyze.py @@ -248,7 +248,7 @@ def __init__(self, binaryblock=None, endianness=None, check=True): >>> 
hdr4.endianness == swapped_code True """ - super(AnalyzeHeader, self).__init__(binaryblock, endianness, check) + super().__init__(binaryblock, endianness, check) @classmethod def guessed_endian(klass, hdr): @@ -336,7 +336,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): """Return header data for empty header with given endianness""" - hdr_data = super(AnalyzeHeader, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['sizeof_hdr'] = klass.sizeof_hdr hdr_data['dim'] = 1 hdr_data['dim'][0] = 0 @@ -904,7 +904,7 @@ class AnalyzeImage(SpatialImage): ImageArrayProxy = ArrayProxy def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtype=None): - super(AnalyzeImage, self).__init__(dataobj, affine, header, extra, file_map) + super().__init__(dataobj, affine, header, extra, file_map) # Reset consumable values self._header.set_data_offset(0) self._header.set_slope_inter(None, None) diff --git a/nibabel/arraywriters.py b/nibabel/arraywriters.py index 21fd6ba6ee..5a0b04925e 100644 --- a/nibabel/arraywriters.py +++ b/nibabel/arraywriters.py @@ -313,7 +313,7 @@ def scaling_needed(self): data are within range of the output type, return False * Otherwise return True """ - if not super(SlopeArrayWriter, self).scaling_needed(): + if not super().scaling_needed(): return False mn, mx = self.finite_range() # this is cached # No finite data - no scaling needed @@ -428,7 +428,7 @@ def _range_scale(self, in_min, in_max): # not lose precision because min/max are of fp type. out_min, out_max = np.array((out_min, out_max), dtype=big_float) else: # (u)int - out_min, out_max = [int_to_float(v, big_float) for v in (out_min, out_max)] + out_min, out_max = (int_to_float(v, big_float) for v in (out_min, out_max)) if self._out_dtype.kind == 'u': if in_min < 0 and in_max > 0: raise WriterError( @@ -507,13 +507,11 @@ def __init__(self, array, out_dtype=None, calc_scale=True, scaler_dtype=np.float >>> (aw.slope, aw.inter) == (1.0, 128) True """ - super(SlopeInterArrayWriter, self).__init__( - array, out_dtype, calc_scale, scaler_dtype, **kwargs - ) + super().__init__(array, out_dtype, calc_scale, scaler_dtype, **kwargs) def reset(self): """Set object to values before any scaling calculation""" - super(SlopeInterArrayWriter, self).reset() + super().reset() self.inter = 0.0 def _get_inter(self): @@ -549,14 +547,14 @@ def to_fileobj(self, fileobj, order='F'): def _iu2iu(self): # (u)int to (u)int - mn, mx = [as_int(v) for v in self.finite_range()] + mn, mx = (as_int(v) for v in self.finite_range()) # range may be greater than the largest integer for this type. # as_int needed to work round numpy 1.4.1 int casting bug out_dtype = self._out_dtype # Options in this method are scaling using intercept only. These will # have to pass through ``self.scaler_dtype`` (because the intercept is # in this type). - o_min, o_max = [as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)] + o_min, o_max = (as_int(v) for v in shared_range(self.scaler_dtype, out_dtype)) type_range = o_max - o_min mn2mx = mx - mn if mn2mx <= type_range: # might offset be enough? 
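The zero-argument super() rewrite in the hunks above is purely mechanical: on Python 3 the compiler supplies the enclosing class and instance, so both spellings dispatch identically. A minimal runnable sketch of the two rewrites named in this commit message, using hypothetical Base/Writer classes rather than anything from the nibabel codebase:

    import numpy as np

    class Base:
        def reset(self):
            print('base reset')

    class WriterPy2(Base):
        def reset(self):
            # Python 2 compatible spelling: class and instance passed explicitly
            super(WriterPy2, self).reset()

    class WriterPy3(Base):
        def reset(self):
            # zero-argument form produced by pyupgrade; same dispatch
            super().reset()

    WriterPy2().reset()  # -> base reset
    WriterPy3().reset()  # -> base reset

    # The other rewrite: pass dtype at construction instead of casting after.
    before = np.array([1, 2, 3]).astype('int16')
    after = np.array([1, 2, 3], 'int16')
    assert before.dtype == after.dtype == np.dtype('int16')

The one-step np.array(X, Y) form also avoids allocating an intermediate array at the default dtype before the cast.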
@@ -579,7 +577,7 @@ def _iu2iu(self): self.inter = inter return # Try slope options (sign flip) and then range scaling - super(SlopeInterArrayWriter, self)._iu2iu() + super()._iu2iu() def _range_scale(self, in_min, in_max): """Calculate scaling, intercept based on data range and output type""" @@ -604,7 +602,7 @@ def _range_scale(self, in_min, in_max): in_min, in_max = as_int(in_min), as_int(in_max) in_range = int_to_float(in_max - in_min, big_float) # Cast to float for later processing. - in_min, in_max = [int_to_float(v, big_float) for v in (in_min, in_max)] + in_min, in_max = (int_to_float(v, big_float) for v in (in_min, in_max)) if out_dtype.kind == 'f': # Type range, these are also floats info = type_info(out_dtype) diff --git a/nibabel/brikhead.py b/nibabel/brikhead.py index 72b09c4d75..470ed16664 100644 --- a/nibabel/brikhead.py +++ b/nibabel/brikhead.py @@ -195,7 +195,7 @@ def parse_AFNI_header(fobj): """ # edge case for being fed a filename instead of a file object if isinstance(fobj, str): - with open(fobj, 'rt') as src: + with open(fobj) as src: return parse_AFNI_header(src) # unpack variables in HEAD file head = fobj.read().split('\n\n') @@ -239,9 +239,7 @@ def __init__(self, file_like, header, *, mmap=True, keep_file_open=None): effect. The default value (``None``) will result in the value of ``nibabel.arrayproxy.KEEP_FILE_OPEN_DEFAULT`` being used. """ - super(AFNIArrayProxy, self).__init__( - file_like, header, mmap=mmap, keep_file_open=keep_file_open - ) + super().__init__(file_like, header, mmap=mmap, keep_file_open=keep_file_open) self._scaling = header.get_data_scaling() @property @@ -293,9 +291,7 @@ def __init__(self, info): """ self.info = info dt = _get_datatype(self.info) - super(AFNIHeader, self).__init__( - data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() - ) + super().__init__(data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms()) @classmethod def from_header(klass, header=None): @@ -553,7 +549,7 @@ def filespec_to_file_map(klass, filespec): If `filespec` is not recognizable as being a filename for this image type. 
""" - file_map = super(AFNIImage, klass).filespec_to_file_map(filespec) + file_map = super().filespec_to_file_map(filespec) # check for AFNI-specific BRIK/HEAD compression idiosyncrasies for key, fholder in file_map.items(): fname = fholder.filename diff --git a/nibabel/cifti2/cifti2.py b/nibabel/cifti2/cifti2.py index 713907cf66..423dbfbf9d 100644 --- a/nibabel/cifti2/cifti2.py +++ b/nibabel/cifti2/cifti2.py @@ -1451,7 +1451,7 @@ def __init__( """ if not isinstance(header, Cifti2Header) and header: header = Cifti2Header.from_axes(header) - super(Cifti2Image, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) + super().__init__(dataobj, header=header, extra=extra, file_map=file_map) self._nifti_header = LimitedNifti2Header.from_header(nifti_header) # if NIfTI header not specified, get data type from input array diff --git a/nibabel/cifti2/parse_cifti2.py b/nibabel/cifti2/parse_cifti2.py index e067144997..c7bfb953f9 100644 --- a/nibabel/cifti2/parse_cifti2.py +++ b/nibabel/cifti2/parse_cifti2.py @@ -91,7 +91,7 @@ def _valid_intent_code(klass, intent_code): @classmethod def may_contain_header(klass, binaryblock): - if not super(_Cifti2AsNiftiHeader, klass).may_contain_header(binaryblock): + if not super().may_contain_header(binaryblock): return False hdr = klass(binaryblock=binaryblock[: klass.sizeof_hdr]) return klass._valid_intent_code(hdr.get_intent('code')[0]) @@ -135,9 +135,7 @@ class Cifti2Parser(xml.XmlParser): """Class to parse an XML string into a CIFTI-2 header object""" def __init__(self, encoding=None, buffer_size=3500000, verbose=0): - super(Cifti2Parser, self).__init__( - encoding=encoding, buffer_size=buffer_size, verbose=verbose - ) + super().__init__(encoding=encoding, buffer_size=buffer_size, verbose=verbose) self.fsm_state = [] self.struct_state = [] diff --git a/nibabel/cmdline/diff.py b/nibabel/cmdline/diff.py index 799e17f645..b409c7205d 100755 --- a/nibabel/cmdline/diff.py +++ b/nibabel/cmdline/diff.py @@ -248,7 +248,7 @@ def get_data_diff(files, max_abs=0, max_rel=0, dtype=np.float64): # Since we operated on sub-selected values already, we need # to plug them back in candidates[ - tuple((indexes[sub_thr] for indexes in np.where(candidates))) + tuple(indexes[sub_thr] for indexes in np.where(candidates)) ] = False max_rel_diff = np.max(rel_diff) else: diff --git a/nibabel/cmdline/roi.py b/nibabel/cmdline/roi.py index 36f00a033a..ea47970043 100644 --- a/nibabel/cmdline/roi.py +++ b/nibabel/cmdline/roi.py @@ -22,7 +22,7 @@ def lossless_slice(img, slicers): def parse_slice(crop, allow_step=True): if crop is None: return slice(None) - start, stop, *extra = [int(val) if val else None for val in crop.split(':')] + start, stop, *extra = (int(val) if val else None for val in crop.split(':')) if len(extra) > 1: raise ValueError(f'Cannot parse specification: {crop}') if not allow_step and extra and extra[0] not in (1, None): diff --git a/nibabel/cmdline/tests/test_utils.py b/nibabel/cmdline/tests/test_utils.py index 6d2e6953fb..8143d648d9 100644 --- a/nibabel/cmdline/tests/test_utils.py +++ b/nibabel/cmdline/tests/test_utils.py @@ -58,16 +58,16 @@ def test_get_headers_diff(): fnames = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] actual_difference = get_headers_diff([nib.load(f).header for f in fnames]) expected_difference = { - 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], - 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'regular': [np.asarray(b''), 
np.asarray(b'r')], + 'dim_info': [np.asarray(0, 'uint8'), np.asarray(57, 'uint8')], 'dim': [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + np.array([3, 4, 5, 7, 1, 1, 1, 1], 'int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1], 'int16'), ], - 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], - 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'datatype': [np.array(2, 'uint8'), np.array(4, 'uint8')], + 'bitpix': [np.array(8, 'uint8'), np.array(16, 'uint8')], 'pixdim': [ - np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0], 'float32'), np.array( [ -1.00000000e00, @@ -78,64 +78,57 @@ def test_get_headers_diff(): 1.00000000e00, 1.00000000e00, 1.00000000e00, - ] - ).astype(dtype='float32'), + ], + 'float32', + ), ], - 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], - 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'slice_end': [np.array(0, 'uint8'), np.array(23, 'uint8')], + 'xyzt_units': [np.array(0, 'uint8'), np.array(10, 'uint8')], 'cal_max': [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.asarray(1162.0, 'float32'), ], 'descrip': [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), + np.array(b'', 'S80'), + np.array(b'FSL3.3\x00 v2.25 NIfTI-1 Single file format', 'S80'), ], - 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], - 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'qform_code': [np.array(0, 'int16'), np.array(1, 'int16')], + 'sform_code': [np.array(2, 'int16'), np.array(1, 'int16')], 'quatern_b': [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-1.9451068140294884e-26, 'float32'), ], 'quatern_c': [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.9967085123062134).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.9967085123062134, 'float32'), ], 'quatern_d': [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.0810687392950058, 'float32'), ], 'qoffset_x': [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(117.8551025390625, 'float32'), ], 'qoffset_y': [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-35.72294235229492, 'float32'), ], 'qoffset_z': [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-7.248798370361328, 'float32'), ], 'srow_x': [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( - dtype='float32' - ), + np.array([1.0, 0.0, 0.0, 0.0], 'float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02], 'float32'), ], 'srow_y': [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, 
-3.57229424e01]).astype( - dtype='float32' - ), + np.array([0.0, 3.0, 0.0, 0.0], 'float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01], 'float32'), ], 'srow_z': [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( - dtype='float32' - ), + np.array([0.0, 0.0, 2.0, 0.0], 'float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00], 'float32'), ], } @@ -146,8 +139,8 @@ def test_display_diff(): bogus_names = ['hellokitty.nii.gz', 'privettovarish.nii.gz'] dict_values = { - 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], - 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'datatype': [np.array(2, 'uint8'), np.array(4, 'uint8')], + 'bitpix': [np.array(8, 'uint8'), np.array(16, 'uint8')], } expected_output = """\ @@ -220,16 +213,16 @@ def test_get_data_diff(): def test_main(): test_names = [pjoin(data_path, f) for f in ('standard.nii.gz', 'example4d.nii.gz')] expected_difference = { - 'regular': [np.asarray(''.encode('utf-8')), np.asarray('r'.encode('utf-8'))], - 'dim_info': [np.asarray(0).astype(dtype='uint8'), np.asarray(57).astype(dtype='uint8')], + 'regular': [np.asarray(b''), np.asarray(b'r')], + 'dim_info': [np.asarray(0, 'uint8'), np.asarray(57, 'uint8')], 'dim': [ - np.array([3, 4, 5, 7, 1, 1, 1, 1]).astype(dtype='int16'), - np.array([4, 128, 96, 24, 2, 1, 1, 1]).astype(dtype='int16'), + np.array([3, 4, 5, 7, 1, 1, 1, 1], 'int16'), + np.array([4, 128, 96, 24, 2, 1, 1, 1], 'int16'), ], - 'datatype': [np.array(2).astype(dtype='uint8'), np.array(4).astype(dtype='uint8')], - 'bitpix': [np.array(8).astype(dtype='uint8'), np.array(16).astype(dtype='uint8')], + 'datatype': [np.array(2, 'uint8'), np.array(4, 'uint8')], + 'bitpix': [np.array(8, 'uint8'), np.array(16, 'uint8')], 'pixdim': [ - np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0]).astype(dtype='float32'), + np.array([1.0, 1.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0], 'float32'), np.array( [ -1.00000000e00, @@ -240,64 +233,57 @@ def test_main(): 1.00000000e00, 1.00000000e00, 1.00000000e00, - ] - ).astype(dtype='float32'), + ], + 'float32', + ), ], - 'slice_end': [np.array(0).astype(dtype='uint8'), np.array(23).astype(dtype='uint8')], - 'xyzt_units': [np.array(0).astype(dtype='uint8'), np.array(10).astype(dtype='uint8')], + 'slice_end': [np.array(0, 'uint8'), np.array(23, 'uint8')], + 'xyzt_units': [np.array(0, 'uint8'), np.array(10, 'uint8')], 'cal_max': [ - np.array(0.0).astype(dtype='float32'), - np.asarray(1162.0).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.asarray(1162.0, 'float32'), ], 'descrip': [ - np.array(''.encode('utf-8')).astype(dtype='S80'), - np.array('FSL3.3\x00 v2.25 NIfTI-1 Single file format'.encode('utf-8')).astype( - dtype='S80' - ), + np.array(b'', 'S80'), + np.array(b'FSL3.3\x00 v2.25 NIfTI-1 Single file format', 'S80'), ], - 'qform_code': [np.array(0).astype(dtype='int16'), np.array(1).astype(dtype='int16')], - 'sform_code': [np.array(2).astype(dtype='int16'), np.array(1).astype(dtype='int16')], + 'qform_code': [np.array(0, 'int16'), np.array(1, 'int16')], + 'sform_code': [np.array(2, 'int16'), np.array(1, 'int16')], 'quatern_b': [ - np.array(0.0).astype(dtype='float32'), - np.array(-1.9451068140294884e-26).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-1.9451068140294884e-26, 'float32'), ], 'quatern_c': [ - np.array(0.0).astype(dtype='float32'), - 
np.array(-0.9967085123062134).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.9967085123062134, 'float32'), ], 'quatern_d': [ - np.array(0.0).astype(dtype='float32'), - np.array(-0.0810687392950058).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-0.0810687392950058, 'float32'), ], 'qoffset_x': [ - np.array(0.0).astype(dtype='float32'), - np.array(117.8551025390625).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(117.8551025390625, 'float32'), ], 'qoffset_y': [ - np.array(0.0).astype(dtype='float32'), - np.array(-35.72294235229492).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-35.72294235229492, 'float32'), ], 'qoffset_z': [ - np.array(0.0).astype(dtype='float32'), - np.array(-7.248798370361328).astype(dtype='float32'), + np.array(0.0, 'float32'), + np.array(-7.248798370361328, 'float32'), ], 'srow_x': [ - np.array([1.0, 0.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02]).astype( - dtype='float32' - ), + np.array([1.0, 0.0, 0.0, 0.0], 'float32'), + np.array([-2.00000000e00, 6.71471565e-19, 9.08102451e-18, 1.17855103e02], 'float32'), ], 'srow_y': [ - np.array([0.0, 3.0, 0.0, 0.0]).astype(dtype='float32'), - np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01]).astype( - dtype='float32' - ), + np.array([0.0, 3.0, 0.0, 0.0], 'float32'), + np.array([-6.71471565e-19, 1.97371149e00, -3.55528235e-01, -3.57229424e01], 'float32'), ], 'srow_z': [ - np.array([0.0, 0.0, 2.0, 0.0]).astype(dtype='float32'), - np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00]).astype( - dtype='float32' - ), + np.array([0.0, 0.0, 2.0, 0.0], 'float32'), + np.array([8.25548089e-18, 3.23207617e-01, 2.17108178e00, -7.24879837e00], 'float32'), ], 'DATA(md5)': ['0a2576dd6badbb25bfb3b12076df986b', 'b0abbc492b4fd533b2c80d82570062cf'], } diff --git a/nibabel/dataobj_images.py b/nibabel/dataobj_images.py index 64ef906820..5c8de66674 100644 --- a/nibabel/dataobj_images.py +++ b/nibabel/dataobj_images.py @@ -38,7 +38,7 @@ def __init__(self, dataobj, header=None, extra=None, file_map=None): file_map : mapping, optional mapping giving file information for this image format """ - super(DataobjImage, self).__init__(header=header, extra=extra, file_map=file_map) + super().__init__(header=header, extra=extra, file_map=file_map) self._dataobj = dataobj self._fdata_cache = None self._data_cache = None diff --git a/nibabel/deprecated.py b/nibabel/deprecated.py index 900c0fcf4d..aa41675dbd 100644 --- a/nibabel/deprecated.py +++ b/nibabel/deprecated.py @@ -60,7 +60,7 @@ class FutureWarningMixin: def __init__(self, *args, **kwargs): warnings.warn(self.warn_message, FutureWarning, stacklevel=2) - super(FutureWarningMixin, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class VisibleDeprecationWarning(UserWarning): diff --git a/nibabel/ecat.py b/nibabel/ecat.py index d151465933..5217bd1333 100644 --- a/nibabel/ecat.py +++ b/nibabel/ecat.py @@ -278,7 +278,7 @@ def __init__(self, binaryblock=None, endianness=None, check=True): Whether to check and fix header for errors. No checks currently implemented, so value has no effect. 
""" - super(EcatHeader, self).__init__(binaryblock, endianness, check) + super().__init__(binaryblock, endianness, check) @classmethod def guessed_endian(klass, hdr): @@ -291,7 +291,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): """Return header data for empty header with given endianness""" - hdr_data = super(EcatHeader, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['magic_number'] = 'MATRIX72' hdr_data['sw_version'] = 74 hdr_data['num_frames'] = 0 diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index b6f003b984..6e8538c202 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -229,7 +229,7 @@ def write_geometry(filepath, coords, faces, create_stamp=None, volume_info=None) with open(filepath, 'wb') as fobj: magic_bytes.tofile(fobj) - fobj.write((f'{create_stamp}\n\n').encode('utf-8')) + fobj.write((f'{create_stamp}\n\n').encode()) np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj) @@ -610,13 +610,11 @@ def _serialize_volume_info(volume_info): strings.append(np.array(volume_info[key], dtype='>i4').tobytes()) elif key in ('valid', 'filename'): val = volume_info[key] - strings.append(f'{key} = {val}\n'.encode('utf-8')) + strings.append(f'{key} = {val}\n'.encode()) elif key == 'volume': val = volume_info[key] - strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode('utf-8')) + strings.append(f'{key} = {val[0]} {val[1]} {val[2]}\n'.encode()) else: val = volume_info[key] - strings.append( - f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode('utf-8') - ) + strings.append(f'{key:6s} = {val[0]:.10g} {val[1]:.10g} {val[2]:.10g}\n'.encode()) return b''.join(strings) diff --git a/nibabel/freesurfer/mghformat.py b/nibabel/freesurfer/mghformat.py index b65c24f221..cb86b4400b 100644 --- a/nibabel/freesurfer/mghformat.py +++ b/nibabel/freesurfer/mghformat.py @@ -119,7 +119,7 @@ def __init__(self, binaryblock=None, check=True): # Footer is optional and may contain variable-length text fields, # so limit to fixed fields binaryblock = binaryblock[:full_size] + b'\x00' * (full_size - len(binaryblock)) - super(MGHHeader, self).__init__(binaryblock=binaryblock, endianness='big', check=False) + super().__init__(binaryblock=binaryblock, endianness='big', check=False) if not self._structarr['goodRASFlag']: self._set_affine_default() if check: @@ -367,7 +367,7 @@ def default_structarr(klass, endianness=None): """ if endianness is not None and endian_codes[endianness] != '>': raise ValueError('MGHHeader must always be big endian') - structarr = super(MGHHeader, klass).default_structarr(endianness=endianness) + structarr = super().default_structarr(endianness=endianness) structarr['version'] = 1 structarr['dims'] = 1 structarr['type'] = 3 @@ -477,9 +477,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): shape = dataobj.shape if len(shape) < 3: dataobj = reshape_dataobj(dataobj, shape + (1,) * (3 - len(shape))) - super(MGHImage, self).__init__( - dataobj, affine, header=header, extra=extra, file_map=file_map - ) + super().__init__(dataobj, affine, header=header, extra=extra, file_map=file_map) @classmethod def filespec_to_file_map(klass, filespec): @@ -487,7 +485,7 @@ def filespec_to_file_map(klass, filespec): """ Check for compressed .mgz format, then .mgh format """ if splitext(filespec)[1].lower() == '.mgz': return dict(image=FileHolder(filename=filespec)) - return super(MGHImage, 
klass).filespec_to_file_map(filespec) + return super().filespec_to_file_map(filespec) @classmethod def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): diff --git a/nibabel/gifti/gifti.py b/nibabel/gifti/gifti.py index dc205d8004..c80fbf2e22 100644 --- a/nibabel/gifti/gifti.py +++ b/nibabel/gifti/gifti.py @@ -589,7 +589,7 @@ def __init__( darrays=None, version='1.0', ): - super(GiftiImage, self).__init__(header=header, extra=extra, file_map=file_map) + super().__init__(header=header, extra=extra, file_map=file_map) if darrays is None: darrays = [] if meta is None: diff --git a/nibabel/gifti/parse_gifti_fast.py b/nibabel/gifti/parse_gifti_fast.py index 68dfb00af8..e4a9be4bd6 100644 --- a/nibabel/gifti/parse_gifti_fast.py +++ b/nibabel/gifti/parse_gifti_fast.py @@ -150,9 +150,7 @@ def _str2int(in_str): class GiftiImageParser(XmlParser): def __init__(self, encoding=None, buffer_size=35000000, verbose=0, mmap=True): - super(GiftiImageParser, self).__init__( - encoding=encoding, buffer_size=buffer_size, verbose=verbose - ) + super().__init__(encoding=encoding, buffer_size=buffer_size, verbose=verbose) # output self.img = None diff --git a/nibabel/gifti/tests/test_parse_gifti_fast.py b/nibabel/gifti/tests/test_parse_gifti_fast.py index c7a958a5f8..f08bdd1b17 100644 --- a/nibabel/gifti/tests/test_parse_gifti_fast.py +++ b/nibabel/gifti/tests/test_parse_gifti_fast.py @@ -368,7 +368,7 @@ def test_parse_dataarrays(): with InTemporaryDirectory(): save(img, fn) - with open(fn, 'r') as fp: + with open(fn) as fp: txt = fp.read() # Make a bad gifti. txt = txt.replace('NumberOfDataArrays="0"', 'NumberOfDataArrays ="1"') diff --git a/nibabel/nicom/ascconv.py b/nibabel/nicom/ascconv.py index d03845f900..be6da9786c 100644 --- a/nibabel/nicom/ascconv.py +++ b/nibabel/nicom/ascconv.py @@ -205,7 +205,7 @@ def parse_ascconv(ascconv_str, str_delim='"'): A line of the ASCCONV section could not be parsed. """ attrs, content = ASCCONV_RE.match(ascconv_str).groups() - attrs = OrderedDict((tuple(x.split('=')) for x in attrs.split())) + attrs = OrderedDict(tuple(x.split('=')) for x in attrs.split()) # Normalize string start / end markers to something Python understands content = content.replace(str_delim, '"""').replace('\\', '\\\\') # Use Python's own parser to parse modified ASCCONV assignments diff --git a/nibabel/nicom/dicomwrappers.py b/nibabel/nicom/dicomwrappers.py index 9290d6c376..7e6bea9009 100755 --- a/nibabel/nicom/dicomwrappers.py +++ b/nibabel/nicom/dicomwrappers.py @@ -533,7 +533,7 @@ def image_shape(self): [frame.FrameContentSequence[0].DimensionIndexValues for frame in self.frames] ) # Check that there is only one multiframe stack index - stack_ids = set(frame.FrameContentSequence[0].StackID for frame in self.frames) + stack_ids = {frame.FrameContentSequence[0].StackID for frame in self.frames} if len(stack_ids) > 1: raise WrapperError( 'File contains more than one StackID. Cannot handle multi-stack files' @@ -645,7 +645,7 @@ def get_data(self): def _scale_data(self, data): pix_trans = getattr(self.frames[0], 'PixelValueTransformationSequence', None) if pix_trans is None: - return super(MultiframeWrapper, self)._scale_data(data) + return super()._scale_data(data) scale = float(pix_trans[0].RescaleSlope) offset = float(pix_trans[0].RescaleIntercept) return self._apply_scale_offset(data, scale, offset) @@ -681,7 +681,7 @@ def __init__(self, dcm_data, csa_header=None): None, we try and read the CSA information from `dcm_data`. If this fails, we fall back to an empty dict. 
""" - super(SiemensWrapper, self).__init__(dcm_data) + super().__init__(dcm_data) if dcm_data is None: dcm_data = {} self.dcm_data = dcm_data @@ -695,7 +695,7 @@ def __init__(self, dcm_data, csa_header=None): def slice_normal(self): # The std_slice_normal comes from the cross product of the directions # in the ImageOrientationPatient - std_slice_normal = super(SiemensWrapper, self).slice_normal + std_slice_normal = super().slice_normal csa_slice_normal = csar.get_slice_normal(self.csa_header) if std_slice_normal is None and csa_slice_normal is None: return None @@ -718,7 +718,7 @@ def slice_normal(self): @one_time def series_signature(self): """Add ICE dims from CSA header to signature""" - signature = super(SiemensWrapper, self).series_signature + signature = super().series_signature ice = csar.get_ice_dims(self.csa_header) if ice is not None: ice = ice[:6] + ice[8:9] @@ -861,7 +861,7 @@ def image_position(self): img_pos : (3,) array position in mm of voxel (0,0,0) in Mosaic array """ - ipp = super(MosaicWrapper, self).image_position + ipp = super().image_position # mosaic image size md_rows, md_cols = (self.get('Rows'), self.get('Columns')) iop = self.image_orient_patient diff --git a/nibabel/nicom/tests/test_ascconv.py b/nibabel/nicom/tests/test_ascconv.py index 4737d3615d..cd27bc3192 100644 --- a/nibabel/nicom/tests/test_ascconv.py +++ b/nibabel/nicom/tests/test_ascconv.py @@ -15,7 +15,7 @@ def test_ascconv_parse(): - with open(ASCCONV_INPUT, 'rt') as fobj: + with open(ASCCONV_INPUT) as fobj: contents = fobj.read() ascconv_dict, attrs = ascconv.parse_ascconv(contents, str_delim='""') assert attrs == OrderedDict() diff --git a/nibabel/nifti1.py b/nibabel/nifti1.py index a5079d3d89..a10686145b 100644 --- a/nibabel/nifti1.py +++ b/nibabel/nifti1.py @@ -688,7 +688,7 @@ class Nifti1Header(SpmAnalyzeHeader): def __init__(self, binaryblock=None, endianness=None, check=True, extensions=()): """Initialize header from binary data block and extensions""" - super(Nifti1Header, self).__init__(binaryblock, endianness, check) + super().__init__(binaryblock, endianness, check) self.extensions = self.exts_klass(extensions) def copy(self): @@ -730,7 +730,7 @@ def write_to(self, fileobj): raise HeaderDataError( f'vox offset set to {vox_offset}, but need at least {min_vox_offset}' ) - super(Nifti1Header, self).write_to(fileobj) + super().write_to(fileobj) # Write extensions if len(self.extensions) == 0: # If single file, write required 0 stream to signal no extensions @@ -754,7 +754,7 @@ def get_best_affine(self): @classmethod def default_structarr(klass, endianness=None): """Create empty header binary block with given endianness""" - hdr_data = super(Nifti1Header, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) if klass.is_single: hdr_data['magic'] = klass.single_magic else: @@ -781,7 +781,7 @@ def from_header(klass, header=None, check=True): hdr : header instance fresh header instance of our own class """ - new_hdr = super(Nifti1Header, klass).from_header(header, check) + new_hdr = super().from_header(header, check) if isinstance(header, Nifti1Header): new_hdr.extensions[:] = header.extensions[:] return new_hdr @@ -811,7 +811,7 @@ def get_data_shape(self): Allows for freesurfer hack for 7th order icosahedron surface described in `issue 309`_, load_nifti.m_, and `save_nifti.m `_. 
""" - shape = super(Nifti1Header, self).get_data_shape() + shape = super().get_data_shape() # Apply freesurfer hack for large vectors if shape[:3] == (-1, 1, 1): vec_len = int(self._structarr['glmin']) @@ -903,7 +903,7 @@ def set_data_shape(self, shape): stacklevel=2, ) shape = (-1, 1, 1) + shape[3:] - super(Nifti1Header, self).set_data_shape(shape) + super().set_data_shape(shape) def set_data_dtype(self, datatype): """Set numpy dtype for data from code or dtype or type @@ -1838,7 +1838,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None, dtyp f'by passing the dtype argument to {self.__class__.__name__}().' ) warnings.warn(msg, FutureWarning, stacklevel=2) - super(Nifti1Pair, self).__init__(dataobj, affine, header, extra, file_map, dtype) + super().__init__(dataobj, affine, header, extra, file_map, dtype) # Force set of s/q form when header is None unless affine is also None if header is None and affine is not None: self._affine2header() @@ -1877,7 +1877,7 @@ def update_header(self): >>> np.all(hdr.get_sform() == affine) True """ - super(Nifti1Pair, self).update_header() + super().update_header() hdr = self._header hdr['magic'] = hdr.pair_magic @@ -2232,7 +2232,7 @@ def as_reoriented(self, ornt): the transpose that needs to be done to the implied array, as in ``arr.transpose(ornt[:,0])`` """ - img = super(Nifti1Pair, self).as_reoriented(ornt) + img = super().as_reoriented(ornt) if img is self: return img @@ -2266,7 +2266,7 @@ def _get_fileholders(file_map): def update_header(self): """Harmonize header with image data and affine""" - super(Nifti1Image, self).update_header() + super().update_header() hdr = self._header hdr['magic'] = hdr.single_magic diff --git a/nibabel/nifti2.py b/nibabel/nifti2.py index c0106ae29d..cb138962cc 100644 --- a/nibabel/nifti2.py +++ b/nibabel/nifti2.py @@ -188,7 +188,7 @@ def set_data_shape(self, shape): @classmethod def default_structarr(klass, endianness=None): """Create empty header binary block with given endianness""" - hdr_data = super(Nifti2Header, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['eol_check'] = (13, 10, 26, 10) return hdr_data @@ -197,7 +197,7 @@ def default_structarr(klass, endianness=None): @classmethod def _get_checks(klass): # Add our own checks - return super(Nifti2Header, klass)._get_checks() + (klass._chk_eol_check,) + return super()._get_checks() + (klass._chk_eol_check,) @staticmethod def _chk_eol_check(hdr, fix=False): diff --git a/nibabel/parrec.py b/nibabel/parrec.py index 1459f3460e..27ade56ae9 100644 --- a/nibabel/parrec.py +++ b/nibabel/parrec.py @@ -754,9 +754,7 @@ def __init__(self, info, image_defs, permit_truncated=False, strict_sort=False): ) # REC data always little endian dt = np.dtype('uint' + str(bitpix)).newbyteorder('<') - super(PARRECHeader, self).__init__( - data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms() - ) + super().__init__(data_dtype=dt, shape=self._calc_data_shape(), zooms=self._calc_zooms()) @classmethod def from_header(klass, header=None): diff --git a/nibabel/spatialimages.py b/nibabel/spatialimages.py index c582ee149b..d969e57745 100644 --- a/nibabel/spatialimages.py +++ b/nibabel/spatialimages.py @@ -428,7 +428,7 @@ def __init__(self, dataobj, affine, header=None, extra=None, file_map=None): file_map : mapping, optional mapping giving file information for this image format """ - super(SpatialImage, self).__init__(dataobj, header=header, extra=extra, file_map=file_map) + super().__init__(dataobj, 
header=header, extra=extra, file_map=file_map) if affine is not None: # Check that affine is array-like 4,4. Maybe this is too strict at # this abstract level, but so far I think all image formats we know diff --git a/nibabel/spm99analyze.py b/nibabel/spm99analyze.py index 7a2f176318..12e3cb658d 100644 --- a/nibabel/spm99analyze.py +++ b/nibabel/spm99analyze.py @@ -45,7 +45,7 @@ class SpmAnalyzeHeader(analyze.AnalyzeHeader): @classmethod def default_structarr(klass, endianness=None): """Create empty header binary block with given endianness""" - hdr_data = super(SpmAnalyzeHeader, klass).default_structarr(endianness) + hdr_data = super().default_structarr(endianness) hdr_data['scl_slope'] = 1 return hdr_data @@ -206,7 +206,7 @@ def set_origin_from_affine(self, affine): @classmethod def _get_checks(klass): - checks = super(Spm99AnalyzeHeader, klass)._get_checks() + checks = super()._get_checks() return checks + (klass._chk_origin,) @staticmethod @@ -264,9 +264,7 @@ def from_file_map(klass, file_map, *, mmap=True, keep_file_open=None): img : Spm99AnalyzeImage instance """ - ret = super(Spm99AnalyzeImage, klass).from_file_map( - file_map, mmap=mmap, keep_file_open=keep_file_open - ) + ret = super().from_file_map(file_map, mmap=mmap, keep_file_open=keep_file_open) try: matf = file_map['mat'].get_prepare_fileobj() except OSError: @@ -312,7 +310,7 @@ def to_file_map(self, file_map=None, dtype=None): """ if file_map is None: file_map = self.file_map - super(Spm99AnalyzeImage, self).to_file_map(file_map, dtype=dtype) + super().to_file_map(file_map, dtype=dtype) mat = self._affine if mat is None: return diff --git a/nibabel/streamlines/tck.py b/nibabel/streamlines/tck.py index 7738a0e069..ec8e7dbce7 100644 --- a/nibabel/streamlines/tck.py +++ b/nibabel/streamlines/tck.py @@ -68,7 +68,7 @@ def __init__(self, tractogram, header=None): This is in contrast with TRK's internal convention where it would have referred to a corner. """ - super(TckFile, self).__init__(tractogram, header) + super().__init__(tractogram, header) @classmethod def is_correct_format(cls, fileobj): @@ -288,7 +288,7 @@ def _write_header(fileobj, header): # Write header to file. fileobj.write(out) - fileobj.write(f'\nfile: . {hdr_offset}\nEND\n'.encode('utf-8')) + fileobj.write(f'\nfile: . {hdr_offset}\nEND\n'.encode()) @classmethod def _read_header(cls, fileobj): diff --git a/nibabel/streamlines/tests/test_array_sequence.py b/nibabel/streamlines/tests/test_array_sequence.py index a3faa6a58b..0c8557fe50 100644 --- a/nibabel/streamlines/tests/test_array_sequence.py +++ b/nibabel/streamlines/tests/test_array_sequence.py @@ -89,7 +89,7 @@ def test_creating_arraysequence_from_list(self): check_arr_seq(ArraySequence(iter(SEQ_DATA['data']), buffer_size), SEQ_DATA['data']) def test_creating_arraysequence_from_generator(self): - gen_1, gen_2 = itertools.tee((e for e in SEQ_DATA['data'])) + gen_1, gen_2 = itertools.tee(e for e in SEQ_DATA['data']) seq = ArraySequence(gen_1) seq_with_buffer = ArraySequence(gen_2, buffer_size=256) @@ -189,7 +189,7 @@ def test_arraysequence_extend(self): # Extend with a generator. seq = SEQ_DATA['seq'].copy() # Copy because of in-place modification. - seq.extend((d for d in new_data)) + seq.extend(d for d in new_data) check_arr_seq(seq, SEQ_DATA['data'] + new_data) # Extend with another `ArraySequence` object. 
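The dict() and set() calls over generator expressions in the following hunks become comprehension literals, and redundant parentheses around a lone generator argument are dropped. A small sketch with made-up data, not drawn from the diffs themselves:

    # dict()/set() over a genexp -> comprehension literal (same contents)
    items = [('a', 1), ('b', 2)]

    d_before = dict((k, v * 10) for k, v in items)
    d_after = {k: v * 10 for k, v in items}

    s_before = set(k for k, _ in items)
    s_after = {k for k, _ in items}

    out = []
    out.extend((v for _, v in items))  # inner parentheses are redundant
    out.extend(v for _, v in items)    # pyupgrade drops them; same result

    assert d_before == d_after
    assert s_before == s_after
    assert out == [1, 2, 1, 2]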
diff --git a/nibabel/streamlines/tractogram.py b/nibabel/streamlines/tractogram.py index ded937ab11..9e7c0f9af2 100644 --- a/nibabel/streamlines/tractogram.py +++ b/nibabel/streamlines/tractogram.py @@ -98,7 +98,7 @@ class PerArrayDict(SliceableDataDict): def __init__(self, n_rows=0, *args, **kwargs): self.n_rows = n_rows - super(PerArrayDict, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def __setitem__(self, key, value): value = np.asarray(list(value)) @@ -604,9 +604,7 @@ def __init__( refers to the center of the voxel. By default, the streamlines are in an unknown space, i.e. affine_to_rasmm is None. """ - super(LazyTractogram, self).__init__( - streamlines, data_per_streamline, data_per_point, affine_to_rasmm - ) + super().__init__(streamlines, data_per_streamline, data_per_point, affine_to_rasmm) self._nb_streamlines = None self._data = None self._affine_to_apply = np.eye(4) diff --git a/nibabel/streamlines/tractogram_file.py b/nibabel/streamlines/tractogram_file.py index 321ea3d2ad..2cec1ea9cb 100644 --- a/nibabel/streamlines/tractogram_file.py +++ b/nibabel/streamlines/tractogram_file.py @@ -30,7 +30,7 @@ class abstractclassmethod(classmethod): def __init__(self, callable): callable.__isabstractmethod__ = True - super(abstractclassmethod, self).__init__(callable) + super().__init__(callable) class TractogramFile(ABC): diff --git a/nibabel/streamlines/trk.py b/nibabel/streamlines/trk.py index b32e12d8b3..4f570a2803 100644 --- a/nibabel/streamlines/trk.py +++ b/nibabel/streamlines/trk.py @@ -237,7 +237,7 @@ def __init__(self, tractogram, header=None): and *mm* space where coordinate (0,0,0) refers to the center of the voxel. """ - super(TrkFile, self).__init__(tractogram, header) + super().__init__(tractogram, header) @classmethod def is_correct_format(cls, fileobj): @@ -359,9 +359,9 @@ def load(cls, fileobj, lazy_load=False): def _read(): for pts, scals, props in cls._read(fileobj, hdr): items = data_per_point_slice.items() - data_for_points = dict((k, scals[:, v]) for k, v in items) + data_for_points = {k: scals[:, v] for k, v in items} items = data_per_streamline_slice.items() - data_for_streamline = dict((k, props[v]) for k, v in items) + data_for_streamline = {k: props[v] for k, v in items} yield TractogramItem(pts, data_for_streamline, data_for_points) tractogram = LazyTractogram.from_data_func(_read) @@ -503,7 +503,7 @@ def save(self, fileobj): header['scalar_name'][:] = scalar_name for t in tractogram: - if any((len(d) != len(t.streamline) for d in t.data_for_points.values())): + if any(len(d) != len(t.streamline) for d in t.data_for_points.values()): raise DataError('Missing scalars for some points!') points = np.asarray(t.streamline) @@ -747,7 +747,7 @@ def __str__(self): vars['property_names'] = '\n '.join(property_names) # Make all byte strings into strings # Fixes recursion error on Python 3.3 - vars = dict((k, asstr(v) if hasattr(v, 'decode') else v) for k, v in vars.items()) + vars = {k: asstr(v) if hasattr(v, 'decode') else v for k, v in vars.items()} return """\ MAGIC NUMBER: {MAGIC_NUMBER} v.{version} diff --git a/nibabel/testing/__init__.py b/nibabel/testing/__init__.py index 4600782d4b..eb99eabca0 100644 --- a/nibabel/testing/__init__.py +++ b/nibabel/testing/__init__.py @@ -147,7 +147,7 @@ class clear_and_catch_warnings(warnings.catch_warnings): def __init__(self, record=True, modules=()): self.modules = set(modules).union(self.class_modules) self._warnreg_copies = {} - super(clear_and_catch_warnings, self).__init__(record=record) + 
super().__init__(record=record) def __enter__(self): for mod in self.modules: @@ -155,10 +155,10 @@ def __enter__(self): mod_reg = mod.__warningregistry__ self._warnreg_copies[mod] = mod_reg.copy() mod_reg.clear() - return super(clear_and_catch_warnings, self).__enter__() + return super().__enter__() def __exit__(self, *exc_info): - super(clear_and_catch_warnings, self).__exit__(*exc_info) + super().__exit__(*exc_info) for mod in self.modules: if hasattr(mod, '__warningregistry__'): mod.__warningregistry__.clear() @@ -183,7 +183,7 @@ class error_warnings(clear_and_catch_warnings): filter = 'error' def __enter__(self): - mgr = super(error_warnings, self).__enter__() + mgr = super().__enter__() warnings.simplefilter(self.filter) return mgr diff --git a/nibabel/tests/test_analyze.py b/nibabel/tests/test_analyze.py index 5287bad4a9..7584d550f6 100644 --- a/nibabel/tests/test_analyze.py +++ b/nibabel/tests/test_analyze.py @@ -61,7 +61,7 @@ class TestAnalyzeHeader(tws._TestLabeledWrapStruct): header_class = AnalyzeHeader example_file = header_file sizeof_hdr = AnalyzeHeader.sizeof_hdr - supported_np_types = set((np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64)) + supported_np_types = {np.uint8, np.int16, np.int32, np.float32, np.float64, np.complex64} add_intp(supported_np_types) def test_supported_types(self): @@ -74,7 +74,7 @@ def get_bad_bb(self): return b'\x00' * self.header_class.template_dtype.itemsize def test_general_init(self): - super(TestAnalyzeHeader, self).test_general_init() + super().test_general_init() hdr = self.header_class() # an empty header has shape (0,) - like an empty array # (np.array([])) @@ -497,7 +497,7 @@ def test_orientation(self): assert_array_equal(hdr.get_base_affine(), aff) def test_str(self): - super(TestAnalyzeHeader, self).test_str() + super().test_str() hdr = self.header_class() s1 = str(hdr) # check the datacode recoding diff --git a/nibabel/tests/test_arrayproxy.py b/nibabel/tests/test_arrayproxy.py index 5018e95e1f..7558c55ea5 100644 --- a/nibabel/tests/test_arrayproxy.py +++ b/nibabel/tests/test_arrayproxy.py @@ -421,7 +421,7 @@ class CountingImageOpener(ImageOpener): num_openers = 0 def __init__(self, *args, **kwargs): - super(CountingImageOpener, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) CountingImageOpener.num_openers += 1 diff --git a/nibabel/tests/test_casting.py b/nibabel/tests/test_casting.py index 8c4cad7bbb..62da526319 100644 --- a/nibabel/tests/test_casting.py +++ b/nibabel/tests/test_casting.py @@ -53,7 +53,7 @@ def test_shared_range(): if thresh_overflow: assert np.all((bit_bigger == casted_mx) | (bit_bigger == imax)) else: - assert np.all((bit_bigger <= casted_mx)) + assert np.all(bit_bigger <= casted_mx) if it in np.sctypes['uint']: assert mn == 0 continue @@ -79,7 +79,7 @@ def test_shared_range(): if thresh_overflow: assert np.all((bit_smaller == casted_mn) | (bit_smaller == imin)) else: - assert np.all((bit_smaller >= casted_mn)) + assert np.all(bit_smaller >= casted_mn) def test_shared_range_inputs(): diff --git a/nibabel/tests/test_data.py b/nibabel/tests/test_data.py index ece2e1c6cd..af7ef66bde 100644 --- a/nibabel/tests/test_data.py +++ b/nibabel/tests/test_data.py @@ -51,19 +51,19 @@ def test_versioned(): VersionedDatasource(tmpdir) tmpfile = pjoin(tmpdir, 'config.ini') # ini file, but wrong section - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[SOMESECTION]\n') fobj.write('version = 0.1\n') with pytest.raises(DataError): VersionedDatasource(tmpdir) # 
ini file, but right section, wrong key - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('somekey = 0.1\n') with pytest.raises(DataError): VersionedDatasource(tmpdir) # ini file, right section and key - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1\n') vds = VersionedDatasource(tmpdir) @@ -73,7 +73,7 @@ def test_versioned(): assert vds.minor_version == 1 assert vds.get_filename('config.ini') == tmpfile # ini file, right section and key, funny value - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1.2.dev\n') vds = VersionedDatasource(tmpdir) @@ -142,7 +142,7 @@ def test_data_path(with_nimd_env): # Next, make a fake user directory, and put a file in there with TemporaryDirectory() as tmpdir: tmpfile = pjoin(tmpdir, 'config.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') fobj.write(f'path = {tst_pth}') nibd.get_nipy_user_dir = lambda: tmpdir @@ -153,11 +153,11 @@ def test_data_path(with_nimd_env): with TemporaryDirectory() as tmpdir: nibd.get_nipy_system_dir = lambda: tmpdir tmpfile = pjoin(tmpdir, 'an_example.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') fobj.write(f'path = {tst_pth}\n') tmpfile = pjoin(tmpdir, 'another_example.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DATA]\n') fobj.write('path = %s\n' % '/path/two') assert get_data_path() == tst_list + ['/path/two'] + old_pth @@ -195,7 +195,7 @@ def test_make_datasource(with_nimd_env): with pytest.raises(DataError): make_datasource(pkg_def) tmpfile = pjoin(pkg_dir, 'config.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.1\n') ds = make_datasource(pkg_def, data_path=[tmpdir]) @@ -223,7 +223,7 @@ def test_datasource_or_bomber(with_nimd_env): pkg_dir = pjoin(tmpdir, 'pkg') os.mkdir(pkg_dir) tmpfile = pjoin(pkg_dir, 'config.ini') - with open(tmpfile, 'wt') as fobj: + with open(tmpfile, 'w') as fobj: fobj.write('[DEFAULT]\n') fobj.write('version = 0.2\n') ds = datasource_or_bomber(pkg_def) diff --git a/nibabel/tests/test_filebasedimages.py b/nibabel/tests/test_filebasedimages.py index aa48a3e747..3aa1ae78c5 100644 --- a/nibabel/tests/test_filebasedimages.py +++ b/nibabel/tests/test_filebasedimages.py @@ -17,7 +17,7 @@ class FBNumpyImage(FileBasedImage): files_types = (('image', '.npy'),) def __init__(self, arr, header=None, extra=None, file_map=None): - super(FBNumpyImage, self).__init__(header, extra, file_map) + super().__init__(header, extra, file_map) self.arr = arr @property diff --git a/nibabel/tests/test_fileslice.py b/nibabel/tests/test_fileslice.py index 9f42e67c0d..e9f65e45a2 100644 --- a/nibabel/tests/test_fileslice.py +++ b/nibabel/tests/test_fileslice.py @@ -726,8 +726,7 @@ def slicer_samples(shape): slicers_list = [] for i in range(ndim): slicers_list.append(_slices_for_len(shape[i])) - for sliceobj in product(*slicers_list): - yield sliceobj + yield from product(*slicers_list) # Nones and ellipses yield (None,) if ndim == 0: diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index 752aed0b52..b60974de5f 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -39,8 +39,8 @@ def test_concat(): affine = np.eye(4) for dim in range(2, 6): - all_shapes_ND = 
tuple((shape[:dim] for shape in all_shapes_5D)) - all_shapes_N1D_unary = tuple((shape + (1,) for shape in all_shapes_ND)) + all_shapes_ND = tuple(shape[:dim] for shape in all_shapes_5D) + all_shapes_N1D_unary = tuple(shape + (1,) for shape in all_shapes_ND) all_shapes = all_shapes_ND + all_shapes_N1D_unary # Loop over all possible combinations of images, in first and diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index af82c304ac..39e9b07a83 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -644,7 +644,7 @@ class MakeImageAPI(LoadImageAPI): def obj_params(self): # Return any obj_params from superclass - for func, params in super(MakeImageAPI, self).obj_params(): + for func, params in super().obj_params(): yield func, params # Create new images aff = np.diag([1, 2, 3, 1]) diff --git a/nibabel/tests/test_nifti1.py b/nibabel/tests/test_nifti1.py index 59bf214eda..2cbbfc1f5d 100644 --- a/nibabel/tests/test_nifti1.py +++ b/nibabel/tests/test_nifti1.py @@ -96,7 +96,7 @@ def test_from_eg_file(self): def test_data_scaling(self): # Test scaling in header - super(TestNifti1PairHeader, self).test_data_scaling() + super().test_data_scaling() hdr = self.header_class() data = np.arange(0, 3, 0.5).reshape((1, 2, 3)) hdr.set_data_shape(data.shape) @@ -1330,9 +1330,7 @@ def test_nifti_dicom_extension(): assert dcmext.get_content().PatientID == 'NiPy' # create a single dicom tag (Patient ID, [0010,0020]) with Explicit VR / LE - dcmbytes_explicit = struct.pack( - '2H2sH4s', 0x10, 0x20, 'LO'.encode('utf-8'), 4, 'NiPy'.encode('utf-8') - ) + dcmbytes_explicit_be = struct.pack('>2H2sH4s', 0x10, 0x20, b'LO', 4, b'NiPy') hdr_be = Nifti1Header(endianness='>') # Big Endian Nifti1Header dcmext = Nifti1DicomExtension(2, dcmbytes_explicit_be, parent_hdr=hdr_be) assert dcmext.__class__ == Nifti1DicomExtension @@ -1552,5 +1548,5 @@ def test_large_nifti1(): data = load('test.nii.gz').get_fdata() # Check that the data are all ones assert image_shape == data.shape - n_ones = np.sum((data == 1.0)) + n_ones = np.sum(data == 1.0) assert np.prod(image_shape) == n_ones diff --git a/nibabel/tests/test_openers.py b/nibabel/tests/test_openers.py index f993e342e4..b4f71f2501 100644 --- a/nibabel/tests/test_openers.py +++ b/nibabel/tests/test_openers.py @@ -111,7 +111,7 @@ def test_Opener_various(): class MockIndexedGzipFile(GzipFile): def __init__(self, *args, **kwargs): self._drop_handles = kwargs.pop('drop_handles', False) - super(MockIndexedGzipFile, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) @contextlib.contextmanager @@ -284,7 +284,7 @@ def test_name(): if HAVE_ZSTD: files_to_test += ['test.txt.zst'] for input in files_to_test: - exp_name = input if type(input) == type('') else None + exp_name = input if type(input) == str else None with Opener(input, 'wb') as fobj: assert fobj.name == exp_name @@ -317,7 +317,7 @@ def test_close_if_mine(): if has_closed: assert not fobj.closed fobj.close_if_mine() - is_str = type(input) is type('') + is_str = type(input) is str if has_closed: assert fobj.closed == is_str diff --git a/nibabel/tests/test_parrec.py b/nibabel/tests/test_parrec.py index f1d81cf96c..0a9d7c7dc2 100644 --- a/nibabel/tests/test_parrec.py +++ b/nibabel/tests/test_parrec.py @@ -210,9 +210,9 @@ def test_top_level_load(): def test_header(): v42_hdr = PARRECHeader(HDR_INFO, HDR_DEFS) for strict_sort in [False, True]: - with open(V4_PAR, 'rt') as fobj: + with open(V4_PAR) as fobj: v4_hdr = PARRECHeader.from_fileobj(fobj, 
strict_sort=strict_sort) - with open(V41_PAR, 'rt') as fobj: + with open(V41_PAR) as fobj: v41_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=strict_sort) for hdr in (v42_hdr, v41_hdr, v4_hdr): hdr = PARRECHeader(HDR_INFO, HDR_DEFS) @@ -296,7 +296,7 @@ def test_affine_regression(): # Data at http://psydata.ovgu.de/philips_achieva_testfiles/conversion2 for basename, exp_affine in PREVIOUS_AFFINES.items(): fname = pjoin(DATA_PATH, basename + '.PAR') - with open(fname, 'rt') as fobj: + with open(fname) as fobj: hdr = PARRECHeader.from_fileobj(fobj) assert_almost_equal(hdr.get_affine(), exp_affine) @@ -328,7 +328,7 @@ def test_sorting_dual_echo_T1(): # For this .PAR file, instead of getting 1 echo per volume, they get # mixed up unless strict_sort=True t1_par = pjoin(DATA_PATH, 'T1_dual_echo.PAR') - with open(t1_par, 'rt') as fobj: + with open(t1_par) as fobj: t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -359,7 +359,7 @@ def test_sorting_multiple_echos_and_contrasts(): # ... # Type 3, Echo 3, Slices 1-30 t1_par = pjoin(DATA_PATH, 'T1_3echo_mag_real_imag_phase.PAR') - with open(t1_par, 'rt') as fobj: + with open(t1_par) as fobj: t1_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -398,7 +398,7 @@ def test_sorting_multiecho_ASL(): # For this .PAR file has 3 keys corresponding to volumes: # 'echo number', 'label type', 'dynamic scan number' asl_par = pjoin(DATA_PATH, 'ASL_3D_Multiecho.PAR') - with open(asl_par, 'rt') as fobj: + with open(asl_par) as fobj: asl_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -467,13 +467,13 @@ def test_vol_is_full(): def gen_par_fobj(): for par in glob(pjoin(DATA_PATH, '*.PAR')): - with open(par, 'rt') as fobj: + with open(par) as fobj: yield par, fobj def test_truncated_load(): # Test loading of truncated header - with open(TRUNC_PAR, 'rt') as fobj: + with open(TRUNC_PAR) as fobj: gen_info, slice_info = parse_PAR_header(fobj) with pytest.raises(PARRECError): PARRECHeader(gen_info, slice_info) @@ -504,7 +504,7 @@ def test_vol_calculations(): def test_diffusion_parameters(): # Check getting diffusion parameters from diffusion example dti_par = pjoin(DATA_PATH, 'DTI.PAR') - with open(dti_par, 'rt') as fobj: + with open(dti_par) as fobj: dti_hdr = PARRECHeader.from_fileobj(fobj) assert dti_hdr.get_data_shape() == (80, 80, 10, 8) assert dti_hdr.general_info['diffusion'] == 1 @@ -520,7 +520,7 @@ def test_diffusion_parameters(): def test_diffusion_parameters_strict_sort(): # Check getting diffusion parameters from diffusion example dti_par = pjoin(DATA_PATH, 'DTI.PAR') - with open(dti_par, 'rt') as fobj: + with open(dti_par) as fobj: dti_hdr = PARRECHeader.from_fileobj(fobj, strict_sort=True) # should get the correct order even if we randomly shuffle the order @@ -540,7 +540,7 @@ def test_diffusion_parameters_strict_sort(): def test_diffusion_parameters_v4(): dti_v4_par = pjoin(DATA_PATH, 'DTIv40.PAR') - with open(dti_v4_par, 'rt') as fobj: + with open(dti_v4_par) as fobj: dti_v4_hdr = PARRECHeader.from_fileobj(fobj) assert dti_v4_hdr.get_data_shape() == (80, 80, 10, 8) assert dti_v4_hdr.general_info['diffusion'] == 1 @@ -567,7 +567,7 @@ def test_epi_params(): # Check EPI conversion for par_root in ('T2_-interleaved', 'T2_', 'phantom_EPI_asc_CLEAR_2_1'): epi_par = pjoin(DATA_PATH, par_root + '.PAR') - with open(epi_par, 'rt') 
as fobj: + with open(epi_par) as fobj: epi_hdr = PARRECHeader.from_fileobj(fobj) assert len(epi_hdr.get_data_shape()) == 4 assert_almost_equal(epi_hdr.get_zooms()[-1], 2.0) @@ -577,7 +577,7 @@ def test_xyzt_unit_conversion(): # Check conversion to NIfTI-like has sensible units for par_root in ('T2_-interleaved', 'T2_', 'phantom_EPI_asc_CLEAR_2_1'): epi_par = pjoin(DATA_PATH, par_root + '.PAR') - with open(epi_par, 'rt') as fobj: + with open(epi_par) as fobj: epi_hdr = PARRECHeader.from_fileobj(fobj) nifti_hdr = Nifti1Header.from_header(epi_hdr) assert len(nifti_hdr.get_data_shape()) == 4 @@ -588,7 +588,7 @@ def test_xyzt_unit_conversion(): def test_truncations(): # Test tests for truncation par = pjoin(DATA_PATH, 'T2_.PAR') - with open(par, 'rt') as fobj: + with open(par) as fobj: gen_info, slice_info = parse_PAR_header(fobj) # Header is well-formed as is hdr = PARRECHeader(gen_info, slice_info) @@ -690,10 +690,10 @@ def assert_copy_ok(hdr1, hdr2): assert_copy_ok(hdr, hdr2) assert not hdr.permit_truncated assert not hdr2.permit_truncated - with open(TRUNC_PAR, 'rt') as fobj: + with open(TRUNC_PAR) as fobj: with pytest.raises(PARRECError): PARRECHeader.from_fileobj(fobj) - with open(TRUNC_PAR, 'rt') as fobj: + with open(TRUNC_PAR) as fobj: # Parse but warn on inconsistent header with pytest.warns(UserWarning, match='Header inconsistency'): trunc_hdr = PARRECHeader.from_fileobj(fobj, True) @@ -826,7 +826,7 @@ def test_varying_scaling(): def test_anonymized(): # Test we can read anonymized PAR correctly - with open(ANON_PAR, 'rt') as fobj: + with open(ANON_PAR) as fobj: anon_hdr = PARRECHeader.from_fileobj(fobj) gen_defs, img_defs = anon_hdr.general_info, anon_hdr.image_defs assert gen_defs['patient_name'] == '' @@ -877,7 +877,7 @@ def test_exts2par(): def test_dualTR(): expected_TRs = np.asarray([2000.0, 500.0]) - with open(DUAL_TR_PAR, 'rt') as fobj: + with open(DUAL_TR_PAR) as fobj: with clear_and_catch_warnings(modules=[parrec], record=True) as wlist: simplefilter('always') dualTR_hdr = PARRECHeader.from_fileobj(fobj) @@ -889,7 +889,7 @@ def test_dualTR(): def test_ADC_map(): # test reading an apparent diffusion coefficient map - with open(ADC_PAR, 'rt') as fobj: + with open(ADC_PAR) as fobj: # two truncation warnings expected because general_info indicates: # 1.) 
multiple directions diff --git a/nibabel/tests/test_recoder.py b/nibabel/tests/test_recoder.py index 49a9898ce2..f5a77158ec 100644 --- a/nibabel/tests/test_recoder.py +++ b/nibabel/tests/test_recoder.py @@ -111,7 +111,7 @@ def values(self): assert rc.code['one'] == 'spam' assert rc.code['first'] == 'spam' assert rc.code['bizarre'] == 'eggs' - assert rc.value_set() == set(['funny', 'list']) + assert rc.value_set() == {'funny', 'list'} assert list(rc.keys()) == ['some', 'keys'] @@ -138,11 +138,11 @@ def test_sugar(): assert rc[1] == rc.field1[1] assert rc['two'] == rc.field1['two'] # keys gets all keys - assert set(rc.keys()) == set((1, 'one', '1', 'first', 2, 'two')) + assert set(rc.keys()) == {1, 'one', '1', 'first', 2, 'two'} # value_set gets set of values from first column - assert rc.value_set() == set((1, 2)) + assert rc.value_set() == {1, 2} # or named column if given - assert rc.value_set('label') == set(('one', 'two')) + assert rc.value_set('label') == {'one', 'two'} # "in" works for values in and outside the set assert 'one' in rc assert 'three' not in rc diff --git a/nibabel/tests/test_scripts.py b/nibabel/tests/test_scripts.py index a089fb7eef..9f07b3933b 100644 --- a/nibabel/tests/test_scripts.py +++ b/nibabel/tests/test_scripts.py @@ -392,7 +392,7 @@ def test_parrec2nii_with_data(): ['parrec2nii', '--overwrite', '--dwell-time', '--field-strength', '3', dti_par] ) exp_dwell = (26 * 9.087) / (42.576 * 3.4 * 3 * 28) - with open('DTI.dwell_time', 'rt') as fobj: + with open('DTI.dwell_time') as fobj: contents = fobj.read().strip() assert_almost_equal(float(contents), exp_dwell) # ensure trace is removed by default @@ -424,7 +424,7 @@ def test_parrec2nii_with_data(): # Writes .ordering.csv if requested run_command(['parrec2nii', '--overwrite', '--volume-info', dti_par]) assert exists('DTI.ordering.csv') - with open('DTI.ordering.csv', 'r') as csvfile: + with open('DTI.ordering.csv') as csvfile: csvreader = csv.reader(csvfile, delimiter=',') csv_keys = next(csvreader) # header row nlines = 0 # count number of non-header rows diff --git a/nibabel/tests/test_spm99analyze.py b/nibabel/tests/test_spm99analyze.py index e5eb969388..9bc4c928a6 100644 --- a/nibabel/tests/test_spm99analyze.py +++ b/nibabel/tests/test_spm99analyze.py @@ -83,7 +83,7 @@ class TestSpm99AnalyzeHeader(test_analyze.TestAnalyzeHeader, HeaderScalingMixin) header_class = Spm99AnalyzeHeader def test_empty(self): - super(TestSpm99AnalyzeHeader, self).test_empty() + super().test_empty() hdr = self.header_class() assert hdr['scl_slope'] == 1 diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py index b01195ff5f..ee9329187f 100644 --- a/nibabel/tests/test_volumeutils.py +++ b/nibabel/tests/test_volumeutils.py @@ -996,11 +996,11 @@ def seek(self, *args): def test_fname_ext_ul_case(): # Get filename ignoring the case of the filename extension with InTemporaryDirectory(): - with open('afile.TXT', 'wt') as fobj: + with open('afile.TXT', 'w') as fobj: fobj.write('Interesting information') # OSX usually has case-insensitive file systems; Windows also os_cares_case = not exists('afile.txt') - with open('bfile.txt', 'wt') as fobj: + with open('bfile.txt', 'w') as fobj: fobj.write('More interesting information') # If there is no file, the case doesn't change assert fname_ext_ul_case('nofile.txt') == 'nofile.txt' @@ -1070,7 +1070,7 @@ def test_dtypes(): dt_defs = ((16, 'float32', np.float32),) dtr = make_dt_codes(dt_defs) # check we have the fields we were expecting - assert dtr.value_set() == set((16,)) + 
assert dtr.value_set() == {16} assert dtr.fields == ('code', 'label', 'type', 'dtype', 'sw_dtype') # These of course should pass regardless of dtype assert dtr[np.float32] == 16 @@ -1085,7 +1085,7 @@ def test_dtypes(): dt_defs = ((16, 'float32', np.float32, 'ASTRING'),) dtr = make_dt_codes(dt_defs) assert dtr[np.dtype('f4').newbyteorder('S')] == 16 - assert dtr.value_set() == set((16,)) + assert dtr.value_set() == {16} assert dtr.fields == ('code', 'label', 'type', 'niistring', 'dtype', 'sw_dtype') assert dtr.niistring[16] == 'ASTRING' # And that unequal elements raises error diff --git a/nibabel/tests/test_wrapstruct.py b/nibabel/tests/test_wrapstruct.py index 718700768e..66dda18237 100644 --- a/nibabel/tests/test_wrapstruct.py +++ b/nibabel/tests/test_wrapstruct.py @@ -357,7 +357,7 @@ def guessed_endian(klass, hdr): @classmethod def default_structarr(klass, endianness=None): - structarr = super(MyWrapStruct, klass).default_structarr(endianness) + structarr = super().default_structarr(endianness) structarr['an_integer'] = 1 structarr['a_str'] = b'a string' return structarr diff --git a/nibabel/tmpdirs.py b/nibabel/tmpdirs.py index e8fba870c1..ac3bf6c0f0 100644 --- a/nibabel/tmpdirs.py +++ b/nibabel/tmpdirs.py @@ -74,11 +74,11 @@ class InTemporaryDirectory(TemporaryDirectory): def __enter__(self): self._pwd = os.getcwd() os.chdir(self.name) - return super(InTemporaryDirectory, self).__enter__() + return super().__enter__() def __exit__(self, exc, value, tb): os.chdir(self._pwd) - return super(InTemporaryDirectory, self).__exit__(exc, value, tb) + return super().__exit__(exc, value, tb) class InGivenDirectory: diff --git a/nibabel/viewers.py b/nibabel/viewers.py index bb9f612a7d..9dad3dd17f 100644 --- a/nibabel/viewers.py +++ b/nibabel/viewers.py @@ -216,7 +216,7 @@ def __init__(self, data, affine=None, axes=None, title=None): ax.set_ylim(yl) self._volume_ax_objs = dict(step=step, patch=patch) - self._figs = set([a.figure for a in self._axes]) + self._figs = {a.figure for a in self._axes} for fig in self._figs: fig.canvas.mpl_connect('scroll_event', self._on_scroll) fig.canvas.mpl_connect('motion_notify_event', self._on_mouse) diff --git a/nibabel/volumeutils.py b/nibabel/volumeutils.py index d31d91ea01..b339b6bab5 100644 --- a/nibabel/volumeutils.py +++ b/nibabel/volumeutils.py @@ -593,7 +593,7 @@ def array_to_file( if null_scaling and np.can_cast(in_dtype, out_dtype): return _write_data(data, fileobj, out_dtype, order, pre_clips=pre_clips) # Force upcasting for floats by making atleast_1d. 
- slope, inter = [np.atleast_1d(v) for v in (divslope, intercept)] + slope, inter = (np.atleast_1d(v) for v in (divslope, intercept)) # Default working point type for applying slope / inter if slope.dtype.kind in 'iu': slope = slope.astype(float) @@ -621,7 +621,7 @@ def array_to_file( # going to integers # Because we're going to integers, complex inter and slope will only slow # us down, cast to float - slope, inter = [v.astype(_matching_float(v.dtype)) for v in (slope, inter)] + slope, inter = (v.astype(_matching_float(v.dtype)) for v in (slope, inter)) # We'll do the thresholding on the scaled data, so turn off the # thresholding on the unscaled data pre_clips = None @@ -642,7 +642,7 @@ def array_to_file( extremes = np.array(dt_mnmx, dtype=cast_in_dtype) w_type = best_write_scale_ftype(extremes, slope, inter, w_type) # Push up precision by casting the slope, inter - slope, inter = [v.astype(w_type) for v in (slope, inter)] + slope, inter = (v.astype(w_type) for v in (slope, inter)) # We need to know the result of applying slope and inter to the min and # max of the array, in order to clip the output array, after applying # the slope and inter. Otherwise we'd need to clip twice, once before @@ -887,7 +887,7 @@ def apply_read_scaling(arr, slope=None, inter=None): return arr shape = arr.shape # Force float / float upcasting by promoting to arrays - arr, slope, inter = [np.atleast_1d(v) for v in (arr, slope, inter)] + arr, slope, inter = (np.atleast_1d(v) for v in (arr, slope, inter)) if arr.dtype.kind in 'iu': # int to float; get enough precision to avoid infs # Find floating point type for which scaling does not overflow, diff --git a/nibabel/xmlutils.py b/nibabel/xmlutils.py index 67e10cd152..8e0b18fb6e 100644 --- a/nibabel/xmlutils.py +++ b/nibabel/xmlutils.py @@ -94,7 +94,7 @@ def parse(self, string=None, fname=None, fptr=None): if string is not None: fptr = BytesIO(string) elif fname is not None: - fptr = open(fname, 'r') + fptr = open(fname) # store the name of the xml file in case it is needed during parsing self.fname = getattr(fptr, 'name', None) From bf298113da99079c9c7b5e1690e41879828cd472 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Thu, 29 Dec 2022 22:53:17 -0500 Subject: [PATCH 11/12] STY: Reduce array().astype() and similar constructs [git-blame-ignore-rev] --- nibabel/freesurfer/io.py | 6 +++--- nibabel/freesurfer/tests/test_mghformat.py | 8 ++++---- nibabel/gifti/tests/test_gifti.py | 2 +- nibabel/tests/test_funcs.py | 2 +- nibabel/tests/test_image_api.py | 2 +- nibabel/tests/test_proxy_api.py | 2 +- nibabel/tests/test_volumeutils.py | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/nibabel/freesurfer/io.py b/nibabel/freesurfer/io.py index 6e8538c202..ec6b474b04 100644 --- a/nibabel/freesurfer/io.py +++ b/nibabel/freesurfer/io.py @@ -70,9 +70,9 @@ def _read_volume_info(fobj): if key in ('valid', 'filename'): volume_info[key] = pair[1].strip() elif key == 'volume': - volume_info[key] = np.array(pair[1].split()).astype(int) + volume_info[key] = np.array(pair[1].split(), int) else: - volume_info[key] = np.array(pair[1].split()).astype(float) + volume_info[key] = np.array(pair[1].split(), float) # Ignore the rest return volume_info @@ -521,7 +521,7 @@ def write_annot(filepath, labels, ctab, names, fill_ctab=True): vnum = len(labels) def write(num, dtype=dt): - np.array([num]).astype(dtype).tofile(fobj) + np.array([num], dtype).tofile(fobj) def write_string(s): s = (s if isinstance(s, bytes) else s.encode()) + b'\x00' diff --git 
a/nibabel/freesurfer/tests/test_mghformat.py b/nibabel/freesurfer/tests/test_mghformat.py index 0a850488c2..ded1aca8a2 100644 --- a/nibabel/freesurfer/tests/test_mghformat.py +++ b/nibabel/freesurfer/tests/test_mghformat.py @@ -123,7 +123,7 @@ def test_write_mgh(): def test_write_noaffine_mgh(): # now just save the image without the vox2ras transform # and see if it uses the default values to save - v = np.ones((7, 13, 3, 22)).astype(np.uint8) + v = np.ones((7, 13, 3, 22), np.uint8) # form a MGHImage object using data # and the default affine matrix (Note the "None") img = MGHImage(v, None) @@ -175,7 +175,7 @@ def bad_dtype_mgh(): """ # try to write an unsigned short and make sure it # raises MGHError - v = np.ones((7, 13, 3, 22)).astype(np.uint16) + v = np.ones((7, 13, 3, 22), np.uint16) # form a MGHImage object using data # and the default affine matrix (Note the "None") MGHImage(v, None) @@ -189,7 +189,7 @@ def test_bad_dtype_mgh(): def test_filename_exts(): # Test acceptable filename extensions - v = np.ones((7, 13, 3, 22)).astype(np.uint8) + v = np.ones((7, 13, 3, 22), np.uint8) # form a MGHImage object using data # and the default affine matrix (Note the "None") img = MGHImage(v, None) @@ -251,7 +251,7 @@ def test_header_updating(): def test_cosine_order(): # Test we are interpreting the cosine order right - data = np.arange(60).reshape((3, 4, 5)).astype(np.int32) + data = np.arange(60, dtype=np.int32).reshape((3, 4, 5)) aff = np.diag([2.0, 3, 4, 1]) aff[0] = [2, 1, 0, 10] img = MGHImage(data, aff) diff --git a/nibabel/gifti/tests/test_gifti.py b/nibabel/gifti/tests/test_gifti.py index 8858de589f..49a8cbc07f 100644 --- a/nibabel/gifti/tests/test_gifti.py +++ b/nibabel/gifti/tests/test_gifti.py @@ -465,7 +465,7 @@ def test_darray_dtype_coercion_failures(): encodings = ('ASCII', 'B64BIN', 'B64GZ') for data_dtype, darray_dtype, encoding in itertools.product(dtypes, dtypes, encodings): da = GiftiDataArray( - np.arange(10).astype(data_dtype), + np.arange(10, dtype=data_dtype), encoding=encoding, intent='NIFTI_INTENT_NODE_INDEX', datatype=darray_dtype, diff --git a/nibabel/tests/test_funcs.py b/nibabel/tests/test_funcs.py index b60974de5f..10f6e90813 100644 --- a/nibabel/tests/test_funcs.py +++ b/nibabel/tests/test_funcs.py @@ -127,7 +127,7 @@ def test_concat(): def test_closest_canonical(): # Use 32-bit data so that the AnalyzeImage class doesn't complain - arr = np.arange(24).reshape((2, 3, 4, 1)).astype(np.int32) + arr = np.arange(24, dtype=np.int32).reshape((2, 3, 4, 1)) # Test with an AnalyzeImage first img = AnalyzeImage(arr, np.eye(4)) diff --git a/nibabel/tests/test_image_api.py b/nibabel/tests/test_image_api.py index 39e9b07a83..091bc57e8c 100644 --- a/nibabel/tests/test_image_api.py +++ b/nibabel/tests/test_image_api.py @@ -255,7 +255,7 @@ def validate_data_interface(self, imaker, params): with maybe_deprecated(meth_name), pytest.raises(ValueError): method(caching='something') # dataobj is read only - fake_data = np.zeros(img.shape).astype(img.get_data_dtype()) + fake_data = np.zeros(img.shape, dtype=img.get_data_dtype()) with pytest.raises(AttributeError): img.dataobj = fake_data # So is in_memory diff --git a/nibabel/tests/test_proxy_api.py b/nibabel/tests/test_proxy_api.py index 1bdd6c26e8..1c9e02186c 100644 --- a/nibabel/tests/test_proxy_api.py +++ b/nibabel/tests/test_proxy_api.py @@ -61,7 +61,7 @@ def _some_slicers(shape): ndim = len(shape) - slicers = np.eye(ndim).astype(int).astype(object) + slicers = np.eye(ndim, dtype=int).astype(object) slicers[slicers == 0] = 
slice(None)
     for i in range(ndim):
         if i % 2:
diff --git a/nibabel/tests/test_volumeutils.py b/nibabel/tests/test_volumeutils.py
index ee9329187f..ab5bd38ee6 100644
--- a/nibabel/tests/test_volumeutils.py
+++ b/nibabel/tests/test_volumeutils.py
@@ -1161,7 +1161,7 @@ def assert_rt(
         (None, (-2, 49)),
         (None, 1),
     ):
-        data = np.arange(24).astype(np.float32)
+        data = np.arange(24, dtype=np.float32)
         assert_rt(
             data,
             shape,

From 69785cd53e7139e90b8cf3cd41ca154e502177f0 Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Fri, 30 Dec 2022 09:07:52 -0500
Subject: [PATCH 12/12] MNT: make .git-blame-ignore-revs

---
 .git-blame-ignore-revs | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 .git-blame-ignore-revs

diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..d700b59665
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,12 @@
+# Thu Dec 29 22:53:17 2022 -0500 - effigies@gmail.com - STY: Reduce array().astype() and similar constructs
+bf298113da99079c9c7b5e1690e41879828cd472
+# Thu Dec 29 22:32:46 2022 -0500 - effigies@gmail.com - STY: pyupgrade --py37-plus
+4481a4c2640bd4be6e9c468e550d01aae448ab99
+# Fri Dec 30 11:01:19 2022 -0500 - effigies@gmail.com - STY: Run vanilla blue
+6b0ddd23b1da1df7ca9ae275673f82bfa20a754c
+# Thu Dec 29 21:46:13 2022 -0500 - markiewicz@stanford.edu - STY: Manual, blue-compatible touchups
+263fca9bf6d4ca314a5a322b4824d6f53d0589df
+# Thu Dec 29 21:32:00 2022 -0500 - effigies@gmail.com - STY: isort
+0ab2856cac4d4baae7ab3e2f6d58421db55d807f
+# Thu Dec 29 21:30:29 2022 -0500 - effigies@gmail.com - STY: blue
+1a8dd302ff85b1136c81d492509b80e7748339f0
\ No newline at end of file
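
A note on using the file introduced by PATCH 12/12: git does not consult
.git-blame-ignore-revs on its own, so each clone has to opt in. Assuming
git 2.23 or newer (the version that added the blame.ignoreRevsFile setting
and the matching command-line flag), a minimal setup is:

    # Make every `git blame` in this clone skip the style commits listed above
    git config blame.ignoreRevsFile .git-blame-ignore-revs

    # Or skip them for a single invocation only
    git blame --ignore-revs-file .git-blame-ignore-revs nibabel/nifti1.py

With either form, lines touched only by the commits marked
[git-blame-ignore-rev] are attributed to their last substantive change
instead of the reformatting pass. GitHub's blame view also honors a file
with this exact name at the repository root.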