diff --git a/.circleci/version.py b/.circleci/version.py
index cbaaacb31b..207a6b4fef 100644
--- a/.circleci/version.py
+++ b/.circleci/version.py
@@ -2,4 +2,5 @@
 import sdcflows
 
-print(sdcflows.__version__, end='', file=open('/tmp/.docker-version.txt', 'w'))
+with open('/tmp/.docker-version.txt', 'w') as f:
+    f.write(sdcflows.__version__)
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index fe21fefa26..c262426f78 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,3 +1,9 @@
+# Thu May 15 11:41:29 2025 -0400 - markiewicz@stanford.edu - chore: Manual ruff fixes [ignore-rev]
+d0da75c1e4d6345ea1cc2b337668f6774288195e
+# Thu May 15 11:11:38 2025 -0400 - markiewicz@stanford.edu - run: ruff check --fix [ignore-rev]
+497670f170f87cdcfb32cc525f5bf0668f96129a
+# Thu May 15 09:34:34 2025 -0400 - markiewicz@stanford.edu - run: ruff check --fix [ignore-rev]
+dc3706038661984718da079e24a863842407a837
 # Thu May 15 09:29:29 2025 -0400 - markiewicz@stanford.edu - run: ruff format [ignore-rev]
 d97ae316c0bdf71084a0732760ceed5221033fc2
 # Thu May 15 09:26:57 2025 -0400 - markiewicz@stanford.edu - run: ruff check --fix [ignore-rev]
diff --git a/.maint/update_authors.py b/.maint/update_authors.py
index 48fe34eed0..a5a6a3d93c 100644
--- a/.maint/update_authors.py
+++ b/.maint/update_authors.py
@@ -130,10 +130,11 @@ def get_git_lines(fname='line-contributors.txt'):
     if not lines:
         raise RuntimeError(
             """\
-Could not find line-contributors from git repository.%s"""
-            % """ \
+Could not find line-contributors from git repository.{}""".format(
+                """ \
 git-line-summary not found, please install git-extras.
 """
-            * (cmd[0] is None)
+                * (cmd[0] is None)
+            )
         )
     return [' '.join(line.strip().split()[1:-1]) for line in lines if '%' in line]
@@ -219,7 +220,7 @@ def zenodo(
         elif isinstance(creator['affiliation'], list):
             creator['affiliation'] = creator['affiliation'][0]
 
-    Path(zenodo_file).write_text('%s\n' % json.dumps(zenodo, indent=2, ensure_ascii=False))
+    Path(zenodo_file).write_text(f'{json.dumps(zenodo, indent=2, ensure_ascii=False)}\n')
 
 
 @cli.command()
@@ -266,10 +267,8 @@ def _aslist(value):
 
     aff_indexes = [
         ', '.join(
-            [
-                '%d' % (affiliations.index(a) + 1)
-                for a in _aslist(author.get('affiliation', 'Unaffiliated'))
-            ]
+            str(affiliations.index(a) + 1)
+            for a in _aslist(author.get('affiliation', 'Unaffiliated'))
         )
         for author in hits
     ]
@@ -280,15 +279,13 @@ def _aslist(value):
             file=sys.stderr,
         )
 
-    print('Authors (%d):' % len(hits))
-    print(
-        '%s.'
-        % '; '.join(['%s \\ :sup:`%s`\\ ' % (i['name'], idx) for i, idx in zip(hits, aff_indexes)])
-    )
+    print(f'Authors ({len(hits)}):')
+    print('; '.join([rf'{i["name"]} \ :sup:`{idx}`\ ' for i, idx in zip(hits, aff_indexes)]) + '.')
 
     print(
-        '\n\nAffiliations:\n%s'
-        % '\n'.join(['{0: >2}. {1}'.format(i + 1, a) for i, a in enumerate(affiliations)])
+        '\n\nAffiliations:\n{}'.format(
+            '\n'.join([f'{i + 1: >2}. {a}' for i, a in enumerate(affiliations)])
+        )
     )
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ce9679c707..9fa3868f94 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -12,12 +12,9 @@ repos:
       - id: end-of-file-fixer
       - id: fix-byte-order-marker
       - id: trailing-whitespace
-  # Enable when no significant PRs are in progress
-  # - repo: https://github.com/psf/black
-  #   rev: 23.1.0
-  #   hooks:
-  #     - id: black
-  # - repo: https://github.com/pycqa/isort
-  #   rev: 5.12.0
-  #   hooks:
-  #     - id: isort
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.11.10
+    hooks:
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
diff --git a/docs/tools/apigen.py b/docs/tools/apigen.py
index f602f45fa8..30e4b4789f 100644
--- a/docs/tools/apigen.py
+++ b/docs/tools/apigen.py
@@ -27,7 +27,7 @@
 DEBUG = True
 
 
-class ApiDocWriter(object):
+class ApiDocWriter:
     """Class for automatic detection and parsing of API docs
     to Sphinx-parsable reST format"""
@@ -185,9 +185,8 @@ def _parse_module(self, uri):
             # nothing that we could handle here.
             return ([], [])
 
-        f = open(filename, 'rt')
-        functions, classes = self._parse_lines(f)
-        f.close()
+        with open(filename) as f:
+            functions, classes = self._parse_lines(f)
         return functions, classes
 
     def _parse_module_with_import(self, uri):
@@ -217,14 +216,10 @@ def _parse_module_with_import(self, uri):
                 continue
             obj = mod.__dict__[obj_str]
             # Check if function / class defined in module
-            if not self.other_defines and not getmodule(obj) == mod:
+            if not self.other_defines and getmodule(obj) != mod:
                 continue
             # figure out if obj is a function or class
-            if (
-                hasattr(obj, 'func_name')
-                or isinstance(obj, BuiltinFunctionType)
-                or isinstance(obj, FunctionType)
-            ):
+            if hasattr(obj, 'func_name') or isinstance(obj, (BuiltinFunctionType, FunctionType)):
                 functions.append(obj_str)
             else:
                 try:
@@ -278,7 +273,7 @@ def generate_api_doc(self, uri):
 
         # Make a shorter version of the uri that omits the package name for
         # titles
-        uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
+        uri_short = re.sub(rf'^{self.package_name}\.', '', uri)
 
         head = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
         body = ''
@@ -345,20 +340,12 @@ def _survives_exclude(self, matchstr, match_type):
         elif match_type == 'package':
             patterns = self.package_skip_patterns
         else:
-            raise ValueError('Cannot interpret match type "%s"' % match_type)
+            raise ValueError(f'Cannot interpret match type "{match_type}"')
         # Match to URI without package name
         L = len(self.package_name)
         if matchstr[:L] == self.package_name:
             matchstr = matchstr[L:]
-        for pat in patterns:
-            try:
-                pat.search
-            except AttributeError:
-                pat = re.compile(pat)
-            if pat.search(matchstr):
-                return False
-
-        return True
+        return not any(re.search(pat, matchstr) for pat in patterns)
 
     def discover_modules(self):
         r"""Return module sequence discovered from ``self.package_name``
@@ -426,7 +413,7 @@ def write_modules_api(self, modules, outdir):
 
         written_modules = []
 
         for ulm, mods in module_by_ulm.items():
-            print('Generating docs for %s:' % ulm)
+            print(f'Generating docs for {ulm}:')
             document_head = []
             document_body = []
@@ -438,11 +425,8 @@ def write_modules_api(self, modules, outdir):
                 document_body.append(body)
 
             out_module = ulm + self.rst_extension
-            outfile = os.path.join(outdir, out_module)
-            fileobj = open(outfile, 'wt')
-
-            fileobj.writelines(document_head + document_body)
-            fileobj.close()
+            with open(os.path.join(outdir, out_module), 'w') as fileobj:
+                fileobj.writelines(document_head + document_body)
             written_modules.append(out_module)
 
         self.written_modules = written_modules
@@ -497,14 +481,13 @@ def write_index(self, outdir, froot='gen', relative_to=None):
             relpath = (outdir + os.path.sep).replace(relative_to + os.path.sep, '')
         else:
             relpath = outdir
-        idx = open(path, 'wt')
-        w = idx.write
-        w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
-
-        title = 'API Reference'
-        w(title + '\n')
-        w('=' * len(title) + '\n\n')
-        w('.. toctree::\n\n')
-        for f in self.written_modules:
-            w(' %s\n' % os.path.join(relpath, f))
-        idx.close()
+        with open(path, 'w') as idx:
+            w = idx.write
+            w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
+
+            title = 'API Reference'
+            w(title + '\n')
+            w('=' * len(title) + '\n\n')
+            w('.. toctree::\n\n')
+            for f in self.written_modules:
+                w(f' {os.path.join(relpath, f)}\n')
diff --git a/docs/tools/buildmodref.py b/docs/tools/buildmodref.py
index e0a7c41059..2e81222b95 100755
--- a/docs/tools/buildmodref.py
+++ b/docs/tools/buildmodref.py
@@ -1,8 +1,6 @@
 #!/usr/bin/env python
 """Script to auto-generate API docs."""
 
-from __future__ import division, print_function
-
 # stdlib imports
 import sys
@@ -16,7 +14,7 @@
 
 
 def abort(error):
-    print('*WARNING* API documentation not generated: %s' % error)
+    print(f'*WARNING* API documentation not generated: {error}')
     exit()
@@ -43,13 +41,13 @@ def writeapi(package, outdir, source_version, other_defines=True):
     docwriter = ApiDocWriter(package, rst_extension='.rst', other_defines=other_defines)
 
     docwriter.package_skip_patterns += [
-        r'\.%s$' % package,
+        rf'\.{package}$',
         r'.*test.*$',
        r'\.version.*$',
     ]
     docwriter.write_api_docs(outdir)
     docwriter.write_index(outdir, 'index', relative_to=outdir)
-    print('%d files written' % len(docwriter.written_modules))
+    print(f'{len(docwriter.written_modules)} files written')
 
 
 if __name__ == '__main__':
diff --git a/pyproject.toml b/pyproject.toml
index 040fab4974..72e0e5949c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -182,8 +182,12 @@ line-length = 99
 select = [
   "F",
   "E",
+  "C",
   "W",
+  "B",
   "I",
+  "SIM",
+  "UP",
 ]
 ignore = [
   "E203",
@@ -198,6 +202,9 @@ inline-quotes = "single"
 [tool.ruff.lint.extend-per-file-ignores]
 "*/__init__.py" = ["F401"]
 "docs/conf.py" = ["E265"]
+"*/test_*.py" = [
+  "B018",  # Unassigned expressions are fine
+]
 
 [tool.ruff.format]
 quote-style = "single"
diff --git a/sdcflows/cli/main.py b/sdcflows/cli/main.py
index d96751dd2c..9336903f31 100644
--- a/sdcflows/cli/main.py
+++ b/sdcflows/cli/main.py
@@ -25,7 +25,6 @@
 
 def main(argv=None):
     """Entry point for SDCFlows' CLI."""
-    import atexit
     import gc
     import os
     import sys
@@ -34,8 +33,6 @@ def main(argv=None):
     from sdcflows import config
     from sdcflows.cli.parser import parse_args
 
-    atexit.register(config.restore_env)
-
     # Run parser
     parse_args(argv)
diff --git a/sdcflows/cli/parser.py b/sdcflows/cli/parser.py
index 184f375717..ed89836d1f 100644
--- a/sdcflows/cli/parser.py
+++ b/sdcflows/cli/parser.py
@@ -22,7 +22,6 @@
 #
 """Standalone command line executable for estimation of fieldmaps."""
 
-import re
 from argparse import Action, ArgumentDefaultsHelpFormatter, ArgumentParser
 from functools import partial
 from pathlib import Path
@@ -46,9 +45,7 @@ def _parse_participant_labels(value):
     ['s060']
 
     """
-    return sorted(
-        set(re.sub(r'^sub-', '', item.strip()) for item in re.split(r'\s+', f'{value}'.strip()))
-    )
+    return sorted({item.removeprefix('sub-') for item in value.split()})
 
 
 def _parser():
@@ -301,12 +298,10 @@ def parse_args(args=None, namespace=None):
 
     # Ensure input and output folders are not the same
     if output_dir == bids_dir:
+        suggested_path = bids_dir / 'derivatives' / f'sdcflows_{version.split("+")[0]}'
         parser.error(
             'The selected output folder is the same as the input BIDS folder. '
-            'Please modify the output path (suggestion: %s).'
-            % bids_dir
-            / 'derivatives'
-            / ('sdcflows_%s' % version.split('+')[0])
+            f'Please modify the output path (suggestion: {suggested_path}).'
         )
 
     if bids_dir in work_dir.parents:
diff --git a/sdcflows/config.py b/sdcflows/config.py
index e2e851184d..98a660c7dd 100644
--- a/sdcflows/config.py
+++ b/sdcflows/config.py
@@ -124,6 +124,8 @@
     from importlib_metadata import version as get_version
 
 # Ignore annoying warnings
+import contextlib
+
 from sdcflows import __version__
 from sdcflows._warnings import logging
@@ -200,7 +202,7 @@
 _memory_gb = None
 try:
     if 'linux' in sys.platform:
-        with open('/proc/meminfo', 'r') as f_in:
+        with open('/proc/meminfo') as f_in:
             _meminfo_lines = f_in.readlines()
             _mem_total_line = [line for line in _meminfo_lines if 'MemTotal' in line][0]
             _mem_total = float(_mem_total_line.split()[1])
@@ -220,7 +222,7 @@ class _Config:
     """An abstract class forbidding instantiation."""
 
-    _paths = tuple()
+    _paths = ()
 
     def __init__(self):
         """Avert instantiation."""
@@ -239,10 +241,8 @@ def load(cls, settings, init=True):
                 setattr(cls, k, v)
 
         if init:
-            try:
+            with contextlib.suppress(AttributeError):
                 cls.init()
-            except AttributeError:
-                pass
 
     @classmethod
     def get(cls):
@@ -647,13 +647,3 @@ def _process_initializer(config_file: Path):
 
     # Set the maximal number of threads per process
     os.environ['OMP_NUM_THREADS'] = f'{config.nipype.omp_nthreads}'
-
-
-def restore_env():
-    """Restore the original environment."""
-
-    for k in os.environ.keys():
-        del os.environ[k]
-
-    for k, v in environment._pre_sdcflows.items():
-        os.environ[k] = v
diff --git a/sdcflows/fieldmaps.py b/sdcflows/fieldmaps.py
index 5f9262f44b..b05e7f7822 100644
--- a/sdcflows/fieldmaps.py
+++ b/sdcflows/fieldmaps.py
@@ -342,7 +342,7 @@ def __attrs_post_init__(self):
 
         # Fieldmap option 1: actual field-mapping sequences
         fmap_types = suffix_set.intersection(('fieldmap', 'phasediff', 'phase1', 'phase2'))
-        if len(fmap_types) > 1 and fmap_types - set(('phase1', 'phase2')):
+        if len(fmap_types) > 1 and fmap_types - {'phase1', 'phase2'}:
             raise TypeError(f'Incompatible suffices found: <{",".join(fmap_types)}>.')
 
         if fmap_types:
@@ -376,7 +376,7 @@ def __attrs_post_init__(self):
                 raise ValueError(
                     'A fieldmap or phase-difference estimation type was found, '
                     f'but an anatomical reference ({magnitude} file) is missing.'
-                )
+                ) from None
 
         # Check presence and try to find (if necessary) the second magnitude file
         if self.method == EstimatorType.PHASEDIFF and 'magnitude2' not in suffix_set:
@@ -390,7 +390,7 @@ def __attrs_post_init__(self):
                 raise ValueError(
                     'A phase-difference estimation (phase1/2) type was found, '
                     'but an anatomical reference (magnitude2 file) is missing.'
-                )
+                ) from None
 
         # Fieldmap option 2: PEPOLAR (and fieldmap-less or ANAT)
         # IMPORTANT NOTE: fieldmap-less approaches can be considered PEPOLAR with RO = 0.0s
@@ -403,7 +403,7 @@ def __attrs_post_init__(self):
 
         if _pepolar_estimation and not anat_types:
             self.method = MODALITIES[pepolar_types.pop()]
-            _pe = set(f.metadata['PhaseEncodingDirection'] for f in self.sources)
+            _pe = {f.metadata['PhaseEncodingDirection'] for f in self.sources}
             if len(_pe) == 1:
                 raise ValueError(
                     f'Only one phase-encoding direction <{_pe.pop()}> found across sources.'
@@ -418,9 +418,9 @@ def __attrs_post_init__(self):
             # No method has been identified -> fail.
             raise ValueError('Insufficient sources to estimate a fieldmap.')
 
-        intents_meta = set(
+        intents_meta = {
             el for f in self.sources for el in listify(f.metadata.get('IntendedFor') or [])
-        )
+        }
 
         # Register this estimation method
         if not self.bids_id:
diff --git a/sdcflows/interfaces/bspline.py b/sdcflows/interfaces/bspline.py
index ecf394823e..bdf88ba1c1 100644
--- a/sdcflows/interfaces/bspline.py
+++ b/sdcflows/interfaces/bspline.py
@@ -628,7 +628,7 @@ def _fix_topup_fieldcoeff(in_coeff, fmap_ref, pe_dir, out_file=None):
         )
     )
     header['cal_min'] = -header['cal_max']
-    header.set_intent('estimate', tuple(), name='B-Spline coefficients')
+    header.set_intent('estimate', (), name='B-Spline coefficients')
 
     # Write out fixed (generalized) coefficients
     coeffnii.__class__(coeffnii.dataobj, newaff, header).to_filename(out_file)
diff --git a/sdcflows/interfaces/reportlets.py b/sdcflows/interfaces/reportlets.py
index 3f7b05f39e..bccd855f1b 100644
--- a/sdcflows/interfaces/reportlets.py
+++ b/sdcflows/interfaces/reportlets.py
@@ -61,7 +61,7 @@ class FieldmapReportlet(reporting.ReportCapableInterface):
     def __init__(self, **kwargs):
         """Instantiate FieldmapReportlet."""
         self._n_cuts = kwargs.pop('n_cuts', self._n_cuts)
-        super(FieldmapReportlet, self).__init__(generate_report=True, **kwargs)
+        super().__init__(generate_report=True, **kwargs)
 
     def _run_interface(self, runtime):
         return runtime
@@ -77,7 +77,7 @@ def _generate_report(self):
         fmapnii = nb.squeeze_image(rotate_affine(load_img(self.inputs.fieldmap), rot=canonical_r))
 
         if fmapnii.dataobj.ndim == 4:
-            for i, tstep in enumerate(nb.four_to_three(fmapnii)):
+            for tstep in nb.four_to_three(fmapnii):
                 if np.any(np.asanyarray(tstep.dataobj) != 0):
                     fmapnii = tstep
                     break
diff --git a/sdcflows/interfaces/utils.py b/sdcflows/interfaces/utils.py
index f315623de0..80a5be0b6d 100644
--- a/sdcflows/interfaces/utils.py
+++ b/sdcflows/interfaces/utils.py
@@ -196,20 +196,20 @@ def _run_interface(self, runtime):
 
         # Identity transform
         if np.array_equal(img2target, [[0, 1], [1, 1], [2, 1]]):
-            self._results = dict(
-                out_file=self.inputs.in_file,
-                pe_dir=self.inputs.pe_dir,
-            )
+            self._results = {
+                'out_file': self.inputs.in_file,
+                'pe_dir': self.inputs.pe_dir,
+            }
             return runtime
 
         reoriented = img.as_reoriented(img2target)
 
         pe_dirs = [reorient_pedir(pe_dir, img2target) for pe_dir in self.inputs.pe_dir]
 
-        self._results = dict(
-            out_file=fname_presuffix(self.inputs.in_file, suffix=target, newpath=runtime.cwd),
-            pe_dir=pe_dirs,
-        )
+        self._results = {
+            'out_file': fname_presuffix(self.inputs.in_file, suffix=target, newpath=runtime.cwd),
+            'pe_dir': pe_dirs,
+        }
 
         reoriented.to_filename(self._results['out_file'])
@@ -405,7 +405,7 @@ def _flatten(inlist, max_trs=50, out_dir=None):
     out_dir = Path(out_dir) if out_dir is not None else Path()
 
     output = []
-    for i, (path, meta) in enumerate(inlist):
+    for path, meta in inlist:
         img = nb.load(path)
         if len(img.shape) == 3:
             output.append((path, meta))
diff --git a/sdcflows/tests/test_fieldmaps.py b/sdcflows/tests/test_fieldmaps.py
index 5032c68dcf..383376b988 100644
--- a/sdcflows/tests/test_fieldmaps.py
+++ b/sdcflows/tests/test_fieldmaps.py
@@ -125,7 +125,7 @@ def test_FieldmapEstimation(dsA_dir, inputfiles, method, nsources):
 
     # Exercise workflow creation
     wf = fe.get_workflow()
-    wf == fe.get_workflow()
+    assert wf == fe.get_workflow()
 
 
 @pytest.mark.parametrize(
diff --git a/sdcflows/tests/test_transform.py b/sdcflows/tests/test_transform.py
index 05b54f4712..cb2d01bb3e 100644
--- a/sdcflows/tests/test_transform.py
+++ b/sdcflows/tests/test_transform.py
@@ -197,7 +197,7 @@ def test_apply_transform(tmpdir, outdir, datadir, pe0, hmc, fmap):
     error_margin = 0.5
     if fmap is False:  # If no fieldmap, this is equivalent to only HMC
         realigned = LinearTransformsMapping(hmc_xfms, reference=in_file).apply(in_file)
-        error = np.sqrt(((corrected.dataobj - realigned.dataobj) ** 2))
+        error = np.sqrt((corrected.dataobj - realigned.dataobj) ** 2)
 
         if outdir:
             # Do not include the first volume in the average to enhance differences
@@ -226,7 +226,7 @@ def test_apply_transform(tmpdir, outdir, datadir, pe0, hmc, fmap):
             )
     else:
         realigned = nb.load(in_file)
-        error = np.nan_to_num(np.sqrt(((corrected.dataobj - realigned.dataobj) ** 2)), nan=0)
+        error = np.nan_to_num(np.sqrt((corrected.dataobj - realigned.dataobj) ** 2), nan=0)
         error_margin = 200  # test oracle is pretty bad here - needs revision.
 
         if outdir:
@@ -290,7 +290,7 @@ def test_apply_transform(tmpdir, outdir, datadir, pe0, hmc, fmap):
             out_report=str(outdir / f'sub-pilot_ses-15_acq-b0_dir-{pe0}_desc-hmcdiff_dwi.svg'),
         ).run()
 
-        error = np.sqrt(((corrected.dataobj - corrected_nohmc.dataobj) ** 2))
+        error = np.sqrt((corrected.dataobj - corrected_nohmc.dataobj) ** 2)
         realigned.__class__(
             error,
             realigned.affine,
diff --git a/sdcflows/transform.py b/sdcflows/transform.py
index 3388789ac3..d3ad85dd1d 100644
--- a/sdcflows/transform.py
+++ b/sdcflows/transform.py
@@ -49,9 +49,10 @@
 
 import asyncio
 import os
+from collections.abc import Sequence
 from functools import partial
 from pathlib import Path
-from typing import Callable, Sequence, Tuple
+from typing import Callable
 from warnings import warn
 
 import attr
@@ -71,7 +72,7 @@
 def _sdc_unwarp(
     data: np.ndarray,
     coordinates: np.ndarray,
-    pe_info: Tuple[int, float],
+    pe_info: tuple[int, float],
     hmc_xfm: np.ndarray | None,
     jacobian: bool,
     fmap_hz: np.ndarray,
@@ -120,7 +121,7 @@ def _sdc_unwarp(
 async def worker(
     data: np.ndarray,
     coordinates: np.ndarray,
-    pe_info: Tuple[int, float],
+    pe_info: tuple[int, float],
     hmc_xfm: np.ndarray,
     func: Callable,
     semaphore: asyncio.Semaphore,
@@ -136,7 +137,7 @@ async def unwarp_parallel(
     fulldataset: np.ndarray,
     coordinates: np.ndarray,
     fmap_hz: np.ndarray,
-    pe_info: Sequence[Tuple[int, float]],
+    pe_info: Sequence[tuple[int, float]],
     xfms: Sequence[np.ndarray],
     jacobian: bool,
     order: int = 3,
@@ -463,7 +464,8 @@ def apply(
         if self.mapped is not None:
             warn(
                 'The fieldmap has been already fit, the user is responsible for '
-                'ensuring the parameters of the EPI target are consistent.'
+                'ensuring the parameters of the EPI target are consistent.',
+                stacklevel=2,
             )
         else:
             # Generate warp field (before ensuring positive cosines)
@@ -490,18 +492,12 @@ def apply(
             ro_time *= n_volumes
 
         pe_info = []
-        for volid in range(n_volumes):
-            pe_axis = 'ijk'.index(pe_dir[volid][0])
-            axis_flip = axcodes[pe_axis] in ('LPI')
-            pe_flip = pe_dir[volid].endswith('-')
-
-            pe_info.append(
-                (
-                    pe_axis,
-                    # Displacements are reversed if either is true (after ensuring positive cosines)
-                    -ro_time[volid] if (axis_flip ^ pe_flip) else ro_time[volid],
-                )
-            )
+        for vol_pe_dir, vol_ro_time in zip(pe_dir, ro_time):
+            pe_axis = 'ijk'.index(vol_pe_dir[0])
+            # Displacements are reversed if either is true (after ensuring positive cosines)
+            flip = (axcodes[pe_axis] in 'LPI') ^ vol_pe_dir.endswith('-')
+
+            pe_info.append((pe_axis, -vol_ro_time if flip else vol_ro_time))
 
         # Reference image's voxel coordinates (in voxel units)
         voxcoords = (
@@ -524,7 +520,8 @@ def apply(
             warn(
                 'Head-motion compensating (realignment) transforms are ignored when applying '
                 'the unwarp with SDCFlows. This feature will be enabled as soon as unit tests '
-                'are implemented for its quality assurance.'
+                'are implemented for its quality assurance.',
+                stacklevel=1,
             )
 
         # Resample
@@ -738,7 +735,7 @@ def grid_bspline_weights(target_nii, ctrl_nii, dtype='float32'):
         0,
         atol=1e-3,
     ):
-        warn("Image's and B-Spline's grids are not aligned.")
+        warn("Image's and B-Spline's grids are not aligned.", stacklevel=2)
 
     target_to_grid = np.linalg.inv(ctrl_nii.affine) @ target_nii.affine
     wd = []
diff --git a/sdcflows/utils/bimap.py b/sdcflows/utils/bimap.py
index 5a8ed03759..bb360daea3 100644
--- a/sdcflows/utils/bimap.py
+++ b/sdcflows/utils/bimap.py
@@ -135,11 +135,11 @@ def __setitem__(self, key, value):
         try:
             hash(value)
         except TypeError as exc:
-            raise TypeError(f"value '{value}' of {exc}")
+            raise TypeError(f"value '{value}' of {exc}") from None
         try:
             hash(key)
         except TypeError as exc:
-            raise TypeError(f"key '{key}' of {exc}")
+            raise TypeError(f"key '{key}' of {exc}") from None
 
         if self.__contains__(key):
             raise KeyError(f"'{key}' is already {'a value' * (key in self._inverse)} in mapping")
@@ -174,11 +174,11 @@ def __contains__(self, key):
 
     def add(self, value):
         """Insert a new value in the bidict, generating an automatic key."""
-        _used = set(
+        _used = {
             int(i.groups()[0])
             for i in [_autokey_pat.match(k) for k in self.keys() if k.startswith('auto_')]
             if i is not None
-        )
+        }
         for i in range(len(_used) + 1):
             if i not in _used:
                 newkey = f'auto_{i:05d}'
@@ -222,11 +222,11 @@ class EstimatorRegistry(bidict):
     @property
     def sources(self):
         """Return a flattened list of fieldmap sources."""
-        return sorted(set([el for group in self.values() for el in group]))
+        return sorted({el for group in self.values() for el in group})
 
     def get_key(self, value):
         """Get the key(s) containing a particular value."""
         if value not in self.sources:
-            return tuple()
+            return ()
 
         return tuple(sorted(k for k, v in self.items() if value in v))
diff --git a/sdcflows/utils/phasemanip.py b/sdcflows/utils/phasemanip.py
index 2ee029ab92..c32c8c8d3f 100644
--- a/sdcflows/utils/phasemanip.py
+++ b/sdcflows/utils/phasemanip.py
@@ -135,13 +135,15 @@ def delta_te(in_values):
            te2 = float(in_values.get('EchoTimeDifference'))
            return abs(te2)
         except TypeError:
-            raise ValueError('Phase/phase-difference fieldmaps: no echo-times information.')
+            raise ValueError(
+                'Phase/phase-difference fieldmaps: no echo-times information.'
+            ) from None
         except ValueError:
-            raise ValueError(f'Could not interpret metadata .')
+            raise ValueError(f'Could not interpret metadata .') from None
 
     try:
         te2 = float(te2 or 'unknown')
         te1 = float(te1 or 'unknown')
     except ValueError:
-        raise ValueError(f'Could not interpret metadata .')
+        raise ValueError(f'Could not interpret metadata .') from None
 
     return abs(te2 - te1)
diff --git a/sdcflows/utils/wrangler.py b/sdcflows/utils/wrangler.py
index acbcbe6ff9..235697bbff 100644
--- a/sdcflows/utils/wrangler.py
+++ b/sdcflows/utils/wrangler.py
@@ -29,7 +29,7 @@
 from functools import reduce
 from itertools import product
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Union
+from typing import Any
 
 from bids.layout import BIDSFile, BIDSLayout
 from bids.utils import listify
@@ -59,7 +59,7 @@ def _resolve_intent(intent: str, layout: BIDSLayout, subject: str) -> str | None:
     return intent
 
 
-def _filter_metadata(metadata: Dict[str, Any], subject: str) -> Dict[str, Any]:
+def _filter_metadata(metadata: dict[str, Any], subject: str) -> dict[str, Any]:
     intents = metadata.get('IntendedFor')
     if intents:
         updated = [_normalize_intent(intent, subject) for intent in listify(intents)]
@@ -71,12 +71,12 @@ def find_estimators(
     *,
     layout: BIDSLayout,
     subject: str,
-    sessions: Optional[List[str]] = None,
-    fmapless: Union[bool, set] = True,
+    sessions: list[str] | None = None,
+    fmapless: bool | set = True,
     force_fmapless: bool = False,
-    logger: Optional[logging.Logger] = None,
-    bids_filters: Optional[dict] = None,
-    anat_suffix: Union[str, List[str]] = 'T1w',
+    logger: logging.Logger | None = None,
+    bids_filters: dict | None = None,
+    anat_suffix: str | list[str] = 'T1w',
 ) -> list:
     """
     Apply basic heuristics to automatically find available data for fieldmap estimation.
@@ -346,7 +346,7 @@ def find_estimators(
     estimators = []
 
     # Step 1. Use B0FieldIdentifier metadata
-    b0_ids = tuple()
+    b0_ids = ()
     with suppress(BIDSEntityError):
         # flatten lists from json (tupled in pybids for hashing), then unique
         b0_ids = reduce(
@@ -445,7 +445,7 @@ def find_estimators(
 
     # At this point, only single-PE _epi files WITH ``IntendedFor`` can
     # be automatically processed.
-    has_intended = tuple()
+    has_intended = ()
     with suppress(ValueError):
         has_intended = layout.get(
             **{
@@ -545,10 +545,10 @@ def find_anatomical_estimators(
     anat_file: BIDSFile,
     layout: BIDSLayout,
     subject: str,
-    sessions: List[str],
-    base_entities: Dict[str, Any],
-    suffixes: List[str],
-) -> List[List[fm.FieldmapFile]]:
+    sessions: list[str],
+    base_entities: dict[str, Any],
+    suffixes: list[str],
+) -> list[list[fm.FieldmapFile]]:
     r"""Find anatomical estimators
 
     Given an anatomical reference image, create lists of files for estimating
@@ -606,7 +606,7 @@ def find_anatomical_estimators(
                 meta.update({'TotalReadoutTime': get_trt(meta, candidate.path)})
             epi_targets.append(fm.FieldmapFile(candidate, metadata=meta))
 
-        def sort_key(fmap):
+        def sort_key(fmap, suffixes=suffixes):
             # Return sbref before DWI/BOLD and shortest echo first
             return suffixes.index(fmap.suffix), fmap.metadata.get('EchoTime', 1)
@@ -644,7 +644,7 @@ def _log_debug_estimation(
 
 
 def _log_debug_estimator_fail(
-    logger: logging.Logger, b0_id: str, files: List[BIDSFile], bids_root: str, message: str
+    logger: logging.Logger, b0_id: str, files: list[BIDSFile], bids_root: str, message: str
 ) -> None:
     """A helper function to log failures to build an estimator when running with verbosity."""
     logger.debug(
diff --git a/sdcflows/viz/utils.py b/sdcflows/viz/utils.py
index 2da06282bc..422cb1add6 100644
--- a/sdcflows/viz/utils.py
+++ b/sdcflows/viz/utils.py
@@ -89,8 +89,8 @@ def plot_registration(
 
     # Find and replace the figure_1 id.
     xml_data = etree.fromstring(svg)
-    find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS)
-    find_text(xml_data)[0].set('id', '%s-%s-%s' % (div_id, mode, uuid4()))
+    find_text = etree.ETXPath(f"//{{{SVGNS}}}g[@id='figure_1']")
+    find_text(xml_data)[0].set('id', f'{div_id}-{mode}-{uuid4()}')
 
     svg_fig = SVGFigure()
     svg_fig.root = xml_data
diff --git a/sdcflows/workflows/apply/registration.py b/sdcflows/workflows/apply/registration.py
index b9fb1b2775..f8b89a6593 100644
--- a/sdcflows/workflows/apply/registration.py
+++ b/sdcflows/workflows/apply/registration.py
@@ -164,7 +164,8 @@ def init_coeff2epi_wf(
     if write_coeff:
         warn(
             'SDCFlows does not tinker with the coefficients file anymore, '
-            'the `write_coeff` parameter will be removed in a future release.'
+            'the `write_coeff` parameter will be removed in a future release.',
+            stacklevel=2,
         )
 
     return workflow
diff --git a/sdcflows/workflows/base.py b/sdcflows/workflows/base.py
index 788fd8b77f..b026f53a89 100644
--- a/sdcflows/workflows/base.py
+++ b/sdcflows/workflows/base.py
@@ -115,9 +115,9 @@ def init_fmap_preproc_wf(
                     in_file=f.path,
                     use_estimate=use_metadata_estimates,
                 )
-            except ValueError:
+            except ValueError as e:
                 msg = f'Missing readout timing information for <{f.path}>.'
-                raise RuntimeError(msg)
+                raise RuntimeError(msg) from e
 
         est_wf = estimator.get_workflow(
             use_metadata_estimates=use_metadata_estimates,
diff --git a/sdcflows/workflows/fit/base.py b/sdcflows/workflows/fit/base.py
index 7ad2f086be..d3264702be 100644
--- a/sdcflows/workflows/fit/base.py
+++ b/sdcflows/workflows/fit/base.py
@@ -49,7 +49,7 @@ def init_sdcflows_wf():
         logger=config.loggers.cli,
     )
 
-    for subject, sub_estimators in estimators_record.items():
+    for sub_estimators in estimators_record.values():
         for estim in sub_estimators:
             estim_wf = estim.get_workflow(
                 omp_nthreads=config.nipype.omp_nthreads,
diff --git a/sdcflows/workflows/fit/pepolar.py b/sdcflows/workflows/fit/pepolar.py
index 25ae08e827..c64a694c89 100644
--- a/sdcflows/workflows/fit/pepolar.py
+++ b/sdcflows/workflows/fit/pepolar.py
@@ -293,9 +293,8 @@ def init_3dQwarp_wf(omp_nthreads=1, debug=False, name='pepolar_estimate_wf'):
     from ...utils.misc import last as _last
 
     workflow = Workflow(name=name)
-    workflow.__desc__ = f"""{_PEPOLAR_DESC} \
-with `3dQwarp` (@afni; AFNI {''.join(['%02d' % v for v in afni.Info().version() or []])}).
-"""
+    afni_ver = ''.join(f'{v:02d}' for v in afni.Info().version() or [])
+    workflow.__desc__ = f'{_PEPOLAR_DESC} with `3dQwarp` (@afni; AFNI {afni_ver}).'
 
     inputnode = pe.Node(niu.IdentityInterface(fields=['in_data', 'metadata']), name='inputnode')
diff --git a/sdcflows/workflows/outputs.py b/sdcflows/workflows/outputs.py
index aeac11f92d..ce5aa09997 100644
--- a/sdcflows/workflows/outputs.py
+++ b/sdcflows/workflows/outputs.py
@@ -337,4 +337,4 @@ def _selectintent(metadata):
     """
     from bids.utils import listify
 
-    return sorted(set([el for m in listify(metadata) for el in listify(m.get('IntendedFor', []))]))
+    return sorted({el for m in listify(metadata) for el in listify(m.get('IntendedFor', []))})
diff --git a/tox.ini b/tox.ini
index 77d4df3b13..031a66f45d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -71,9 +71,6 @@ pass_env =
   CLICOLOR
   CLICOLOR_FORCE
   PYTHON_GIL
-deps =
-  # Waiting on a release
-  py313: traits @ git+https://github.com/enthought/traits.git@10954eb
 extras = tests
 setenv =
   pre: PIP_EXTRA_INDEX_URL=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple