Fix warnings, use keyword args where possible (#1484)
rly authored May 26, 2022
1 parent 728e0ac commit d38cc78
Showing 40 changed files with 1,068 additions and 672 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -19,6 +19,9 @@
- Added support for NWB 2.5.0.
- Added support for updated ``IndexSeries`` type, new ``order_of_images`` field in ``Images``, and new neurodata_type
``ImageReferences``. @rly (#1483)
+- Added support for HDMF 3.3.1. This is now the minimum version of HDMF supported. Importantly, HDMF 3.3 introduces
+  warnings when the constructor of a class mapped to an HDMF-common data type or an autogenerated data type class
+  is passed positional arguments instead of all keyword arguments. @rly (#1484)

### Documentation and tutorial enhancements:
- Added tutorial on annotating data via ``TimeIntervals``. @oruebel (#1390)
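The HDMF 3.3 entry in the CHANGELOG hunk above describes the behavior driving this commit. A minimal sketch of that warning, assuming HDMF >= 3.3 is installed; the warning category and message handling below are illustrative, not quoted from HDMF:

```python
import warnings

from hdmf.common import VectorData  # an HDMF-common type with a docval'd constructor

data = [1, 2, 3]

# Under HDMF >= 3.3, positional arguments to this constructor may emit a warning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    col = VectorData("my_col", "a demo column", data)  # positional
    print([str(w.message) for w in caught])

# The all-keyword form, which this commit adopts throughout PyNWB, is warning-free.
col = VectorData(name="my_col", description="a demo column", data=data)
```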
14 changes: 7 additions & 7 deletions docs/gallery/general/extensions.py
@@ -104,7 +104,7 @@

from pynwb import register_class, load_namespaces
from pynwb.ecephys import ElectricalSeries
-from hdmf.utils import docval, call_docval_func, getargs, get_docval
+from hdmf.utils import docval, get_docval, popargs

ns_path = "mylab.namespace.yaml"
load_namespaces(ns_path)
@@ -118,16 +118,16 @@ class TetrodeSeries(ElectricalSeries):
@docval(*get_docval(ElectricalSeries.__init__) + (
{'name': 'trode_id', 'type': int, 'doc': 'the tetrode id'},))
def __init__(self, **kwargs):
-        call_docval_func(super(TetrodeSeries, self).__init__, kwargs)
-        self.trode_id = getargs('trode_id', kwargs)
+        trode_id = popargs('trode_id', kwargs)
+        super().__init__(**kwargs)
+        self.trode_id = trode_id


####################
# .. note::
#
-# See the API docs for more information about :py:func:`~hdmf.utils.docval`
-# :py:func:`~hdmf.utils.call_docval_func`, :py:func:`~hdmf.utils.getargs`
-# and :py:func:`~hdmf.utils.get_docval`
+# See the API docs for more information about :py:func:`~hdmf.utils.docval`,
+# :py:func:`~hdmf.utils.popargs`, and :py:func:`~hdmf.utils.get_docval`
#
# When extending :py:class:`~pynwb.core.NWBContainer` or its
# subclasses, you should define the class field ``__nwbfields__``. This will
@@ -301,7 +301,7 @@ class Potato(NWBContainer):
{'name': 'weight', 'type': float, 'doc': 'weight of potato in grams'},
{'name': 'age', 'type': float, 'doc': 'age of potato in days'})
def __init__(self, **kwargs):
-        super(Potato, self).__init__(name=kwargs['name'])
+        super().__init__(name=kwargs['name'])
self.weight = kwargs['weight']
self.age = kwargs['age']

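The TetrodeSeries diff above swaps `call_docval_func` for `popargs` plus a plain `super().__init__(**kwargs)`. A runnable sketch of just the `popargs` mechanic, where the dict stands in for the kwargs that `@docval` assembles:

```python
from hdmf.utils import popargs

# Stand-in for the kwargs dict that @docval builds from keyword arguments.
kwargs = {"name": "tetrode_series", "trode_id": 4}

trode_id = popargs("trode_id", kwargs)  # removes and returns kwargs["trode_id"]
print(trode_id)  # 4
print(kwargs)    # {'name': 'tetrode_series'} -- now safe to forward via super().__init__(**kwargs)
```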
2 changes: 1 addition & 1 deletion environment-ros3.yml
@@ -6,7 +6,7 @@ channels:
dependencies:
- python=3.9
- h5py==3.6.0
-  - hdmf==3.1.1
+  - hdmf==3.3.1
- matplotlib==3.5.1
- numpy==1.21.0
- pandas==1.3.0
2 changes: 1 addition & 1 deletion requirements-min.txt
@@ -1,6 +1,6 @@
# minimum versions of package dependencies for installing PyNWB
h5py==2.10 # support for selection of datasets with list of indices added in 2.10
-hdmf==3.1.1
+hdmf==3.3.1
numpy==1.16
pandas==1.0.5
python-dateutil==2.7
2 changes: 1 addition & 1 deletion requirements.txt
@@ -1,6 +1,6 @@
# pinned dependencies to reproduce an entire development environment to use PyNWB
h5py==3.3.0
-hdmf==3.1.1
+hdmf==3.3.1
numpy==1.21.0
pandas==1.3.0
python-dateutil==2.8.1
1 change: 1 addition & 0 deletions setup.cfg
@@ -35,6 +35,7 @@ per-file-ignores =
setup.py:T001
test.py:T001
scripts/*:T001
+extend-ignore = E203

[metadata]
description-file = README.rst
12 changes: 6 additions & 6 deletions src/pynwb/__init__.py
@@ -8,7 +8,7 @@
import h5py

from hdmf.spec import NamespaceCatalog
-from hdmf.utils import docval, getargs, popargs, call_docval_func, get_docval
+from hdmf.utils import docval, getargs, popargs, get_docval
from hdmf.backends.io import HDMFIO
from hdmf.backends.hdf5 import HDF5IO as _HDF5IO
from hdmf.validate import ValidatorMap
@@ -87,7 +87,7 @@ def get_manager(**kwargs):
Get a BuildManager to use for I/O using the given extensions. If no extensions are provided,
return a BuildManager that uses the core namespace
'''
-    type_map = call_docval_func(get_type_map, kwargs)
+    type_map = get_type_map(**kwargs)
return BuildManager(type_map)


@@ -227,13 +227,13 @@ def __init__(self, **kwargs):
raise ValueError("cannot load namespaces from file when writing to it")

tm = get_type_map()
-            super(NWBHDF5IO, self).load_namespaces(tm, path, file=file_obj, driver=driver)
+            super().load_namespaces(tm, path, file=file_obj, driver=driver)
manager = BuildManager(tm)

# XXX: Leaving this here in case we want to revert to this strategy for
# loading cached namespaces
# ns_catalog = NamespaceCatalog(NWBGroupSpec, NWBDatasetSpec, NWBNamespace)
-            # super(NWBHDF5IO, self).load_namespaces(ns_catalog, path)
+            # super().load_namespaces(ns_catalog, path)
# tm = TypeMap(ns_catalog)
# tm.copy_mappers(get_type_map())
else:
@@ -243,7 +243,7 @@ def __init__(self, **kwargs):
manager = get_manager(extensions=extensions)
elif manager is None:
manager = get_manager()
-        super(NWBHDF5IO, self).__init__(path, manager=manager, mode=mode, file=file_obj, comm=comm, driver=driver)
+        super().__init__(path, manager=manager, mode=mode, file=file_obj, comm=comm, driver=driver)

@docval({'name': 'src_io', 'type': HDMFIO,
'doc': 'the HDMFIO object (such as NWBHDF5IO) that was used to read the data to export'},
@@ -287,7 +287,7 @@ def export(self, **kwargs):
"""
nwbfile = popargs('nwbfile', kwargs)
kwargs['container'] = nwbfile
-        call_docval_func(super().export, kwargs)
+        super().export(**kwargs)


from . import io as __io # noqa: F401,E402
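Throughout `src/pynwb/__init__.py`, `call_docval_func(f, kwargs)` becomes a direct `f(**kwargs)`. The two are equivalent once callers pass only keyword arguments: `call_docval_func` existed to filter a kwargs dict down to the names the target's docval accepts before calling it, and when the dict already holds exactly those names, plain unpacking does the same job. A sketch with a hypothetical stub (not the real `get_type_map`):

```python
# Hypothetical stand-in for a docval-decorated function such as pynwb.get_type_map.
def get_type_map_stub(extensions=None):
    return ("TypeMap", extensions)

kwargs = {"extensions": None}

# Before: call_docval_func(get_type_map_stub, kwargs) filtered kwargs, then called it.
# After:  direct keyword unpacking, with no hidden filtering step.
print(get_type_map_stub(**kwargs))  # ('TypeMap', None)
```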
131 changes: 64 additions & 67 deletions src/pynwb/base.py
@@ -4,7 +4,7 @@

import numpy as np

-from hdmf.utils import docval, getargs, popargs, call_docval_func, get_docval
+from hdmf.utils import docval, popargs_to_dict, get_docval, popargs
from hdmf.common import DynamicTable, VectorData
from hdmf.utils import get_data_shape

@@ -33,50 +33,44 @@ class ProcessingModule(MultiContainerInterface):
@docval({'name': 'name', 'type': str, 'doc': 'The name of this processing module'},
{'name': 'description', 'type': str, 'doc': 'Description of this processing module'},
{'name': 'data_interfaces', 'type': (list, tuple, dict),
-             'doc': 'NWBDataInterfacess that belong to this ProcessingModule', 'default': None})
+             'doc': 'NWBDataInterfaces that belong to this ProcessingModule', 'default': None})
def __init__(self, **kwargs):
-        call_docval_func(super(ProcessingModule, self).__init__, kwargs)
-        self.description = popargs('description', kwargs)
-        self.data_interfaces = popargs('data_interfaces', kwargs)
+        description, data_interfaces = popargs("description", "data_interfaces", kwargs)
+        super().__init__(**kwargs)
+        self.description = description
+        self.data_interfaces = data_interfaces

@property
def containers(self):
return self.data_interfaces

def __getitem__(self, arg):
return self.get(arg)

@docval({'name': 'container', 'type': (NWBDataInterface, DynamicTable),
'doc': 'the NWBDataInterface to add to this Module'})
def add_container(self, **kwargs):
'''
Add an NWBContainer to this ProcessingModule
'''
-        container = getargs('container', kwargs)
        warn(PendingDeprecationWarning('add_container will be replaced by add'))
-        self.add(container)
+        self.add(kwargs['container'])

@docval({'name': 'container_name', 'type': str, 'doc': 'the name of the NWBContainer to retrieve'})
def get_container(self, **kwargs):
'''
Retrieve an NWBContainer from this ProcessingModule
'''
-        container_name = getargs('container_name', kwargs)
        warn(PendingDeprecationWarning('get_container will be replaced by get'))
-        return self.get(container_name)
+        return self.get(kwargs['container_name'])

@docval({'name': 'NWBDataInterface', 'type': (NWBDataInterface, DynamicTable),
'doc': 'the NWBDataInterface to add to this Module'})
def add_data_interface(self, **kwargs):
-        NWBDataInterface = getargs('NWBDataInterface', kwargs)
        warn(PendingDeprecationWarning('add_data_interface will be replaced by add'))
-        self.add(NWBDataInterface)
+        self.add(kwargs['NWBDataInterface'])

@docval({'name': 'data_interface_name', 'type': str, 'doc': 'the name of the NWBContainer to retrieve'})
def get_data_interface(self, **kwargs):
-        data_interface_name = getargs('data_interface_name', kwargs)
        warn(PendingDeprecationWarning('get_data_interface will be replaced by get'))
-        return self.get(data_interface_name)
+        return self.get(kwargs['data_interface_name'])


@register_class('TimeSeries', CORE_NAMESPACE)
@@ -153,20 +147,27 @@ class TimeSeries(NWBDataInterface):
def __init__(self, **kwargs):
"""Create a TimeSeries object
"""
+        keys_to_set = ("starting_time",
+                       "rate",
+                       "resolution",
+                       "comments",
+                       "description",
+                       "conversion",
+                       "offset",
+                       "unit",
+                       "control",
+                       "control_description",
+                       "continuity")
+        args_to_set = popargs_to_dict(keys_to_set, kwargs)
+        keys_to_process = ("data", "timestamps")  # these are properties and cannot be set with setattr
+        args_to_process = popargs_to_dict(keys_to_process, kwargs)
+        super().__init__(**kwargs)

-        call_docval_func(super(TimeSeries, self).__init__, kwargs)
-        keys = ("resolution",
-                "comments",
-                "description",
-                "conversion",
-                "offset",
-                "unit",
-                "control",
-                "control_description",
-                "continuity")

-        data_shape = get_data_shape(data=kwargs["data"], strict_no_data_load=True)
-        timestamps_shape = get_data_shape(data=kwargs["timestamps"], strict_no_data_load=True)
+        for key, val in args_to_set.items():
+            setattr(self, key, val)

+        data_shape = get_data_shape(data=args_to_process["data"], strict_no_data_load=True)
+        timestamps_shape = get_data_shape(data=args_to_process["timestamps"], strict_no_data_load=True)
if (
# check that the shape is known
data_shape is not None and timestamps_shape is not None
@@ -182,32 +183,25 @@
):
warn("Length of data does not match length of timestamps. Your data may be transposed. Time should be on "
"the 0th dimension")
-        for key in keys:
-            val = kwargs.get(key)
-            if val is not None:
-                setattr(self, key, val)

-        data = getargs('data', kwargs)
+        data = args_to_process['data']
self.fields['data'] = data
if isinstance(data, TimeSeries):
data.__add_link('data_link', self)

-        timestamps = kwargs.get('timestamps')
-        starting_time = kwargs.get('starting_time')
-        rate = kwargs.get('rate')
+        timestamps = args_to_process['timestamps']
if timestamps is not None:
-            if rate is not None:
+            if self.rate is not None:
raise ValueError('Specifying rate and timestamps is not supported.')
-            if starting_time is not None:
+            if self.starting_time is not None:
raise ValueError('Specifying starting_time and timestamps is not supported.')
self.fields['timestamps'] = timestamps
self.timestamps_unit = self.__time_unit
self.interval = 1
if isinstance(timestamps, TimeSeries):
timestamps.__add_link('timestamp_link', self)
-        elif rate is not None:
-            self.rate = rate
-            if starting_time is not None:
-                self.starting_time = starting_time
-            else:
-                self.starting_time = 0.0
+        elif self.rate is not None:
+            if self.starting_time is None:  # override default if rate is provided but not starting time
+                self.starting_time = 0.0
self.starting_time_unit = self.__time_unit
else:
@@ -235,14 +229,16 @@ def no_len_warning(attr):
else:
warn(no_len_warning('data'), UserWarning)

-        if hasattr(self, 'timestamps'):
-            if hasattr(self.timestamps, '__len__'):
-                try:
-                    return len(self.timestamps)
-                except TypeError:
-                    warn(unreadable_warning('timestamps'), UserWarning)
-            elif not (hasattr(self, 'rate') and hasattr(self, 'starting_time')):
-                warn(no_len_warning('timestamps'), UserWarning)
+        # only get here if self.data has no __len__ or __len__ is unreadable
+        if hasattr(self.timestamps, '__len__'):
+            try:
+                return len(self.timestamps)
+            except TypeError:
+                warn(unreadable_warning('timestamps'), UserWarning)
+        elif self.rate is None and self.starting_time is None:
+            warn(no_len_warning('timestamps'), UserWarning)
+
+        return None

@property
def data(self):
@@ -270,8 +266,7 @@ def timestamp_link(self):

def __get_links(self, links):
ret = self.fields.get(links, list())
-        if ret is not None:
-            ret = set(ret)
+        ret = set(ret)
return ret

def __add_link(self, links_key, link):
@@ -296,9 +291,11 @@ class Image(NWBData):
{'name': 'resolution', 'type': 'float', 'doc': 'pixels / cm', 'default': None},
{'name': 'description', 'type': str, 'doc': 'description of image', 'default': None})
def __init__(self, **kwargs):
-        call_docval_func(super(Image, self).__init__, kwargs)
-        self.resolution = kwargs['resolution']
-        self.description = kwargs['description']
+        args_to_set = popargs_to_dict(("resolution", "description"), kwargs)
+        super().__init__(**kwargs)
+
+        for key, val in args_to_set.items():
+            setattr(self, key, val)


@register_class('ImageReferences', CORE_NAMESPACE)
@@ -343,11 +340,11 @@ class Images(MultiContainerInterface):
{'name': 'order_of_images', 'type': 'ImageReferences',
'doc': 'Ordered dataset of references to Image objects stored in the parent group.', 'default': None},)
def __init__(self, **kwargs):
-        name, description, images, order_of_images = popargs('name', 'description', 'images', 'order_of_images', kwargs)
-        super(Images, self).__init__(name, **kwargs)
-        self.description = description
-        self.images = images
-        self.order_of_images = order_of_images
+        args_to_set = popargs_to_dict(("description", "images", "order_of_images"), kwargs)
+        super().__init__(**kwargs)
+        for key, val in args_to_set.items():
+            setattr(self, key, val)


class TimeSeriesReference(NamedTuple):
@@ -516,7 +513,7 @@ class TimeSeriesReferenceVectorData(VectorData):
"to be selected as well as an object reference to the TimeSeries."},
*get_docval(VectorData.__init__, 'data'))
def __init__(self, **kwargs):
-        call_docval_func(super().__init__, kwargs)
+        super().__init__(**kwargs)
# CAUTION: Define any logic specific for init in the self._init_internal function, not here!
self._init_internal()

@@ -535,8 +532,8 @@ def _init_internal(self):
'must be convertible to a TimeSeriesReference'})
def add_row(self, **kwargs):
"""Append a data value to this column."""
-        val = getargs('val', kwargs)
-        if not (isinstance(val, self.TIME_SERIES_REFERENCE_TUPLE)):
+        val = kwargs['val']
+        if not isinstance(val, self.TIME_SERIES_REFERENCE_TUPLE):
val = self.TIME_SERIES_REFERENCE_TUPLE(*val)
val.check_types()
super().append(val)
@@ -546,8 +543,8 @@ def add_row(self, **kwargs):
'must be convertible to a TimeSeriesReference'})
def append(self, **kwargs):
"""Append a data value to this column."""
-        arg = getargs('arg', kwargs)
-        if not (isinstance(arg, self.TIME_SERIES_REFERENCE_TUPLE)):
+        arg = kwargs['arg']
+        if not isinstance(arg, self.TIME_SERIES_REFERENCE_TUPLE):
arg = self.TIME_SERIES_REFERENCE_TUPLE(*arg)
arg.check_types()
super().append(arg)
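The rewritten `base.py` constructors lean on `popargs_to_dict` from `hdmf.utils`, new in HDMF 3.3. A minimal sketch of its behavior as used above (pop each named key out of kwargs and return them as a separate dict), assuming that documented semantics:

```python
from hdmf.utils import popargs_to_dict

kwargs = {"name": "ts", "data": [1, 2, 3], "resolution": 0.1, "comments": "none"}

args_to_set = popargs_to_dict(("resolution", "comments"), kwargs)
print(args_to_set)  # {'resolution': 0.1, 'comments': 'none'}
print(kwargs)       # {'name': 'ts', 'data': [1, 2, 3]} -- forwarded to super().__init__()

# The constructors then apply the popped values once the superclass has run:
# for key, val in args_to_set.items():
#     setattr(self, key, val)
```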
2 changes: 1 addition & 1 deletion src/pynwb/behavior.py
@@ -37,7 +37,7 @@ def __init__(self, **kwargs):
Create a SpatialSeries TimeSeries dataset
"""
name, data, reference_frame, unit = popargs('name', 'data', 'reference_frame', 'unit', kwargs)
-        super(SpatialSeries, self).__init__(name, data, unit, **kwargs)
+        super().__init__(name, data, unit, **kwargs)

# NWB 2.5 restricts length of second dimension to be <= 3
allowed_data_shapes = ((None, ), (None, 1), (None, 2), (None, 3))