From 9adc03b2c59441bc14ac90faee0c0fb9276932d2 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 19:16:25 -0600 Subject: [PATCH 01/18] add ruff check for public functions --- pyproject.toml | 46 ++++----- setup.py | 1 + .../blackrock/blackrockdatainterface.py | 6 +- .../ecephys/blackrock/header_tools.py | 56 +++++------ .../tools/roiextractors/roiextractors.py | 94 ++++++++++++++++++- .../tools/spikeinterface/spikeinterface.py | 5 +- src/neuroconv/tools/testing/mock_probes.py | 11 +++ src/neuroconv/utils/json_schema.py | 9 +- 8 files changed, 169 insertions(+), 59 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0eacf483b..6b49f8917 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,15 +8,15 @@ version = "0.6.2" description = "Convert data from proprietary formats to NWB format." readme = "README.md" authors = [ - {name = "Cody Baker"}, - {name = "Szonja Weigl"}, - {name = "Heberto Mayorquin"}, - {name = "Paul Adkisson"}, - {name = "Luiz Tauffer"}, - {name = "Ben Dichter", email = "ben.dichter@catalystneuro.com"} + { name = "Cody Baker" }, + { name = "Szonja Weigl" }, + { name = "Heberto Mayorquin" }, + { name = "Paul Adkisson" }, + { name = "Luiz Tauffer" }, + { name = "Ben Dichter", email = "ben.dichter@catalystneuro.com" }, ] urls = { "Homepage" = "https://github.com/catalystneuro/neuroconv" } -license = {file = "license.txt"} +license = { file = "license.txt" } keywords = ["nwb"] classifiers = [ "Intended Audience :: Science/Research", @@ -49,7 +49,7 @@ dependencies = [ "parse>=1.20.0", "click", "docstring-parser", - "packaging" # Issue 903 + "packaging", # Issue 903 ] @@ -57,12 +57,12 @@ dependencies = [ test = [ "pytest", "pytest-cov", - "ndx-events>=0.2.0", # for special tests to ensure load_namespaces is set to allow NWBFile load at all times + "ndx-events>=0.2.0", # for special tests to ensure load_namespaces is set to allow NWBFile load at all times "parameterized>=0.8.1", "ndx-miniscope", 
"spikeinterface[qualitymetrics]>=0.101.0", - "zarr<2.18.0", # Error with Blosc (read-only during decode) in numcodecs on May 7; check later if resolved - "pytest-xdist" + "zarr<2.18.0", # Error with Blosc (read-only during decode) in numcodecs on May 7; check later if resolved + "pytest-xdist", ] docs = [ @@ -72,9 +72,9 @@ docs = [ "readthedocs-sphinx-search==0.1.2", "sphinx-toggleprompt==0.2.0", "sphinx-copybutton==0.5.0", - "roiextractors", # Needed for the API documentation - "spikeinterface>=0.101.0", # Needed for the API documentation - "pydata_sphinx_theme==0.12.0" + "roiextractors", # Needed for the API documentation + "spikeinterface>=0.101.0", # Needed for the API documentation + "pydata_sphinx_theme==0.12.0", ] dandi = ["dandi>=0.58.1"] compressors = ["hdf5plugin"] @@ -91,14 +91,10 @@ neuroconv = "neuroconv.tools.yaml_conversion_specification._yaml_conversion_spec [tool.pytest.ini_options] minversion = "6.0" addopts = "-ra --doctest-glob='*.rst'" -testpaths = [ - "docs/conversion_examples_gallery/", - "tests" -] +testpaths = ["docs/conversion_examples_gallery/", "tests"] doctest_optionflags = "ELLIPSIS" - [tool.black] line-length = 120 target-version = ['py38', 'py39', 'py310'] @@ -121,17 +117,23 @@ extend-exclude = ''' ''' - [tool.ruff] [tool.ruff.lint] -select = ["F401", "I", "D101"] # TODO: eventually, expand to other 'F' linting +select = [ + "F401", # Unused import + "I", # All isort rules + "D101", # Missing docstring in public class + "D103", # Missing docstring in public function +] fixable = ["ALL"] [tool.ruff.lint.per-file-ignores] "**__init__.py" = ["F401", "I"] -"tests/**" = ["D"] # We are not enforcing docstrings in tests +"tests/**" = ["D"] # We are not enforcing docstrings in tests "src/neuroconv/tools/testing/data_interface_mixins.py" = ["D"] # We are not enforcing docstrings in the interface mixings +"docs/conf.py" = ["D"] # We are not enforcing docstrings in the conf.py file +"docs/conversion_examples_gallery/conftest.py" = ["D"] # We 
are not enforcing docstrings in the conversion examples [tool.ruff.lint.isort] relative-imports-order = "closest-to-furthest" diff --git a/setup.py b/setup.py index 3160c59fa..1f4b5b65a 100644 --- a/setup.py +++ b/setup.py @@ -10,6 +10,7 @@ def read_requirements(file): + """Read requirements from a file.""" with open(root / file) as f: return f.readlines() diff --git a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py index 19e34fdcd..d5122bf66 100644 --- a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py @@ -3,7 +3,7 @@ from pydantic import FilePath -from .header_tools import parse_nev_basic_header, parse_nsx_basic_header +from .header_tools import _parse_nev_basic_header, _parse_nsx_basic_header from ..baserecordingextractorinterface import BaseRecordingExtractorInterface from ..basesortingextractorinterface import BaseSortingExtractorInterface from ....utils import get_schema_from_method_signature @@ -60,7 +60,7 @@ def __init__( def get_metadata(self) -> dict: metadata = super().get_metadata() # Open file and extract headers - basic_header = parse_nsx_basic_header(self.source_data["file_path"]) + basic_header = _parse_nsx_basic_header(self.source_data["file_path"]) if "TimeOrigin" in basic_header: metadata["NWBFile"].update(session_start_time=basic_header["TimeOrigin"]) if "Comment" in basic_header: @@ -101,7 +101,7 @@ def __init__(self, file_path: FilePath, sampling_frequency: float = None, verbos def get_metadata(self) -> dict: metadata = super().get_metadata() # Open file and extract headers - basic_header = parse_nev_basic_header(self.source_data["file_path"]) + basic_header = _parse_nev_basic_header(self.source_data["file_path"]) if "TimeOrigin" in basic_header: session_start_time = basic_header["TimeOrigin"] 
metadata["NWBFile"].update(session_start_time=session_start_time.strftime("%Y-%m-%dT%H:%M:%S")) diff --git a/src/neuroconv/datainterfaces/ecephys/blackrock/header_tools.py b/src/neuroconv/datainterfaces/ecephys/blackrock/header_tools.py index 7b182a3f4..5ac8f94d5 100644 --- a/src/neuroconv/datainterfaces/ecephys/blackrock/header_tools.py +++ b/src/neuroconv/datainterfaces/ecephys/blackrock/header_tools.py @@ -5,7 +5,7 @@ from struct import calcsize, unpack -def processheaders(curr_file, packet_fields): +def _processheaders(curr_file, packet_fields): """ :param curr_file: {file} the current BR datafile to be processed :param packet_fields : {named tuple} the specific binary fields for the given header @@ -45,11 +45,11 @@ def processheaders(curr_file, packet_fields): return packet_formatted -def format_filespec(header_list): +def _format_filespec(header_list): return str(next(header_list)) + "." + str(next(header_list)) # eg 2.3 -def format_timeorigin(header_list): +def _format_timeorigin(header_list): year = next(header_list) month = next(header_list) _ = next(header_list) @@ -61,12 +61,12 @@ def format_timeorigin(header_list): return datetime(year, month, day, hour, minute, second, millisecond * 1000) -def format_stripstring(header_list): +def _format_stripstring(header_list): string = bytes.decode(next(header_list), "latin-1") return string.split(STRING_TERMINUS, 1)[0] -def format_none(header_list): +def _format_none(header_list): return next(header_list) @@ -74,38 +74,38 @@ def format_none(header_list): STRING_TERMINUS = "\x00" -def parse_nsx_basic_header(nsx_file): +def _parse_nsx_basic_header(nsx_file): nsx_basic_dict = [ - FieldDef("FileSpec", "2B", format_filespec), # 2 bytes - 2 unsigned char - FieldDef("BytesInHeader", "I", format_none), # 4 bytes - uint32 - FieldDef("Label", "16s", format_stripstring), # 16 bytes - 16 char array - FieldDef("Comment", "256s", format_stripstring), # 256 bytes - 256 char array - FieldDef("Period", "I", format_none), # 4 bytes 
- uint32 - FieldDef("TimeStampResolution", "I", format_none), # 4 bytes - uint32 - FieldDef("TimeOrigin", "8H", format_timeorigin), # 16 bytes - 8 uint16 - FieldDef("ChannelCount", "I", format_none), + FieldDef("FileSpec", "2B", _format_filespec), # 2 bytes - 2 unsigned char + FieldDef("BytesInHeader", "I", _format_none), # 4 bytes - uint32 + FieldDef("Label", "16s", _format_stripstring), # 16 bytes - 16 char array + FieldDef("Comment", "256s", _format_stripstring), # 256 bytes - 256 char array + FieldDef("Period", "I", _format_none), # 4 bytes - uint32 + FieldDef("TimeStampResolution", "I", _format_none), # 4 bytes - uint32 + FieldDef("TimeOrigin", "8H", _format_timeorigin), # 16 bytes - 8 uint16 + FieldDef("ChannelCount", "I", _format_none), ] # 4 bytes - uint32 datafile = open(nsx_file, "rb") filetype_id = bytes.decode(datafile.read(8), "latin-1") if filetype_id == "NEURALSG": # this won't contain fields that can be added to NWBFile metadata return dict() - return processheaders(datafile, nsx_basic_dict) + return _processheaders(datafile, nsx_basic_dict) -def parse_nev_basic_header(nev_file): +def _parse_nev_basic_header(nev_file): nev_basic_dict = [ - FieldDef("FileTypeID", "8s", format_stripstring), # 8 bytes - 8 char array - FieldDef("FileSpec", "2B", format_filespec), # 2 bytes - 2 unsigned char - FieldDef("AddFlags", "H", format_none), # 2 bytes - uint16 - FieldDef("BytesInHeader", "I", format_none), # 4 bytes - uint32 - FieldDef("BytesInDataPackets", "I", format_none), # 4 bytes - uint32 - FieldDef("TimeStampResolution", "I", format_none), # 4 bytes - uint32 - FieldDef("SampleTimeResolution", "I", format_none), # 4 bytes - uint32 - FieldDef("TimeOrigin", "8H", format_timeorigin), # 16 bytes - 8 x uint16 - FieldDef("CreatingApplication", "32s", format_stripstring), # 32 bytes - 32 char array - FieldDef("Comment", "256s", format_stripstring), # 256 bytes - 256 char array - FieldDef("NumExtendedHeaders", "I", format_none), + FieldDef("FileTypeID", "8s", 
_format_stripstring), # 8 bytes - 8 char array + FieldDef("FileSpec", "2B", _format_filespec), # 2 bytes - 2 unsigned char + FieldDef("AddFlags", "H", _format_none), # 2 bytes - uint16 + FieldDef("BytesInHeader", "I", _format_none), # 4 bytes - uint32 + FieldDef("BytesInDataPackets", "I", _format_none), # 4 bytes - uint32 + FieldDef("TimeStampResolution", "I", _format_none), # 4 bytes - uint32 + FieldDef("SampleTimeResolution", "I", _format_none), # 4 bytes - uint32 + FieldDef("TimeOrigin", "8H", _format_timeorigin), # 16 bytes - 8 x uint16 + FieldDef("CreatingApplication", "32s", _format_stripstring), # 32 bytes - 32 char array + FieldDef("Comment", "256s", _format_stripstring), # 256 bytes - 256 char array + FieldDef("NumExtendedHeaders", "I", _format_none), ] datafile = open(nev_file, "rb") - return processheaders(datafile, nev_basic_dict) + return _processheaders(datafile, nev_basic_dict) diff --git a/src/neuroconv/tools/roiextractors/roiextractors.py b/src/neuroconv/tools/roiextractors/roiextractors.py index 618d30b4a..f28631c77 100644 --- a/src/neuroconv/tools/roiextractors/roiextractors.py +++ b/src/neuroconv/tools/roiextractors/roiextractors.py @@ -682,9 +682,38 @@ def add_imaging_to_nwbfile( iterator_type: Optional[str] = "v2", iterator_options: Optional[dict] = None, parent_container: Literal["acquisition", "processing/ophys"] = "acquisition", -): +) -> NWBFile: + """ + Add imaging data from an ImagingExtractor object to an NWBFile. + + Parameters + ---------- + imaging : ImagingExtractor + The extractor object containing the imaging data. + nwbfile : NWBFile + The NWB file where the imaging data will be added. + metadata : dict, optional + Metadata for the NWBFile, by default None. + photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional + The type of photon series to be added, by default "TwoPhotonSeries". + photon_series_index : int, optional + The index of the photon series in the provided imaging data, by default 0. 
+ iterator_type : str, optional + The type of iterator to use for adding the data. Commonly used to manage large datasets, by default "v2". + iterator_options : dict, optional + Additional options for controlling the iteration process, by default None. + parent_container : {"acquisition", "processing/ophys"}, optional + Specifies the parent container to which the photon series should be added, either as part of "acquisition" or + under the "processing/ophys" module, by default "acquisition". + + Returns + ------- + NWBFile + The NWB file with the imaging data added + + """ add_devices_to_nwbfile(nwbfile=nwbfile, metadata=metadata) - add_photon_series_to_nwbfile( + nwbfile = add_photon_series_to_nwbfile( imaging=imaging, nwbfile=nwbfile, metadata=metadata, @@ -695,6 +724,8 @@ def add_imaging_to_nwbfile( parent_container=parent_container, ) + return nwbfile + def write_imaging( imaging: ImagingExtractor, @@ -1158,8 +1189,31 @@ def add_background_plane_segmentation_to_nwbfile( iterator_options: Optional[dict] = None, compression_options: Optional[dict] = None, # TODO: remove completely after 10/1/2024 ) -> NWBFile: - # TODO needs docstring + """ + Add background plane segmentation data from a SegmentationExtractor object to an NWBFile. + Parameters + ---------- + segmentation_extractor : SegmentationExtractor + The extractor object containing background segmentation data. + nwbfile : NWBFile + The NWB file to which the background plane segmentation will be added. + metadata : dict, optional + Metadata for the NWBFile, by default None. + background_plane_segmentation_name : str, optional + The name of the background PlaneSegmentation object to be added, by default None. + mask_type : str, optional + Type of mask to use for segmentation; options are "image", "pixel", or "voxel", by default "image". + iterator_options : dict, optional + Options for iterating over the segmentation data, by default None. 
+ compression_options : dict, optional + Deprecated: options for compression; will be removed after 2024-10-01, by default None. + + Returns + ------- + NWBFile + The NWBFile with the added background plane segmentation data. + """ # TODO: remove completely after 10/1/2024 if compression_options is not None: warnings.warn( @@ -1724,6 +1778,40 @@ def add_segmentation_to_nwbfile( iterator_options: Optional[dict] = None, compression_options: Optional[dict] = None, # TODO: remove completely after 10/1/2024 ) -> NWBFile: + """ + Add segmentation data from a SegmentationExtractor object to an NWBFile. + + Parameters + ---------- + segmentation_extractor : SegmentationExtractor + The extractor object containing segmentation data. + nwbfile : NWBFile + The NWB file where the segmentation data will be added. + metadata : dict, optional + Metadata for the NWBFile, by default None. + plane_segmentation_name : str, optional + The name of the PlaneSegmentation object to be added, by default None. + background_plane_segmentation_name : str, optional + The name of the background PlaneSegmentation, if any, by default None. + include_background_segmentation : bool, optional + If True, includes background plane segmentation, by default False. + include_roi_centroids : bool, optional + If True, includes the centroids of the regions of interest (ROIs), by default True. + include_roi_acceptance : bool, optional + If True, includes the acceptance status of ROIs, by default True. + mask_type : str, optional + Type of mask to use for segmentation; can be either "image" or "pixel", by default "image". + iterator_options : dict, optional + Options for iterating over the data, by default None. + compression_options : dict, optional + Deprecated: options for compression; will be removed after 2024-10-01, by default None. + + Returns + ------- + NWBFile + The NWBFile with the added segmentation data. 
+ """ + # TODO: remove completely after 10/1/2024 if compression_options is not None: warnings.warn( diff --git a/src/neuroconv/tools/spikeinterface/spikeinterface.py b/src/neuroconv/tools/spikeinterface/spikeinterface.py index 262e1eaa8..9ffe19c70 100644 --- a/src/neuroconv/tools/spikeinterface/spikeinterface.py +++ b/src/neuroconv/tools/spikeinterface/spikeinterface.py @@ -1991,7 +1991,7 @@ def add_sorting_analyzer_to_nwbfile( sorting_copy.set_property(prop, tm[prop]) add_electrodes_info_to_nwbfile(recording, nwbfile=nwbfile, metadata=metadata) - electrode_group_indices = get_electrode_group_indices(recording, nwbfile=nwbfile) + electrode_group_indices = _get_electrode_group_indices(recording, nwbfile=nwbfile) unit_electrode_indices = [electrode_group_indices] * len(sorting.unit_ids) add_units_table_to_nwbfile( @@ -2213,7 +2213,8 @@ def add_waveforms( ) -def get_electrode_group_indices(recording, nwbfile): +def _get_electrode_group_indices(recording, nwbfile): + """ """ if "group_name" in recording.get_property_keys(): group_names = list(np.unique(recording.get_property("group_name"))) elif "group" in recording.get_property_keys(): diff --git a/src/neuroconv/tools/testing/mock_probes.py b/src/neuroconv/tools/testing/mock_probes.py index 8b41d0f9c..f70f0dfeb 100644 --- a/src/neuroconv/tools/testing/mock_probes.py +++ b/src/neuroconv/tools/testing/mock_probes.py @@ -2,6 +2,17 @@ def generate_mock_probe(num_channels: int, num_shanks: int = 3): + """ + Generate a mock probe with specified number of channels and shanks. + + Parameters: + num_channels (int): The number of channels in the probe. + num_shanks (int, optional): The number of shanks in the probe. Defaults to 3. + + Returns: + pi.Probe: The generated mock probe. + + """ import probeinterface as pi # The shank ids will be 0, 0, 0, ..., 1, 1, 1, ..., 2, 2, 2, ... 
diff --git a/src/neuroconv/utils/json_schema.py b/src/neuroconv/utils/json_schema.py index 6c1ba7245..182558b98 100644 --- a/src/neuroconv/utils/json_schema.py +++ b/src/neuroconv/utils/json_schema.py @@ -298,7 +298,14 @@ def get_schema_from_hdmf_class(hdmf_class): return schema -def get_metadata_schema_for_icephys(): +def get_metadata_schema_for_icephys() -> dict: + """ + Returns the metadata schema for icephys data. + + Returns: + dict: The metadata schema for icephys data. + + """ schema = get_base_schema(tag="Icephys") schema["required"] = ["Device", "Electrodes"] schema["properties"] = dict( From 87c6cb535937d7dc5b79943f5fd5877055a0d0d2 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 19:21:43 -0600 Subject: [PATCH 02/18] changelog --- CHANGELOG.md | 2 +- pyproject.toml | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cd1580540..fed6de5ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,7 +12,7 @@ ### Improvements * Using ruff to enforce existence of public classes' docstrings [PR #1034](https://github.com/catalystneuro/neuroconv/pull/1034) * Separated tests that use external data by modality [PR #1049](https://github.com/catalystneuro/neuroconv/pull/1049) - +* Using ruff to enforce existence of public functions's docstrings [PR #1062](https://github.com/catalystneuro/neuroconv/pull/1062) ## v0.6.1 (August 30, 2024) diff --git a/pyproject.toml b/pyproject.toml index 6b49f8917..dc5af94cc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,7 @@ dependencies = [ "parse>=1.20.0", "click", "docstring-parser", - "packaging", # Issue 903 + "packaging" # Issue 903 ] @@ -57,12 +57,12 @@ dependencies = [ test = [ "pytest", "pytest-cov", - "ndx-events>=0.2.0", # for special tests to ensure load_namespaces is set to allow NWBFile load at all times + "ndx-events>=0.2.0", # for special tests to ensure load_namespaces is set to allow NWBFile load at all times 
"parameterized>=0.8.1", "ndx-miniscope", "spikeinterface[qualitymetrics]>=0.101.0", - "zarr<2.18.0", # Error with Blosc (read-only during decode) in numcodecs on May 7; check later if resolved - "pytest-xdist", + "zarr<2.18.0", # Error with Blosc (read-only during decode) in numcodecs on May 7; check later if resolved + "pytest-xdist" ] docs = [ @@ -72,9 +72,9 @@ docs = [ "readthedocs-sphinx-search==0.1.2", "sphinx-toggleprompt==0.2.0", "sphinx-copybutton==0.5.0", - "roiextractors", # Needed for the API documentation - "spikeinterface>=0.101.0", # Needed for the API documentation - "pydata_sphinx_theme==0.12.0", + "roiextractors", # Needed for the API documentation + "spikeinterface>=0.101.0", # Needed for the API documentation + "pydata_sphinx_theme==0.12.0" ] dandi = ["dandi>=0.58.1"] compressors = ["hdf5plugin"] From 862380674f2f41ce16da809ec8bfc12064204c8d Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 20:21:21 -0600 Subject: [PATCH 03/18] work in progress --- pyproject.toml | 3 +- .../tdt_fp/tdtfiberphotometrydatainterface.py | 2 + .../ophys/tiff/tiffdatainterface.py | 1 + .../text/timeintervalsinterface.py | 97 ++++++++++++++++++- src/neuroconv/nwbconverter.py | 27 +++++- src/neuroconv/tools/hdmf.py | 2 + src/neuroconv/tools/path_expansion.py | 2 +- .../tools/testing/mock_interfaces.py | 92 +++++++++++++++++- src/neuroconv/utils/dict.py | 17 ++++ 9 files changed, 233 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index dc5af94cc..d9ec5a782 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ doctest_optionflags = "ELLIPSIS" [tool.black] line-length = 120 -target-version = ['py38', 'py39', 'py310'] +target-version = ['py39', 'py310'] include = '\.pyi?$' extend-exclude = ''' /( @@ -124,6 +124,7 @@ select = [ "F401", # Unused import "I", # All isort rules "D101", # Missing docstring in public class + "D102", # Missing docstring in public method "D103", # Missing docstring in public function ] 
fixable = ["ALL"] diff --git a/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py b/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py index aa58f6ae4..f9198b105 100644 --- a/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py @@ -46,6 +46,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True): import ndx_fiber_photometry # noqa: F401 def get_metadata(self) -> DeepDict: + """Get metadata for the TDTFiberPhotometryInterface.""" metadata = super().get_metadata() tdt_photometry = self.load(evtype=["scalars"]) # This evtype quickly loads info without loading all the data. start_timestamp = tdt_photometry.info.start_date.timestamp() @@ -54,6 +55,7 @@ def get_metadata(self) -> DeepDict: return metadata def get_metadata_schema(self) -> dict: + """Get the metadata schema for the TDTFiberPhotometryInterface.""" metadata_schema = super().get_metadata_schema() return metadata_schema diff --git a/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py index 1eaa3b55e..ce98561de 100644 --- a/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py @@ -14,6 +14,7 @@ class TiffImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """ "Get the source schema for the TIFF imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to Tiff file." 
return source_schema diff --git a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py index 5f5b1107d..505868b2a 100644 --- a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py +++ b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py @@ -24,21 +24,33 @@ def __init__( verbose: bool = True, ): """ + Initialize the TimeIntervalsInterface. + Parameters ---------- file_path : FilePath + The path to the file containing time intervals data. read_kwargs : dict, optional - verbose : bool, default: True + Additional arguments for reading the file, by default None. + verbose : bool, optional + If True, provides verbose output, by default True. """ read_kwargs = read_kwargs or dict() super().__init__(file_path=file_path) self.verbose = verbose - self._read_kwargs = read_kwargs self.dataframe = self._read_file(file_path, **read_kwargs) self.time_intervals = None def get_metadata(self) -> dict: + """ + Get metadata for the time intervals. + + Returns + ------- + dict + Metadata dictionary with information about the time intervals table. + """ metadata = super().get_metadata() metadata["TimeIntervals"] = dict( trials=dict( @@ -50,22 +62,74 @@ def get_metadata(self) -> dict: return metadata def get_metadata_schema(self) -> dict: + """ + Get the metadata schema for the time intervals. + + Returns + ------- + dict + The schema dictionary for time intervals metadata. + """ fpath = Path(__file__).parent.parent.parent / "schemas" / "timeintervals_schema.json" return load_dict_from_file(fpath) def get_original_timestamps(self, column: str) -> np.ndarray: + """ + Get the original timestamps for a given column. + + Parameters + ---------- + column : str + The name of the column containing timestamps. + + Returns + ------- + np.ndarray + The original timestamps from the specified column. + + Raises + ------ + ValueError + If the column name does not end with '_time'. 
+ """ if not column.endswith("_time"): raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") return self._read_file(**self.source_data, **self._read_kwargs)[column].values def get_timestamps(self, column: str) -> np.ndarray: + """ + Get the current timestamps for a given column. + + Parameters + ---------- + column : str + The name of the column containing timestamps. + + Returns + ------- + np.ndarray + The current timestamps from the specified column. + + Raises + ------ + ValueError + If the column name does not end with '_time'. + """ if not column.endswith("_time"): raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") return self.dataframe[column].values def set_aligned_starting_time(self, aligned_starting_time: float): + """ + Align the starting time by shifting all timestamps by the given value. + + Parameters + ---------- + aligned_starting_time : float + The aligned starting time to shift all timestamps by. + """ timing_columns = [column for column in self.dataframe.columns if column.endswith("_time")] for column in timing_columns: @@ -74,6 +138,23 @@ def set_aligned_starting_time(self, aligned_starting_time: float): def set_aligned_timestamps( self, aligned_timestamps: np.ndarray, column: str, interpolate_other_columns: bool = False ): + """ + Set aligned timestamps for the given column and optionally interpolate other columns. + + Parameters + ---------- + aligned_timestamps : np.ndarray + The aligned timestamps to set for the given column. + column : str + The name of the column to update with the aligned timestamps. + interpolate_other_columns : bool, optional + If True, interpolate the timestamps in other columns, by default False. + + Raises + ------ + ValueError + If the column name does not end with '_time'. 
+ """ if not column.endswith("_time"): raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") @@ -96,6 +177,18 @@ def set_aligned_timestamps( ) def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray, column: str): + """ + Align timestamps using linear interpolation. + + Parameters + ---------- + unaligned_timestamps : np.ndarray + The original unaligned timestamps that map to the aligned timestamps. + aligned_timestamps : np.ndarray + The target aligned timestamps corresponding to the unaligned timestamps. + column : str + The name of the column containing the timestamps to be aligned. + """ current_timestamps = self.get_timestamps(column=column) assert ( current_timestamps[1] >= unaligned_timestamps[0] diff --git a/src/neuroconv/nwbconverter.py b/src/neuroconv/nwbconverter.py index 1f3e7c9f8..7a6917ff5 100644 --- a/src/neuroconv/nwbconverter.py +++ b/src/neuroconv/nwbconverter.py @@ -166,13 +166,34 @@ def create_nwbfile(self, metadata: Optional[dict] = None, conversion_options: Op self.add_to_nwbfile(nwbfile=nwbfile, metadata=metadata, conversion_options=conversion_options) return nwbfile - def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optional[dict] = None) -> None: + def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optional[dict] = None) -> NWBFile: + """ + Add data from the instantiated data interfaces to the given NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWB file object to which the data from the data interfaces will be added. + metadata : dict + The metadata dictionary that contains information used to describe the data. + conversion_options : dict, optional + A dictionary containing conversion options for each interface, where non-default behavior is requested. + Each key corresponds to a data interface name, and the values are dictionaries with options for that interface. + By default, None. 
+ + Returns + ------- + nwbfile : NWBFile + The NWB file object with the data from the data interfaces added to it. + """ conversion_options = conversion_options or dict() for interface_name, data_interface in self.data_interface_objects.items(): data_interface.add_to_nwbfile( nwbfile=nwbfile, metadata=metadata, **conversion_options.get(interface_name, dict()) ) + return nwbfile + def run_conversion( self, nwbfile_path: Optional[FilePath] = None, @@ -283,11 +304,11 @@ class ConverterPipe(NWBConverter): """Takes a list or dict of pre-initialized interfaces as arguments to build an NWBConverter class.""" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa D102 raise NotImplementedError("Source data not available with previously initialized classes.") @classmethod - def validate_source(cls): + def validate_source(cls): # noqa D102 raise NotImplementedError("Source data not available with previously initialized classes.") def __init__(self, data_interfaces: Union[list[BaseDataInterface], dict[str, BaseDataInterface]], verbose=True): diff --git a/src/neuroconv/tools/hdmf.py b/src/neuroconv/tools/hdmf.py index 660971df5..164e3ba85 100644 --- a/src/neuroconv/tools/hdmf.py +++ b/src/neuroconv/tools/hdmf.py @@ -50,6 +50,8 @@ def estimate_default_chunk_shape(chunk_mb: float, maxshape: tuple[int, ...], dty def estimate_default_buffer_shape( buffer_gb: float, chunk_shape: tuple[int, ...], maxshape: tuple[int, ...], dtype: np.dtype ) -> tuple[int, ...]: + """ "Add docstring to this""" + # Elevate any overflow warnings to trigger error. # This is usually an indicator of something going terribly wrong with the estimation calculations and should be # avoided at all costs. 
diff --git a/src/neuroconv/tools/path_expansion.py b/src/neuroconv/tools/path_expansion.py index 4ab839c0f..9a085c7ba 100644 --- a/src/neuroconv/tools/path_expansion.py +++ b/src/neuroconv/tools/path_expansion.py @@ -143,7 +143,7 @@ class LocalPathExpander(AbstractPathExpander): See https://neuroconv.readthedocs.io/en/main/user_guide/expand_path.html for more information. """ - def list_directory(self, base_directory: DirectoryPath) -> Iterable[FilePath]: # noqa: D101 + def list_directory(self, base_directory: DirectoryPath) -> Iterable[FilePath]: # noqa: D101,D102 base_directory = Path(base_directory) assert base_directory.is_dir(), f"The specified 'base_directory' ({base_directory}) is not a directory!" return (str(path.relative_to(base_directory)) for path in base_directory.rglob("*")) diff --git a/src/neuroconv/tools/testing/mock_interfaces.py b/src/neuroconv/tools/testing/mock_interfaces.py index f05228b34..1d2b7de01 100644 --- a/src/neuroconv/tools/testing/mock_interfaces.py +++ b/src/neuroconv/tools/testing/mock_interfaces.py @@ -24,37 +24,84 @@ class MockBehaviorEventInterface(BaseTemporalAlignmentInterface): @classmethod def get_source_schema(cls) -> dict: + """ + Get the schema for the data source, excluding the 'event_times' parameter. + + Returns + ------- + dict + The schema dictionary for the data source, including additional properties for flexibility. + """ source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["event_times"]) source_schema["additionalProperties"] = True return source_schema def __init__(self, event_times: Optional[ArrayType] = None): """ - Define event times for some behavior. + Initialize the interface with event times for behavior. Parameters ---------- event_times : list of floats, optional The event times to set as timestamps for this interface. - The default is the array [1.2, 2.3, 3.4] for similarity to the timescale of the MockSpikeGLXNIDQInterface. 
+ The default is the array [1.2, 2.3, 3.4] to simulate a time series similar to the + MockSpikeGLXNIDQInterface. """ event_times = event_times or [1.2, 2.3, 3.4] self.event_times = np.array(event_times) self.original_event_times = np.array(event_times) # Make a copy of the initial loaded timestamps def get_original_timestamps(self) -> np.ndarray: + """ + Get the original event times before any alignment or transformation. + + Returns + ------- + np.ndarray + The original event times as a NumPy array. + """ return self.original_event_times def get_timestamps(self) -> np.ndarray: + """ + Get the current (possibly aligned) event times. + + Returns + ------- + np.ndarray + The current event times as a NumPy array, possibly modified after alignment. + """ return self.event_times def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + """ + Set the event times after alignment. + + Parameters + ---------- + aligned_timestamps : np.ndarray + The aligned event timestamps to update the internal event times. + """ self.event_times = aligned_timestamps def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): + """ + Add the event times to an NWBFile as a DynamicTable. + + Parameters + ---------- + nwbfile : NWBFile + The NWB file to which the event times will be added. + metadata : dict + Metadata to describe the event times in the NWB file. + + Notes + ----- + This method creates a DynamicTable to store event times and adds it to the NWBFile's acquisition. 
+ """ table = DynamicTable(name="BehaviorEvents", description="Times of various classified behaviors.") table.add_column(name="event_time", description="Time of each event.") - for timestamp in self.get_timestamps(): # adding data by column gives error + for timestamp in self.get_timestamps(): table.add_row(event_time=timestamp) nwbfile.add_acquisition(table) @@ -68,6 +115,9 @@ class MockSpikeGLXNIDQInterface(SpikeGLXNIDQInterface): @classmethod def get_source_schema(cls) -> dict: + """ + Get the source schema for the mock SpikeGLX interface. + """ source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["ttl_times"]) source_schema["additionalProperties"] = True return source_schema @@ -151,6 +201,9 @@ def __init__( ) def get_metadata(self) -> dict: + """ + Returns the metadata dictionary for the current object. + """ metadata = super().get_metadata() session_start_time = datetime.now().astimezone() metadata["NWBFile"]["session_start_time"] = session_start_time @@ -172,6 +225,25 @@ def __init__( verbose: bool = True, photon_series_type: Literal["OnePhotonSeries", "TwoPhotonSeries"] = "TwoPhotonSeries", ): + """ + Parameters + ---------- + num_frames : int, optional + Number of frames in the imaging data. Default is 30. + num_rows : int, optional + Number of rows in the imaging data. Default is 10. + num_columns : int, optional + Number of columns in the imaging data. Default is 10. + sampling_frequency : float, optional + Sampling frequency of the imaging data. Default is 30. + dtype : str, optional + Data type of the imaging data. Default is "uint16". + verbose : bool, optional + Whether to print verbose output. Default is True. + photon_series_type : {"OnePhotonSeries", "TwoPhotonSeries"}, optional + Type of photon series. Default is "TwoPhotonSeries". 
+ + """ from roiextractors.testing import generate_dummy_imaging_extractor self.imaging_extractor = generate_dummy_imaging_extractor( @@ -186,6 +258,20 @@ def __init__( self.photon_series_type = photon_series_type def get_metadata(self, photon_series_type: Optional[Literal["OnePhotonSeries", "TwoPhotonSeries"]] = None) -> dict: + """ + Get the metadata for the imaging interface. + + Parameters + ---------- + photon_series_type : Literal["OnePhotonSeries", "TwoPhotonSeries"], optional + The type of photon series to include in the metadata. + If not specified, all photon series will be included. + + Returns + ------- + dict + The metadata for the imaging interface. + """ session_start_time = datetime.now().astimezone() metadata = super().get_metadata(photon_series_type=photon_series_type) metadata["NWBFile"]["session_start_time"] = session_start_time diff --git a/src/neuroconv/utils/dict.py b/src/neuroconv/utils/dict.py index f0507b653..a6cef630a 100644 --- a/src/neuroconv/utils/dict.py +++ b/src/neuroconv/utils/dict.py @@ -209,12 +209,29 @@ class DeepDict(defaultdict): """A defaultdict of defaultdicts""" def __init__(self, *args: Any, **kwargs: Any) -> None: + """A defaultdict of defaultdicts""" super().__init__(lambda: DeepDict(), *args, **kwargs) for key, value in self.items(): if isinstance(value, dict): self[key] = DeepDict(value) def deep_update(self, other: Optional[Union[dict, "DeepDict"]] = None, **kwargs) -> None: + """ + Recursively update the DeepDict with another dictionary or DeepDict. + + Parameters + ---------- + other : dict or DeepDict, optional + The dictionary or DeepDict to update the current instance with. + **kwargs : Any + Additional keyword arguments representing key-value pairs to update the DeepDict. + + Notes + ----- + For any keys that exist in both the current instance and the provided dictionary, the values are merged + recursively if both are dictionaries. 
Otherwise, the value from `other` or `kwargs` will overwrite the + existing value. + """ for key, value in (other or kwargs).items(): if key in self and isinstance(self[key], dict) and isinstance(value, dict): self[key].deep_update(value) From e8a2ccd00aca22fd6829ecced14b7b71d58c04df Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 20:36:03 -0600 Subject: [PATCH 04/18] work in progress --- pyproject.toml | 2 +- .../ophys/brukertiff/brukertiffconverter.py | 19 ++++++++++ .../brukertiff/brukertiffdatainterface.py | 6 ++++ .../ophys/caiman/caimandatainterface.py | 1 + .../micromanagertiffdatainterface.py | 2 ++ .../ophys/miniscope/miniscopeconverter.py | 36 +++++++++++++++++++ .../miniscopeimagingdatainterface.py | 21 +++++++++++ .../ophys/sbx/sbxdatainterface.py | 1 + .../scanimage/scanimageimaginginterfaces.py | 12 +++++-- .../ophys/suite2p/suite2pdatainterface.py | 31 ++++++++++++++-- 10 files changed, 126 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d9ec5a782..0ac45727c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,7 +124,7 @@ select = [ "F401", # Unused import "I", # All isort rules "D101", # Missing docstring in public class - "D102", # Missing docstring in public method + # "D102", # Missing docstring in public method "D103", # Missing docstring in public function ] fixable = ["ALL"] diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py index 86e8edc1f..b0abbd887 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py @@ -138,6 +138,7 @@ class BrukerTiffSinglePlaneConverter(NWBConverter): @classmethod def get_source_schema(cls): + """Get the source schema for the Bruker imaging interface.""" return get_schema_from_method_signature(cls) def get_conversion_options_schema(self): @@ -205,6 +206,24 @@ 
def run_conversion( stub_test: bool = False, stub_frames: int = 100, ) -> None: + """ + Run the NWB conversion process for all instantiated data interfaces. + + Parameters + ---------- + nwbfile_path : FilePath, optional + The file path where the NWB file will be written. If None, the file is handled in-memory. + nwbfile : NWBFile, optional + An existing in-memory NWBFile object. If None, a new NWBFile object will be created. + metadata : dict, optional + Metadata dictionary used to create or validate the NWBFile. If None, metadata is automatically generated. + overwrite : bool, optional + If True, the NWBFile at `nwbfile_path` is overwritten if it exists. If False (default), data is appended. + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) is used for testing purposes. By default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True. By default 100. + """ if metadata is None: metadata = self.get_metadata() diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py index 9742711e1..464b039be 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py @@ -16,6 +16,7 @@ class BrukerTiffMultiPlaneImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Bruker TIFF imaging data.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -28,6 +29,7 @@ def get_streams( folder_path: DirectoryPath, plane_separation_type: Literal["contiguous", "disjoint"] = None, ) -> dict: + """get streams for the Bruker TIFF imaging data.""" from roiextractors import BrukerTiffMultiPlaneImagingExtractor streams = 
BrukerTiffMultiPlaneImagingExtractor.get_streams(folder_path=folder_path) @@ -117,6 +119,7 @@ def _determine_position_current(self) -> list[float]: return position_values def get_metadata(self) -> DeepDict: + """get metadata for the Bruker TIFF imaging data.""" metadata = super().get_metadata() xml_metadata = self.imaging_extractor.xml_metadata @@ -183,6 +186,7 @@ class BrukerTiffSinglePlaneImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Bruker TIFF imaging data.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -191,6 +195,7 @@ def get_source_schema(cls) -> dict: @classmethod def get_streams(cls, folder_path: DirectoryPath) -> dict: + """get streams for the Bruker TIFF imaging data.""" from roiextractors import BrukerTiffMultiPlaneImagingExtractor streams = BrukerTiffMultiPlaneImagingExtractor.get_streams(folder_path=folder_path) @@ -263,6 +268,7 @@ def _determine_position_current(self) -> list[float]: return position_values def get_metadata(self) -> DeepDict: + """get metadata for the Bruker TIFF imaging data.""" metadata = super().get_metadata() xml_metadata = self.imaging_extractor.xml_metadata diff --git a/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py b/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py index 386c03d3c..802645139 100644 --- a/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py @@ -12,6 +12,7 @@ class CaimanSegmentationInterface(BaseSegmentationExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Caiman segmentation interface.""" source_metadata = super().get_source_schema() source_metadata["properties"]["file_path"]["description"] = "Path to .hdf5 file." 
return source_metadata diff --git a/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py index 17cbc95ed..5373b7004 100644 --- a/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py @@ -13,6 +13,7 @@ class MicroManagerTiffImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """get the source schema for the Micro-Manager TIFF imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "The folder containing the OME-TIF image files." @@ -37,6 +38,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True): self.imaging_extractor._channel_names = [f"OpticalChannel{channel_name}"] def get_metadata(self) -> dict: + """Get metadata for the Micro-Manager TIFF imaging data.""" metadata = super().get_metadata() micromanager_metadata = self.imaging_extractor.micromanager_metadata diff --git a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py index cfee8f027..38b620738 100644 --- a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py +++ b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py @@ -19,6 +19,7 @@ class MiniscopeConverter(NWBConverter): @classmethod def get_source_schema(cls): + """Get the source schema for the Miniscope converter.""" source_schema = get_schema_from_method_signature(cls) source_schema["properties"]["folder_path"]["description"] = "The path to the main Miniscope folder." 
return source_schema @@ -61,6 +62,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True): ) def get_conversion_options_schema(self) -> dict: + """get the conversion options schema.""" return self.data_interface_objects["MiniscopeImaging"].get_conversion_options_schema() def add_to_nwbfile( @@ -70,6 +72,21 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add Miniscope imaging and behavioral camera data to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the imaging and behavioral data will be added. + metadata : dict + Metadata dictionary containing information about the imaging and behavioral recordings. + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + """ self.data_interface_objects["MiniscopeImaging"].add_to_nwbfile( nwbfile=nwbfile, metadata=metadata, @@ -90,6 +107,25 @@ def run_conversion( stub_test: bool = False, stub_frames: int = 100, ) -> None: + """ + Run the NWB conversion process for the instantiated data interfaces. + + Parameters + ---------- + nwbfile_path : str, optional + Path where the NWBFile will be written. If None, the file is handled in-memory. + nwbfile : NWBFile, optional + An in-memory NWBFile object to be written to the file. If None, a new NWBFile is created. + metadata : dict, optional + Metadata dictionary with information to create the NWBFile. If None, metadata is auto-generated. + overwrite : bool, optional + If True, overwrites the existing NWBFile at `nwbfile_path`. If False (default), data is appended. + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) is written for testing purposes, + by default False. 
+ stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + """ if metadata is None: metadata = self.get_metadata() diff --git a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py index 64a180c46..b932159f1 100644 --- a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py @@ -19,6 +19,7 @@ class MiniscopeImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Miniscope imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -49,6 +50,7 @@ def __init__(self, folder_path: DirectoryPath): self.photon_series_type = "OnePhotonSeries" def get_metadata(self) -> DeepDict: + """Get metadata for the Miniscope imaging data.""" from ....tools.roiextractors import get_nwb_imaging_metadata metadata = super().get_metadata() @@ -74,11 +76,13 @@ def get_metadata(self) -> DeepDict: return metadata def get_metadata_schema(self) -> dict: + """Get the metadata schema for the Miniscope imaging data.""" metadata_schema = super().get_metadata_schema() metadata_schema["properties"]["Ophys"]["definitions"]["Device"]["additionalProperties"] = True return metadata_schema def get_original_timestamps(self) -> np.ndarray: + """Get the original timestamps from the Miniscope data.""" from ndx_miniscope.utils import get_timestamps timestamps = get_timestamps(folder_path=self.source_data["folder_path"]) @@ -92,6 +96,23 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add imaging data to the specified NWBFile, including device and photon series information. 
+ + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the imaging data will be added. + metadata : dict, optional + Metadata containing information about the imaging device and photon series. If None, default metadata is used. + photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional + The type of photon series to be added, either "TwoPhotonSeries" or "OnePhotonSeries", by default "OnePhotonSeries". + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include if `stub_test` is True, by default 100. + """ from ndx_miniscope.utils import add_miniscope_device from ....tools.roiextractors import add_photon_series_to_nwbfile diff --git a/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py b/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py index 5b921f6f3..36caba14c 100644 --- a/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py @@ -37,6 +37,7 @@ def __init__( ) def get_metadata(self) -> dict: + """Get metadata for the Scanbox imaging data.""" metadata = super().get_metadata() metadata["Ophys"]["Device"][0]["description"] = "Scanbox imaging" return metadata diff --git a/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py b/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py index 09e8f86d3..d7f0af51b 100644 --- a/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py +++ b/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py @@ -28,12 +28,13 @@ class ScanImageImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the ScanImage imaging interface.""" source_schema = super().get_source_schema() 
         source_schema["properties"]["file_path"]["description"] = "Path to Tiff file."
         return source_schema
 
     @validate_call
-    def __new__(
+    def __new__(  # noqa: D102
         cls,
         file_path: FilePath,
         channel_name: Optional[str] = None,
@@ -87,6 +88,7 @@ class ScanImageLegacyImagingInterface(BaseImagingExtractorInterface):
 
     @classmethod
     def get_source_schema(cls) -> dict:
+        """Get the source schema for the ScanImage legacy imaging interface."""
         source_schema = super().get_source_schema()
         source_schema["properties"]["file_path"]["description"] = "Path to Tiff file."
         return source_schema
@@ -131,6 +133,7 @@ def __init__(
         super().__init__(file_path=file_path, sampling_frequency=sampling_frequency, verbose=verbose)
 
     def get_metadata(self) -> dict:
+        """Get metadata for the ScanImage imaging data."""
         device_number = 0  # Imaging plane metadata is a list with metadata for each plane
         metadata = super().get_metadata()
@@ -166,12 +169,13 @@ class ScanImageMultiFileImagingInterface(BaseImagingExtractorInterface):
 
     @classmethod
     def get_source_schema(cls) -> dict:
+        """Get the source schema for the ScanImage multi-file imaging interface."""
         source_schema = super().get_source_schema()
         source_schema["properties"]["folder_path"]["description"] = "Path to the folder containing the TIFF files."
return source_schema @validate_call - def __new__( + def __new__( # noqa: D102 cls, folder_path: DirectoryPath, file_pattern: str, @@ -287,6 +291,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( @@ -404,6 +409,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( @@ -522,6 +528,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( @@ -651,6 +658,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( diff --git a/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py b/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py index 056616ce5..d9065a644 100644 --- a/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py @@ -50,6 +50,7 @@ class Suite2pSegmentationInterface(BaseSegmentationExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Suite2p segmentation interface.""" schema = super().get_source_schema() schema["properties"]["folder_path"][ "description" @@ -61,13 +62,13 @@ def get_source_schema(cls) -> dict: return schema @classmethod - def get_available_planes(cls, folder_path: DirectoryPath) -> dict: + def get_available_planes(cls, folder_path: DirectoryPath) -> dict: # noqa: D102 from roiextractors import Suite2pSegmentationExtractor return Suite2pSegmentationExtractor.get_available_planes(folder_path=folder_path) @classmethod - def 
get_available_channels(cls, folder_path: DirectoryPath) -> dict: + def get_available_channels(cls, folder_path: DirectoryPath) -> dict: # noqa: D102 from roiextractors import Suite2pSegmentationExtractor return Suite2pSegmentationExtractor.get_available_channels(folder_path=folder_path) @@ -113,6 +114,7 @@ def __init__( self.verbose = verbose def get_metadata(self) -> DeepDict: + """get metadata for the Suite2p segmentation data""" metadata = super().get_metadata() # No need to update the metadata links for the default plane segmentation name @@ -140,6 +142,31 @@ def add_to_nwbfile( iterator_options: Optional[dict] = None, compression_options: Optional[dict] = None, ): + """ + Add segmentation data to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the segmentation data will be added. + metadata : dict, optional + Metadata containing information about the segmentation. If None, default metadata is used. + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + include_roi_centroids : bool, optional + Whether to include the centroids of regions of interest (ROIs) in the data, by default True. + include_roi_acceptance : bool, optional + Whether to include acceptance status of ROIs, by default True. + mask_type : str, optional + The type of mask used for segmentation, either "image", "pixel", or "voxel", by default "image". + plane_segmentation_name : str, optional + The name of the plane segmentation object, by default None. + iterator_options : dict, optional + Additional options for iterating over the data, by default None. 
+ """ super().add_to_nwbfile( nwbfile=nwbfile, metadata=metadata, From 413593936067f78864bc15692d59c03e79877859 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 20:49:56 -0600 Subject: [PATCH 05/18] noqa in proress --- pyproject.toml | 2 +- .../neuroscope/neuroscopedatainterface.py | 4 +- .../openephys/openephysbinarydatainterface.py | 4 +- .../openephys/openephysdatainterface.py | 4 +- .../openephys/openephyslegacydatainterface.py | 4 +- .../ecephys/phy/phydatainterface.py | 2 +- .../ecephys/plexon/plexondatainterface.py | 12 ++--- .../ecephys/spike2/spike2datainterface.py | 2 +- .../spikegadgets/spikegadgetsdatainterface.py | 2 +- .../ecephys/spikeglx/spikeglxconverter.py | 6 +-- .../ecephys/spikeglx/spikeglxdatainterface.py | 6 +-- .../ecephys/spikeglx/spikeglxnidqinterface.py | 4 +- .../icephys/abf/abfdatainterface.py | 6 +-- .../icephys/baseicephysinterface.py | 16 +++--- .../ophys/baseimagingextractorinterface.py | 48 ++++++++++++++++- .../basesegmentationextractorinterface.py | 28 ++++++++-- .../ophys/brukertiff/brukertiffconverter.py | 51 +++++++++++++++++++ 17 files changed, 159 insertions(+), 42 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0ac45727c..c80784183 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,7 +124,7 @@ select = [ "F401", # Unused import "I", # All isort rules "D101", # Missing docstring in public class - # "D102", # Missing docstring in public method +# "D102", # Missing docstring in public method "D103", # Missing docstring in public function ] fixable = ["ALL"] diff --git a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py index d68532a94..68642c60d 100644 --- a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py @@ -253,7 +253,7 @@ class 
NeuroScopeSortingInterface(BaseSortingExtractorInterface): info = "Interface for converting NeuroScope recording data." @classmethod - def get_source_schema(self) -> dict: + def get_source_schema(self) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Path to folder containing .res and .clu files." source_schema["properties"]["keep_mua_units"][ @@ -300,7 +300,7 @@ def __init__( verbose=verbose, ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() session_path = Path(self.source_data["folder_path"]) session_id = session_path.stem diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py index 371b96f94..c012a10f4 100644 --- a/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py @@ -20,7 +20,7 @@ class OpenEphysBinaryRecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "OpenEphysBinaryRecordingExtractor" @classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import OpenEphysBinaryRecordingExtractor stream_names, _ = OpenEphysBinaryRecordingExtractor.get_streams(folder_path=folder_path) @@ -86,7 +86,7 @@ def __init__( if stub_test: self.subset_channels = [0, 1] - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ._openephys_utils import _get_session_start_time metadata = super().get_metadata() diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py index 81b84c36c..fe6df75c1 100644 --- 
a/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py @@ -18,7 +18,7 @@ class OpenEphysRecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "OpenEphysBinaryRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -26,7 +26,7 @@ def get_source_schema(cls) -> dict: return source_schema @classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 if any(Path(folder_path).rglob("*.continuous")): return OpenEphysLegacyRecordingInterface.get_stream_names(folder_path=folder_path) elif any(Path(folder_path).rglob("*.dat")): diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py index b3392d2db..2272ef753 100644 --- a/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py @@ -19,7 +19,7 @@ class OpenEphysLegacyRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for converting legacy OpenEphys recording data." 
@classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import OpenEphysLegacyRecordingExtractor stream_names, _ = OpenEphysLegacyRecordingExtractor.get_streams(folder_path=folder_path) @@ -77,7 +77,7 @@ def __init__( folder_path=folder_path, stream_name=stream_name, block_index=block_index, verbose=verbose, es_key=es_key ) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() neo_reader = self.recording_extractor.neo_reader diff --git a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py index 07324b602..9d885182d 100644 --- a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py @@ -16,7 +16,7 @@ class PhySortingInterface(BaseSortingExtractorInterface): info = "Interface for Phy sorting data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["exclude_cluster_groups"]["items"] = dict(type="string") source_schema["properties"]["folder_path"][ diff --git a/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py b/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py index 4a6a50fa5..137e4a496 100644 --- a/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py @@ -19,7 +19,7 @@ class PlexonRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Plexon recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .plx file." 
return source_schema @@ -39,7 +39,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele """ super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() neo_reader = self.recording_extractor.neo_reader @@ -64,7 +64,7 @@ class Plexon2RecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Plexon2 recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .pl2 file." return source_schema @@ -92,7 +92,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele all_annotations=True, ) - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() neo_reader = self.recording_extractor.neo_reader @@ -116,7 +116,7 @@ class PlexonSortingInterface(BaseSortingExtractorInterface): info = "Interface for Plexon sorting data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the plexon spiking data (.plx file)." 
return source_schema @@ -135,7 +135,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True): """ super().__init__(file_path=file_path, verbose=verbose) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() neo_reader = self.sorting_extractor.neo_reader diff --git a/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py b/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py index ccd98a369..d1a924306 100644 --- a/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py @@ -28,7 +28,7 @@ class Spike2RecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "CedRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["smrx_channel_ids"]) source_schema.update(additionalProperties=True) source_schema["properties"]["file_path"].update(description="Path to .smrx file.") diff --git a/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py index b8b483dd0..d732accaa 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py @@ -17,7 +17,7 @@ class SpikeGadgetsRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for SpikeGadgets recording data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_json_schema_from_method_signature(cls, exclude=["source_data"]) source_schema["properties"]["file_path"].update(description="Path to SpikeGadgets (.rec) file.") return source_schema diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py index 6aeb36cec..2fccdb8ce 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py @@ -22,13 +22,13 @@ class SpikeGLXConverterPipe(ConverterPipe): info = "Converter for multi-stream SpikeGLX recording data." @classmethod - def get_source_schema(cls): + def get_source_schema(cls): # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["streams"]) source_schema["properties"]["folder_path"]["description"] = "Path to the folder containing SpikeGLX streams." 
return source_schema @classmethod - def get_streams(cls, folder_path: DirectoryPath) -> list[str]: + def get_streams(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import SpikeGLXRecordingExtractor return SpikeGLXRecordingExtractor.get_streams(folder_path=folder_path)[0] @@ -86,7 +86,7 @@ def __init__( super().__init__(data_interfaces=data_interfaces, verbose=verbose) - def get_conversion_options_schema(self) -> dict: + def get_conversion_options_schema(self) -> dict: # noqa: D102 conversion_options_schema = super().get_conversion_options_schema() conversion_options_schema["properties"].update( {name: interface.get_conversion_options_schema() for name, interface in self.data_interface_objects.items()} diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py index d45a7f946..480b26cb2 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py @@ -37,7 +37,7 @@ class SpikeGLXRecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "SpikeGLXRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["x_pitch", "y_pitch"]) source_schema["properties"]["file_path"]["description"] = "Path to SpikeGLX ap.bin or lf.bin file." 
return source_schema @@ -82,7 +82,7 @@ def __init__( # Set electrodes properties add_recording_extractor_properties(self.recording_extractor) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() session_start_time = get_session_start_time(self.meta) if session_start_time: @@ -122,7 +122,7 @@ def get_metadata(self) -> dict: return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 new_recording = self.get_extractor()( folder_path=self.source_data["folder_path"], stream_id=self.source_data["stream_id"] ) # TODO: add generic method for aliasing from NeuroConv signature to SI init diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py index 42dad773d..2406b47c7 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py @@ -20,7 +20,7 @@ class SpikeGLXNIDQInterface(BaseRecordingExtractorInterface): ExtractorName = "SpikeGLXRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["x_pitch", "y_pitch"]) source_schema["properties"]["file_path"]["description"] = "Path to SpikeGLX .nidq file." 
return source_schema @@ -65,7 +65,7 @@ def __init__( ) self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, "nidq")]["meta"] - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() session_start_time = get_session_start_time(self.meta) diff --git a/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py b/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py index 535381466..4386a310e 100644 --- a/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py +++ b/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py @@ -35,7 +35,7 @@ class AbfInterface(BaseIcephysInterface): ExtractorName = "AxonIO" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_paths"] = dict( type="array", @@ -76,7 +76,7 @@ def __init__( icephys_metadata_file_path=icephys_metadata_file_path, ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ....tools.neo import get_number_of_electrodes, get_number_of_segments metadata = super().get_metadata() @@ -158,7 +158,7 @@ def get_metadata(self) -> dict: return metadata - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 for reader in self.readers_list: number_of_segments = reader.header["nb_segment"][0] for segment_index in range(number_of_segments): diff --git a/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py b/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py index 76c9bf840..0dcac9813 100644 --- a/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py +++ b/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py @@ -21,7 +21,7 @@ class BaseIcephysInterface(BaseExtractorInterface): ExtractorModuleName = "neo" @classmethod - def get_source_schema(cls) -> dict: + 
def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=[]) return source_schema @@ -53,14 +53,14 @@ def __init__(self, file_paths: list[FilePath]): self._timestamps = None - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() if self.DandiIcephysMetadata is not None: metadata_schema["properties"]["ndx-dandi-icephys"] = get_schema_from_hdmf_class(self.DandiIcephysMetadata) metadata_schema["properties"]["Icephys"] = get_metadata_schema_for_icephys() return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ...tools.neo import get_number_of_electrodes metadata = super().get_metadata() @@ -73,19 +73,19 @@ def get_metadata(self) -> dict: ) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError("Icephys interfaces do not yet support timestamps.") - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError("Icephys interfaces do not yet support timestamps.") - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("Icephys interfaces do not yet support timestamps.") - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 raise NotImplementedError("This icephys interface has not specified the method for aligning starting time.") - def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("Icephys interfaces do not 
yet support timestamps.") def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py index 548b57d0c..3cb710891 100644 --- a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py @@ -48,6 +48,17 @@ def __init__( def get_metadata_schema( self, photon_series_type: Optional[Literal["OnePhotonSeries", "TwoPhotonSeries"]] = None ) -> dict: + """ + Retrieve the metadata schema for the optical physiology (Ophys) data, with optional handling of photon series type. + + Parameters + ---------- + photon_series_type : {"OnePhotonSeries", "TwoPhotonSeries"}, optional + The type of photon series to include in the schema. If None, the value from the instance is used. + This argument is deprecated and will be removed in a future version. Set `photon_series_type` during + the initialization of the `BaseImagingExtractorInterface` instance. + + """ if photon_series_type is not None: warnings.warn( @@ -102,6 +113,16 @@ def get_metadata_schema( def get_metadata( self, photon_series_type: Optional[Literal["OnePhotonSeries", "TwoPhotonSeries"]] = None ) -> DeepDict: + """ + Retrieve the metadata for the imaging data, with optional handling of photon series type. + + Parameters + ---------- + photon_series_type : {"OnePhotonSeries", "TwoPhotonSeries"}, optional + The type of photon series to include in the metadata. If None, the value from the instance is used. + This argument is deprecated and will be removed in a future version. Instead, set `photon_series_type` + during the initialization of the `BaseImagingExtractorInterface` instance. 
+ """ if photon_series_type is not None: warnings.warn( @@ -127,14 +148,15 @@ def get_metadata( two_photon_series["rate"] = float(two_photon_series["rate"]) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 reinitialized_extractor = self.get_extractor()(**self.source_data) return reinitialized_extractor.frame_to_time(frames=np.arange(stop=reinitialized_extractor.get_num_frames())) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 return self.imaging_extractor.frame_to_time(frames=np.arange(stop=self.imaging_extractor.get_num_frames())) def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + """Replace all timestamps for this interface with those aligned to the common session start time.""" self.imaging_extractor.set_times(times=aligned_timestamps) def add_to_nwbfile( @@ -147,6 +169,28 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add imaging data to the NWBFile, including options for photon series and stubbing. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the imaging data will be added. + metadata : dict, optional + Metadata dictionary containing information about the imaging data. If None, default metadata is used. + photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional + The type of photon series to be added to the NWBFile. Default is "TwoPhotonSeries". + photon_series_index : int, optional + The index of the photon series in the NWBFile, used to differentiate between multiple series, by default 0. + parent_container : {"acquisition", "processing/ophys"}, optional + The container in the NWBFile where the data will be added, by default "acquisition". + stub_test : bool, optional + If True, only a subset of the imaging data (up to `stub_frames`) will be added for testing purposes, + by default False. 
+ stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + + """ from ...tools.roiextractors import add_imaging_to_nwbfile if stub_test: diff --git a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py index 6b55b5afb..ac4243b45 100644 --- a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py @@ -23,6 +23,27 @@ def __init__(self, **source_data): self.segmentation_extractor = self.get_extractor()(**source_data) def get_metadata_schema(self) -> dict: + """ + Generate the metadata schema for Ophys data, updating required fields and properties. + + This method builds upon the base schema and customizes it for Ophys-specific metadata, including required + components such as devices, fluorescence data, imaging planes, and two-photon series. It also applies + temporary schema adjustments to handle certain use cases until a centralized metadata schema definition + is available. + + Returns + ------- + dict + A dictionary representing the updated Ophys metadata schema. + + Notes + ----- + - Ensures that `Device` and `ImageSegmentation` are marked as required. + - Updates various properties, including ensuring arrays for `ImagingPlane` and `TwoPhotonSeries`. + - Adjusts the schema for `Fluorescence`, including required fields and pattern properties. + - Adds schema definitions for `DfOverF`, segmentation images, and summary images. + - Applies temporary fixes, such as setting additional properties for `ImageSegmentation` to True. 
+ """ metadata_schema = super().get_metadata_schema() metadata_schema["required"] = ["Ophys"] metadata_schema["properties"]["Ophys"] = get_base_schema() @@ -88,23 +109,24 @@ def get_metadata_schema(self) -> dict: fill_defaults(metadata_schema, self.get_metadata()) return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ...tools.roiextractors import get_nwb_segmentation_metadata metadata = super().get_metadata() metadata.update(get_nwb_segmentation_metadata(self.segmentation_extractor)) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 reinitialized_extractor = self.get_extractor()(**self.source_data) return reinitialized_extractor.frame_to_time(frames=np.arange(stop=reinitialized_extractor.get_num_frames())) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 return self.segmentation_extractor.frame_to_time( frames=np.arange(stop=self.segmentation_extractor.get_num_frames()) ) def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + """set the aligned timestamps for the segmentation extractor.""" self.segmentation_extractor.set_times(times=aligned_timestamps) def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py index b0abbd887..b77683219 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py @@ -24,6 +24,7 @@ class BrukerTiffMultiPlaneConverter(NWBConverter): @classmethod def get_source_schema(cls): + """Get the source schema for the Bruker imaging interface.""" source_schema = get_schema_from_method_signature(cls) source_schema["properties"]["folder_path"][ "description" @@ -31,6 +32,7 @@ def get_source_schema(cls): return source_schema def 
get_conversion_options_schema(self): +        """Get the conversion options schema.""" interface_name = list(self.data_interface_objects.keys())[0] return self.data_interface_objects[interface_name].get_conversion_options_schema() @@ -91,6 +93,20 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add data from multiple data interfaces to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the data will be added. + metadata : dict + Metadata dictionary containing information to describe the data being added to the NWB file. + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) will be added for testing purposes. Default is False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True. Default is 100. + """ for photon_series_index, (interface_name, data_interface) in enumerate(self.data_interface_objects.items()): data_interface.add_to_nwbfile( nwbfile=nwbfile, @@ -109,6 +125,24 @@ def run_conversion( stub_test: bool = False, stub_frames: int = 100, ) -> None: + """ + Run the conversion process for the instantiated data interfaces and add data to the NWB file. + + Parameters + ---------- + nwbfile_path : FilePath, optional + Path where the NWB file will be written. If None, the file will be handled in-memory. + nwbfile : NWBFile, optional + An in-memory NWBFile object. If None, a new NWBFile object will be created. + metadata : dict, optional + Metadata dictionary for describing the NWB file. If None, it will be auto-generated using the `get_metadata()` method. + overwrite : bool, optional + If True, overwrites the existing NWB file at `nwbfile_path`. If False, appends to the file (default is False). + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) will be added for testing purposes, by default False.
+ stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + """ if metadata is None: metadata = self.get_metadata() @@ -142,6 +176,7 @@ def get_source_schema(cls): return get_schema_from_method_signature(cls) def get_conversion_options_schema(self): + """Get the conversion options schema.""" interface_name = list(self.data_interface_objects.keys())[0] return self.data_interface_objects[interface_name].get_conversion_options_schema() @@ -188,6 +223,22 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add data from all instantiated data interfaces to the provided NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the data will be added. + metadata : dict + Metadata dictionary containing information about the data to be added. + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. 
+ + """ for photon_series_index, (interface_name, data_interface) in enumerate(self.data_interface_objects.items()): data_interface.add_to_nwbfile( nwbfile=nwbfile, From 7afe65e5ca0eb9a4d1191de8a0001403ca7c866e Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 21:04:55 -0600 Subject: [PATCH 06/18] almost done --- pyproject.toml | 2 +- .../alphaomega/alphaomegadatainterface.py | 2 +- .../ecephys/axona/axonadatainterface.py | 14 +++---- .../ecephys/baselfpextractorinterface.py | 2 +- .../baserecordingextractorinterface.py | 8 ++-- .../ecephys/basesortingextractorinterface.py | 21 +++++++++-- .../ecephys/biocam/biocamdatainterface.py | 2 +- .../blackrock/blackrockdatainterface.py | 8 ++-- .../cellexplorer/cellexplorerdatainterface.py | 37 ++++++++++++++++--- .../ecephys/edf/edfdatainterface.py | 8 ++-- .../ecephys/intan/intandatainterface.py | 6 +-- .../ecephys/kilosort/kilosortdatainterface.py | 2 +- .../ecephys/maxwell/maxonedatainterface.py | 2 +- .../ecephys/mcsraw/mcsrawdatainterface.py | 2 +- .../ecephys/mearec/mearecdatainterface.py | 4 +- .../neuralynx/neuralynxdatainterface.py | 6 +-- .../neuroscope/neuroscopedatainterface.py | 10 ++--- 17 files changed, 88 insertions(+), 48 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index c80784183..0ac45727c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,7 +124,7 @@ select = [ "F401", # Unused import "I", # All isort rules "D101", # Missing docstring in public class -# "D102", # Missing docstring in public method + # "D102", # Missing docstring in public method "D103", # Missing docstring in public function ] fixable = ["ALL"] diff --git a/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py b/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py index 308f6bc56..7ab25784f 100644 --- a/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py +++ 
b/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py @@ -35,7 +35,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True, es_key: str """ super().__init__(folder_path=folder_path, stream_id="RAW", verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() annotation = self.recording_extractor.neo_reader.raw_annotations metadata["NWBFile"].update(session_start_time=annotation["blocks"][0]["rec_datetime"]) diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py index 731aec168..a8700e0ec 100644 --- a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py @@ -25,7 +25,7 @@ class AxonaRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Axona recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to .bin file." 
return source_schema @@ -49,7 +49,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele tetrode_id = self.recording_extractor.get_property("tetrode_id") self.recording_extractor.set_channel_groups(tetrode_id) - def extract_nwb_file_metadata(self) -> dict: + def extract_nwb_file_metadata(self) -> dict: # noqa: D102 raw_annotations = self.recording_extractor.neo_reader.raw_annotations session_start_time = raw_annotations["blocks"][0]["segments"][0]["rec_datetime"] session_description = self.metadata_in_set_file["comments"] @@ -66,7 +66,7 @@ def extract_nwb_file_metadata(self) -> dict: return nwbfile_metadata - def extract_ecephys_metadata(self) -> dict: + def extract_ecephys_metadata(self) -> dict: # noqa: D102 unique_elec_group_names = set(self.recording_extractor.get_channel_groups()) sw_version = self.metadata_in_set_file["sw_version"] description = f"Axona DacqUSB, sw_version={sw_version}" @@ -92,7 +92,7 @@ def extract_ecephys_metadata(self) -> dict: return ecephys_metadata - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() nwbfile_metadata = self.extract_nwb_file_metadata() @@ -112,7 +112,7 @@ class AxonaUnitRecordingInterface(AxonaRecordingInterface): info = "Interface for Axona recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 return dict( required=["file_path"], properties=dict( @@ -143,7 +143,7 @@ class AxonaLFPDataInterface(BaseLFPExtractorInterface): ExtractorName = "NumpyRecording" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 return dict( required=["file_path"], properties=dict(file_path=dict(type="string")), @@ -168,7 +168,7 @@ class AxonaPositionDataInterface(BaseDataInterface): info = "Interface for Axona position data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 return get_schema_from_method_signature(cls.__init__) def __init__(self, file_path: str): diff --git a/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py index 7ce6bb9e4..2084137d7 100644 --- a/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py @@ -18,7 +18,7 @@ class BaseLFPExtractorInterface(BaseRecordingExtractorInterface): def __init__(self, verbose: bool = True, es_key: str = "ElectricalSeriesLFP", **source_data): super().__init__(verbose=verbose, es_key=es_key, **source_data) - def add_to_nwbfile( + def add_to_nwbfile( # noqa: D102 self, nwbfile: Optional[NWBFile] = None, metadata: Optional[dict] = None, diff --git a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py index 620b86224..77927676a 100644 --- a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py @@ -84,7 +84,7 @@ def get_metadata_schema(self) -> dict: ) return metadata_schema - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() channel_groups_array = self.recording_extractor.get_channel_groups() @@ -145,7 +145,7 @@ def get_timestamps(self) -> Union[np.ndarray, list[np.ndarray]]: for segment_index in range(self._number_of_segments) ] - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 assert ( self._number_of_segments == 1 ), "This recording has multiple segments; please use 'align_segment_timestamps' instead." 
@@ -175,7 +175,7 @@ def set_aligned_segment_timestamps(self, aligned_segment_timestamps: list[np.nda times=aligned_segment_timestamps[segment_index], segment_index=segment_index ) - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 if self._number_of_segments == 1: self.set_aligned_timestamps(aligned_timestamps=self.get_timestamps() + aligned_starting_time) else: @@ -246,7 +246,7 @@ def has_probe(self) -> bool: """ return self.recording_extractor.has_probe() - def align_by_interpolation( + def align_by_interpolation( # noqa: D102 self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray, diff --git a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py index b3cd25d24..3e6944a38 100644 --- a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py @@ -75,15 +75,15 @@ def get_metadata_schema(self) -> dict: ) return metadata_schema - def register_recording(self, recording_interface: BaseRecordingExtractorInterface): + def register_recording(self, recording_interface: BaseRecordingExtractorInterface): # noqa: D102 self.sorting_extractor.register_recording(recording=recording_interface.recording_extractor) - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError( "Unable to fetch original timestamps for a SortingInterface since it relies upon an attached recording." 
) - def get_timestamps(self) -> Union[np.ndarray, list[np.ndarray]]: + def get_timestamps(self) -> Union[np.ndarray, list[np.ndarray]]: # noqa: D102 if not self.sorting_extractor.has_recording(): raise NotImplementedError( "In order to align timestamps for a SortingInterface, it must have a recording " @@ -167,7 +167,7 @@ def set_aligned_segment_timestamps(self, aligned_segment_timestamps: list[np.nda times=aligned_segment_timestamps[segment_index], segment_index=segment_index ) - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 if self.sorting_extractor.has_recording(): if self._number_of_segments == 1: self.set_aligned_timestamps(aligned_timestamps=self.get_timestamps() + aligned_starting_time) @@ -216,6 +216,19 @@ def set_aligned_segment_starting_times(self, aligned_segment_starting_times: lis sorting_segment._t_start = aligned_segment_starting_time def subset_sorting(self): + """ + Generate a subset of the sorting extractor based on spike timing data. + + This method identifies the earliest spike time across all units in the sorting extractor and creates a + subset of the sorting data up to 110% of the earliest spike time. If the sorting extractor is associated + with a recording, the subset is further limited by the total number of samples in the recording. + + Returns + ------- + SortingExtractor + A new `SortingExtractor` object representing the subset of the original sorting data, + sliced from the start frame to the calculated end frame. 
+ """ max_min_spike_time = max( [ min(x) diff --git a/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py b/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py index f12f3a93d..8f18c5ada 100644 --- a/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py @@ -15,7 +15,7 @@ class BiocamRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Biocam recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 schema = super().get_source_schema() schema["properties"]["file_path"]["description"] = "Path to the .bwr file." return schema diff --git a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py index d5122bf66..b69d5d981 100644 --- a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py @@ -18,7 +18,7 @@ class BlackrockRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Blackrock recording data." @classmethod - def get_source_schema(cls): + def get_source_schema(cls): # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["block_index", "seg_index"]) source_schema["properties"]["file_path"][ "description" @@ -57,7 +57,7 @@ def __init__( super().__init__(file_path=file_path, stream_id=str(nsx_to_load), verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() # Open file and extract headers basic_header = _parse_nsx_basic_header(self.source_data["file_path"]) @@ -77,7 +77,7 @@ class BlackrockSortingInterface(BaseSortingExtractorInterface): info = "Interface for Blackrock sorting data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 metadata_schema = get_schema_from_method_signature(method=cls.__init__) metadata_schema["additionalProperties"] = True metadata_schema["properties"]["file_path"].update(description="Path to Blackrock .nev file.") @@ -98,7 +98,7 @@ def __init__(self, file_path: FilePath, sampling_frequency: float = None, verbos """ super().__init__(file_path=file_path, sampling_frequency=sampling_frequency, verbose=verbose) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() # Open file and extract headers basic_header = _parse_nev_basic_header(self.source_data["file_path"]) diff --git a/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py b/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py index e388e5e44..4151470d1 100644 --- a/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py @@ -289,7 +289,7 @@ class CellExplorerRecordingInterface(BaseRecordingExtractorInterface): binary_file_extension = "dat" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Folder containing the .session.mat file" return source_schema @@ -353,7 +353,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True, es_key: str recording_extractor=self.recording_extractor, folder_path=folder_path ) - def get_original_timestamps(self): + def get_original_timestamps(self): # noqa: D102 num_frames = self.recording_extractor.get_num_frames() sampling_frequency = self.recording_extractor.get_sampling_frequency() timestamps = np.arange(num_frames) / sampling_frequency @@ -385,7 +385,7 @@ class 
CellExplorerLFPInterface(CellExplorerRecordingInterface): def __init__(self, folder_path: DirectoryPath, verbose: bool = True, es_key: str = "ElectricalSeriesLFP"): super().__init__(folder_path, verbose, es_key) - def add_to_nwbfile( + def add_to_nwbfile( # noqa: D102 self, nwbfile: NWBFile, metadata: Optional[dict] = None, @@ -511,6 +511,33 @@ def __init__(self, file_path: FilePath, verbose: bool = True): ) def generate_recording_with_channel_metadata(self): + """ + Generate a dummy recording extractor with channel metadata from session data. + + This method reads session data from a `.session.mat` file (if available) and generates a dummy recording + extractor. The recording extractor is then populated with channel metadata extracted from the session file. + + Returns + ------- + NumpyRecording + A `NumpyRecording` object representing the dummy recording extractor, containing the channel metadata. + + Notes + ----- + - The method reads the `.session.mat` file using `pymatreader` and extracts `extracellular` data. + - It creates a dummy recording extractor using `spikeinterface.core.numpyextractors.NumpyRecording`. + - The generated extractor includes channel IDs and other relevant metadata such as number of channels, + number of samples, and sampling frequency. + - Channel metadata is added to the dummy extractor using the `add_channel_metadata_to_recoder` function. + - If the `.session.mat` file is not found, no extractor is returned. + + Warnings + -------- + Ensure that the `.session.mat` file is correctly located in the expected session path, or the method will not generate + a recording extractor. 
+ + """ + session_data_file_path = self.session_path / f"{self.session_id}.session.mat" if session_data_file_path.is_file(): from pymatreader import read_mat @@ -540,8 +567,8 @@ def generate_recording_with_channel_metadata(self): return dummy_recording_extractor - def get_metadata(self) -> dict: - metadata = super().get_metadata() + def get_metadata(self) -> dict: # noqa: D102 + metadata = super().get_metadata() # noqa: D102 session_path = Path(self.source_data["file_path"]).parent session_id = session_path.stem # TODO: add condition for retrieving ecephys metadata if no recording or lfp are included in conversion diff --git a/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py b/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py index 119e9f8d2..f5a0c8fcb 100644 --- a/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py @@ -18,7 +18,7 @@ class EDFRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for European Data Format (EDF) recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .edf file." 
return source_schema @@ -45,7 +45,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) self.edf_header = self.recording_extractor.neo_reader.edf_header - def extract_nwb_file_metadata(self) -> dict: + def extract_nwb_file_metadata(self) -> dict: # noqa: D102 nwbfile_metadata = dict( session_start_time=self.edf_header["startdate"], experimenter=self.edf_header["technician"], @@ -56,7 +56,7 @@ def extract_nwb_file_metadata(self) -> dict: return nwbfile_metadata - def extract_subject_metadata(self) -> dict: + def extract_subject_metadata(self) -> dict: # noqa: D102 subject_metadata = dict( subject_id=self.edf_header["patientcode"], date_of_birth=self.edf_header["birthdate"], @@ -67,7 +67,7 @@ def extract_subject_metadata(self) -> dict: return subject_metadata - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() nwbfile_metadata = self.extract_nwb_file_metadata() metadata["NWBFile"].update(nwbfile_metadata) diff --git a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py index d28214598..2f84e03a4 100644 --- a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py @@ -23,7 +23,7 @@ class IntanRecordingInterface(BaseRecordingExtractorInterface): stream_id = "0" # This are the amplifier channels, corresponding to the stream_name 'RHD2000 amplifier channel' @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to either a .rhd or a .rhs file" return source_schema @@ -84,14 +84,14 @@ def __init__( super().__init__(**init_kwargs) - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) 
-> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() metadata_schema["properties"]["Ecephys"]["properties"].update( ElectricalSeriesRaw=get_schema_from_hdmf_class(ElectricalSeries) ) return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() ecephys_metadata = metadata["Ecephys"] diff --git a/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py b/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py index fc6765823..8cde5b33a 100644 --- a/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py @@ -11,7 +11,7 @@ class KiloSortSortingInterface(BaseSortingExtractorInterface): info = "Interface for KiloSort sorting data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" diff --git a/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py b/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py index 11902f81b..dc447fc0f 100644 --- a/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py @@ -85,7 +85,7 @@ def __init__( super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() maxwell_version = self.recording_extractor.neo_reader.raw_annotations["blocks"][0]["maxwell_version"] diff --git a/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py b/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py index ff8e82139..683bbedfb 100644 --- a/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py +++ 
b/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py @@ -15,7 +15,7 @@ class MCSRawRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for MCSRaw recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .raw file." return source_schema diff --git a/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py b/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py index 7a82025ca..fd2f739af 100644 --- a/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py @@ -18,7 +18,7 @@ class MEArecRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for MEArec recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the MEArec .h5 file." 
return source_schema @@ -37,7 +37,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele """ super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() # TODO: improve ProbeInterface integration in our writing procedures diff --git a/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py index d4ceb6b38..d79d80995 100644 --- a/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py @@ -18,14 +18,14 @@ class NeuralynxRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Neuralynx recording data." @classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import NeuralynxRecordingExtractor stream_names, _ = NeuralynxRecordingExtractor.get_streams(folder_path=folder_path) return stream_names @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -62,7 +62,7 @@ def __init__( if value.dtype == object or value.dtype == np.bool_: self.recording_extractor.set_property(key, np.asarray(value, dtype=str)) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 neo_metadata = extract_neo_header_metadata(self.recording_extractor.neo_reader) # remove filter related entries already covered by `add_recording_extractor_properties` diff --git a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py index 
68642c60d..085b5cb5c 100644 --- a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py @@ -102,7 +102,7 @@ class NeuroScopeRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for converting NeuroScope recording data." @classmethod - def get_source_schema(self) -> dict: + def get_source_schema(self) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to .dat file." return source_schema @@ -162,7 +162,7 @@ def __init__( recording_extractor=self.recording_extractor, xml_file_path=xml_file_path ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 session_path = Path(self.source_data["file_path"]).parent session_id = session_path.stem xml_file_path = self.source_data.get("xml_file_path", str(session_path / f"{session_id}.xml")) @@ -173,7 +173,7 @@ def get_metadata(self) -> dict: metadata["NWBFile"]["session_start_time"] = session_start_time return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 # TODO: add generic method for aliasing from NeuroConv signature to SI init new_recording = self.get_extractor()(file_path=self.source_data["file_path"]) if self._number_of_segments == 1: @@ -195,7 +195,7 @@ class NeuroScopeLFPInterface(BaseLFPExtractorInterface): ExtractorName = "NeuroScopeRecordingExtractor" @classmethod - def get_source_schema(self) -> dict: + def get_source_schema(self) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to .lfp or .eeg file." 
return source_schema @@ -236,7 +236,7 @@ def __init__( recording_extractor=self.recording_extractor, xml_file_path=xml_file_path ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 session_path = Path(self.source_data["file_path"]).parent session_id = session_path.stem xml_file_path = self.source_data.get("xml_file_path", str(session_path / f"{session_id}.xml")) From 438913b12ce592bb40fddcde1aac3bac959a79c1 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 6 Sep 2024 21:15:25 -0600 Subject: [PATCH 07/18] DONE --- pyproject.toml | 2 +- src/neuroconv/baseextractorinterface.py | 2 +- .../behavior/audio/audiointerface.py | 14 ++--- .../deeplabcut/deeplabcutdatainterface.py | 8 +-- .../behavior/fictrac/fictracdatainterface.py | 10 ++-- .../lightningpose/lightningposeconverter.py | 55 ++++++++++++++++++- .../lightningposedatainterface.py | 10 ++-- .../behavior/medpc/medpcdatainterface.py | 7 ++- .../miniscope/miniscopedatainterface.py | 4 +- .../neuralynx/neuralynx_nvt_interface.py | 10 ++-- .../behavior/sleap/sleapdatainterface.py | 8 +-- .../behavior/video/video_utils.py | 12 ++-- .../behavior/video/videodatainterface.py | 6 +- .../alphaomega/alphaomegadatainterface.py | 2 +- 14 files changed, 100 insertions(+), 50 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 0ac45727c..d9ec5a782 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,7 +124,7 @@ select = [ "F401", # Unused import "I", # All isort rules "D101", # Missing docstring in public class - # "D102", # Missing docstring in public method + "D102", # Missing docstring in public method "D103", # Missing docstring in public function ] fixable = ["ALL"] diff --git a/src/neuroconv/baseextractorinterface.py b/src/neuroconv/baseextractorinterface.py index 5dc0b25a1..9cc7b922c 100644 --- a/src/neuroconv/baseextractorinterface.py +++ b/src/neuroconv/baseextractorinterface.py @@ -19,7 +19,7 @@ class BaseExtractorInterface(BaseTemporalAlignmentInterface, ABC): 
Extractor = None  # Class loads dynamically on first call to .get_extractor() @classmethod - def get_extractor(cls): + def get_extractor(cls): # noqa: D102 if cls.Extractor is not None: return cls.Extractor extractor_module = get_package(package_name=cls.ExtractorModuleName) diff --git a/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py b/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py index fc3f08fb8..81ee68393 100644 --- a/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py +++ b/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py @@ -61,7 +61,7 @@ def __init__(self, file_paths: list[FilePath], verbose: bool = False): super().__init__(file_paths=file_paths) self._segment_starting_times = None - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() time_series_metadata_schema_path = ( @@ -84,7 +84,7 @@ def get_metadata_schema(self) -> dict: ) return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 default_name = "AcousticWaveformSeries" is_multiple_file_path = len(self.source_data["file_paths"]) > 1 audio_metadata = [ @@ -96,17 +96,17 @@ def get_metadata(self) -> dict: ] behavior_metadata = dict(Audio=audio_metadata) - metadata = super().get_metadata() + metadata = super().get_metadata() metadata.update(Behavior=behavior_metadata) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") - def get_timestamps(self) -> Optional[np.ndarray]: + def get_timestamps(self) -> Optional[np.ndarray]: # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") - def set_aligned_timestamps(self, aligned_timestamps: list[np.ndarray]): + def set_aligned_timestamps(self, aligned_timestamps: 
list[np.ndarray]): # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") def set_aligned_starting_time(self, aligned_starting_time: float): @@ -155,7 +155,7 @@ def set_aligned_segment_starting_times(self, aligned_segment_starting_times: lis self._segment_starting_times = aligned_segment_starting_times - def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index 21b054e85..3f27fc6a7 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -19,7 +19,7 @@ class DeepLabCutInterface(BaseTemporalAlignmentInterface): _timestamps = None @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .h5 file output by dlc." source_schema["properties"]["config_file_path"]["description"] = "Path to .yml config file" @@ -60,7 +60,7 @@ def __init__( self.verbose = verbose super().__init__(file_path=file_path, config_file_path=config_file_path) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() if self.config_dict: @@ -71,13 +71,13 @@ def get_metadata(self): return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError( "Unable to retrieve the original unaltered timestamps for this interface! 
" "Define the `get_original_timestamps` method for this interface." ) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError( "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." ) diff --git a/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py b/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py index 1b9686fd1..c2f332850 100644 --- a/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py @@ -149,7 +149,7 @@ class FicTracDataInterface(BaseTemporalAlignmentInterface): } @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .dat file (the output of fictrac)" return source_schema @@ -194,7 +194,7 @@ def __init__( self._timestamps = None self._starting_time = None - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() session_start_time = extract_session_start_time( @@ -345,17 +345,17 @@ def get_original_timestamps(self): return timestamps - def get_timestamps(self): + def get_timestamps(self): # noqa: D102 timestamps = self._timestamps if self._timestamps is not None else self.get_original_timestamps() if self._starting_time is not None: timestamps = timestamps + self._starting_time return timestamps - def set_aligned_timestamps(self, aligned_timestamps): + def set_aligned_timestamps(self, aligned_timestamps): # noqa: D102 self._timestamps = aligned_timestamps - def set_aligned_starting_time(self, aligned_starting_time): + def set_aligned_starting_time(self, aligned_starting_time): # noqa: D102 self._starting_time = aligned_starting_time diff --git 
a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py index dee848f19..65bceb5cc 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py @@ -23,7 +23,7 @@ class LightningPoseConverter(NWBConverter): info = "Interface for handling multiple streams of lightning pose data." @classmethod - def get_source_schema(cls): + def get_source_schema(cls): # noqa: D102 return get_schema_from_method_signature(cls) @validate_call @@ -70,14 +70,14 @@ def __init__( self.labeled_video_name = image_series_labeled_video_name or "ImageSeriesLabeledVideo" self.data_interface_objects.update(dict(LabeledVideo=VideoInterface(file_paths=[labeled_video_file_path]))) - def get_conversion_options_schema(self) -> dict: + def get_conversion_options_schema(self) -> dict: # noqa: D102 conversion_options_schema = get_schema_from_method_signature( method=self.add_to_nwbfile, exclude=["nwbfile", "metadata"] ) return conversion_options_schema - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = self.data_interface_objects["PoseEstimation"].get_metadata() original_video_interface = self.data_interface_objects["OriginalVideo"] original_videos_metadata = original_video_interface.get_metadata() @@ -111,6 +111,28 @@ def add_to_nwbfile( starting_frames_labeled_videos: Optional[list[int]] = None, stub_test: bool = False, ): + """ + Add behavior and pose estimation data, including original and labeled videos, to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the data will be added. + metadata : dict + Metadata dictionary containing information about the behavior and videos. + reference_frame : str, optional + Description of the reference frame for pose estimation, by default None. 
+ confidence_definition : str, optional + Definition for the confidence levels in pose estimation, by default None. + external_mode : bool, optional + If True, the videos will be referenced externally rather than embedded within the NWB file, by default True. + starting_frames_original_videos : list of int, optional + List of starting frames for the original videos, by default None. + starting_frames_labeled_videos : list of int, optional + List of starting frames for the labeled videos, by default None. + stub_test : bool, optional + If True, only a subset of the data will be added for testing purposes, by default False. + """ original_video_interface = self.data_interface_objects["OriginalVideo"] original_video_metadata = next( @@ -172,6 +194,33 @@ def run_conversion( starting_frames_labeled_videos: Optional[list] = None, stub_test: bool = False, ) -> None: + """ + Run the full conversion process, adding behavior, video, and pose estimation data to an NWB file. + + Parameters + ---------- + nwbfile_path : FilePath, optional + The file path where the NWB file will be saved. If None, the file is handled in memory. + nwbfile : NWBFile, optional + An in-memory NWBFile object. If None, a new NWBFile object will be created. + metadata : dict, optional + Metadata dictionary for describing the NWB file contents. If None, it is auto-generated. + overwrite : bool, optional + If True, overwrites the NWB file at `nwbfile_path` if it exists. If False, appends to the file, by default False. + reference_frame : str, optional + Description of the reference frame for pose estimation, by default None. + confidence_definition : str, optional + Definition for confidence levels in pose estimation, by default None. + external_mode : bool, optional + If True, the videos will be referenced externally rather than embedded within the NWB file, by default True. + starting_frames_original_videos : list of int, optional + List of starting frames for the original videos, by default None. 
+ starting_frames_labeled_videos : list of int, optional + List of starting frames for the labeled videos, by default None. + stub_test : bool, optional + If True, only a subset of the data will be added for testing purposes, by default False. + + """ if metadata is None: metadata = self.get_metadata() diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py index f103b7c9a..2297a5c59 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py @@ -25,7 +25,7 @@ class LightningPoseDataInterface(BaseTemporalAlignmentInterface): associated_suffixes = (".csv", ".mp4") info = "Interface for handling a single stream of lightning pose data." - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() metadata_schema["properties"]["Behavior"] = get_base_schema(tag="Behavior") @@ -123,13 +123,13 @@ def _get_original_video_shape(self) -> tuple[int, int]: # image size of the original video is in height x width return video_shape[0], video_shape[1] - def get_original_timestamps(self, stub_test: bool = False) -> np.ndarray: + def get_original_timestamps(self, stub_test: bool = False) -> np.ndarray: # noqa: D102 max_frames = 10 if stub_test else None with self._vc(file_path=str(self.original_video_file_path)) as video: timestamps = video.get_video_timestamps(max_frames=max_frames) return timestamps - def get_timestamps(self, stub_test: bool = False) -> np.ndarray: + def get_timestamps(self, stub_test: bool = False) -> np.ndarray: # noqa: D102 max_frames = 10 if stub_test else None if self._times is None: return self.get_original_timestamps(stub_test=stub_test) @@ -137,10 +137,10 @@ def get_timestamps(self, stub_test: bool = False) -> np.ndarray: timestamps 
= self._times if not stub_test else self._times[:max_frames] return timestamps - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self._times = aligned_timestamps - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() # Update the session start time if folder structure is saved in the format: YYYY-MM-DD/HH-MM-SS diff --git a/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py b/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py index 6a4127663..a76c5dec1 100644 --- a/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py @@ -85,7 +85,7 @@ def __init__( ) self.timestamps_dict = {} - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() session_dict = read_medpc_file( file_path=self.source_data["file_path"], @@ -98,7 +98,7 @@ def get_metadata(self) -> DeepDict: return metadata - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() medpc_name_to_info_dict = self.source_data["metadata_medpc_name_to_info_dict"] metadata_schema["properties"]["MedPC"] = { @@ -178,11 +178,12 @@ def set_aligned_starting_time(self, aligned_starting_time: float, medpc_name_to_ aligned_timestamps_dict[name] = original_timestamps + aligned_starting_time self.set_aligned_timestamps(aligned_timestamps_dict=aligned_timestamps_dict) - def add_to_nwbfile( + def add_to_nwbfile( # noqa: D102 self, nwbfile: NWBFile, metadata: dict, ) -> None: + ndx_events = get_package(package_name="ndx_events", installation_instructions="pip install ndx-events") medpc_name_to_info_dict = metadata["MedPC"].get("medpc_name_to_info_dict", None) assert medpc_name_to_info_dict is not None, 
"medpc_name_to_info_dict must be provided in metadata" diff --git a/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py b/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py index ecb763523..63bee0df9 100644 --- a/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py @@ -17,7 +17,7 @@ class MiniscopeBehaviorInterface(BaseDataInterface): info = "Interface for Miniscope behavior video data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -66,7 +66,7 @@ def __init__(self, folder_path: DirectoryPath): assert len(self._starting_frames) == len(self._behav_avi_file_paths) self._timestamps = get_timestamps(folder_path=str(folder_path), file_pattern="BehavCam*/timeStamps.csv") - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() metadata["NWBFile"].update(session_start_time=self._recording_start_times[0]) diff --git a/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py b/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py index e161387f0..14bc1f5de 100644 --- a/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py +++ b/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py @@ -41,7 +41,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True): super().__init__(file_path=file_path) - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 data = read_data(self.file_path) times = data["TimeStamp"] / 1000000 # Neuralynx stores times in microseconds @@ -49,13 +49,13 @@ def get_original_timestamps(self) -> np.ndarray: return times - def get_timestamps(self) -> 
np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 return self._timestamps - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray) -> None: + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray) -> None: # noqa: D102 self._timestamps = aligned_timestamps - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() metadata["NWBFile"].update(session_start_time=self.header["TimeCreated"]) @@ -67,7 +67,7 @@ def get_metadata(self) -> DeepDict: ) return metadata - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() if "Behavior" not in metadata_schema["properties"]: diff --git a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py index 713b21c98..774b63f59 100644 --- a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py @@ -19,7 +19,7 @@ class SLEAPInterface(BaseTemporalAlignmentInterface): info = "Interface for SLEAP pose estimation datasets." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .slp file (the output of sleap)" source_schema["properties"]["video_file_path"][ @@ -57,7 +57,7 @@ def __init__( self._timestamps = None super().__init__(file_path=file_path) - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 if self.video_file_path is None: raise ValueError( "Unable to fetch the original timestamps from the video! 
" @@ -65,11 +65,11 @@ def get_original_timestamps(self) -> np.ndarray: ) return np.array(extract_timestamps(self.video_file_path)) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 timestamps = self._timestamps if self._timestamps is not None else self.get_original_timestamps() return timestamps - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self._timestamps = aligned_timestamps def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/behavior/video/video_utils.py b/src/neuroconv/datainterfaces/behavior/video/video_utils.py index a8f2412aa..8725e3846 100644 --- a/src/neuroconv/datainterfaces/behavior/video/video_utils.py +++ b/src/neuroconv/datainterfaces/behavior/video/video_utils.py @@ -76,7 +76,7 @@ def get_frame_shape(self) -> Tuple: return frame.shape @property - def frame_count(self): + def frame_count(self): # noqa: D102 if self._frame_count is None: self._frame_count = self._video_frame_count() return self._frame_count @@ -89,7 +89,7 @@ def frame_count(self, val: int): ), "Cannot set manual frame_count beyond length of video (received {val})." 
self._frame_count = val - def get_video_frame_count(self): + def get_video_frame_count(self): # noqa: D102 return self.frame_count def _video_frame_count(self): @@ -99,7 +99,7 @@ def _video_frame_count(self): return int(self.vc.get(prop)) @staticmethod - def get_cv_attribute(attribute_name: str): + def get_cv_attribute(attribute_name: str): # noqa: D102 cv2 = get_package(package_name="cv2", installation_instructions="pip install opencv-python-headless") if int(cv2.__version__.split(".")[0]) < 3: # pragma: no cover @@ -107,7 +107,7 @@ def get_cv_attribute(attribute_name: str): return getattr(cv2, attribute_name) @property - def current_frame(self): + def current_frame(self): # noqa: D102 return self._current_frame @current_frame.setter @@ -136,10 +136,10 @@ def get_video_frame_dtype(self): if frame is not None: return frame.dtype - def release(self): + def release(self): # noqa: D102 self.vc.release() - def isOpened(self): + def isOpened(self): # noqa: D102 return self.vc.isOpened() def __iter__(self): diff --git a/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py b/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py index 7a28c2d2f..f840c6fdc 100644 --- a/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py @@ -77,7 +77,7 @@ def __init__( self.metadata_key_name = metadata_key_name super().__init__(file_paths=file_paths) - def get_metadata_schema(self): + def get_metadata_schema(self): # noqa: D102 metadata_schema = super().get_metadata_schema() image_series_metadata_schema = get_schema_from_hdmf_class(ImageSeries) # TODO: in future PR, add 'exclude' option to get_schema_from_hdmf_class to bypass this popping @@ -93,7 +93,7 @@ def get_metadata_schema(self): ) return metadata_schema - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() behavior_metadata = { self.metadata_key_name: [ @@ -256,7 +256,7 @@ def 
set_aligned_segment_starting_times(self, aligned_segment_starting_times: lis else: self._segment_starting_times = aligned_segment_starting_times - def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("The `align_by_interpolation` method has not been developed for this interface yet.") def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py b/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py index 7ab25784f..610b33ff7 100644 --- a/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py @@ -15,7 +15,7 @@ class AlphaOmegaRecordingInterface(BaseRecordingExtractorInterface): info = "Interface class for converting AlphaOmega recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Path to the folder of .mpx files." 
return source_schema From 2cd9b551b58d679ebe1c933d11080780d96c88b0 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 17 Sep 2024 18:08:05 -0600 Subject: [PATCH 08/18] missing stuff --- .../datainterfaces/ecephys/kilosort/kilosortdatainterface.py | 2 +- src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py | 2 +- src/neuroconv/tools/testing/mock_interfaces.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py b/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py index 2f1702f80..1d70289ae 100644 --- a/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py @@ -37,7 +37,7 @@ def __init__( """ super().__init__(folder_path=folder_path, keep_good_only=keep_good_only, verbose=verbose) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() # See Kilosort save_to_phy() docstring for more info on these fields: https://github.com/MouseLand/Kilosort/blob/main/kilosort/io.py # Or see phy documentation: https://github.com/cortex-lab/phy/blob/master/phy/apps/base.py diff --git a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py index 35f0b860b..8ba9dd593 100644 --- a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py @@ -44,7 +44,7 @@ def __init__( """ super().__init__(folder_path=folder_path, exclude_cluster_groups=exclude_cluster_groups, verbose=verbose) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() # See Kilosort save_to_phy() docstring for more info on these fields: https://github.com/MouseLand/Kilosort/blob/main/kilosort/io.py # Or see phy documentation: https://github.com/cortex-lab/phy/blob/master/phy/apps/base.py 
diff --git a/src/neuroconv/tools/testing/mock_interfaces.py b/src/neuroconv/tools/testing/mock_interfaces.py index 329e3b389..8c28189e9 100644 --- a/src/neuroconv/tools/testing/mock_interfaces.py +++ b/src/neuroconv/tools/testing/mock_interfaces.py @@ -403,7 +403,7 @@ def __init__( verbose=verbose, ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 session_start_time = datetime.now().astimezone() metadata = super().get_metadata() metadata["NWBFile"]["session_start_time"] = session_start_time From 0d5d6ccb430e545b4b899e861876d600e04c5fbf Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 13:58:04 -0600 Subject: [PATCH 09/18] second suggestion --- .../datainterfaces/ophys/baseimagingextractorinterface.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py index 3cb710891..c5e9f6c41 100644 --- a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py @@ -155,8 +155,7 @@ def get_original_timestamps(self) -> np.ndarray: # noqa: D102 def get_timestamps(self) -> np.ndarray: # noqa: D102 return self.imaging_extractor.frame_to_time(frames=np.arange(stop=self.imaging_extractor.get_num_frames())) - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): - """Replace all timestamps for this interface with those aligned to the common session start time.""" + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self.imaging_extractor.set_times(times=aligned_timestamps) def add_to_nwbfile( From 803529897b40f813cf53c467ad0a175d696985b6 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 13:59:54 -0600 Subject: [PATCH 10/18] removed --- .../datainterfaces/ophys/basesegmentationextractorinterface.py | 3 +--
.../datainterfaces/ophys/brukertiff/brukertiffconverter.py | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py index fcef1165a..afb59830f 100644 --- a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py @@ -126,8 +126,7 @@ def get_timestamps(self) -> np.ndarray: # noqa: D102 frames=np.arange(stop=self.segmentation_extractor.get_num_frames()) ) - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): - """set the aligned timestamps for the segmentation extractor.""" + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self.segmentation_extractor.set_times(times=aligned_timestamps) def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py index b77683219..6686c4aaa 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py @@ -237,7 +237,6 @@ def add_to_nwbfile( by default False. stub_frames : int, optional The number of frames to include in the subset if `stub_test` is True, by default 100. 
- """ for photon_series_index, (interface_name, data_interface) in enumerate(self.data_interface_objects.items()): data_interface.add_to_nwbfile( From c69487b4baeb0788861e44db1b2b93658cbe0ac7 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:00:53 -0600 Subject: [PATCH 11/18] bruker tiff docstring --- .../brukertiff/brukertiffdatainterface.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py index 464b039be..4a893c486 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py @@ -29,7 +29,23 @@ def get_streams( folder_path: DirectoryPath, plane_separation_type: Literal["contiguous", "disjoint"] = None, ) -> dict: - """get streams for the Bruker TIFF imaging data.""" + """ + Get streams for the Bruker TIFF imaging data. + + Parameters + ---------- + folder_path : DirectoryPath + Path to the folder containing the Bruker TIFF files. + plane_separation_type : Literal["contiguous", "disjoint"], optional + Type of plane separation to apply. If "contiguous", only the first plane stream for each channel is retained. + + Returns + ------- + dict + A dictionary containing the streams for the Bruker TIFF imaging data. The dictionary has the following keys: + - "channel_streams": List of channel stream names. + - "plane_streams": Dictionary where keys are channel stream names and values are lists of plane streams. 
+ """ from roiextractors import BrukerTiffMultiPlaneImagingExtractor streams = BrukerTiffMultiPlaneImagingExtractor.get_streams(folder_path=folder_path) From 564703cc2756ce5f115e5f8c8d516f28641957d9 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:02:04 -0600 Subject: [PATCH 12/18] bruker tiff single parameters --- .../ophys/brukertiff/brukertiffdatainterface.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py index 4a893c486..f7e7bee1b 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py @@ -211,7 +211,19 @@ def get_source_schema(cls) -> dict: @classmethod def get_streams(cls, folder_path: DirectoryPath) -> dict: - """get streams for the Bruker TIFF imaging data.""" + """ + Get streams for the Bruker TIFF imaging data. + + Parameters + ---------- + folder_path : DirectoryPath + Path to the folder containing the Bruker TIFF files. + + Returns + ------- + dict + A dictionary containing the streams extracted from the Bruker TIFF files. 
+ """ from roiextractors import BrukerTiffMultiPlaneImagingExtractor streams = BrukerTiffMultiPlaneImagingExtractor.get_streams(folder_path=folder_path) From d95364b96736da13236dbc82c6d7027a65f20b3c Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:03:52 -0600 Subject: [PATCH 13/18] miniscope imaging --- .../ophys/miniscope/miniscopeimagingdatainterface.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py index b932159f1..659cd8d63 100644 --- a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py @@ -81,8 +81,7 @@ def get_metadata_schema(self) -> dict: metadata_schema["properties"]["Ophys"]["definitions"]["Device"]["additionalProperties"] = True return metadata_schema - def get_original_timestamps(self) -> np.ndarray: - """Get the original timestamps from the Miniscope data.""" + def get_original_timestamps(self) -> np.ndarray: # noqa: D102, should inherit docstring from base class from ndx_miniscope.utils import get_timestamps timestamps = get_timestamps(folder_path=self.source_data["folder_path"]) From adcf7fb36e21be46a652efef5cfbb3c104f60256 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:06:18 -0600 Subject: [PATCH 14/18] scanimage --- src/neuroconv/basedatainterface.py | 9 ++++++++- .../ophys/scanimage/scanimageimaginginterfaces.py | 3 +-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/neuroconv/basedatainterface.py b/src/neuroconv/basedatainterface.py index 682ed2dba..35c7f413c 100644 --- a/src/neuroconv/basedatainterface.py +++ b/src/neuroconv/basedatainterface.py @@ -36,7 +36,14 @@ class BaseDataInterface(ABC): @classmethod def get_source_schema(cls) -> dict: - """Infer the JSON schema for the source_data 
from the method signature (annotation typing).""" + """ + Infer the JSON schema for the source_data from the method signature (annotation typing). + + Returns + ------- + dict + The JSON schema for the source_data. + """ return get_json_schema_from_method_signature(cls, exclude=["source_data"]) @validate_call diff --git a/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py b/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py index d7f0af51b..2ff291ac3 100644 --- a/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py +++ b/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py @@ -87,8 +87,7 @@ class ScanImageLegacyImagingInterface(BaseImagingExtractorInterface): ExtractorName = "ScanImageTiffImagingExtractor" @classmethod - def get_source_schema(cls) -> dict: - """ " "Get the source schema for the ScanImage legacy imaging interface.""" + def get_source_schema(cls) -> dict: # noqa: D102, should inherit docstring from the base class source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to Tiff file." return source_schema From 9d7f734091deb814be95f5a9013a6b2753ba0316 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:07:27 -0600 Subject: [PATCH 15/18] suit2p paul suggestion --- .../ophys/suite2p/suite2pdatainterface.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py b/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py index d9065a644..fb0ade21b 100644 --- a/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py @@ -160,9 +160,18 @@ def add_to_nwbfile( Whether to include the centroids of regions of interest (ROIs) in the data, by default True.
include_roi_acceptance : bool, optional Whether to include acceptance status of ROIs, by default True. - mask_type : str, optional - The type of mask used for segmentation, either "image", "pixel", or "voxel", by default "image". - plane_segmentation_name : str, optional + mask_type : str, default: 'image' + There are three types of ROI masks in NWB, 'image', 'pixel', and 'voxel'. + + * 'image' masks have the same shape as the reference images the segmentation was applied to, and weight each pixel + by its contribution to the ROI (typically boolean, with 0 meaning 'not in the ROI'). + * 'pixel' masks are instead indexed by ROI, with the data at each index being the shape of the image by the number + of pixels in each ROI. + * 'voxel' masks are instead indexed by ROI, with the data at each index being the shape of the volume by the number + of voxels in each ROI. + + Specify your choice among these options as mask_type='image', 'pixel', 'voxel', or None. + plane_segmentation_name : str, optional The name of the plane segmentation object, by default None. iterator_options : dict, optional Additional options for iterating over the data, by default None. From dd85b3c424d689e89caccd1a0cd31e88ea51450d Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:08:41 -0600 Subject: [PATCH 16/18] time intervals --- .../datainterfaces/text/timeintervalsinterface.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py index 505868b2a..4d3ac827a 100644 --- a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py +++ b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py @@ -42,15 +42,7 @@ def __init__( self.dataframe = self._read_file(file_path, **read_kwargs) self.time_intervals = None - def get_metadata(self) -> dict: - """ - Get metadata for the time intervals.
- - Returns - ------- - dict - Metadata dictionary with information about the time intervals table. - """ + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() metadata["TimeIntervals"] = dict( trials=dict( From 1cca391c502eda5be4ae44b1d8cdad0f45ffe174 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:09:27 -0600 Subject: [PATCH 17/18] reverse return in add_to_nwbfile --- src/neuroconv/nwbconverter.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/neuroconv/nwbconverter.py b/src/neuroconv/nwbconverter.py index 7a6917ff5..9da798dd0 100644 --- a/src/neuroconv/nwbconverter.py +++ b/src/neuroconv/nwbconverter.py @@ -166,7 +166,7 @@ def create_nwbfile(self, metadata: Optional[dict] = None, conversion_options: Op self.add_to_nwbfile(nwbfile=nwbfile, metadata=metadata, conversion_options=conversion_options) return nwbfile - def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optional[dict] = None) -> NWBFile: + def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optional[dict] = None): """ Add data from the instantiated data interfaces to the given NWBFile. @@ -180,11 +180,6 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optiona A dictionary containing conversion options for each interface, where non-default behavior is requested. Each key corresponds to a data interface name, and the values are dictionaries with options for that interface. By default, None. - - Returns - ------- - nwbfile : NWBFile - The NWB file object with the data from the data interfaces added to it. 
""" conversion_options = conversion_options or dict() for interface_name, data_interface in self.data_interface_objects.items(): @@ -192,8 +187,6 @@ def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optiona nwbfile=nwbfile, metadata=metadata, **conversion_options.get(interface_name, dict()) ) - return nwbfile - def run_conversion( self, nwbfile_path: Optional[FilePath] = None, From de12b9a108f740a527c8653ff03b183ef7c2896e Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Wed, 18 Sep 2024 14:10:49 -0600 Subject: [PATCH 18/18] hdmf --- src/neuroconv/tools/hdmf.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/neuroconv/tools/hdmf.py b/src/neuroconv/tools/hdmf.py index 164e3ba85..d717e9023 100644 --- a/src/neuroconv/tools/hdmf.py +++ b/src/neuroconv/tools/hdmf.py @@ -47,11 +47,10 @@ def estimate_default_chunk_shape(chunk_mb: float, maxshape: tuple[int, ...], dty # TODO: move this to the core iterator in HDMF so it can be easily swapped out as well as run on its own @staticmethod - def estimate_default_buffer_shape( + def estimate_default_buffer_shape( # noqa: D102 buffer_gb: float, chunk_shape: tuple[int, ...], maxshape: tuple[int, ...], dtype: np.dtype ) -> tuple[int, ...]: - """ "Add docstring to this""" - + # TODO: Add docstring to this once someone understands it better # Elevate any overflow warnings to trigger error. # This is usually an indicator of something going terribly wrong with the estimation calculations and should be # avoided at all costs.