diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c7bc13be..cf8c49582 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,9 @@ # Upcoming -## Deprecations - ## Bug Fixes +## Deprecations + ## Features * Using in-house `GenericDataChunkIterator` [PR #1068](https://github.com/catalystneuro/neuroconv/pull/1068) * Data interfaces now perform source (argument inputs) validation with the json schema [PR #1020](https://github.com/catalystneuro/neuroconv/pull/1020) @@ -11,6 +11,7 @@ ## Improvements * Remove dev test from PR [PR #1092](https://github.com/catalystneuro/neuroconv/pull/1092) * Run only the most basic testing while a PR is on draft [PR #1082](https://github.com/catalystneuro/neuroconv/pull/1082) +* Using ruff to enforce existence of public functions' docstrings [PR #1062](https://github.com/catalystneuro/neuroconv/pull/1062) * Test that zarr backend_configuration works in gin data tests [PR #1094](https://github.com/catalystneuro/neuroconv/pull/1094) * Consolidated weekly workflows into one workflow and added email notifications [PR #1088](https://github.com/catalystneuro/neuroconv/pull/1088) * Avoid running link test when the PR is on draft [PR #1093](https://github.com/catalystneuro/neuroconv/pull/1093) diff --git a/pyproject.toml b/pyproject.toml index d7cf25813..f10c139ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,7 +104,7 @@ doctest_optionflags = "ELLIPSIS" [tool.black] line-length = 120 -target-version = ['py38', 'py39', 'py310'] +target-version = ['py39', 'py310'] include = '\.pyi?$' extend-exclude = ''' /( @@ -131,6 +131,7 @@ select = [ "F401", # Unused import "I", # All isort rules "D101", # Missing docstring in public class + "D102", # Missing docstring in public method "D103", # Missing docstring in public function ] fixable = ["ALL"] diff --git a/src/neuroconv/basedatainterface.py b/src/neuroconv/basedatainterface.py index adcec89b5..9e9585d81 100644 --- a/src/neuroconv/basedatainterface.py +++ b/src/neuroconv/basedatainterface.py 
@@ -37,7 +37,14 @@ class BaseDataInterface(ABC): @classmethod def get_source_schema(cls) -> dict: - """Infer the JSON schema for the source_data from the method signature (annotation typing).""" + """ + Infer the JSON schema for the source_data from the method signature (annotation typing). + + Returns + ------- + dict + The JSON schema for the source_data. + """ return get_json_schema_from_method_signature(cls, exclude=["source_data"]) @classmethod diff --git a/src/neuroconv/baseextractorinterface.py b/src/neuroconv/baseextractorinterface.py index a75fbe1f0..0ea1db5b8 100644 --- a/src/neuroconv/baseextractorinterface.py +++ b/src/neuroconv/baseextractorinterface.py @@ -19,7 +19,7 @@ class BaseExtractorInterface(BaseTemporalAlignmentInterface, ABC): Extractor = None # Class loads dynamically on first call to .get_extractor() @classmethod - def get_extractor(cls): + def get_extractor(cls): # noqa: D102 if cls.Extractor is not None: return cls.Extractor extractor_module = get_package(package_name=cls.ExtractorModuleName) diff --git a/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py b/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py index fc3f08fb8..81ee68393 100644 --- a/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py +++ b/src/neuroconv/datainterfaces/behavior/audio/audiointerface.py @@ -61,7 +61,7 @@ def __init__(self, file_paths: list[FilePath], verbose: bool = False): super().__init__(file_paths=file_paths) self._segment_starting_times = None - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() time_series_metadata_schema_path = ( @@ -84,7 +84,7 @@ def get_metadata_schema(self) -> dict: ) return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 default_name = "AcousticWaveformSeries" is_multiple_file_path = len(self.source_data["file_paths"]) > 1 audio_metadata = [ @@ -96,17 +96,17 @@ def 
get_metadata(self) -> dict: ] behavior_metadata = dict(Audio=audio_metadata) metadata = super().get_metadata() metadata.update(Behavior=behavior_metadata) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") - def get_timestamps(self) -> Optional[np.ndarray]: + def get_timestamps(self) -> Optional[np.ndarray]: # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") - def set_aligned_timestamps(self, aligned_timestamps: list[np.ndarray]): + def set_aligned_timestamps(self, aligned_timestamps: list[np.ndarray]): # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") def set_aligned_starting_time(self, aligned_starting_time: float): @@ -155,7 +155,7 @@ def set_aligned_segment_starting_times(self, aligned_segment_starting_times: lis self._segment_starting_times = aligned_segment_starting_times - def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("The AudioInterface does not yet support timestamps.") def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index 21b054e85..3f27fc6a7 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -19,7 +19,7 @@ class DeepLabCutInterface(BaseTemporalAlignmentInterface): _timestamps = None @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = 
super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .h5 file output by dlc." source_schema["properties"]["config_file_path"]["description"] = "Path to .yml config file" @@ -60,7 +60,7 @@ def __init__( self.verbose = verbose super().__init__(file_path=file_path, config_file_path=config_file_path) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() if self.config_dict: @@ -71,13 +71,13 @@ def get_metadata(self): return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError( "Unable to retrieve the original unaltered timestamps for this interface! " "Define the `get_original_timestamps` method for this interface." ) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError( "Unable to retrieve timestamps for this interface! Define the `get_timestamps` method for this interface." 
) diff --git a/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py b/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py index 1b9686fd1..c2f332850 100644 --- a/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/fictrac/fictracdatainterface.py @@ -149,7 +149,7 @@ class FicTracDataInterface(BaseTemporalAlignmentInterface): } @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .dat file (the output of fictrac)" return source_schema @@ -194,7 +194,7 @@ def __init__( self._timestamps = None self._starting_time = None - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() session_start_time = extract_session_start_time( @@ -345,17 +345,17 @@ def get_original_timestamps(self): return timestamps - def get_timestamps(self): + def get_timestamps(self): # noqa: D102 timestamps = self._timestamps if self._timestamps is not None else self.get_original_timestamps() if self._starting_time is not None: timestamps = timestamps + self._starting_time return timestamps - def set_aligned_timestamps(self, aligned_timestamps): + def set_aligned_timestamps(self, aligned_timestamps): # noqa: D102 self._timestamps = aligned_timestamps - def set_aligned_starting_time(self, aligned_starting_time): + def set_aligned_starting_time(self, aligned_starting_time): # noqa: D102 self._starting_time = aligned_starting_time diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py index dee848f19..65bceb5cc 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposeconverter.py @@ -23,7 
+23,7 @@ class LightningPoseConverter(NWBConverter): info = "Interface for handling multiple streams of lightning pose data." @classmethod - def get_source_schema(cls): + def get_source_schema(cls): # noqa: D102 return get_schema_from_method_signature(cls) @validate_call @@ -70,14 +70,14 @@ def __init__( self.labeled_video_name = image_series_labeled_video_name or "ImageSeriesLabeledVideo" self.data_interface_objects.update(dict(LabeledVideo=VideoInterface(file_paths=[labeled_video_file_path]))) - def get_conversion_options_schema(self) -> dict: + def get_conversion_options_schema(self) -> dict: # noqa: D102 conversion_options_schema = get_schema_from_method_signature( method=self.add_to_nwbfile, exclude=["nwbfile", "metadata"] ) return conversion_options_schema - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = self.data_interface_objects["PoseEstimation"].get_metadata() original_video_interface = self.data_interface_objects["OriginalVideo"] original_videos_metadata = original_video_interface.get_metadata() @@ -111,6 +111,28 @@ def add_to_nwbfile( starting_frames_labeled_videos: Optional[list[int]] = None, stub_test: bool = False, ): + """ + Add behavior and pose estimation data, including original and labeled videos, to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the data will be added. + metadata : dict + Metadata dictionary containing information about the behavior and videos. + reference_frame : str, optional + Description of the reference frame for pose estimation, by default None. + confidence_definition : str, optional + Definition for the confidence levels in pose estimation, by default None. + external_mode : bool, optional + If True, the videos will be referenced externally rather than embedded within the NWB file, by default True. 
+ starting_frames_original_videos : list of int, optional + List of starting frames for the original videos, by default None. + starting_frames_labeled_videos : list of int, optional + List of starting frames for the labeled videos, by default None. + stub_test : bool, optional + If True, only a subset of the data will be added for testing purposes, by default False. + """ original_video_interface = self.data_interface_objects["OriginalVideo"] original_video_metadata = next( @@ -172,6 +194,33 @@ def run_conversion( starting_frames_labeled_videos: Optional[list] = None, stub_test: bool = False, ) -> None: + """ + Run the full conversion process, adding behavior, video, and pose estimation data to an NWB file. + + Parameters + ---------- + nwbfile_path : FilePath, optional + The file path where the NWB file will be saved. If None, the file is handled in memory. + nwbfile : NWBFile, optional + An in-memory NWBFile object. If None, a new NWBFile object will be created. + metadata : dict, optional + Metadata dictionary for describing the NWB file contents. If None, it is auto-generated. + overwrite : bool, optional + If True, overwrites the NWB file at `nwbfile_path` if it exists. If False, appends to the file, by default False. + reference_frame : str, optional + Description of the reference frame for pose estimation, by default None. + confidence_definition : str, optional + Definition for confidence levels in pose estimation, by default None. + external_mode : bool, optional + If True, the videos will be referenced externally rather than embedded within the NWB file, by default True. + starting_frames_original_videos : list of int, optional + List of starting frames for the original videos, by default None. + starting_frames_labeled_videos : list of int, optional + List of starting frames for the labeled videos, by default None. + stub_test : bool, optional + If True, only a subset of the data will be added for testing purposes, by default False. 
+ + """ if metadata is None: metadata = self.get_metadata() diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py index f103b7c9a..2297a5c59 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py @@ -25,7 +25,7 @@ class LightningPoseDataInterface(BaseTemporalAlignmentInterface): associated_suffixes = (".csv", ".mp4") info = "Interface for handling a single stream of lightning pose data." - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() metadata_schema["properties"]["Behavior"] = get_base_schema(tag="Behavior") @@ -123,13 +123,13 @@ def _get_original_video_shape(self) -> tuple[int, int]: # image size of the original video is in height x width return video_shape[0], video_shape[1] - def get_original_timestamps(self, stub_test: bool = False) -> np.ndarray: + def get_original_timestamps(self, stub_test: bool = False) -> np.ndarray: # noqa: D102 max_frames = 10 if stub_test else None with self._vc(file_path=str(self.original_video_file_path)) as video: timestamps = video.get_video_timestamps(max_frames=max_frames) return timestamps - def get_timestamps(self, stub_test: bool = False) -> np.ndarray: + def get_timestamps(self, stub_test: bool = False) -> np.ndarray: # noqa: D102 max_frames = 10 if stub_test else None if self._times is None: return self.get_original_timestamps(stub_test=stub_test) @@ -137,10 +137,10 @@ def get_timestamps(self, stub_test: bool = False) -> np.ndarray: timestamps = self._times if not stub_test else self._times[:max_frames] return timestamps - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self._times = 
aligned_timestamps - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() # Update the session start time if folder structure is saved in the format: YYYY-MM-DD/HH-MM-SS diff --git a/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py b/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py index 6a4127663..a76c5dec1 100644 --- a/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/medpc/medpcdatainterface.py @@ -85,7 +85,7 @@ def __init__( ) self.timestamps_dict = {} - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() session_dict = read_medpc_file( file_path=self.source_data["file_path"], @@ -98,7 +98,7 @@ def get_metadata(self) -> DeepDict: return metadata - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() medpc_name_to_info_dict = self.source_data["metadata_medpc_name_to_info_dict"] metadata_schema["properties"]["MedPC"] = { @@ -178,11 +178,12 @@ def set_aligned_starting_time(self, aligned_starting_time: float, medpc_name_to_ aligned_timestamps_dict[name] = original_timestamps + aligned_starting_time self.set_aligned_timestamps(aligned_timestamps_dict=aligned_timestamps_dict) - def add_to_nwbfile( + def add_to_nwbfile( # noqa: D102 self, nwbfile: NWBFile, metadata: dict, ) -> None: + ndx_events = get_package(package_name="ndx_events", installation_instructions="pip install ndx-events") medpc_name_to_info_dict = metadata["MedPC"].get("medpc_name_to_info_dict", None) assert medpc_name_to_info_dict is not None, "medpc_name_to_info_dict must be provided in metadata" diff --git a/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py b/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py index ecb763523..63bee0df9 
100644 --- a/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/miniscope/miniscopedatainterface.py @@ -17,7 +17,7 @@ class MiniscopeBehaviorInterface(BaseDataInterface): info = "Interface for Miniscope behavior video data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -66,7 +66,7 @@ def __init__(self, folder_path: DirectoryPath): assert len(self._starting_frames) == len(self._behav_avi_file_paths) self._timestamps = get_timestamps(folder_path=str(folder_path), file_pattern="BehavCam*/timeStamps.csv") - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() metadata["NWBFile"].update(session_start_time=self._recording_start_times[0]) diff --git a/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py b/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py index e161387f0..14bc1f5de 100644 --- a/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py +++ b/src/neuroconv/datainterfaces/behavior/neuralynx/neuralynx_nvt_interface.py @@ -41,7 +41,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True): super().__init__(file_path=file_path) - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 data = read_data(self.file_path) times = data["TimeStamp"] / 1000000 # Neuralynx stores times in microseconds @@ -49,13 +49,13 @@ def get_original_timestamps(self) -> np.ndarray: return times - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 return self._timestamps - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray) -> None: + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray) -> None: # 
noqa: D102 self._timestamps = aligned_timestamps - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() metadata["NWBFile"].update(session_start_time=self.header["TimeCreated"]) @@ -67,7 +67,7 @@ def get_metadata(self) -> DeepDict: ) return metadata - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() if "Behavior" not in metadata_schema["properties"]: diff --git a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py index 713b21c98..774b63f59 100644 --- a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py @@ -19,7 +19,7 @@ class SLEAPInterface(BaseTemporalAlignmentInterface): info = "Interface for SLEAP pose estimation datasets." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .slp file (the output of sleap)" source_schema["properties"]["video_file_path"][ @@ -57,7 +57,7 @@ def __init__( self._timestamps = None super().__init__(file_path=file_path) - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 if self.video_file_path is None: raise ValueError( "Unable to fetch the original timestamps from the video! 
" @@ -65,11 +65,11 @@ def get_original_timestamps(self) -> np.ndarray: ) return np.array(extract_timestamps(self.video_file_path)) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 timestamps = self._timestamps if self._timestamps is not None else self.get_original_timestamps() return timestamps - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self._timestamps = aligned_timestamps def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/behavior/video/video_utils.py b/src/neuroconv/datainterfaces/behavior/video/video_utils.py index fe817a3b2..429bacfc5 100644 --- a/src/neuroconv/datainterfaces/behavior/video/video_utils.py +++ b/src/neuroconv/datainterfaces/behavior/video/video_utils.py @@ -77,7 +77,7 @@ def get_frame_shape(self) -> Tuple: return frame.shape @property - def frame_count(self): + def frame_count(self): # noqa: D102 if self._frame_count is None: self._frame_count = self._video_frame_count() return self._frame_count @@ -90,7 +90,7 @@ def frame_count(self, val: int): ), "Cannot set manual frame_count beyond length of video (received {val})." 
self._frame_count = val - def get_video_frame_count(self): + def get_video_frame_count(self): # noqa: D102 return self.frame_count def _video_frame_count(self): @@ -100,7 +100,7 @@ def _video_frame_count(self): return int(self.vc.get(prop)) @staticmethod - def get_cv_attribute(attribute_name: str): + def get_cv_attribute(attribute_name: str): # noqa: D102 cv2 = get_package(package_name="cv2", installation_instructions="pip install opencv-python-headless") if int(cv2.__version__.split(".")[0]) < 3: # pragma: no cover @@ -108,7 +108,7 @@ def get_cv_attribute(attribute_name: str): return getattr(cv2, attribute_name) @property - def current_frame(self): + def current_frame(self): # noqa: D102 return self._current_frame @current_frame.setter @@ -137,10 +137,10 @@ def get_video_frame_dtype(self): if frame is not None: return frame.dtype - def release(self): + def release(self): # noqa: D102 self.vc.release() - def isOpened(self): + def isOpened(self): # noqa: D102 return self.vc.isOpened() def __iter__(self): diff --git a/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py b/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py index a544f9c27..5d26bed3a 100644 --- a/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/video/videodatainterface.py @@ -77,7 +77,7 @@ def __init__( self.metadata_key_name = metadata_key_name super().__init__(file_paths=file_paths) - def get_metadata_schema(self): + def get_metadata_schema(self): # noqa: D102 metadata_schema = super().get_metadata_schema() image_series_metadata_schema = get_schema_from_hdmf_class(ImageSeries) # TODO: in future PR, add 'exclude' option to get_schema_from_hdmf_class to bypass this popping @@ -93,7 +93,7 @@ def get_metadata_schema(self): ) return metadata_schema - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() behavior_metadata = { self.metadata_key_name: [ @@ -256,7 +256,7 @@ def 
set_aligned_segment_starting_times(self, aligned_segment_starting_times: lis else: self._segment_starting_times = aligned_segment_starting_times - def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("The `align_by_interpolation` method has not been developed for this interface yet.") def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py b/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py index 97f89abca..c5ca9d79e 100644 --- a/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/alphaomega/alphaomegadatainterface.py @@ -16,7 +16,7 @@ class AlphaOmegaRecordingInterface(BaseRecordingExtractorInterface): stream_id = "RAW" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Path to the folder of .mpx files." 
return source_schema @@ -41,7 +41,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True, es_key: str """ super().__init__(folder_path=folder_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() annotation = self.recording_extractor.neo_reader.raw_annotations metadata["NWBFile"].update(session_start_time=annotation["blocks"][0]["rec_datetime"]) diff --git a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py index ba6adf4a1..86ed11b99 100644 --- a/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/axona/axonadatainterface.py @@ -25,7 +25,7 @@ class AxonaRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Axona recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to .bin file." 
return source_schema @@ -55,7 +55,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele tetrode_id = self.recording_extractor.get_property("tetrode_id") self.recording_extractor.set_channel_groups(tetrode_id) - def extract_nwb_file_metadata(self) -> dict: + def extract_nwb_file_metadata(self) -> dict: # noqa: D102 raw_annotations = self.recording_extractor.neo_reader.raw_annotations session_start_time = raw_annotations["blocks"][0]["segments"][0]["rec_datetime"] session_description = self.metadata_in_set_file["comments"] @@ -72,7 +72,7 @@ def extract_nwb_file_metadata(self) -> dict: return nwbfile_metadata - def extract_ecephys_metadata(self) -> dict: + def extract_ecephys_metadata(self) -> dict: # noqa: D102 unique_elec_group_names = set(self.recording_extractor.get_channel_groups()) sw_version = self.metadata_in_set_file["sw_version"] description = f"Axona DacqUSB, sw_version={sw_version}" @@ -98,7 +98,7 @@ def extract_ecephys_metadata(self) -> dict: return ecephys_metadata - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() nwbfile_metadata = self.extract_nwb_file_metadata() @@ -118,7 +118,7 @@ class AxonaUnitRecordingInterface(AxonaRecordingInterface): info = "Interface for Axona recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 return dict( required=["file_path"], properties=dict( @@ -150,7 +150,7 @@ class AxonaLFPDataInterface(BaseLFPExtractorInterface): ExtractorName = "NumpyRecording" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 return dict( required=["file_path"], properties=dict(file_path=dict(type="string")), @@ -185,7 +185,7 @@ class AxonaPositionDataInterface(BaseDataInterface): info = "Interface for Axona position data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 return get_schema_from_method_signature(cls.__init__) def __init__(self, file_path: str): diff --git a/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py index 7ce6bb9e4..2084137d7 100644 --- a/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/baselfpextractorinterface.py @@ -18,7 +18,7 @@ class BaseLFPExtractorInterface(BaseRecordingExtractorInterface): def __init__(self, verbose: bool = True, es_key: str = "ElectricalSeriesLFP", **source_data): super().__init__(verbose=verbose, es_key=es_key, **source_data) - def add_to_nwbfile( + def add_to_nwbfile( # noqa: D102 self, nwbfile: Optional[NWBFile] = None, metadata: Optional[dict] = None, diff --git a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py index e2c747378..21cab1628 100644 --- a/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/baserecordingextractorinterface.py @@ -85,7 +85,7 @@ def get_metadata_schema(self) -> dict: ) return metadata_schema - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() channel_groups_array = self.recording_extractor.get_channel_groups() @@ -150,7 +150,7 @@ def get_timestamps(self) -> Union[np.ndarray, list[np.ndarray]]: for segment_index in range(self._number_of_segments) ] - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 assert ( self._number_of_segments == 1 ), "This recording has multiple segments; please use 'align_segment_timestamps' instead." 
@@ -182,7 +182,7 @@ def set_aligned_segment_timestamps(self, aligned_segment_timestamps: list[np.nda with_warning=False, ) - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 if self._number_of_segments == 1: self.set_aligned_timestamps(aligned_timestamps=self.get_timestamps() + aligned_starting_time) else: @@ -253,7 +253,7 @@ def has_probe(self) -> bool: """ return self.recording_extractor.has_probe() - def align_by_interpolation( + def align_by_interpolation( # noqa: D102 self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray, diff --git a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py index cd8396154..ffeb09074 100644 --- a/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/basesortingextractorinterface.py @@ -75,15 +75,15 @@ def get_metadata_schema(self) -> dict: ) return metadata_schema - def register_recording(self, recording_interface: BaseRecordingExtractorInterface): + def register_recording(self, recording_interface: BaseRecordingExtractorInterface): # noqa: D102 self.sorting_extractor.register_recording(recording=recording_interface.recording_extractor) - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError( "Unable to fetch original timestamps for a SortingInterface since it relies upon an attached recording." 
) - def get_timestamps(self) -> Union[np.ndarray, list[np.ndarray]]: + def get_timestamps(self) -> Union[np.ndarray, list[np.ndarray]]: # noqa: D102 if not self.sorting_extractor.has_recording(): raise NotImplementedError( "In order to align timestamps for a SortingInterface, it must have a recording " @@ -171,7 +171,7 @@ def set_aligned_segment_timestamps(self, aligned_segment_timestamps: list[np.nda with_warning=False, ) - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 if self.sorting_extractor.has_recording(): if self._number_of_segments == 1: self.set_aligned_timestamps(aligned_timestamps=self.get_timestamps() + aligned_starting_time) @@ -220,6 +220,19 @@ def set_aligned_segment_starting_times(self, aligned_segment_starting_times: lis sorting_segment._t_start = aligned_segment_starting_time def subset_sorting(self): + """ + Generate a subset of the sorting extractor based on spike timing data. + + This method identifies the earliest spike time across all units in the sorting extractor and creates a + subset of the sorting data up to 110% of the earliest spike time. If the sorting extractor is associated + with a recording, the subset is further limited by the total number of samples in the recording. + + Returns + ------- + SortingExtractor + A new `SortingExtractor` object representing the subset of the original sorting data, + sliced from the start frame to the calculated end frame. 
+ """ max_min_spike_time = max( [ min(x) diff --git a/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py b/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py index f12f3a93d..8f18c5ada 100644 --- a/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/biocam/biocamdatainterface.py @@ -15,7 +15,7 @@ class BiocamRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Biocam recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 schema = super().get_source_schema() schema["properties"]["file_path"]["description"] = "Path to the .bwr file." return schema diff --git a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py index e84719431..61faf5b49 100644 --- a/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/blackrock/blackrockdatainterface.py @@ -18,7 +18,7 @@ class BlackrockRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Blackrock recording data." @classmethod - def get_source_schema(cls): + def get_source_schema(cls): # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["block_index", "seg_index"]) source_schema["properties"]["file_path"][ "description" @@ -64,7 +64,7 @@ def __init__( self.stream_id = str(nsx_to_load) super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() # Open file and extract headers basic_header = _parse_nsx_basic_header(self.source_data["file_path"]) @@ -84,7 +84,7 @@ class BlackrockSortingInterface(BaseSortingExtractorInterface): info = "Interface for Blackrock sorting data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 metadata_schema = get_schema_from_method_signature(method=cls.__init__) metadata_schema["additionalProperties"] = True metadata_schema["properties"]["file_path"].update(description="Path to Blackrock .nev file.") @@ -105,7 +105,7 @@ def __init__(self, file_path: FilePath, sampling_frequency: Optional[float] = No """ super().__init__(file_path=file_path, sampling_frequency=sampling_frequency, verbose=verbose) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() # Open file and extract headers basic_header = _parse_nev_basic_header(self.source_data["file_path"]) diff --git a/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py b/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py index 46e825fb5..f3e847956 100644 --- a/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/cellexplorer/cellexplorerdatainterface.py @@ -289,7 +289,7 @@ class CellExplorerRecordingInterface(BaseRecordingExtractorInterface): binary_file_extension = "dat" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Folder containing the .session.mat file" return source_schema @@ -353,7 +353,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True, es_key: str recording_extractor=self.recording_extractor, folder_path=folder_path ) - def get_original_timestamps(self): + def get_original_timestamps(self): # noqa: D102 num_frames = self.recording_extractor.get_num_frames() sampling_frequency = self.recording_extractor.get_sampling_frequency() timestamps = np.arange(num_frames) / sampling_frequency @@ -385,7 +385,7 @@ class 
CellExplorerLFPInterface(CellExplorerRecordingInterface): def __init__(self, folder_path: DirectoryPath, verbose: bool = True, es_key: str = "ElectricalSeriesLFP"): super().__init__(folder_path, verbose, es_key) - def add_to_nwbfile( + def add_to_nwbfile( # noqa: D102 self, nwbfile: NWBFile, metadata: Optional[dict] = None, @@ -518,6 +518,33 @@ def __init__(self, file_path: FilePath, verbose: bool = True): ) def generate_recording_with_channel_metadata(self): + """ + Generate a dummy recording extractor with channel metadata from session data. + + This method reads session data from a `.session.mat` file (if available) and generates a dummy recording + extractor. The recording extractor is then populated with channel metadata extracted from the session file. + + Returns + ------- + NumpyRecording + A `NumpyRecording` object representing the dummy recording extractor, containing the channel metadata. + + Notes + ----- + - The method reads the `.session.mat` file using `pymatreader` and extracts `extracellular` data. + - It creates a dummy recording extractor using `spikeinterface.core.numpyextractors.NumpyRecording`. + - The generated extractor includes channel IDs and other relevant metadata such as number of channels, + number of samples, and sampling frequency. + - Channel metadata is added to the dummy extractor using the `add_channel_metadata_to_recoder` function. + - If the `.session.mat` file is not found, no extractor is returned. + + Warnings + -------- + Ensure that the `.session.mat` file is correctly located in the expected session path, or the method will not generate + a recording extractor. 
The expected session is self.session_path / f"{self.session_id}.session.mat" + + """ + + session_data_file_path = self.session_path / f"{self.session_id}.session.mat" if session_data_file_path.is_file(): from pymatreader import read_mat @@ -547,8 +574,8 @@ def generate_recording_with_channel_metadata(self): return dummy_recording_extractor - def get_metadata(self) -> dict: - metadata = super().get_metadata() + def get_metadata(self) -> dict: # noqa: D102 + metadata = super().get_metadata() session_path = Path(self.source_data["file_path"]).parent session_id = session_path.stem # TODO: add condition for retrieving ecephys metadata if no recording or lfp are included in conversion diff --git a/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py b/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py index 119e9f8d2..f5a0c8fcb 100644 --- a/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/edf/edfdatainterface.py @@ -18,7 +18,7 @@ class EDFRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for European Data Format (EDF) recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .edf file." 
return source_schema @@ -45,7 +45,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) self.edf_header = self.recording_extractor.neo_reader.edf_header - def extract_nwb_file_metadata(self) -> dict: + def extract_nwb_file_metadata(self) -> dict: # noqa: D102 nwbfile_metadata = dict( session_start_time=self.edf_header["startdate"], experimenter=self.edf_header["technician"], @@ -56,7 +56,7 @@ def extract_nwb_file_metadata(self) -> dict: return nwbfile_metadata - def extract_subject_metadata(self) -> dict: + def extract_subject_metadata(self) -> dict: # noqa: D102 subject_metadata = dict( subject_id=self.edf_header["patientcode"], date_of_birth=self.edf_header["birthdate"], @@ -67,7 +67,7 @@ def extract_subject_metadata(self) -> dict: return subject_metadata - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() nwbfile_metadata = self.extract_nwb_file_metadata() metadata["NWBFile"].update(nwbfile_metadata) diff --git a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py index 2d7c849f2..e2efab034 100644 --- a/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/intan/intandatainterface.py @@ -20,7 +20,7 @@ class IntanRecordingInterface(BaseRecordingExtractorInterface): stream_id = "0" # This are the amplifier channels, corresponding to the stream_name 'RHD2000 amplifier channel' @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to either a .rhd or a .rhs file" return source_schema @@ -66,14 +66,14 @@ def __init__( super().__init__(**init_kwargs) - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) 
-> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() metadata_schema["properties"]["Ecephys"]["properties"].update( ElectricalSeriesRaw=get_schema_from_hdmf_class(ElectricalSeries) ) return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() ecephys_metadata = metadata["Ecephys"] diff --git a/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py b/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py index aafde42f0..1d70289ae 100644 --- a/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/kilosort/kilosortdatainterface.py @@ -11,7 +11,7 @@ class KiloSortSortingInterface(BaseSortingExtractorInterface): info = "Interface for KiloSort sorting data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -37,7 +37,7 @@ def __init__( """ super().__init__(folder_path=folder_path, keep_good_only=keep_good_only, verbose=verbose) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() # See Kilosort save_to_phy() docstring for more info on these fields: https://github.com/MouseLand/Kilosort/blob/main/kilosort/io.py # Or see phy documentation: https://github.com/cortex-lab/phy/blob/master/phy/apps/base.py diff --git a/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py b/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py index 11902f81b..dc447fc0f 100644 --- a/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/maxwell/maxonedatainterface.py @@ -85,7 +85,7 @@ def __init__( super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> 
dict: # noqa: D102 metadata = super().get_metadata() maxwell_version = self.recording_extractor.neo_reader.raw_annotations["blocks"][0]["maxwell_version"] diff --git a/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py b/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py index ff8e82139..683bbedfb 100644 --- a/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/mcsraw/mcsrawdatainterface.py @@ -15,7 +15,7 @@ class MCSRawRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for MCSRaw recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .raw file." return source_schema diff --git a/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py b/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py index 7a82025ca..fd2f739af 100644 --- a/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/mearec/mearecdatainterface.py @@ -18,7 +18,7 @@ class MEArecRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for MEArec recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the MEArec .h5 file." 
return source_schema @@ -37,7 +37,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele """ super().__init__(file_path=file_path, verbose=verbose, es_key=es_key) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() # TODO: improve ProbeInterface integration in our writing procedures diff --git a/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py index 446c06302..f50f1dcc0 100644 --- a/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/neuralynx/neuralynxdatainterface.py @@ -18,14 +18,14 @@ class NeuralynxRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Neuralynx recording data." @classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import NeuralynxRecordingExtractor stream_names, _ = NeuralynxRecordingExtractor.get_streams(folder_path=folder_path) return stream_names @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -71,7 +71,7 @@ def __init__( if value.dtype == object or value.dtype == np.bool_: self.recording_extractor.set_property(key, np.asarray(value, dtype=str)) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 neo_metadata = extract_neo_header_metadata(self.recording_extractor.neo_reader) # remove filter related entries already covered by `add_recording_extractor_properties` diff --git a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py index 
d68532a94..085b5cb5c 100644 --- a/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/neuroscope/neuroscopedatainterface.py @@ -102,7 +102,7 @@ class NeuroScopeRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for converting NeuroScope recording data." @classmethod - def get_source_schema(self) -> dict: + def get_source_schema(self) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to .dat file." return source_schema @@ -162,7 +162,7 @@ def __init__( recording_extractor=self.recording_extractor, xml_file_path=xml_file_path ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 session_path = Path(self.source_data["file_path"]).parent session_id = session_path.stem xml_file_path = self.source_data.get("xml_file_path", str(session_path / f"{session_id}.xml")) @@ -173,7 +173,7 @@ def get_metadata(self) -> dict: metadata["NWBFile"]["session_start_time"] = session_start_time return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 # TODO: add generic method for aliasing from NeuroConv signature to SI init new_recording = self.get_extractor()(file_path=self.source_data["file_path"]) if self._number_of_segments == 1: @@ -195,7 +195,7 @@ class NeuroScopeLFPInterface(BaseLFPExtractorInterface): ExtractorName = "NeuroScopeRecordingExtractor" @classmethod - def get_source_schema(self) -> dict: + def get_source_schema(self) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to .lfp or .eeg file." 
return source_schema @@ -236,7 +236,7 @@ def __init__( recording_extractor=self.recording_extractor, xml_file_path=xml_file_path ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 session_path = Path(self.source_data["file_path"]).parent session_id = session_path.stem xml_file_path = self.source_data.get("xml_file_path", str(session_path / f"{session_id}.xml")) @@ -253,7 +253,7 @@ class NeuroScopeSortingInterface(BaseSortingExtractorInterface): info = "Interface for converting NeuroScope recording data." @classmethod - def get_source_schema(self) -> dict: + def get_source_schema(self) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Path to folder containing .res and .clu files." source_schema["properties"]["keep_mua_units"][ @@ -300,7 +300,7 @@ def __init__( verbose=verbose, ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() session_path = Path(self.source_data["folder_path"]) session_id = session_path.stem diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py index 371b96f94..c012a10f4 100644 --- a/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephysbinarydatainterface.py @@ -20,7 +20,7 @@ class OpenEphysBinaryRecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "OpenEphysBinaryRecordingExtractor" @classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import OpenEphysBinaryRecordingExtractor stream_names, _ = OpenEphysBinaryRecordingExtractor.get_streams(folder_path=folder_path) @@ -86,7 +86,7 @@ def __init__( if stub_test: 
self.subset_channels = [0, 1] - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ._openephys_utils import _get_session_start_time metadata = super().get_metadata() diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py index 81b84c36c..fe6df75c1 100644 --- a/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephysdatainterface.py @@ -18,7 +18,7 @@ class OpenEphysRecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "OpenEphysBinaryRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -26,7 +26,7 @@ def get_source_schema(cls) -> dict: return source_schema @classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 if any(Path(folder_path).rglob("*.continuous")): return OpenEphysLegacyRecordingInterface.get_stream_names(folder_path=folder_path) elif any(Path(folder_path).rglob("*.dat")): diff --git a/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py b/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py index b3392d2db..2272ef753 100644 --- a/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/openephys/openephyslegacydatainterface.py @@ -19,7 +19,7 @@ class OpenEphysLegacyRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for converting legacy OpenEphys recording data." 
@classmethod - def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: + def get_stream_names(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import OpenEphysLegacyRecordingExtractor stream_names, _ = OpenEphysLegacyRecordingExtractor.get_streams(folder_path=folder_path) @@ -77,7 +77,7 @@ def __init__( folder_path=folder_path, stream_name=stream_name, block_index=block_index, verbose=verbose, es_key=es_key ) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() neo_reader = self.recording_extractor.neo_reader diff --git a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py index 157fef2e5..8ba9dd593 100644 --- a/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/phy/phydatainterface.py @@ -16,7 +16,7 @@ class PhySortingInterface(BaseSortingExtractorInterface): info = "Interface for Phy sorting data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["exclude_cluster_groups"]["items"] = dict(type="string") source_schema["properties"]["folder_path"][ @@ -44,7 +44,7 @@ def __init__( """ super().__init__(folder_path=folder_path, exclude_cluster_groups=exclude_cluster_groups, verbose=verbose) - def get_metadata(self): + def get_metadata(self): # noqa: D102 metadata = super().get_metadata() # See Kilosort save_to_phy() docstring for more info on these fields: https://github.com/MouseLand/Kilosort/blob/main/kilosort/io.py # Or see phy documentation: https://github.com/cortex-lab/phy/blob/master/phy/apps/base.py diff --git a/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py b/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py index d1dcc4d45..40a4db489 100644 --- a/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/plexon/plexondatainterface.py @@ -19,7 +19,7 @@ class PlexonRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Plexon recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .plx file." return source_schema @@ -52,7 +52,7 @@ def __init__( super().__init__(file_path=file_path, verbose=verbose, es_key=es_key, stream_name=stream_name) - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() neo_reader = self.recording_extractor.neo_reader @@ -77,7 +77,7 @@ class Plexon2RecordingInterface(BaseRecordingExtractorInterface): info = "Interface for Plexon2 recording data." 
@classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the .pl2 file." return source_schema @@ -118,7 +118,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True, es_key: str = "Ele es_key=es_key, ) - def get_metadata(self) -> DeepDict: + def get_metadata(self) -> DeepDict: # noqa: D102 metadata = super().get_metadata() neo_reader = self.recording_extractor.neo_reader @@ -142,7 +142,7 @@ class PlexonSortingInterface(BaseSortingExtractorInterface): info = "Interface for Plexon sorting data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to the plexon spiking data (.plx file)." return source_schema @@ -161,7 +161,7 @@ def __init__(self, file_path: FilePath, verbose: bool = True): """ super().__init__(file_path=file_path, verbose=verbose) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() neo_reader = self.sorting_extractor.neo_reader diff --git a/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py b/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py index ccd98a369..d1a924306 100644 --- a/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spike2/spike2datainterface.py @@ -28,7 +28,7 @@ class Spike2RecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "CedRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["smrx_channel_ids"]) source_schema.update(additionalProperties=True) 
source_schema["properties"]["file_path"].update(description="Path to .smrx file.") diff --git a/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py index b8b483dd0..d732accaa 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikegadgets/spikegadgetsdatainterface.py @@ -17,7 +17,7 @@ class SpikeGadgetsRecordingInterface(BaseRecordingExtractorInterface): info = "Interface for SpikeGadgets recording data." @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_json_schema_from_method_signature(cls, exclude=["source_data"]) source_schema["properties"]["file_path"].update(description="Path to SpikeGadgets (.rec) file.") return source_schema diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py index 6aeb36cec..2fccdb8ce 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxconverter.py @@ -22,13 +22,13 @@ class SpikeGLXConverterPipe(ConverterPipe): info = "Converter for multi-stream SpikeGLX recording data." @classmethod - def get_source_schema(cls): + def get_source_schema(cls): # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["streams"]) source_schema["properties"]["folder_path"]["description"] = "Path to the folder containing SpikeGLX streams." 
return source_schema @classmethod - def get_streams(cls, folder_path: DirectoryPath) -> list[str]: + def get_streams(cls, folder_path: DirectoryPath) -> list[str]: # noqa: D102 from spikeinterface.extractors import SpikeGLXRecordingExtractor return SpikeGLXRecordingExtractor.get_streams(folder_path=folder_path)[0] @@ -86,7 +86,7 @@ def __init__( super().__init__(data_interfaces=data_interfaces, verbose=verbose) - def get_conversion_options_schema(self) -> dict: + def get_conversion_options_schema(self) -> dict: # noqa: D102 conversion_options_schema = super().get_conversion_options_schema() conversion_options_schema["properties"].update( {name: interface.get_conversion_options_schema() for name, interface in self.data_interface_objects.items()} diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py index c15516431..add230fa8 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxdatainterface.py @@ -37,7 +37,7 @@ class SpikeGLXRecordingInterface(BaseRecordingExtractorInterface): ExtractorName = "SpikeGLXRecordingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_json_schema_from_method_signature(method=cls.__init__, exclude=["x_pitch", "y_pitch"]) source_schema["properties"]["file_path"]["description"] = "Path to SpikeGLX ap.bin or lf.bin file." 
return source_schema @@ -90,7 +90,7 @@ def __init__( # Set electrodes properties add_recording_extractor_properties(self.recording_extractor) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() session_start_time = get_session_start_time(self.meta) if session_start_time: @@ -130,7 +130,7 @@ def get_metadata(self) -> dict: return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 new_recording = self.get_extractor()( folder_path=self.folder_path, stream_id=self.stream_id, diff --git a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py index fab9e5b5f..45afa73f8 100644 --- a/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py +++ b/src/neuroconv/datainterfaces/ecephys/spikeglx/spikeglxnidqinterface.py @@ -21,7 +21,7 @@ class SpikeGLXNIDQInterface(BaseRecordingExtractorInterface): stream_id = "nidq" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["x_pitch", "y_pitch"]) source_schema["properties"]["file_path"]["description"] = "Path to SpikeGLX .nidq file." 
return source_schema @@ -74,7 +74,7 @@ def __init__( ) self.meta = self.recording_extractor.neo_reader.signals_info_dict[(0, "nidq")]["meta"] - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() session_start_time = get_session_start_time(self.meta) diff --git a/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py b/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py index 535381466..4386a310e 100644 --- a/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py +++ b/src/neuroconv/datainterfaces/icephys/abf/abfdatainterface.py @@ -35,7 +35,7 @@ class AbfInterface(BaseIcephysInterface): ExtractorName = "AxonIO" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102 source_schema = super().get_source_schema() source_schema["properties"]["file_paths"] = dict( type="array", @@ -76,7 +76,7 @@ def __init__( icephys_metadata_file_path=icephys_metadata_file_path, ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ....tools.neo import get_number_of_electrodes, get_number_of_segments metadata = super().get_metadata() @@ -158,7 +158,7 @@ def get_metadata(self) -> dict: return metadata - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 for reader in self.readers_list: number_of_segments = reader.header["nb_segment"][0] for segment_index in range(number_of_segments): diff --git a/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py b/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py index f8bad53d6..e18c8f8eb 100644 --- a/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py +++ b/src/neuroconv/datainterfaces/icephys/baseicephysinterface.py @@ -21,7 +21,7 @@ class BaseIcephysInterface(BaseExtractorInterface): ExtractorModuleName = "neo" @classmethod - def get_source_schema(cls) -> dict: + 
def get_source_schema(cls) -> dict: # noqa: D102 source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=[]) return source_schema @@ -54,14 +54,14 @@ def __init__(self, file_paths: list[FilePath]): self._timestamps = None - def get_metadata_schema(self) -> dict: + def get_metadata_schema(self) -> dict: # noqa: D102 metadata_schema = super().get_metadata_schema() if self.DandiIcephysMetadata is not None: metadata_schema["properties"]["ndx-dandi-icephys"] = get_schema_from_hdmf_class(self.DandiIcephysMetadata) metadata_schema["properties"]["Icephys"] = get_metadata_schema_for_icephys() return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ...tools.neo import get_number_of_electrodes metadata = super().get_metadata() @@ -74,19 +74,19 @@ def get_metadata(self) -> dict: ) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError("Icephys interfaces do not yet support timestamps.") - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 raise NotImplementedError("Icephys interfaces do not yet support timestamps.") - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("Icephys interfaces do not yet support timestamps.") - def set_aligned_starting_time(self, aligned_starting_time: float): + def set_aligned_starting_time(self, aligned_starting_time: float): # noqa: D102 raise NotImplementedError("This icephys interface has not specified the method for aligning starting time.") - def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): + def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray): # noqa: D102 raise NotImplementedError("Icephys interfaces do not 
yet support timestamps.") def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py index 5125af3cc..b7ae693a7 100644 --- a/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/baseimagingextractorinterface.py @@ -48,6 +48,17 @@ def __init__( def get_metadata_schema( self, photon_series_type: Optional[Literal["OnePhotonSeries", "TwoPhotonSeries"]] = None ) -> dict: + """ + Retrieve the metadata schema for the optical physiology (Ophys) data, with optional handling of photon series type. + + Parameters + ---------- + photon_series_type : {"OnePhotonSeries", "TwoPhotonSeries"}, optional + The type of photon series to include in the schema. If None, the value from the instance is used. + This argument is deprecated and will be removed in a future version. Set `photon_series_type` during + the initialization of the `BaseImagingExtractorInterface` instance. + + """ if photon_series_type is not None: warnings.warn( @@ -102,6 +113,16 @@ def get_metadata_schema( def get_metadata( self, photon_series_type: Optional[Literal["OnePhotonSeries", "TwoPhotonSeries"]] = None ) -> DeepDict: + """ + Retrieve the metadata for the imaging data, with optional handling of photon series type. + + Parameters + ---------- + photon_series_type : {"OnePhotonSeries", "TwoPhotonSeries"}, optional + The type of photon series to include in the metadata. If None, the value from the instance is used. + This argument is deprecated and will be removed in a future version. Instead, set `photon_series_type` + during the initialization of the `BaseImagingExtractorInterface` instance. 
+ """ if photon_series_type is not None: warnings.warn( @@ -127,14 +148,14 @@ def get_metadata( two_photon_series["rate"] = float(two_photon_series["rate"]) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 reinitialized_extractor = self.get_extractor()(**self.extractor_kwargs) return reinitialized_extractor.frame_to_time(frames=np.arange(stop=reinitialized_extractor.get_num_frames())) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 return self.imaging_extractor.frame_to_time(frames=np.arange(stop=self.imaging_extractor.get_num_frames())) - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self.imaging_extractor.set_times(times=aligned_timestamps) def add_to_nwbfile( @@ -147,6 +168,28 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add imaging data to the NWBFile, including options for photon series and stubbing. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the imaging data will be added. + metadata : dict, optional + Metadata dictionary containing information about the imaging data. If None, default metadata is used. + photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional + The type of photon series to be added to the NWBFile. Default is "TwoPhotonSeries". + photon_series_index : int, optional + The index of the photon series in the NWBFile, used to differentiate between multiple series, by default 0. + parent_container : {"acquisition", "processing/ophys"}, optional + The container in the NWBFile where the data will be added, by default "acquisition". + stub_test : bool, optional + If True, only a subset of the imaging data (up to `stub_frames`) will be added for testing purposes, + by default False. 
+ stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + + """ from ...tools.roiextractors import add_imaging_to_nwbfile if stub_test: diff --git a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py index 0f2e41bb9..afb59830f 100644 --- a/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py +++ b/src/neuroconv/datainterfaces/ophys/basesegmentationextractorinterface.py @@ -24,6 +24,27 @@ def __init__(self, verbose: bool = False, **source_data): self.segmentation_extractor = self.get_extractor()(**source_data) def get_metadata_schema(self) -> dict: + """ + Generate the metadata schema for Ophys data, updating required fields and properties. + + This method builds upon the base schema and customizes it for Ophys-specific metadata, including required + components such as devices, fluorescence data, imaging planes, and two-photon series. It also applies + temporary schema adjustments to handle certain use cases until a centralized metadata schema definition + is available. + + Returns + ------- + dict + A dictionary representing the updated Ophys metadata schema. + + Notes + ----- + - Ensures that `Device` and `ImageSegmentation` are marked as required. + - Updates various properties, including ensuring arrays for `ImagingPlane` and `TwoPhotonSeries`. + - Adjusts the schema for `Fluorescence`, including required fields and pattern properties. + - Adds schema definitions for `DfOverF`, segmentation images, and summary images. + - Applies temporary fixes, such as setting additional properties for `ImageSegmentation` to True. 
+ """ metadata_schema = super().get_metadata_schema() metadata_schema["required"] = ["Ophys"] metadata_schema["properties"]["Ophys"] = get_base_schema() @@ -89,23 +110,23 @@ def get_metadata_schema(self) -> dict: fill_defaults(metadata_schema, self.get_metadata()) return metadata_schema - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 from ...tools.roiextractors import get_nwb_segmentation_metadata metadata = super().get_metadata() metadata.update(get_nwb_segmentation_metadata(self.segmentation_extractor)) return metadata - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102 reinitialized_extractor = self.get_extractor()(**self.source_data) return reinitialized_extractor.frame_to_time(frames=np.arange(stop=reinitialized_extractor.get_num_frames())) - def get_timestamps(self) -> np.ndarray: + def get_timestamps(self) -> np.ndarray: # noqa: D102 return self.segmentation_extractor.frame_to_time( frames=np.arange(stop=self.segmentation_extractor.get_num_frames()) ) - def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): # noqa: D102 self.segmentation_extractor.set_times(times=aligned_timestamps) def add_to_nwbfile( diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py index 86e8edc1f..6686c4aaa 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffconverter.py @@ -24,6 +24,7 @@ class BrukerTiffMultiPlaneConverter(NWBConverter): @classmethod def get_source_schema(cls): + """Get the source schema for the Bruker imaging interface.""" source_schema = get_schema_from_method_signature(cls) source_schema["properties"]["folder_path"][ "description" @@ -31,6 +32,7 @@ def get_source_schema(cls): return source_schema def 
get_conversion_options_schema(self): + """get the conversion options schema.""" interface_name = list(self.data_interface_objects.keys())[0] return self.data_interface_objects[interface_name].get_conversion_options_schema() @@ -91,6 +93,20 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add data from multiple data interfaces to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the data will be added. + metadata : dict + Metadata dictionary containing information to describe the data being added to the NWB file. + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) will be added for testing purposes. Default is False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True. Default is 100. + """ for photon_series_index, (interface_name, data_interface) in enumerate(self.data_interface_objects.items()): data_interface.add_to_nwbfile( nwbfile=nwbfile, @@ -109,6 +125,24 @@ def run_conversion( stub_test: bool = False, stub_frames: int = 100, ) -> None: + """ + Run the conversion process for the instantiated data interfaces and add data to the NWB file. + + Parameters + ---------- + nwbfile_path : FilePath, optional + Path where the NWB file will be written. If None, the file will be handled in-memory. + nwbfile : NWBFile, optional + An in-memory NWBFile object. If None, a new NWBFile object will be created. + metadata : dict, optional + Metadata dictionary for describing the NWB file. If None, it will be auto-generated using the `get_metadata()` method. + overwrite : bool, optional + If True, overwrites the existing NWB file at `nwbfile_path`. If False, appends to the file (default is False). + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) will be added for testing purposes, by default False. 
+ stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + """ if metadata is None: metadata = self.get_metadata() @@ -138,9 +172,11 @@ class BrukerTiffSinglePlaneConverter(NWBConverter): @classmethod def get_source_schema(cls): + """Get the source schema for the Bruker imaging interface.""" return get_schema_from_method_signature(cls) def get_conversion_options_schema(self): + """Get the conversion options schema.""" interface_name = list(self.data_interface_objects.keys())[0] return self.data_interface_objects[interface_name].get_conversion_options_schema() @@ -187,6 +223,21 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add data from all instantiated data interfaces to the provided NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the data will be added. + metadata : dict + Metadata dictionary containing information about the data to be added. + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + """ for photon_series_index, (interface_name, data_interface) in enumerate(self.data_interface_objects.items()): data_interface.add_to_nwbfile( nwbfile=nwbfile, @@ -205,6 +256,24 @@ def run_conversion( stub_test: bool = False, stub_frames: int = 100, ) -> None: + """ + Run the NWB conversion process for all instantiated data interfaces. + + Parameters + ---------- + nwbfile_path : FilePath, optional + The file path where the NWB file will be written. If None, the file is handled in-memory. + nwbfile : NWBFile, optional + An existing in-memory NWBFile object. If None, a new NWBFile object will be created. + metadata : dict, optional + Metadata dictionary used to create or validate the NWBFile. 
If None, metadata is automatically generated. + overwrite : bool, optional + If True, the NWBFile at `nwbfile_path` is overwritten if it exists. If False (default), data is appended. + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) is used for testing purposes. By default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True. By default 100. + """ if metadata is None: metadata = self.get_metadata() diff --git a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py index 9742711e1..f7e7bee1b 100644 --- a/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/brukertiff/brukertiffdatainterface.py @@ -16,6 +16,7 @@ class BrukerTiffMultiPlaneImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Bruker TIFF imaging data.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -28,6 +29,23 @@ def get_streams( folder_path: DirectoryPath, plane_separation_type: Literal["contiguous", "disjoint"] = None, ) -> dict: + """ + Get streams for the Bruker TIFF imaging data. + + Parameters + ---------- + folder_path : DirectoryPath + Path to the folder containing the Bruker TIFF files. + plane_separation_type : Literal["contiguous", "disjoint"], optional + Type of plane separation to apply. If "contiguous", only the first plane stream for each channel is retained. + + Returns + ------- + dict + A dictionary containing the streams for the Bruker TIFF imaging data. The dictionary has the following keys: + - "channel_streams": List of channel stream names. + - "plane_streams": Dictionary where keys are channel stream names and values are lists of plane streams. 
+ """ from roiextractors import BrukerTiffMultiPlaneImagingExtractor streams = BrukerTiffMultiPlaneImagingExtractor.get_streams(folder_path=folder_path) @@ -117,6 +135,7 @@ def _determine_position_current(self) -> list[float]: return position_values def get_metadata(self) -> DeepDict: + """get metadata for the Bruker TIFF imaging data.""" metadata = super().get_metadata() xml_metadata = self.imaging_extractor.xml_metadata @@ -183,6 +202,7 @@ class BrukerTiffSinglePlaneImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Bruker TIFF imaging data.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -191,6 +211,19 @@ def get_source_schema(cls) -> dict: @classmethod def get_streams(cls, folder_path: DirectoryPath) -> dict: + """ + Get streams for the Bruker TIFF imaging data. + + Parameters + ---------- + folder_path : DirectoryPath + Path to the folder containing the Bruker TIFF files. + + Returns + ------- + dict + A dictionary containing the streams extracted from the Bruker TIFF files. 
+ """ from roiextractors import BrukerTiffMultiPlaneImagingExtractor streams = BrukerTiffMultiPlaneImagingExtractor.get_streams(folder_path=folder_path) @@ -263,6 +296,7 @@ def _determine_position_current(self) -> list[float]: return position_values def get_metadata(self) -> DeepDict: + """get metadata for the Bruker TIFF imaging data.""" metadata = super().get_metadata() xml_metadata = self.imaging_extractor.xml_metadata diff --git a/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py b/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py index 386c03d3c..802645139 100644 --- a/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/caiman/caimandatainterface.py @@ -12,6 +12,7 @@ class CaimanSegmentationInterface(BaseSegmentationExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Caiman segmentation interface.""" source_metadata = super().get_source_schema() source_metadata["properties"]["file_path"]["description"] = "Path to .hdf5 file." return source_metadata diff --git a/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py index 17cbc95ed..5373b7004 100644 --- a/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/micromanagertiff/micromanagertiffdatainterface.py @@ -13,6 +13,7 @@ class MicroManagerTiffImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """get the source schema for the Micro-Manager TIFF imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "The folder containing the OME-TIF image files." 
@@ -37,6 +38,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True): self.imaging_extractor._channel_names = [f"OpticalChannel{channel_name}"] def get_metadata(self) -> dict: + """Get metadata for the Micro-Manager TIFF imaging data.""" metadata = super().get_metadata() micromanager_metadata = self.imaging_extractor.micromanager_metadata diff --git a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py index cfee8f027..38b620738 100644 --- a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py +++ b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeconverter.py @@ -19,6 +19,7 @@ class MiniscopeConverter(NWBConverter): @classmethod def get_source_schema(cls): + """Get the source schema for the Miniscope converter.""" source_schema = get_schema_from_method_signature(cls) source_schema["properties"]["folder_path"]["description"] = "The path to the main Miniscope folder." return source_schema @@ -61,6 +62,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True): ) def get_conversion_options_schema(self) -> dict: + """get the conversion options schema.""" return self.data_interface_objects["MiniscopeImaging"].get_conversion_options_schema() def add_to_nwbfile( @@ -70,6 +72,21 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add Miniscope imaging and behavioral camera data to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the imaging and behavioral data will be added. + metadata : dict + Metadata dictionary containing information about the imaging and behavioral recordings. + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. 
+ """ self.data_interface_objects["MiniscopeImaging"].add_to_nwbfile( nwbfile=nwbfile, metadata=metadata, @@ -90,6 +107,25 @@ def run_conversion( stub_test: bool = False, stub_frames: int = 100, ) -> None: + """ + Run the NWB conversion process for the instantiated data interfaces. + + Parameters + ---------- + nwbfile_path : str, optional + Path where the NWBFile will be written. If None, the file is handled in-memory. + nwbfile : NWBFile, optional + An in-memory NWBFile object to be written to the file. If None, a new NWBFile is created. + metadata : dict, optional + Metadata dictionary with information to create the NWBFile. If None, metadata is auto-generated. + overwrite : bool, optional + If True, overwrites the existing NWBFile at `nwbfile_path`. If False (default), data is appended. + stub_test : bool, optional + If True, only a subset of the data (up to `stub_frames`) is written for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. 
+ """ if metadata is None: metadata = self.get_metadata() diff --git a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py index 64a180c46..659cd8d63 100644 --- a/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/miniscope/miniscopeimagingdatainterface.py @@ -19,6 +19,7 @@ class MiniscopeImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Miniscope imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"][ "description" @@ -49,6 +50,7 @@ def __init__(self, folder_path: DirectoryPath): self.photon_series_type = "OnePhotonSeries" def get_metadata(self) -> DeepDict: + """Get metadata for the Miniscope imaging data.""" from ....tools.roiextractors import get_nwb_imaging_metadata metadata = super().get_metadata() @@ -74,11 +76,12 @@ def get_metadata(self) -> DeepDict: return metadata def get_metadata_schema(self) -> dict: + """Get the metadata schema for the Miniscope imaging data.""" metadata_schema = super().get_metadata_schema() metadata_schema["properties"]["Ophys"]["definitions"]["Device"]["additionalProperties"] = True return metadata_schema - def get_original_timestamps(self) -> np.ndarray: + def get_original_timestamps(self) -> np.ndarray: # noqa: D102, should inherit docstring from base class from ndx_miniscope.utils import get_timestamps timestamps = get_timestamps(folder_path=self.source_data["folder_path"]) @@ -92,6 +95,23 @@ def add_to_nwbfile( stub_test: bool = False, stub_frames: int = 100, ): + """ + Add imaging data to the specified NWBFile, including device and photon series information. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the imaging data will be added. 
+ metadata : dict, optional + Metadata containing information about the imaging device and photon series. If None, default metadata is used. + photon_series_type : {"TwoPhotonSeries", "OnePhotonSeries"}, optional + The type of photon series to be added, either "TwoPhotonSeries" or "OnePhotonSeries", by default "OnePhotonSeries". + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include if `stub_test` is True, by default 100. + """ from ndx_miniscope.utils import add_miniscope_device from ....tools.roiextractors import add_photon_series_to_nwbfile diff --git a/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py b/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py index 554cc5aba..49e556d06 100644 --- a/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/sbx/sbxdatainterface.py @@ -37,6 +37,7 @@ def __init__( ) def get_metadata(self) -> dict: + """Get metadata for the Scanbox imaging data.""" metadata = super().get_metadata() metadata["Ophys"]["Device"][0]["description"] = "Scanbox imaging" return metadata diff --git a/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py b/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py index c74161e55..1afcb5ec5 100644 --- a/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py +++ b/src/neuroconv/datainterfaces/ophys/scanimage/scanimageimaginginterfaces.py @@ -28,12 +28,13 @@ class ScanImageImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the ScanImage imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to Tiff file." 
return source_schema @validate_call - def __new__( + def __new__( # noqa: D102 cls, file_path: FilePath, channel_name: Optional[str] = None, @@ -86,7 +87,7 @@ class ScanImageLegacyImagingInterface(BaseImagingExtractorInterface): ExtractorName = "ScanImageTiffImagingExtractor" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa: D102, should inherit docstring from the base class source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to Tiff file." return source_schema @@ -139,6 +140,7 @@ def __init__( super().__init__(file_path=file_path, fallback_sampling_frequency=fallback_sampling_frequency, verbose=verbose) def get_metadata(self) -> dict: + """Get metadata for the ScanImage imaging data.""" device_number = 0 # Imaging plane metadata is a list with metadata for each plane metadata = super().get_metadata() @@ -174,12 +176,13 @@ class ScanImageMultiFileImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the ScanImage multi-file imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["folder_path"]["description"] = "Path to the folder containing the TIFF files."
return source_schema @validate_call - def __new__( + def __new__( # noqa: D102 cls, folder_path: DirectoryPath, file_pattern: str, @@ -304,6 +307,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( @@ -421,6 +425,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( @@ -548,6 +553,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( @@ -677,6 +683,7 @@ def __init__( ) def get_metadata(self) -> dict: + """get metadata for the ScanImage imaging data""" metadata = super().get_metadata() extracted_session_start_time = datetime.datetime.strptime( diff --git a/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py b/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py index 056616ce5..fb0ade21b 100644 --- a/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/suite2p/suite2pdatainterface.py @@ -50,6 +50,7 @@ class Suite2pSegmentationInterface(BaseSegmentationExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the Suite2p segmentation interface.""" schema = super().get_source_schema() schema["properties"]["folder_path"][ "description" @@ -61,13 +62,13 @@ def get_source_schema(cls) -> dict: return schema @classmethod - def get_available_planes(cls, folder_path: DirectoryPath) -> dict: + def get_available_planes(cls, folder_path: DirectoryPath) -> dict: # noqa: D102 from roiextractors import Suite2pSegmentationExtractor return Suite2pSegmentationExtractor.get_available_planes(folder_path=folder_path) @classmethod - def 
get_available_channels(cls, folder_path: DirectoryPath) -> dict: + def get_available_channels(cls, folder_path: DirectoryPath) -> dict: # noqa: D102 from roiextractors import Suite2pSegmentationExtractor return Suite2pSegmentationExtractor.get_available_channels(folder_path=folder_path) @@ -113,6 +114,7 @@ def __init__( self.verbose = verbose def get_metadata(self) -> DeepDict: + """get metadata for the Suite2p segmentation data""" metadata = super().get_metadata() # No need to update the metadata links for the default plane segmentation name @@ -140,6 +142,40 @@ def add_to_nwbfile( iterator_options: Optional[dict] = None, compression_options: Optional[dict] = None, ): + """ + Add segmentation data to the specified NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWBFile object to which the segmentation data will be added. + metadata : dict, optional + Metadata containing information about the segmentation. If None, default metadata is used. + stub_test : bool, optional + If True, only a subset of the data (defined by `stub_frames`) will be added for testing purposes, + by default False. + stub_frames : int, optional + The number of frames to include in the subset if `stub_test` is True, by default 100. + include_roi_centroids : bool, optional + Whether to include the centroids of regions of interest (ROIs) in the data, by default True. + include_roi_acceptance : bool, optional + Whether to include acceptance status of ROIs, by default True. + mask_type : str, default: 'image' + There are three types of ROI masks in NWB, 'image', 'pixel', and 'voxel'. + + * 'image' masks have the same shape as the reference images the segmentation was applied to, and weight each pixel + by its contribution to the ROI (typically boolean, with 0 meaning 'not in the ROI'). + * 'pixel' masks are instead indexed by ROI, with the data at each index being the shape of the image by the number + of pixels in each ROI. 
+ * 'voxel' masks are instead indexed by ROI, with the data at each index being the shape of the volume by the number + of voxels in each ROI. + + Specify your choice among these options as mask_type='image', 'pixel', 'voxel', or None. + plane_segmentation_name : str, optional + The name of the plane segmentation object, by default None. + iterator_options : dict, optional + Additional options for iterating over the data, by default None. + """ super().add_to_nwbfile( nwbfile=nwbfile, metadata=metadata, diff --git a/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py b/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py index aa58f6ae4..f9198b105 100644 --- a/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/tdt_fp/tdtfiberphotometrydatainterface.py @@ -46,6 +46,7 @@ def __init__(self, folder_path: DirectoryPath, verbose: bool = True): import ndx_fiber_photometry # noqa: F401 def get_metadata(self) -> DeepDict: + """Get metadata for the TDTFiberPhotometryInterface.""" metadata = super().get_metadata() tdt_photometry = self.load(evtype=["scalars"]) # This evtype quickly loads info without loading all the data.
start_timestamp = tdt_photometry.info.start_date.timestamp() @@ -54,6 +55,7 @@ def get_metadata(self) -> DeepDict: return metadata def get_metadata_schema(self) -> dict: + """Get the metadata schema for the TDTFiberPhotometryInterface.""" metadata_schema = super().get_metadata_schema() return metadata_schema diff --git a/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py b/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py index 1eaa3b55e..ce98561de 100644 --- a/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py +++ b/src/neuroconv/datainterfaces/ophys/tiff/tiffdatainterface.py @@ -14,6 +14,7 @@ class TiffImagingInterface(BaseImagingExtractorInterface): @classmethod def get_source_schema(cls) -> dict: + """Get the source schema for the TIFF imaging interface.""" source_schema = super().get_source_schema() source_schema["properties"]["file_path"]["description"] = "Path to Tiff file." return source_schema diff --git a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py index 5f5b1107d..4d3ac827a 100644 --- a/src/neuroconv/datainterfaces/text/timeintervalsinterface.py +++ b/src/neuroconv/datainterfaces/text/timeintervalsinterface.py @@ -24,21 +24,25 @@ def __init__( verbose: bool = True, ): """ + Initialize the TimeIntervalsInterface. + Parameters ---------- file_path : FilePath + The path to the file containing time intervals data. read_kwargs : dict, optional - verbose : bool, default: True + Additional arguments for reading the file, by default None. + verbose : bool, optional + If True, provides verbose output, by default True.
""" read_kwargs = read_kwargs or dict() super().__init__(file_path=file_path) self.verbose = verbose - self._read_kwargs = read_kwargs self.dataframe = self._read_file(file_path, **read_kwargs) self.time_intervals = None - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa: D102 metadata = super().get_metadata() metadata["TimeIntervals"] = dict( trials=dict( @@ -50,22 +54,74 @@ def get_metadata(self) -> dict: return metadata def get_metadata_schema(self) -> dict: + """ + Get the metadata schema for the time intervals. + + Returns + ------- + dict + The schema dictionary for time intervals metadata. + """ fpath = Path(__file__).parent.parent.parent / "schemas" / "timeintervals_schema.json" return load_dict_from_file(fpath) def get_original_timestamps(self, column: str) -> np.ndarray: + """ + Get the original timestamps for a given column. + + Parameters + ---------- + column : str + The name of the column containing timestamps. + + Returns + ------- + np.ndarray + The original timestamps from the specified column. + + Raises + ------ + ValueError + If the column name does not end with '_time'. + """ if not column.endswith("_time"): raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") return self._read_file(**self.source_data, **self._read_kwargs)[column].values def get_timestamps(self, column: str) -> np.ndarray: + """ + Get the current timestamps for a given column. + + Parameters + ---------- + column : str + The name of the column containing timestamps. + + Returns + ------- + np.ndarray + The current timestamps from the specified column. + + Raises + ------ + ValueError + If the column name does not end with '_time'. 
+ """ if not column.endswith("_time"): raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") return self.dataframe[column].values def set_aligned_starting_time(self, aligned_starting_time: float): + """ + Align the starting time by shifting all timestamps by the given value. + + Parameters + ---------- + aligned_starting_time : float + The aligned starting time to shift all timestamps by. + """ timing_columns = [column for column in self.dataframe.columns if column.endswith("_time")] for column in timing_columns: @@ -74,6 +130,23 @@ def set_aligned_starting_time(self, aligned_starting_time: float): def set_aligned_timestamps( self, aligned_timestamps: np.ndarray, column: str, interpolate_other_columns: bool = False ): + """ + Set aligned timestamps for the given column and optionally interpolate other columns. + + Parameters + ---------- + aligned_timestamps : np.ndarray + The aligned timestamps to set for the given column. + column : str + The name of the column to update with the aligned timestamps. + interpolate_other_columns : bool, optional + If True, interpolate the timestamps in other columns, by default False. + + Raises + ------ + ValueError + If the column name does not end with '_time'. + """ if not column.endswith("_time"): raise ValueError("Timing columns on a TimeIntervals table need to end with '_time'!") @@ -96,6 +169,18 @@ def set_aligned_timestamps( ) def align_by_interpolation(self, unaligned_timestamps: np.ndarray, aligned_timestamps: np.ndarray, column: str): + """ + Align timestamps using linear interpolation. + + Parameters + ---------- + unaligned_timestamps : np.ndarray + The original unaligned timestamps that map to the aligned timestamps. + aligned_timestamps : np.ndarray + The target aligned timestamps corresponding to the unaligned timestamps. + column : str + The name of the column containing the timestamps to be aligned. 
+ """ current_timestamps = self.get_timestamps(column=column) assert ( current_timestamps[1] >= unaligned_timestamps[0] diff --git a/src/neuroconv/nwbconverter.py b/src/neuroconv/nwbconverter.py index 1f3e7c9f8..9da798dd0 100644 --- a/src/neuroconv/nwbconverter.py +++ b/src/neuroconv/nwbconverter.py @@ -166,7 +166,21 @@ def create_nwbfile(self, metadata: Optional[dict] = None, conversion_options: Op self.add_to_nwbfile(nwbfile=nwbfile, metadata=metadata, conversion_options=conversion_options) return nwbfile - def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optional[dict] = None) -> None: + def add_to_nwbfile(self, nwbfile: NWBFile, metadata, conversion_options: Optional[dict] = None): + """ + Add data from the instantiated data interfaces to the given NWBFile. + + Parameters + ---------- + nwbfile : NWBFile + The NWB file object to which the data from the data interfaces will be added. + metadata : dict + The metadata dictionary that contains information used to describe the data. + conversion_options : dict, optional + A dictionary containing conversion options for each interface, where non-default behavior is requested. + Each key corresponds to a data interface name, and the values are dictionaries with options for that interface. + By default, None. 
+ """ conversion_options = conversion_options or dict() for interface_name, data_interface in self.data_interface_objects.items(): data_interface.add_to_nwbfile( @@ -283,11 +297,11 @@ class ConverterPipe(NWBConverter): """Takes a list or dict of pre-initialized interfaces as arguments to build an NWBConverter class.""" @classmethod - def get_source_schema(cls) -> dict: + def get_source_schema(cls) -> dict: # noqa D102 raise NotImplementedError("Source data not available with previously initialized classes.") @classmethod - def validate_source(cls): + def validate_source(cls): # noqa D102 raise NotImplementedError("Source data not available with previously initialized classes.") def __init__(self, data_interfaces: Union[list[BaseDataInterface], dict[str, BaseDataInterface]], verbose=True): diff --git a/src/neuroconv/tools/hdmf.py b/src/neuroconv/tools/hdmf.py index 660971df5..d717e9023 100644 --- a/src/neuroconv/tools/hdmf.py +++ b/src/neuroconv/tools/hdmf.py @@ -47,9 +47,10 @@ def estimate_default_chunk_shape(chunk_mb: float, maxshape: tuple[int, ...], dty # TODO: move this to the core iterator in HDMF so it can be easily swapped out as well as run on its own @staticmethod - def estimate_default_buffer_shape( + def estimate_default_buffer_shape( # noqa: D102 buffer_gb: float, chunk_shape: tuple[int, ...], maxshape: tuple[int, ...], dtype: np.dtype ) -> tuple[int, ...]: + # TODO: Ad ddocstring to this once someone understands it better # Elevate any overflow warnings to trigger error. # This is usually an indicator of something going terribly wrong with the estimation calculations and should be # avoided at all costs. 
diff --git a/src/neuroconv/tools/path_expansion.py b/src/neuroconv/tools/path_expansion.py index 4ab839c0f..9a085c7ba 100644 --- a/src/neuroconv/tools/path_expansion.py +++ b/src/neuroconv/tools/path_expansion.py @@ -143,7 +143,7 @@ class LocalPathExpander(AbstractPathExpander): See https://neuroconv.readthedocs.io/en/main/user_guide/expand_path.html for more information. """ - def list_directory(self, base_directory: DirectoryPath) -> Iterable[FilePath]: # noqa: D101 + def list_directory(self, base_directory: DirectoryPath) -> Iterable[FilePath]: # noqa: D102 base_directory = Path(base_directory) assert base_directory.is_dir(), f"The specified 'base_directory' ({base_directory}) is not a directory!" return (str(path.relative_to(base_directory)) for path in base_directory.rglob("*")) diff --git a/src/neuroconv/tools/testing/mock_interfaces.py b/src/neuroconv/tools/testing/mock_interfaces.py index 04dc57250..d061855f1 100644 --- a/src/neuroconv/tools/testing/mock_interfaces.py +++ b/src/neuroconv/tools/testing/mock_interfaces.py @@ -30,37 +30,84 @@ class MockBehaviorEventInterface(BaseTemporalAlignmentInterface): @classmethod def get_source_schema(cls) -> dict: + """ + Get the schema for the data source, excluding the 'event_times' parameter. + + Returns + ------- + dict + The schema dictionary for the data source, including additional properties for flexibility. + """ source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["event_times"]) source_schema["additionalProperties"] = True return source_schema def __init__(self, event_times: Optional[ArrayType] = None): """ - Define event times for some behavior. + Initialize the interface with event times for behavior. Parameters ---------- event_times : list of floats, optional The event times to set as timestamps for this interface. - The default is the array [1.2, 2.3, 3.4] for similarity to the timescale of the MockSpikeGLXNIDQInterface.
+ The default is the array [1.2, 2.3, 3.4] to simulate a time series similar to the + MockSpikeGLXNIDQInterface. """ event_times = event_times or [1.2, 2.3, 3.4] self.event_times = np.array(event_times) self.original_event_times = np.array(event_times) # Make a copy of the initial loaded timestamps def get_original_timestamps(self) -> np.ndarray: + """ + Get the original event times before any alignment or transformation. + + Returns + ------- + np.ndarray + The original event times as a NumPy array. + """ return self.original_event_times def get_timestamps(self) -> np.ndarray: + """ + Get the current (possibly aligned) event times. + + Returns + ------- + np.ndarray + The current event times as a NumPy array, possibly modified after alignment. + """ return self.event_times def set_aligned_timestamps(self, aligned_timestamps: np.ndarray): + """ + Set the event times after alignment. + + Parameters + ---------- + aligned_timestamps : np.ndarray + The aligned event timestamps to update the internal event times. + """ self.event_times = aligned_timestamps def add_to_nwbfile(self, nwbfile: NWBFile, metadata: dict): + """ + Add the event times to an NWBFile as a DynamicTable. + + Parameters + ---------- + nwbfile : NWBFile + The NWB file to which the event times will be added. + metadata : dict + Metadata to describe the event times in the NWB file. + + Notes + ----- + This method creates a DynamicTable to store event times and adds it to the NWBFile's acquisition. 
+ """ table = DynamicTable(name="BehaviorEvents", description="Times of various classified behaviors.") table.add_column(name="event_time", description="Time of each event.") - for timestamp in self.get_timestamps(): # adding data by column gives error + for timestamp in self.get_timestamps(): table.add_row(event_time=timestamp) nwbfile.add_acquisition(table) @@ -74,6 +121,9 @@ class MockSpikeGLXNIDQInterface(SpikeGLXNIDQInterface): @classmethod def get_source_schema(cls) -> dict: + """ + Get the source schema for the mock SpikeGLX interface. + """ source_schema = get_schema_from_method_signature(method=cls.__init__, exclude=["ttl_times"]) source_schema["additionalProperties"] = True return source_schema @@ -157,6 +207,9 @@ def __init__( ) def get_metadata(self) -> dict: + """ + Returns the metadata dictionary for the current object. + """ metadata = super().get_metadata() session_start_time = datetime.now().astimezone() metadata["NWBFile"]["session_start_time"] = session_start_time @@ -266,6 +319,20 @@ def __init__( self.photon_series_type = photon_series_type def get_metadata(self, photon_series_type: Optional[Literal["OnePhotonSeries", "TwoPhotonSeries"]] = None) -> dict: + """ + Get the metadata for the imaging interface. + + Parameters + ---------- + photon_series_type : Literal["OnePhotonSeries", "TwoPhotonSeries"], optional + The type of photon series to include in the metadata. + If not specified, all photon series will be included. + + Returns + ------- + dict + The metadata for the imaging interface. 
+ """ session_start_time = datetime.now().astimezone() metadata = super().get_metadata(photon_series_type=photon_series_type) metadata["NWBFile"]["session_start_time"] = session_start_time @@ -336,7 +403,7 @@ def __init__( verbose=verbose, ) - def get_metadata(self) -> dict: + def get_metadata(self) -> dict: # noqa D102 session_start_time = datetime.now().astimezone() metadata = super().get_metadata() metadata["NWBFile"]["session_start_time"] = session_start_time diff --git a/src/neuroconv/utils/dict.py b/src/neuroconv/utils/dict.py index f0507b653..a6cef630a 100644 --- a/src/neuroconv/utils/dict.py +++ b/src/neuroconv/utils/dict.py @@ -209,12 +209,29 @@ class DeepDict(defaultdict): """A defaultdict of defaultdicts""" def __init__(self, *args: Any, **kwargs: Any) -> None: + """A defaultdict of defaultdicts""" super().__init__(lambda: DeepDict(), *args, **kwargs) for key, value in self.items(): if isinstance(value, dict): self[key] = DeepDict(value) def deep_update(self, other: Optional[Union[dict, "DeepDict"]] = None, **kwargs) -> None: + """ + Recursively update the DeepDict with another dictionary or DeepDict. + + Parameters + ---------- + other : dict or DeepDict, optional + The dictionary or DeepDict to update the current instance with. + **kwargs : Any + Additional keyword arguments representing key-value pairs to update the DeepDict. + + Notes + ----- + For any keys that exist in both the current instance and the provided dictionary, the values are merged + recursively if both are dictionaries. Otherwise, the value from `other` or `kwargs` will overwrite the + existing value. + """ for key, value in (other or kwargs).items(): if key in self and isinstance(self[key], dict) and isinstance(value, dict): self[key].deep_update(value)