diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a7f728c07..29f0a6388 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -415,7 +415,7 @@ To add a new file, you will need to: ```yaml "SLEAP_three-mice_Aeon_proofread.analysis.h5": sha256sum: "82ebd281c406a61536092863bc51d1a5c7c10316275119f7daf01c1ff33eac2a" - source_software: "SLEAP" + source_format: "SLEAP" type: "poses" # "poses" or "bboxes" depending on the type of tracked data fps: 50 species: "mouse" diff --git a/docs/source/user_guide/input_output.md b/docs/source/user_guide/input_output.md index cd9284e2f..9060ffa91 100644 --- a/docs/source/user_guide/input_output.md +++ b/docs/source/user_guide/input_output.md @@ -44,7 +44,7 @@ ds = load_poses.from_sleap_file("/path/to/file.analysis.h5", fps=30) # or equivalently ds = load_poses.from_file( - "/path/to/file.analysis.h5", source_software="SLEAP", fps=30 + "/path/to/file.analysis.h5", source_format="SLEAP", fps=30 ) ``` To load [SLEAP analysis files](sleap:tutorials/analysis) in .slp format (experimental, see notes in {func}`movement.io.load_poses.from_sleap_file`): @@ -62,7 +62,7 @@ ds = load_poses.from_dlc_file("/path/to/file.h5", fps=30) # or equivalently ds = load_poses.from_file( - "/path/to/file.h5", source_software="DeepLabCut", fps=30 + "/path/to/file.h5", source_format="DeepLabCut", fps=30 ) ``` @@ -80,7 +80,7 @@ ds = load_poses.from_lp_file("/path/to/file.analysis.csv", fps=30) # or equivalently ds = load_poses.from_file( - "/path/to/file.analysis.csv", source_software="LightningPose", fps=30 + "/path/to/file.analysis.csv", source_format="LightningPose", fps=30 ) ``` ::: @@ -95,7 +95,7 @@ ds = load_poses.from_anipose_file( # or equivalently ds = load_poses.from_file( - "/path/to/file.analysis.csv", source_software="Anipose", fps=30, individual_name="individual_0" + "/path/to/file.analysis.csv", source_format="Anipose", fps=30, individual_name="individual_0" ) ``` @@ -149,7 +149,7 @@ ds = load_bboxes.from_via_tracks_file("path/to/file.csv", fps=30) # or equivalently ds = load_bboxes.from_file( "path/to/file.csv", - source_software="VIA-tracks", + source_format="VIA-tracks", fps=30, ) ``` diff --git a/docs/source/user_guide/movement_dataset.md b/docs/source/user_guide/movement_dataset.md index 1b068fca6..0bd0d1292 100644 --- a/docs/source/user_guide/movement_dataset.md +++ b/docs/source/user_guide/movement_dataset.md @@ -55,7 +55,7 @@ Data variables: Attributes: fps: 50.0 time_unit: seconds - source_software: SLEAP + source_format: SLEAP source_file: /home/user/.movement/data/poses/SLEAP_three-mice_Aeon... ds_type: poses frame_path: /home/user/.movement/data/frames/three-mice_Aeon_fram... @@ -88,7 +88,7 @@ Data variables: confidence (time, individuals) float64 3kB nan nan nan nan ... nan nan nan Attributes: time_unit: frames - source_software: VIA-tracks + source_format: VIA-tracks source_file: /home/user/.movement/data/bboxes/VIA_multiple-crabs_5... ds_type: bboxes ``` @@ -175,7 +175,7 @@ Both poses and bounding boxes datasets in `movement` have associated metadata. T Right after loading a `movement` dataset, the following **attributes** are created: - `fps`: the number of frames per second in the video (absent if not provided by the user during loading). - `time_unit`: the unit of the `time` **coordinates** (either `frames` or `seconds`). -- `source_software`: the software that produced the pose or bounding box tracks. +- `source_format`: the software that produced the pose or bounding box tracks. 
- `source_file`: the path to the file from which the data were loaded (absent if the dataset was not loaded from a file). - `ds_type`: the type of dataset loaded (either `poses` or `bboxes`). diff --git a/movement/io/load_bboxes.py b/movement/io/load_bboxes.py index f279d3c32..70d9f93fa 100644 --- a/movement/io/load_bboxes.py +++ b/movement/io/load_bboxes.py @@ -29,7 +29,7 @@ def from_numpy( individual_names: list[str] | None = None, frame_array: np.ndarray | None = None, fps: float | None = None, - source_software: str | None = None, + source_format: str | None = None, ) -> xr.Dataset: """Create a ``movement`` bounding boxes dataset from NumPy arrays. @@ -70,7 +70,7 @@ def from_numpy( the ``time`` coordinates are in seconds, they will indicate the elapsed time from the capture of the first frame (assumed to be frame 0). - source_software : str, optional + source_format : str, optional Name of the software that generated the data. Defaults to None. Returns @@ -145,14 +145,14 @@ def from_numpy( individual_names=individual_names, frame_array=frame_array, fps=fps, - source_software=source_software, + source_format=source_format, ) return _ds_from_valid_data(valid_bboxes_data) def from_file( file_path: Path | str, - source_software: Literal["VIA-tracks"], + source_format: Literal["VIA-tracks"], fps: float | None = None, use_frame_numbers_from_file: bool = False, frame_regexp: str = DEFAULT_FRAME_REGEXP, @@ -166,7 +166,7 @@ def from_file( file_path : pathlib.Path or str Path to the file containing the tracked bounding boxes. Currently only VIA-tracks .csv files are supported. - source_software : "VIA-tracks". + source_format : "VIA-tracks". The source software of the file. Currently only files from the VIA 2.0.12 annotator [1]_ ("VIA-tracks") are supported. See . @@ -216,12 +216,12 @@ def from_file( >>> from movement.io import load_bboxes >>> ds = load_bboxes.from_file( >>> "path/to/file.csv", - >>> source_software="VIA-tracks", + >>> source_format="VIA-tracks", >>> fps=30, >>> ) """ - if source_software == "VIA-tracks": + if source_format == "VIA-tracks": return from_via_tracks_file( file_path, fps, @@ -230,7 +230,7 @@ def from_file( ) else: raise log_error( - ValueError, f"Unsupported source software: {source_software}" + ValueError, f"Unsupported source software: {source_format}" ) @@ -356,11 +356,11 @@ def from_via_tracks_file( else None ), fps=fps, - source_software="VIA-tracks", + source_format="VIA-tracks", ) # it validates the dataset via ValidBboxesDataset # Add metadata as attributes - ds.attrs["source_software"] = "VIA-tracks" + ds.attrs["source_format"] = "VIA-tracks" ds.attrs["source_file"] = file.path.as_posix() logger.info(f"Loaded tracks of the bounding boxes from {via_file.path}:") @@ -666,7 +666,7 @@ def _ds_from_valid_data(data: ValidBboxesDataset) -> xr.Dataset: time_unit = "frames" dataset_attrs: dict[str, str | float | None] = { - "source_software": data.source_software, + "source_format": data.source_format, "ds_type": "bboxes", } # if fps is provided: diff --git a/movement/io/load_poses.py b/movement/io/load_poses.py index e2e2762ed..9238b12b5 100644 --- a/movement/io/load_poses.py +++ b/movement/io/load_poses.py @@ -29,7 +29,7 @@ def from_numpy( individual_names: list[str] | None = None, keypoint_names: list[str] | None = None, fps: float | None = None, - source_software: str | None = None, + source_format: str | None = None, ) -> xr.Dataset: """Create a ``movement`` poses dataset from NumPy arrays. 
@@ -55,7 +55,7 @@ def from_numpy( fps : float, optional Frames per second of the video. Defaults to None, in which case the time coordinates will be in frame numbers. - source_software : str, optional + source_format : str, optional Name of the pose estimation software from which the data originate. Defaults to None. @@ -89,16 +89,14 @@ def from_numpy( individual_names=individual_names, keypoint_names=keypoint_names, fps=fps, - source_software=source_software, + source_format=source_format, ) return _ds_from_valid_data(valid_data) def from_file( file_path: Path | str, - source_software: Literal[ - "DeepLabCut", "SLEAP", "LightningPose", "Anipose" - ], + source_format: Literal["DeepLabCut", "SLEAP", "LightningPose", "Anipose"], fps: float | None = None, **kwargs, ) -> xr.Dataset: @@ -111,8 +109,8 @@ def from_file( be among those supported by the ``from_dlc_file()``, ``from_slp_file()`` or ``from_lp_file()`` functions. One of these these functions will be called internally, based on - the value of ``source_software``. - source_software : "DeepLabCut", "SLEAP", "LightningPose", or "Anipose" + the value of ``source_format``. + source_format : "DeepLabCut", "SLEAP", "LightningPose", or "Anipose" The source software of the file. fps : float, optional The number of frames per second in the video. If None (default), @@ -138,28 +136,28 @@ def from_file( -------- >>> from movement.io import load_poses >>> ds = load_poses.from_file( - ... "path/to/file.h5", source_software="DeepLabCut", fps=30 + ... "path/to/file.h5", source_format="DeepLabCut", fps=30 ... ) """ - if source_software == "DeepLabCut": + if source_format == "DeepLabCut": return from_dlc_file(file_path, fps) - elif source_software == "SLEAP": + elif source_format == "SLEAP": return from_sleap_file(file_path, fps) - elif source_software == "LightningPose": + elif source_format == "LightningPose": return from_lp_file(file_path, fps) - elif source_software == "Anipose": + elif source_format == "Anipose": return from_anipose_file(file_path, fps, **kwargs) else: raise log_error( - ValueError, f"Unsupported source software: {source_software}" + ValueError, f"Unsupported source software: {source_format}" ) def from_dlc_style_df( df: pd.DataFrame, fps: float | None = None, - source_software: Literal["DeepLabCut", "LightningPose"] = "DeepLabCut", + source_format: Literal["DeepLabCut", "LightningPose"] = "DeepLabCut", ) -> xr.Dataset: """Create a ``movement`` poses dataset from a DeepLabCut-style DataFrame. @@ -171,7 +169,7 @@ def from_dlc_style_df( fps : float, optional The number of frames per second in the video. If None (default), the ``time`` coordinates will be in frame numbers. - source_software : str, optional + source_format : str, optional Name of the pose estimation software from which the data originate. Defaults to "DeepLabCut", but it can also be "LightningPose" (because they the same DataFrame format). 
@@ -219,7 +217,7 @@ def from_dlc_style_df( individual_names=individual_names, keypoint_names=keypoint_names, fps=fps, - source_software=source_software, + source_format=source_format, ) @@ -322,7 +320,7 @@ def from_lp_file( """ return _ds_from_lp_or_dlc_file( - file_path=file_path, source_software="LightningPose", fps=fps + file_path=file_path, source_format="LightningPose", fps=fps ) @@ -357,13 +355,13 @@ def from_dlc_file( """ return _ds_from_lp_or_dlc_file( - file_path=file_path, source_software="DeepLabCut", fps=fps + file_path=file_path, source_format="DeepLabCut", fps=fps ) def from_multiview_files( file_path_dict: dict[str, Path | str], - source_software: Literal["DeepLabCut", "SLEAP", "LightningPose"], + source_format: Literal["DeepLabCut", "SLEAP", "LightningPose"], fps: float | None = None, ) -> xr.Dataset: """Load and merge pose tracking data from multiple views (cameras). @@ -372,7 +370,7 @@ def from_multiview_files( ---------- file_path_dict : dict[str, Union[Path, str]] A dict whose keys are the view names and values are the paths to load. - source_software : {'LightningPose', 'SLEAP', 'DeepLabCut'} + source_format : {'LightningPose', 'SLEAP', 'DeepLabCut'} The source software of the file. fps : float, optional The number of frames per second in the video. If None (default), @@ -388,7 +386,7 @@ def from_multiview_files( views_list = list(file_path_dict.keys()) new_coord_views = xr.DataArray(views_list, dims="view") dataset_list = [ - from_file(f, source_software=source_software, fps=fps) + from_file(f, source_format=source_format, fps=fps) for f in file_path_dict.values() ] return xr.concat(dataset_list, dim=new_coord_views) @@ -396,7 +394,7 @@ def from_multiview_files( def _ds_from_lp_or_dlc_file( file_path: Path | str, - source_software: Literal["LightningPose", "DeepLabCut"], + source_format: Literal["LightningPose", "DeepLabCut"], fps: float | None = None, ) -> xr.Dataset: """Create a ``movement`` poses dataset from a LightningPose or DLC file. @@ -406,7 +404,7 @@ def _ds_from_lp_or_dlc_file( file_path : pathlib.Path or str Path to the file containing the predicted poses, either in .h5 or .csv format. - source_software : {'LightningPose', 'DeepLabCut'} + source_format : {'LightningPose', 'DeepLabCut'} The source software of the file. fps : float, optional The number of frames per second in the video. 
If None (default), @@ -420,7 +418,7 @@ def _ds_from_lp_or_dlc_file( """ expected_suffix = [".csv"] - if source_software == "DeepLabCut": + if source_format == "DeepLabCut": expected_suffix.append(".h5") file = ValidFile( file_path, expected_permission="r", expected_suffix=expected_suffix @@ -433,7 +431,7 @@ def _ds_from_lp_or_dlc_file( ) logger.debug(f"Loaded poses from {file.path} into a DataFrame.") # Convert the DataFrame to an xarray dataset - ds = from_dlc_style_df(df=df, fps=fps, source_software=source_software) + ds = from_dlc_style_df(df=df, fps=fps, source_format=source_format) # Add metadata as attrs ds.attrs["source_file"] = file.path.as_posix() logger.info(f"Loaded pose tracks from {file.path}:") @@ -484,7 +482,7 @@ def _ds_from_sleap_analysis_file( individual_names=individual_names, keypoint_names=[n.decode() for n in f["node_names"][:]], fps=fps, - source_software="SLEAP", + source_format="SLEAP", ) @@ -524,7 +522,7 @@ def _ds_from_sleap_labels_file( individual_names=individual_names, keypoint_names=[kp.name for kp in labels.skeletons[0].nodes], fps=fps, - source_software="SLEAP", + source_format="SLEAP", ) @@ -683,7 +681,7 @@ def _ds_from_valid_data(data: ValidPosesDataset) -> xr.Dataset: n_space = data.position_array.shape[1] dataset_attrs: dict[str, str | float | None] = { - "source_software": data.source_software, + "source_format": data.source_format, "ds_type": "poses", } # Create the time coordinate, depending on the value of fps @@ -781,7 +779,7 @@ def from_anipose_style_df( confidence_array=confidence_array, individual_names=individual_names, keypoint_names=keypoint_names, - source_software="Anipose", + source_format="Anipose", fps=fps, ) diff --git a/movement/napari/loader_widgets.py b/movement/napari/loader_widgets.py index 8736690b5..fbb0c0055 100644 --- a/movement/napari/loader_widgets.py +++ b/movement/napari/loader_widgets.py @@ -52,7 +52,7 @@ def __init__(self, napari_viewer: Viewer, parent=None): self.setLayout(QFormLayout()) # Create widgets - self._create_source_software_widget() + self._create_source_format_widget() self._create_fps_widget() self._create_file_path_widget() self._create_load_button() @@ -60,12 +60,12 @@ def __init__(self, napari_viewer: Viewer, parent=None): # Enable layer tooltips from napari settings self._enable_layer_tooltips() - def _create_source_software_widget(self): + def _create_source_format_widget(self): """Create a combo box for selecting the source software.""" - self.source_software_combo = QComboBox() - self.source_software_combo.setObjectName("source_software_combo") - self.source_software_combo.addItems(SUPPORTED_DATA_FILES.keys()) - self.layout().addRow("source software:", self.source_software_combo) + self.source_format_combo = QComboBox() + self.source_format_combo.setObjectName("source_format_combo") + self.source_format_combo.addItems(SUPPORTED_DATA_FILES.keys()) + self.layout().addRow("source software:", self.source_format_combo) def _create_fps_widget(self): """Create a spinbox for selecting the frames per second (fps).""" @@ -116,7 +116,7 @@ def _on_browse_clicked(self): file_suffixes = ( "*." 
+ suffix for suffix in SUPPORTED_DATA_FILES[ - self.source_software_combo.currentText() + self.source_format_combo.currentText() ] ) @@ -137,7 +137,7 @@ def _on_load_clicked(self): """Load the file and add as a Points layer to the viewer.""" # Get data from user input fps = self.fps_spinbox.value() - source_software = self.source_software_combo.currentText() + source_format = self.source_format_combo.currentText() file_path = self.file_path_edit.text() # Load data @@ -145,11 +145,11 @@ def _on_load_clicked(self): show_warning("No file path specified.") return - if source_software in SUPPORTED_POSES_FILES: + if source_format in SUPPORTED_POSES_FILES: loader = load_poses else: loader = load_bboxes - ds = loader.from_file(file_path, source_software, fps) + ds = loader.from_file(file_path, source_format, fps) # Convert to napari Tracks array self.data, self.props = ds_to_napari_tracks(ds) diff --git a/movement/sample_data.py b/movement/sample_data.py index c8f3cbb24..fa4f09317 100644 --- a/movement/sample_data.py +++ b/movement/sample_data.py @@ -295,7 +295,7 @@ def fetch_dataset( if file_paths.get(key): ds = load_module.from_file( file_paths[key], - source_software=metadata[filename]["source_software"], + source_format=metadata[filename]["source_format"], fps=metadata[filename]["fps"], ) diff --git a/movement/validators/datasets.py b/movement/validators/datasets.py index 0f51eec5e..fc5f8919a 100644 --- a/movement/validators/datasets.py +++ b/movement/validators/datasets.py @@ -85,7 +85,7 @@ class ValidPosesDataset: if provided, match the number of individuals and keypoints in the dataset, respectively; otherwise, default names are assigned. - The optional ``fps`` is a positive float; otherwise, it defaults to None. - - The optional ``source_software`` is a string; otherwise, + - The optional ``source_format`` is a string; otherwise, it defaults to None. Attributes @@ -107,7 +107,7 @@ class ValidPosesDataset: etc. fps : float, optional Frames per second of the video. Defaults to None. - source_software : str, optional + source_format : str, optional Name of the software from which the poses were loaded. Defaults to None. @@ -138,7 +138,7 @@ class ValidPosesDataset: converters.optional(float), _convert_fps_to_none_if_invalid ), ) - source_software: str | None = field( + source_format: str | None = field( default=None, validator=validators.optional(validators.instance_of(str)), ) @@ -181,7 +181,7 @@ def _validate_confidence_array(self, attribute, value): @individual_names.validator def _validate_individual_names(self, attribute, value): - if self.source_software == "LightningPose": + if self.source_format == "LightningPose": # LightningPose only supports a single individual _validate_list_length(attribute, value, 1) else: @@ -243,7 +243,7 @@ class ValidBboxesDataset: with the frame numbers; otherwise, it defaults to an array of 0-based integers. - The optional ``fps`` is a positive float; otherwise, it defaults to None. - - The optional ``source_software`` is a string; otherwise, it defaults to + - The optional ``source_format`` is a string; otherwise, it defaults to None. Attributes @@ -274,7 +274,7 @@ class ValidBboxesDataset: fps : float, optional Frames per second defining the sampling rate of the data. Defaults to None. - source_software : str, optional + source_format : str, optional Name of the software that generated the data. Defaults to None. 
Raises @@ -304,7 +304,7 @@ class ValidBboxesDataset: converters.optional(float), _convert_fps_to_none_if_invalid ), ) - source_software: str | None = field( + source_format: str | None = field( default=None, validator=validators.optional(validators.instance_of(str)), ) diff --git a/tests/fixtures/datasets.py b/tests/fixtures/datasets.py index 14e5169e6..6f63000d6 100644 --- a/tests/fixtures/datasets.py +++ b/tests/fixtures/datasets.py @@ -106,7 +106,7 @@ def valid_bboxes_dataset(valid_bboxes_arrays): }, attrs={ "time_unit": "frames", - "source_software": "test", + "source_format": "test", "source_file": "test_bboxes.csv", "ds_type": "bboxes", }, @@ -263,7 +263,7 @@ def valid_poses_dataset(valid_poses_arrays, request): }, attrs={ "time_unit": "frames", - "source_software": "test", + "source_format": "test", "source_file": "test_poses.h5", "ds_type": "poses", }, diff --git a/tests/fixtures/helpers.py b/tests/fixtures/helpers.py index ede541a73..5846a3627 100644 --- a/tests/fixtures/helpers.py +++ b/tests/fixtures/helpers.py @@ -37,7 +37,7 @@ def assert_valid_dataset(dataset, expected_values): - file_path: Path to the source file - fps: int, frames per second - - source_software: str, name of the software used to generate + - source_format: str, name of the software used to generate the dataset """ @@ -76,9 +76,7 @@ def assert_valid_dataset(dataset, expected_values): if expected_file_path is not None else None ) - assert dataset.source_software == expected_values.get( - "source_software" - ) + assert dataset.source_format == expected_values.get("source_format") fps = getattr(dataset, "fps", None) assert fps == expected_values.get("fps") diff --git a/tests/test_unit/test_load_bboxes.py b/tests/test_unit/test_load_bboxes.py index 4d9308fbf..ca5cdc98d 100644 --- a/tests/test_unit/test_load_bboxes.py +++ b/tests/test_unit/test_load_bboxes.py @@ -205,33 +205,33 @@ def assert_time_coordinates(ds, fps, start_frame=None, frame_array=None): ) -@pytest.mark.parametrize("source_software", ["Unknown", "VIA-tracks"]) +@pytest.mark.parametrize("source_format", ["Unknown", "VIA-tracks"]) @pytest.mark.parametrize("fps", [None, 30, 60.0]) @pytest.mark.parametrize("use_frame_numbers_from_file", [True, False]) @pytest.mark.parametrize("frame_regexp", [None, r"frame_(\d+)"]) def test_from_file( - source_software, fps, use_frame_numbers_from_file, frame_regexp + source_format, fps, use_frame_numbers_from_file, frame_regexp ): """Test that the from_file() function delegates to the correct - loader function according to the source_software. + loader function according to the source_format. 
""" software_to_loader = { "VIA-tracks": "movement.io.load_bboxes.from_via_tracks_file", } - if source_software == "Unknown": + if source_format == "Unknown": with pytest.raises(ValueError, match="Unsupported source"): load_bboxes.from_file( "some_file", - source_software, + source_format, fps, use_frame_numbers_from_file=use_frame_numbers_from_file, frame_regexp=frame_regexp, ) else: - with patch(software_to_loader[source_software]) as mock_loader: + with patch(software_to_loader[source_format]) as mock_loader: load_bboxes.from_file( "some_file", - source_software, + source_format, fps, use_frame_numbers_from_file=use_frame_numbers_from_file, frame_regexp=frame_regexp, @@ -275,7 +275,7 @@ def test_from_via_tracks_file( ds = load_bboxes.from_via_tracks_file(**kwargs) expected_values = { **expected_values_bboxes, - "source_software": "VIA-tracks", + "source_format": "VIA-tracks", "fps": fps, "file_path": via_file_path, } @@ -332,12 +332,12 @@ def test_from_via_tracks_file_invalid_frame_regexp( [True, False], ) @pytest.mark.parametrize("fps", [None, 30, 60.0]) -@pytest.mark.parametrize("source_software", [None, "VIA-tracks"]) +@pytest.mark.parametrize("source_format", [None, "VIA-tracks"]) def test_from_numpy( create_valid_from_numpy_inputs, with_frame_array, fps, - source_software, + source_format, helpers, ): """Test that loading bounding boxes trajectories from the input @@ -349,11 +349,11 @@ def test_from_numpy( ds = load_bboxes.from_numpy( **from_numpy_inputs, fps=fps, - source_software=source_software, + source_format=source_format, ) expected_values = { **expected_values_bboxes, - "source_software": source_software, + "source_format": source_format, "fps": fps, } helpers.assert_valid_dataset(ds, expected_values) diff --git a/tests/test_unit/test_load_poses.py b/tests/test_unit/test_load_poses.py index 8c22c9f3f..fd0f03203 100644 --- a/tests/test_unit/test_load_poses.py +++ b/tests/test_unit/test_load_poses.py @@ -81,7 +81,7 @@ def test_load_from_sleap_file(sleap_file, helpers): ds = load_poses.from_sleap_file(sleap_file) expected_values = { **expected_values_poses, - "source_software": "SLEAP", + "source_format": "SLEAP", "file_path": sleap_file, } helpers.assert_valid_dataset(ds, expected_values) @@ -150,25 +150,25 @@ def test_load_from_dlc_file(file_name, helpers): ds = load_poses.from_dlc_file(file_path) expected_values = { **expected_values_poses, - "source_software": "DeepLabCut", + "source_format": "DeepLabCut", "file_path": file_path, } helpers.assert_valid_dataset(ds, expected_values) @pytest.mark.parametrize( - "source_software", ["DeepLabCut", "LightningPose", None] + "source_format", ["DeepLabCut", "LightningPose", None] ) -def test_load_from_dlc_style_df(valid_dlc_poses_df, source_software, helpers): +def test_load_from_dlc_style_df(valid_dlc_poses_df, source_format, helpers): """Test that loading pose tracks from a valid DLC-style DataFrame returns a proper Dataset. 
""" ds = load_poses.from_dlc_style_df( - valid_dlc_poses_df, source_software=source_software + valid_dlc_poses_df, source_format=source_format ) expected_values = { **expected_values_poses, - "source_software": source_software, + "source_format": source_format, } helpers.assert_valid_dataset(ds, expected_values) @@ -226,7 +226,7 @@ def test_load_from_lp_file(file_name, helpers): ds = load_poses.from_lp_file(file_path) expected_values = { **expected_values_poses, - "source_software": "LightningPose", + "source_format": "LightningPose", "file_path": file_path, } helpers.assert_valid_dataset(ds, expected_values) @@ -235,14 +235,14 @@ def test_load_from_lp_file(file_name, helpers): def test_load_from_lp_or_dlc_file_returns_same(): """Test that loading a single-animal DeepLabCut-style .csv file using either the `from_lp_file` or `from_dlc_file` function - returns the same Dataset (except for the source_software). + returns the same Dataset (except for the source_format). """ file_path = DATA_PATHS.get("LP_mouse-face_AIND.predictions.csv") ds_drom_lp = load_poses.from_lp_file(file_path) ds_from_dlc = load_poses.from_dlc_file(file_path) xr.testing.assert_allclose(ds_from_dlc, ds_drom_lp) - assert ds_drom_lp.source_software == "LightningPose" - assert ds_from_dlc.source_software == "DeepLabCut" + assert ds_drom_lp.source_format == "LightningPose" + assert ds_from_dlc.source_format == "DeepLabCut" def test_load_multi_individual_from_lp_file_raises(): @@ -255,13 +255,13 @@ def test_load_multi_individual_from_lp_file_raises(): @pytest.mark.parametrize( - "source_software", + "source_format", ["SLEAP", "DeepLabCut", "LightningPose", "Anipose", "Unknown"], ) @pytest.mark.parametrize("fps", [None, 30, 60.0]) -def test_from_file_delegates_correctly(source_software, fps): +def test_from_file_delegates_correctly(source_format, fps): """Test that the from_file() function delegates to the correct - loader function according to the source_software. + loader function according to the source_format. """ software_to_loader = { "SLEAP": "movement.io.load_poses.from_sleap_file", @@ -269,17 +269,17 @@ def test_from_file_delegates_correctly(source_software, fps): "LightningPose": "movement.io.load_poses.from_lp_file", "Anipose": "movement.io.load_poses.from_anipose_file", } - if source_software == "Unknown": + if source_format == "Unknown": with pytest.raises(ValueError, match="Unsupported source"): - load_poses.from_file("some_file", source_software) + load_poses.from_file("some_file", source_format) else: - with patch(software_to_loader[source_software]) as mock_loader: - load_poses.from_file("some_file", source_software, fps) + with patch(software_to_loader[source_format]) as mock_loader: + load_poses.from_file("some_file", source_format, fps) mock_loader.assert_called_with("some_file", fps) -@pytest.mark.parametrize("source_software", [None, "SLEAP"]) -def test_from_numpy_valid(valid_poses_arrays, source_software, helpers): +@pytest.mark.parametrize("source_format", [None, "SLEAP"]) +def test_from_numpy_valid(valid_poses_arrays, source_format, helpers): """Test that loading pose tracks from a multi-animal numpy array with valid parameters returns a proper Dataset. 
""" @@ -290,18 +290,18 @@ def test_from_numpy_valid(valid_poses_arrays, source_software, helpers): individual_names=["id_0", "id_1"], keypoint_names=["centroid", "left", "right"], fps=None, - source_software=source_software, + source_format=source_format, ) expected_values = { **expected_values_poses, - "source_software": source_software, + "source_format": source_format, } helpers.assert_valid_dataset(ds, expected_values) def test_from_multiview_files(): """Test that the from_file() function delegates to the correct - loader function according to the source_software. + loader function according to the source_format. """ view_names = ["view_0", "view_1"] file_path_dict = { @@ -309,7 +309,7 @@ def test_from_multiview_files(): for view in view_names } multi_view_ds = load_poses.from_multiview_files( - file_path_dict, source_software="DeepLabCut" + file_path_dict, source_format="DeepLabCut" ) assert isinstance(multi_view_ds, xr.Dataset) assert "view" in multi_view_ds.dims diff --git a/tests/test_unit/test_napari_plugin/test_data_loader_widget.py b/tests/test_unit/test_napari_plugin/test_data_loader_widget.py index 142dad778..bfd6ed933 100644 --- a/tests/test_unit/test_napari_plugin/test_data_loader_widget.py +++ b/tests/test_unit/test_napari_plugin/test_data_loader_widget.py @@ -26,7 +26,7 @@ def test_data_loader_widget_instantiation(make_napari_viewer_proxy): # Check that the expected widgets are present in the layout expected_widgets = [ - (QComboBox, "source_software_combo"), + (QComboBox, "source_format_combo"), (QDoubleSpinBox, "fps_spinbox"), (QLineEdit, "file_path_edit"), (QPushButton, "load_button"), @@ -94,7 +94,7 @@ def test_on_browse_clicked(file_path, make_napari_viewer_proxy, mocker): @pytest.mark.parametrize( - "source_software, expected_file_filter", + "source_format, expected_file_filter", [ ("DeepLabCut", "*.h5 *.csv"), ("SLEAP", "*.h5 *.slp"), @@ -102,12 +102,12 @@ def test_on_browse_clicked(file_path, make_napari_viewer_proxy, mocker): ("VIA-tracks", "*.csv"), ], ) -def test_file_filters_per_source_software( - source_software, expected_file_filter, make_napari_viewer_proxy, mocker +def test_file_filters_per_source_format( + source_format, expected_file_filter, make_napari_viewer_proxy, mocker ): """Test that the file dialog is opened with the correct filters.""" data_loader_widget = DataLoader(make_napari_viewer_proxy) - data_loader_widget.source_software_combo.setCurrentText(source_software) + data_loader_widget.source_format_combo.setCurrentText(source_format) mock_file_dialog = mocker.patch( "movement.napari.loader_widgets.QFileDialog.getOpenFileName", return_value=("", None), @@ -132,7 +132,7 @@ def test_on_load_clicked_without_file_path(make_napari_viewer_proxy, capsys): @pytest.mark.parametrize( - "filename, source_software, tracks_array_shape", + "filename, source_format, tracks_array_shape", [ ("DLC_single-wasp.predictions.h5", "DeepLabCut", (2170, 4)), ("VIA_single-crab_MOCA-crab-1.csv", "VIA-tracks", (35, 4)), @@ -140,7 +140,7 @@ def test_on_load_clicked_without_file_path(make_napari_viewer_proxy, capsys): ) def test_on_load_clicked_with_valid_file_path( filename, - source_software, + source_format, tracks_array_shape, make_napari_viewer_proxy, caplog, @@ -162,7 +162,7 @@ def test_on_load_clicked_with_valid_file_path( data_loader_widget.file_path_edit.setText(file_path.as_posix()) # Set the source software - data_loader_widget.source_software_combo.setCurrentText(source_software) + data_loader_widget.source_format_combo.setCurrentText(source_format) # Set the fps 
to 60 data_loader_widget.fps_spinbox.setValue(60) @@ -236,7 +236,7 @@ def test_dimension_slider_matches_frames( # Read sample data with a NaN at the specified # location (start, middle, or end) poses_loader_widget.file_path_edit.setText(file_path.as_posix()) - poses_loader_widget.source_software_combo.setCurrentText("DeepLabCut") + poses_loader_widget.source_format_combo.setCurrentText("DeepLabCut") # Check the data contains nans where expected assert ( @@ -261,7 +261,7 @@ def test_dimension_slider_matches_frames( @pytest.mark.parametrize( ( - "filename, source_software, " + "filename, source_format, " "expected_text_property, expected_color_property" ), [ @@ -299,7 +299,7 @@ def test_dimension_slider_matches_frames( ) def test_add_points_layer_style( filename, - source_software, + source_format, make_napari_viewer_proxy, expected_text_property, expected_color_property, @@ -315,7 +315,7 @@ def test_add_points_layer_style( # Load data as a points layer file_path = pytest.DATA_PATHS.get(filename) loader_widget.file_path_edit.setText(file_path.as_posix()) - loader_widget.source_software_combo.setCurrentText(source_software) + loader_widget.source_format_combo.setCurrentText(source_format) loader_widget._on_load_clicked() # Check no warnings were emitted diff --git a/tests/test_unit/test_sample_data.py b/tests/test_unit/test_sample_data.py index 7b90456f5..2e41eb61b 100644 --- a/tests/test_unit/test_sample_data.py +++ b/tests/test_unit/test_sample_data.py @@ -44,7 +44,7 @@ def validate_metadata(metadata: dict[str, dict]) -> None: metadata_fields = [ "sha256sum", "type", - "source_software", + "source_format", "type", "fps", "species", diff --git a/tests/test_unit/test_validators/test_datasets_validators.py b/tests/test_unit/test_validators/test_datasets_validators.py index fcc63d14c..8acf1bc73 100644 --- a/tests/test_unit/test_validators/test_datasets_validators.py +++ b/tests/test_unit/test_validators/test_datasets_validators.py @@ -177,7 +177,7 @@ def test_poses_dataset_validator_individual_names( @pytest.mark.parametrize( - "source_software, expected_exception", + "source_format, expected_exception", [ (None, does_not_raise()), ("SLEAP", does_not_raise()), @@ -187,10 +187,10 @@ def test_poses_dataset_validator_individual_names( (5, pytest.raises(TypeError)), # not a string ], ) -def test_poses_dataset_validator_source_software( - valid_poses_arrays, source_software, expected_exception +def test_poses_dataset_validator_source_format( + valid_poses_arrays, source_format, expected_exception ): - """Test that the source_software attribute is validated properly. + """Test that the source_format attribute is validated properly. LightnigPose is incompatible with multi-individual arrays. """ with expected_exception: @@ -198,13 +198,13 @@ def test_poses_dataset_validator_source_software( position_array=valid_poses_arrays("multi_individual_array")[ "position" ], - source_software=source_software, + source_format=source_format, ) - if source_software is not None: - assert ds.source_software == source_software + if source_format is not None: + assert ds.source_format == source_format else: - assert ds.source_software is None + assert ds.source_format is None # Tests bboxes dataset
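
For reviewers, a minimal usage sketch of the renamed keyword, assembled from the calls shown in the documentation hunks above; the file paths and `fps` values are placeholders, not real sample data:

```python
# Minimal sketch: the generic loaders now take `source_format` instead of
# `source_software`, and the dataset attribute is renamed accordingly.
from movement.io import load_bboxes, load_poses

# Pose tracks (e.g. a SLEAP analysis file); path and fps are placeholders.
ds_poses = load_poses.from_file(
    "/path/to/file.analysis.h5", source_format="SLEAP", fps=30
)

# Bounding-box tracks from a VIA-tracks .csv file, via the same renamed keyword.
ds_bboxes = load_bboxes.from_file(
    "path/to/file.csv", source_format="VIA-tracks", fps=30
)

# The metadata attribute follows the rename.
print(ds_poses.source_format)   # "SLEAP"
print(ds_bboxes.source_format)  # "VIA-tracks"
```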