diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1464b1ce1..8a4325040 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,7 +9,7 @@ repos: - id: reuse - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.11.12 + rev: v0.12.0 hooks: # Run the linter. - id: ruff-check diff --git a/pyproject.toml b/pyproject.toml index c8bcd5a4c..83601c2e7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -113,6 +113,7 @@ select = [ "FURB", "FLY", "SLOT", + "PL", "NPY", ] @@ -123,6 +124,9 @@ combine-as-imports = true [tool.ruff.lint.per-file-ignores] # Ignore `F811` (redefinition violations) in all examples notebooks since we use redefinition. "docs/examples/*.ipynb" = ["F811", "E402"] +# Pylint was only run in src directory before moving to Ruff +"tests/*" = ["PLR0915", "PLR0912", "PLR0913"] +"setup.py" = ["PL"] [tool.mypy] follow_imports = "silent" diff --git a/src/power_grid_model/_core/buffer_handling.py b/src/power_grid_model/_core/buffer_handling.py index fe9969d2e..a4fbe46e8 100644 --- a/src/power_grid_model/_core/buffer_handling.py +++ b/src/power_grid_model/_core/buffer_handling.py @@ -117,7 +117,8 @@ def _get_raw_attribute_data_view(data: np.ndarray, schema: ComponentMetaData, at Returns: a raw view on the data set. """ - if schema.dtype[attribute].shape == (3,) and data.shape[-1] != 3: + supported_dim = 3 + if schema.dtype[attribute].shape == (supported_dim,) and data.shape[-1] != supported_dim: raise ValueError("Given data has a different schema than supported.") return _get_raw_data_view(data, dtype=schema.dtype[attribute].base) @@ -185,8 +186,11 @@ def _get_dense_buffer_properties( if actual_ndim not in (1, 2): raise ValueError(f"Array can only be 1D or 2D. {VALIDATOR_MSG}") - actual_is_batch = actual_ndim == 2 - actual_batch_size = shape[0] if actual_is_batch else 1 + singular_ndim = 1 + batch_ndim = 2 + + actual_is_batch = actual_ndim == batch_ndim + actual_batch_size = shape[0] if actual_is_batch else singular_ndim n_elements_per_scenario = shape[-1] n_total_elements = actual_batch_size * n_elements_per_scenario diff --git a/src/power_grid_model/_core/power_grid_core.py b/src/power_grid_model/_core/power_grid_core.py index a2bd6c5e0..036697efc 100644 --- a/src/power_grid_model/_core/power_grid_core.py +++ b/src/power_grid_model/_core/power_grid_core.py @@ -460,7 +460,7 @@ def destroy_dataset_mutable(self, dataset: MutableDatasetPtr) -> None: # type: pass # pragma: no cover @make_c_binding - def dataset_mutable_add_buffer( # type: ignore[empty-body] + def dataset_mutable_add_buffer( # type: ignore[empty-body] # noqa: PLR0913 self, dataset: MutableDatasetPtr, component: str, diff --git a/src/power_grid_model/_core/power_grid_model.py b/src/power_grid_model/_core/power_grid_model.py index 04f6043ec..e7fc4e8be 100644 --- a/src/power_grid_model/_core/power_grid_model.py +++ b/src/power_grid_model/_core/power_grid_model.py @@ -235,7 +235,7 @@ def _handle_errors(self, continue_on_batch_error: bool, batch_size: int, decode_ decode_error=decode_error, ) - def _calculate_impl( + def _calculate_impl( # noqa: PLR0913 self, calculation_type: CalculationType, symmetric: bool, @@ -300,7 +300,7 @@ def _calculate_impl( return output_data - def _calculate_power_flow( + def _calculate_power_flow( # noqa: PLR0913 self, *, symmetric: bool = True, @@ -337,7 +337,7 @@ def _calculate_power_flow( experimental_features=experimental_features, ) - def _calculate_state_estimation( + def _calculate_state_estimation( # noqa: PLR0913 self, *, symmetric: 
bool = True, @@ -372,7 +372,7 @@ def _calculate_state_estimation( experimental_features=experimental_features, ) - def _calculate_short_circuit( + def _calculate_short_circuit( # noqa: PLR0913 self, *, calculation_method: CalculationMethod | str = CalculationMethod.iec60909, @@ -406,7 +406,7 @@ def _calculate_short_circuit( experimental_features=experimental_features, ) - def calculate_power_flow( + def calculate_power_flow( # noqa: PLR0913 self, *, symmetric: bool = True, @@ -505,7 +505,7 @@ def calculate_power_flow( tap_changing_strategy=tap_changing_strategy, ) - def calculate_state_estimation( + def calculate_state_estimation( # noqa: PLR0913 self, *, symmetric: bool = True, @@ -599,7 +599,7 @@ def calculate_state_estimation( decode_error=decode_error, ) - def calculate_short_circuit( + def calculate_short_circuit( # noqa: PLR0913 self, *, calculation_method: CalculationMethod | str = CalculationMethod.iec60909, diff --git a/src/power_grid_model/_core/utils.py b/src/power_grid_model/_core/utils.py index 68e0d9c89..e179afa21 100644 --- a/src/power_grid_model/_core/utils.py +++ b/src/power_grid_model/_core/utils.py @@ -42,6 +42,10 @@ from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data from power_grid_model._core.typing import ComponentAttributeMapping, _ComponentAttributeMappingDict +SINGULAR_NDIM = 1 +BATCH_NDIM = 2 +UNSUPPORTED_NDIM = 3 + def is_nan(data) -> bool: """ @@ -166,17 +170,17 @@ def get_batch_size( for attribute, array in batch_data.items(): if attribute in sym_attributes: break - if array.ndim == 1: + if array.ndim == SINGULAR_NDIM: raise TypeError("Incorrect dimension present in batch data.") - if array.ndim == 2: + if array.ndim == BATCH_NDIM: return 1 return array.shape[0] sym_array = next(iter(batch_data.values())) sym_array = cast(DenseBatchArray | BatchColumn, sym_array) - if sym_array.ndim == 3: + if sym_array.ndim == UNSUPPORTED_NDIM: raise TypeError("Incorrect dimension present in batch data.") - if sym_array.ndim == 1: + if sym_array.ndim == SINGULAR_NDIM: return 1 return sym_array.shape[0] @@ -222,7 +226,7 @@ def _split_numpy_array_in_batches( Returns: A list with a single numpy structured array per batch """ - if data.ndim == 1: + if data.ndim == SINGULAR_NDIM: return [data] if data.ndim in [2, 3]: return [data[i, ...] 
for i in range(data.shape[0])] @@ -325,7 +329,7 @@ def convert_dataset_to_python_dataset(data: Dataset) -> PythonDataset: # It is batch dataset if it is 2D array or a indptr/data structure is_batch: bool | None = None for component, array in data.items(): - is_dense_batch = isinstance(array, np.ndarray) and array.ndim == 2 + is_dense_batch = isinstance(array, np.ndarray) and array.ndim == BATCH_NDIM is_sparse_batch = isinstance(array, dict) and "indptr" in array and "data" in array if is_batch is not None and is_batch != (is_dense_batch or is_sparse_batch): raise ValueError( diff --git a/src/power_grid_model/utils.py b/src/power_grid_model/utils.py index 16e8d7e83..43a9ae2a2 100644 --- a/src/power_grid_model/utils.py +++ b/src/power_grid_model/utils.py @@ -406,7 +406,7 @@ def self_test(): raise PowerGridError from e -def _make_test_case( +def _make_test_case( # noqa: PLR0913 *, output_path: Path, input_data: SingleDataset, diff --git a/src/power_grid_model/validation/_rules.py b/src/power_grid_model/validation/_rules.py index ba84c9e75..28a9b3bc8 100644 --- a/src/power_grid_model/validation/_rules.py +++ b/src/power_grid_model/validation/_rules.py @@ -241,7 +241,7 @@ def not_less_or_equal(val: np.ndarray, *ref: np.ndarray): return none_match_comparison(data, component, field, not_less_or_equal, ref_value, NotLessOrEqualError) -def all_between( +def all_between( # noqa: PLR0913 data: SingleDataset, component: ComponentType, field: str, @@ -281,7 +281,7 @@ def outside(val: np.ndarray, *ref: np.ndarray) -> np.ndarray: ) -def all_between_or_at( +def all_between_or_at( # noqa: PLR0913 data: SingleDataset, component: ComponentType, field: str, @@ -331,7 +331,7 @@ def outside(val: np.ndarray, *ref: np.ndarray) -> np.ndarray: ) -def none_match_comparison( +def none_match_comparison( # noqa: PLR0913 data: SingleDataset, component: ComponentType, field: str, @@ -554,7 +554,7 @@ def all_valid_enum_values( return [] -def all_valid_associated_enum_values( +def all_valid_associated_enum_values( # noqa: PLR0913 data: SingleDataset, component: ComponentType, field: str, @@ -774,7 +774,7 @@ def all_finite(data: SingleDataset, exceptions: dict[ComponentType, list[str]] | invalid = np.isinf(array[field]) if invalid.any(): - ids = data[component]["id"][invalid].flatten().tolist() + ids = array["id"][invalid].flatten().tolist() errors.append(InfinityError(component, field, ids)) return errors @@ -841,7 +841,8 @@ def not_all_missing(data: SingleDataset, fields: list[str], component_type: Comp fields: List of fields component_type: component type to check """ - if len(fields) < 2: + min_fields = 2 + if len(fields) < min_fields: raise ValueError( "The fields parameter must contain at least 2 fields. Otherwise use the none_missing function." 
) diff --git a/src/power_grid_model/validation/_validation.py b/src/power_grid_model/validation/_validation.py index 1848cf9d6..fa267fb6a 100644 --- a/src/power_grid_model/validation/_validation.py +++ b/src/power_grid_model/validation/_validation.py @@ -279,7 +279,7 @@ def _process_power_sigma_and_p_q_sigma( power_sigma[mask] = np.nansum(q_sigma[mask], axis=asym_axes) -def validate_required_values( +def validate_required_values( # noqa: PLR0915 data: SingleDataset, calculation_type: CalculationType | None = None, symmetric: bool = True ) -> list[MissingValueError]: """ @@ -631,7 +631,7 @@ def validate_branch3(data: SingleDataset, component: ComponentType) -> list[Vali return errors -def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationError]: +def validate_three_winding_transformer(data: SingleDataset) -> list[ValidationError]: # noqa: PLR0915 errors = validate_branch3(data, ComponentType.three_winding_transformer) errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u1") errors += _all_greater_than_zero(data, ComponentType.three_winding_transformer, "u2") diff --git a/src/power_grid_model/validation/errors.py b/src/power_grid_model/validation/errors.py index 06cfd09ce..e8b516eb0 100644 --- a/src/power_grid_model/validation/errors.py +++ b/src/power_grid_model/validation/errors.py @@ -13,6 +13,9 @@ from power_grid_model import ComponentType +MIN_FIELDS = 2 +MIN_COMPONENTS = 2 + class ValidationError(ABC): """ @@ -54,6 +57,8 @@ class ValidationError(ABC): _delimiter: str = " and " + __hash__ = None # type: ignore[assignment] + @property def component_str(self) -> str: """ @@ -165,7 +170,7 @@ def __init__(self, component: ComponentType, fields: list[str], ids: list[int]): self.field = sorted(fields) self.ids = sorted(ids) - if len(self.field) < 2: + if len(self.field) < MIN_FIELDS: raise ValueError(f"{type(self).__name__} expects at least two fields: {self.field}") @@ -191,9 +196,9 @@ def __init__(self, fields: list[tuple[ComponentType, str]], ids: list[tuple[Comp self.field = sorted(fields) self.ids = sorted(ids) - if len(self.field) < 2: + if len(self.field) < MIN_FIELDS: raise ValueError(f"{type(self).__name__} expects at least two fields: {self.field}") - if len(self.component) < 2: + if len(self.component) < MIN_COMPONENTS: raise ValueError(f"{type(self).__name__} expects at least two components: {self.component}") @@ -241,6 +246,7 @@ class InvalidValueError(SingleFieldValidationError): _message = "Field {field} contains invalid values for {n} {objects}." values: list + __hash__ = None # type: ignore[assignment] def __init__(self, component: ComponentType, field: str, ids: list[int], values: list): super().__init__(component, field, ids) @@ -265,6 +271,7 @@ class InvalidEnumValueError(SingleFieldValidationError): _message = "Field {field} contains invalid {enum} values for {n} {objects}." enum: Type[Enum] | list[Type[Enum]] + __hash__ = None # type: ignore[assignment] def __init__(self, component: ComponentType, field: str, ids: list[int], enum: Type[Enum] | list[Type[Enum]]): super().__init__(component, field, ids) @@ -318,6 +325,7 @@ class IdNotInDatasetError(SingleFieldValidationError): _message = "ID does not exist in {ref_dataset} for {n} {objects}." 
ref_dataset: str + __hash__ = None # type: ignore[assignment] def __init__(self, component: ComponentType, ids: list[int], ref_dataset: str): super().__init__(component=component, field="id", ids=ids) @@ -345,6 +353,7 @@ class InvalidIdError(SingleFieldValidationError): _message = "Field {field} does not contain a valid {ref_components} id for {n} {objects}. {filters}" ref_components: list[ComponentType] + __hash__ = None # type: ignore[assignment] def __init__( self, @@ -399,6 +408,8 @@ class ComparisonError(SingleFieldValidationError): RefType = int | float | str | tuple[int | float | str, ...] + __hash__ = None # type: ignore[assignment] + def __init__(self, component: ComponentType, field: str, ids: list[int], ref_value: "ComparisonError.RefType"): super().__init__(component, field, ids) self.ref_value = ref_value @@ -512,6 +523,7 @@ class InvalidAssociatedEnumValueError(MultiFieldValidationError): _message = "The combination of fields {field} results in invalid {enum} values for {n} {objects}." enum: Type[Enum] | list[Type[Enum]] + __hash__ = None # type: ignore[assignment] def __init__( self, diff --git a/src/power_grid_model/validation/utils.py b/src/power_grid_model/validation/utils.py index dc89ea27a..afbbab7a4 100644 --- a/src/power_grid_model/validation/utils.py +++ b/src/power_grid_model/validation/utils.py @@ -86,7 +86,8 @@ def _eval_field_expression(data: np.ndarray, expression: str) -> np.ndarray: if len(fields) == 1: return data[fields[0]] - assert len(fields) == 2 + max_num_fields = 2 + assert len(fields) == max_num_fields zero_div = np.logical_or(np.equal(data[fields[1]], 0.0), np.logical_not(np.isfinite(data[fields[1]]))) if np.any(zero_div): result = np.full_like(data[fields[0]], np.nan) @@ -126,6 +127,7 @@ def _update_component_array_data( Update the data in a numpy array, with another numpy array, indexed on the "id" field and only non-NaN values are overwritten. 
""" + batch_ndim = 2 if update_data.dtype.names is None: raise ValueError("Invalid data format") @@ -142,7 +144,7 @@ def _update_component_array_data( nan = _nan_type(component, field, DatasetType.update) mask = ~np.isnan(update_data[field]) if np.isnan(nan) else np.not_equal(update_data[field], nan) - if mask.ndim == 2: + if mask.ndim == batch_ndim: for phase in range(mask.shape[1]): # find indexers of to-be-updated object sub_mask = mask[:, phase] diff --git a/tests/unit/test_buffer_handling.py b/tests/unit/test_buffer_handling.py index 95bfa65e3..45cbc22c4 100644 --- a/tests/unit/test_buffer_handling.py +++ b/tests/unit/test_buffer_handling.py @@ -13,6 +13,11 @@ from power_grid_model._core.dataset_definitions import ComponentType, DatasetType from power_grid_model._core.power_grid_meta import initialize_array, power_grid_meta_data +SINGULAR_NDIM = 1 +BATCH_NDIM = 2 +SCENARIO_TOTAL_ELEMENTS = 4 +BATCH_TOTAL_ELEMENTS = 8 + def load_data(component_type, is_batch, is_sparse, is_columnar): """Creates load data of different formats for testing""" @@ -45,14 +50,14 @@ def load_data(component_type, is_batch, is_sparse, is_columnar): def test__get_dense_buffer_properties(component_type, is_batch, is_columnar): data = load_data(component_type, is_batch=is_batch, is_columnar=is_columnar, is_sparse=False) schema = power_grid_meta_data[DatasetType.update][component_type] - batch_size = 2 if is_batch else None + batch_size = BATCH_NDIM if is_batch else None properties = _get_dense_buffer_properties(data, schema=schema, is_batch=is_batch, batch_size=batch_size) assert not properties.is_sparse assert properties.is_batch == is_batch - assert properties.batch_size == (2 if is_batch else 1) - assert properties.n_elements_per_scenario == 4 - assert properties.n_total_elements == 8 if is_batch else 4 + assert properties.batch_size == (BATCH_NDIM if is_batch else SINGULAR_NDIM) + assert properties.n_elements_per_scenario == SCENARIO_TOTAL_ELEMENTS + assert properties.n_total_elements == BATCH_TOTAL_ELEMENTS if is_batch else 4 if is_columnar: assert properties.columns == list(data.keys()) else: @@ -76,9 +81,9 @@ def test__get_sparse_buffer_properties(component_type, is_columnar): assert properties.is_sparse assert properties.is_batch - assert properties.batch_size == 2 + assert properties.batch_size == BATCH_NDIM assert properties.n_elements_per_scenario == -1 - assert properties.n_total_elements == 8 + assert properties.n_total_elements == BATCH_TOTAL_ELEMENTS if is_columnar: assert properties.columns == list(data["data"].keys()) else: diff --git a/tests/unit/test_data_handling.py b/tests/unit/test_data_handling.py index 3e07e610a..4b87deb4d 100644 --- a/tests/unit/test_data_handling.py +++ b/tests/unit/test_data_handling.py @@ -129,15 +129,15 @@ def test_create_output_data(output_component_types, expected_fns, batch_size): expected = {comp: fn(batch_size_tuple=(batch_size,)) for comp, fn in expected_fns.items()} assert actual.keys() == expected.keys() - for comp in expected: - if not is_columnar(expected[comp]): - assert actual[comp].dtype == expected[comp].dtype - elif expected[comp] == dict(): + for comp, fn in expected.items(): + if not is_columnar(fn): + assert actual[comp].dtype == fn.dtype + elif fn == dict(): # Empty attributes columnar - assert actual[comp] == expected[comp] + assert actual[comp] == fn else: - assert actual[comp].keys() == expected[comp].keys() - assert all(actual[comp][attr].dtype == expected[comp][attr].dtype for attr in expected[comp]) + assert actual[comp].keys() == fn.keys() + 
assert all(actual[comp][attr].dtype == expected_data[attr].dtype for attr in expected_data) def test_dtype_compatibility_check_normal(): diff --git a/tests/unit/test_dataset.py b/tests/unit/test_dataset.py index 88bef7c7e..1c124fbbe 100644 --- a/tests/unit/test_dataset.py +++ b/tests/unit/test_dataset.py @@ -10,6 +10,8 @@ from power_grid_model._core.power_grid_meta import power_grid_meta_data from power_grid_model.errors import PowerGridError +SINGULAR_BATCH_SIZE = 1 + def input_dataset_types(): return [DatasetType.input] @@ -40,7 +42,7 @@ def test_const_dataset__empty_dataset(dataset_type): assert info.dataset_type() == dataset_type assert not info.is_batch() - assert info.batch_size() == 1 + assert info.batch_size() == SINGULAR_BATCH_SIZE assert info.n_components() == 0 assert info.components() == [] assert info.elements_per_scenario() == {} @@ -74,7 +76,7 @@ def test_const_dataset__single_data(dataset_type): assert info.dataset_type() == dataset_type assert not info.is_batch() - assert info.batch_size() == 1 + assert info.batch_size() == SINGULAR_BATCH_SIZE assert info.n_components() == len(components) assert info.components() == list(components) assert info.elements_per_scenario() == components @@ -129,7 +131,7 @@ def test_const_dataset__sparse_batch_data(dataset_type): assert info.dataset_type() == dataset_type assert info.is_batch() - assert info.batch_size() == 3 + assert info.batch_size() == batch_size assert info.n_components() == len(components) assert info.components() == list(components) assert info.elements_per_scenario() == { diff --git a/tests/unit/test_internal_utils.py b/tests/unit/test_internal_utils.py index de1196f52..29b9fac5b 100644 --- a/tests/unit/test_internal_utils.py +++ b/tests/unit/test_internal_utils.py @@ -132,10 +132,10 @@ def test_is_nan(): def test_convert_json_to_numpy(two_nodes_one_line, two_nodes_two_lines): pgm_data = convert_python_to_numpy(two_nodes_one_line, "input") - assert len(pgm_data) == 2 - assert len(pgm_data["node"]) == 2 - assert pgm_data["node"][0]["id"] == 11 - assert pgm_data["node"][0]["u_rated"] == 10.5e3 + assert len(pgm_data) == len(two_nodes_one_line) + assert len(pgm_data["node"]) == len(two_nodes_one_line["node"]) + assert pgm_data["node"][0]["id"] == two_nodes_one_line["node"][0]["id"] + assert pgm_data["node"][0]["u_rated"] == two_nodes_one_line["node"][0]["u_rated"] assert len(pgm_data["line"]) == 1 json_list = [two_nodes_one_line, two_nodes_two_lines, two_nodes_one_line] @@ -684,9 +684,9 @@ def sample_output_data(): CT.sym_load: initialize_array(DT.sym_output, CT.sym_load, 3), CT.source: initialize_array(DT.sym_output, CT.source, 1), } - for comp in output_data: - for attr in output_data[comp].dtype.names: - output_data[comp][attr] = 0 + for data_array in output_data.values(): + for attr in data_array.dtype.names: + data_array[attr] = 0 return output_data diff --git a/tests/unit/test_serialization.py b/tests/unit/test_serialization.py index dcc7d91b3..d348506e4 100644 --- a/tests/unit/test_serialization.py +++ b/tests/unit/test_serialization.py @@ -515,13 +515,13 @@ def assert_individual_data_entry(serialized_dataset, data_filter, component, ser assert attr in deserialized_output assert_almost_equal( deserialized_output[attr][comp_idx], - serialized_input[comp_idx][attr], + input_entry[attr], ) else: assert attr in deserialized_output[comp_idx].dtype.names assert_almost_equal( deserialized_output[comp_idx][attr], - serialized_input[comp_idx][attr], + input_entry[attr], ) else: assert component in serialized_dataset["attributes"] @@ -533,13 +533,13 @@ def 
assert_individual_data_entry(serialized_dataset, data_filter, component, ser assert attr in deserialized_output assert_almost_equal( deserialized_output[attr][comp_idx], - serialized_input[comp_idx][attr_idx], + input_entry[attr_idx], ) else: assert attr in deserialized_output[comp_idx].dtype.names assert_almost_equal( deserialized_output[comp_idx][attr], - serialized_input[comp_idx][attr_idx], + input_entry[attr_idx], ) @@ -562,6 +562,7 @@ def assert_batch_dataset_structure( ): """Checks if the structure of the batch dataset is correct. Then splits into individual scenario's dataset and checks if all of them are correct.""" + batch_ndim = 2 # Check structure of the whole BatchDataset assert isinstance(serialized_dataset["data"], list) @@ -581,17 +582,16 @@ def assert_batch_dataset_structure( assert isinstance(component_data, np.ndarray) assert component_data.ndim == 1 assert len(component_data) == component_indptr[-1] + elif is_columnar_filter(data_filter, component): + for attr, attr_value in component_values.items(): + assert isinstance(attr, str) + assert isinstance(attr_value, np.ndarray) + assert len(attr_value.shape) in [2, 3] + assert len(attr_value) == len(serialized_dataset["data"]) else: - if is_columnar_filter(data_filter, component): - for attr, attr_value in component_values.items(): - assert isinstance(attr, str) - assert isinstance(attr_value, np.ndarray) - assert len(attr_value.shape) in [2, 3] - assert len(attr_value) == len(serialized_dataset["data"]) - else: - assert isinstance(component_values, np.ndarray) - assert len(component_values.shape) == 2 - assert len(component_values) == len(serialized_dataset["data"]) + assert isinstance(component_values, np.ndarray) + assert len(component_values.shape) == batch_ndim + assert len(component_values) == len(serialized_dataset["data"]) def assert_serialization_correct(deserialized_dataset: Dataset, serialized_dataset: Mapping[str, Any], data_filter): diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 8eb7f37da..14023086b 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -58,7 +58,9 @@ def test_get_data_set_batch_size(): batch_data = {"line": line, "asym_load": asym_load} - assert get_dataset_batch_size(batch_data) == 3 + n_batch_size = 3 + + assert get_dataset_batch_size(batch_data) == n_batch_size def test_get_dataset_batch_size_sparse(): @@ -77,7 +79,9 @@ def test_get_dataset_batch_size_sparse(): }, } - assert get_dataset_batch_size(data) == 3 + n_batch_size = 3 + + assert get_dataset_batch_size(data) == n_batch_size def test_get_dataset_batch_size_mixed(): @@ -117,8 +121,11 @@ def test_get_component_batch_size(): "data": np.zeros(shape=2, dtype=power_grid_meta_data["input"]["sym_load"]), "indptr": np.array([0, 0, 1, 2]), } - assert get_component_batch_size(asym_load) == 3 - assert get_component_batch_size(sym_load) == 3 + + asym_load_batch_size = 3 + sym_load_batch_size = 3 + assert get_component_batch_size(asym_load) == asym_load_batch_size + assert get_component_batch_size(sym_load) == sym_load_batch_size @patch("builtins.open", new_callable=mock_open) @@ -189,6 +196,10 @@ def test__make_test_case( output_data: Dataset = {"version": "1.0", "data": "output_data"} output_path = Path("test_path") params = {"param1": "value1", "param2": "value2"} + write_update_call_count = 5 + serialize_update_call_count = 3 + write_call_count = 4 + serialize_call_count = 2 _make_test_case( output_path=output_path, @@ -213,8 +224,8 @@ def test__make_test_case( serialize_to_file_mock.assert_any_call( 
file_path=output_path / "update_batch.json", data=update_data, dataset_type=DatasetType.update ) - assert write_text_mock.call_count == 5 - assert serialize_to_file_mock.call_count == 3 + assert write_text_mock.call_count == write_update_call_count + assert serialize_to_file_mock.call_count == serialize_update_call_count else: - assert write_text_mock.call_count == 4 - assert serialize_to_file_mock.call_count == 2 + assert write_text_mock.call_count == write_call_count + assert serialize_to_file_mock.call_count == serialize_call_count diff --git a/tests/unit/utils.py b/tests/unit/utils.py index f26801230..19fefcbee 100644 --- a/tests/unit/utils.py +++ b/tests/unit/utils.py @@ -228,6 +228,7 @@ def save_json_data(json_file: str, data: Dataset): def compare_result(actual: SingleDataset, expected: SingleDataset, rtol: float, atol: float | dict[str, float]): + actual_col_ndim = 2 for key, expected_data in expected.items(): if not isinstance(expected_data, np.ndarray): raise NotImplementedError("Validation tests are not implemented for columnar data") @@ -246,7 +247,7 @@ def compare_result(actual: SingleDataset, expected: SingleDataset, rtol: float, if not expect_all_nan: # permute expected_col if needed - if expected_col.ndim == 1 and actual_col.ndim == 2: + if expected_col.ndim == 1 and actual_col.ndim == actual_col_ndim: if col_name == "u_angle": # should be 120 and 240 degree lagging expected_col = np.stack( diff --git a/tests/unit/validation/test_batch_validation.py b/tests/unit/validation/test_batch_validation.py index 0355ec54f..33f807018 100644 --- a/tests/unit/validation/test_batch_validation.py +++ b/tests/unit/validation/test_batch_validation.py @@ -128,7 +128,8 @@ def test_validate_batch_data_input_error(input_data, batch_data): input_data["node"][-1]["id"] = 123 input_data["line"][-1]["id"] = 123 errors = validate_batch_data(input_data, batch_data) - assert len(errors) == 3 + n_input_validation_errors = 3 + assert len(errors) == n_input_validation_errors assert [MultiComponentNotUniqueError([("line", "id"), ("node", "id")], [("line", 123), ("node", 123)])] == errors[0] assert [MultiComponentNotUniqueError([("line", "id"), ("node", "id")], [("line", 123), ("node", 123)])] == errors[1] assert [MultiComponentNotUniqueError([("line", "id"), ("node", "id")], [("line", 123), ("node", 123)])] == errors[2] @@ -137,7 +138,8 @@ def test_validate_batch_data_input_error(input_data, batch_data): def test_validate_batch_data_update_error(input_data, batch_data): batch_data["line"]["from_status"] = np.array([[12, 34], [0, -128], [56, 78]]) errors = validate_batch_data(input_data, batch_data) - assert len(errors) == 2 + n_update_validation_errors = 2 + assert len(errors) == n_update_validation_errors assert 1 not in errors assert len(errors[0]) == 1 assert len(errors[2]) == 1 diff --git a/tests/unit/validation/test_errors.py b/tests/unit/validation/test_errors.py index bb76d64bc..8d4ef36ea 100644 --- a/tests/unit/validation/test_errors.py +++ b/tests/unit/validation/test_errors.py @@ -122,7 +122,8 @@ def test_comparison_error(): def test_error_context(): error = ComparisonError(component=ComponentType.node, field="tango", ids=[1, 2, 3], ref_value=0) context = error.get_context() - assert len(context) == 4 + expected_context_keys = 4 + assert len(context) == expected_context_keys assert context["component"] == "node" assert context["field"] == "'tango'" assert context["ids"] == [1, 2, 3] diff --git a/tests/unit/validation/test_rules.py b/tests/unit/validation/test_rules.py index 079a8e2aa..e25487bdc 
100644 --- a/tests/unit/validation/test_rules.py +++ b/tests/unit/validation/test_rules.py @@ -478,7 +478,8 @@ def test_all_finite(): "bar_test": np.array([(4, 0.4), (5, 0.5), (6, -np.inf)], dtype=dbar), } errors = all_finite(invalid) - assert len(errors) == 2 + n_infinity_errors = 2 + assert len(errors) == n_infinity_errors assert InfinityError("foo_test", "foo", [2]) in errors assert InfinityError("bar_test", "bar", [6]) in errors errors = all_finite(invalid, {"foo_test": ["foo"]}) diff --git a/tests/unit/validation/test_validation_functions.py b/tests/unit/validation/test_validation_functions.py index 10876cd67..c3a58a948 100644 --- a/tests/unit/validation/test_validation_functions.py +++ b/tests/unit/validation/test_validation_functions.py @@ -115,7 +115,8 @@ def test_validate_unique_ids_across_components(): ) in unique_id_errors ) - assert len(unique_id_errors[0].ids) == 4 + n_non_unique_ids = 4 + assert len(unique_id_errors[0].ids) == n_non_unique_ids def test_validate_ids(): @@ -166,7 +167,8 @@ def test_validate_ids(): dataset_type=DatasetType.update, ) invalid_ids = validate_ids(update_data_col_less_no_id, input_data) - assert len(invalid_ids) == 2 + n_invalid_ids_update_source_without_id = 2 + assert len(invalid_ids) == n_invalid_ids_update_source_without_id assert IdNotInDatasetError("sym_load", [7], "update_data") in invalid_ids source_update_part_nan_id = initialize_array("update", "source", 3) @@ -179,7 +181,8 @@ def test_validate_ids(): dataset_type=DatasetType.update, ) invalid_ids = validate_ids(update_data_col_part_nan_id, input_data) - assert len(invalid_ids) == 2 + n_invalid_ids_source_update = 2 + assert len(invalid_ids) == n_invalid_ids_source_update assert IdNotInDatasetError("sym_load", [7], "update_data") in invalid_ids @@ -704,7 +707,7 @@ def single_component_twice_data(): for error in all_errors: assert any(isinstance(error, error_type) for error_type in [InvalidIdError, PQSigmaPairError]) if isinstance(error, PQSigmaPairError): - assert error.ids[0] == 789 + assert error.ids[0] == data["sym_power_sensor"]["id"][1] @pytest.mark.parametrize("measured_terminal_type", MeasuredTerminalType) @@ -1085,7 +1088,8 @@ def test_power_sigma_or_p_q_sigma(): bad_sym_power_sensor["p_sigma"] = [np.nan, np.nan, 1e4] bad_sym_power_sensor["q_sigma"] = [np.nan, 1e9, np.nan] errors = validate_input_data(input_data=bad_input_data, calculation_type=CalculationType.state_estimation) - assert len(errors) == 2 + n_sym_input_validation_errors = 2 + assert len(errors) == n_sym_input_validation_errors assert errors == [ MissingValueError("sym_power_sensor", "power_sigma", [6]), PQSigmaPairError("sym_power_sensor", ("p_sigma", "q_sigma"), [7, 8]), @@ -1112,7 +1116,8 @@ def test_power_sigma_or_p_q_sigma(): [np.nan, 1e4, 1e4], ] errors = validate_input_data(input_data=bad_input_data, calculation_type=CalculationType.state_estimation) - assert len(errors) == 2 + n_asym_input_validation_errors = 2 + assert len(errors) == n_asym_input_validation_errors assert errors == [ MissingValueError("asym_power_sensor", "power_sigma", [66]), PQSigmaPairError("asym_power_sensor", ("p_sigma", "q_sigma"), [77, 88, 99]), @@ -1253,7 +1258,9 @@ def test_validate_values__tap_regulator_control_side(): assert power_flow_errors == all_errors assert not state_estimation_errors - assert len(all_errors) == 3 + n_input_validation_errors = 3 + + assert len(all_errors) == n_input_validation_errors assert ( InvalidEnumValueError("transformer_tap_regulator", "control_side", [10, 13], [BranchSide, Branch3Side]) in all_errors