diff --git a/code_generation/code_gen.py b/code_generation/code_gen.py
index dbbef5c55..fe12add72 100644
--- a/code_generation/code_gen.py
+++ b/code_generation/code_gen.py
@@ -102,10 +102,7 @@ def render_dataset_class_maps(self, template_path: Path, data_path: Path, output
         # create list
         all_map = {}
         for dataset in dataset_meta_data:
-            if dataset.is_template:
-                prefixes = ["sym_", "asym_"]
-            else:
-                prefixes = [""]
+            prefixes = ["sym_", "asym_"] if dataset.is_template else [""]
             for prefix in prefixes:
                 all_components = {}
                 for component in dataset.components:
diff --git a/code_generation/templates/src/power_grid_model/_core/dataset_class_maps.py.jinja b/code_generation/templates/src/power_grid_model/_core/dataset_class_maps.py.jinja
index bf918be49..61e8895f3 100644
--- a/code_generation/templates/src/power_grid_model/_core/dataset_class_maps.py.jinja
+++ b/code_generation/templates/src/power_grid_model/_core/dataset_class_maps.py.jinja
@@ -25,7 +25,7 @@ class _MetaEnum(EnumMeta):
         Returns:
             bool: True if the member is part of the Enum, False otherwise.
         """
-        return member in cls.__members__.keys()
+        return member in cls.__members__


 class DatasetType(str, Enum, metaclass=_MetaEnum):
diff --git a/pyproject.toml b/pyproject.toml
index 0dc74d61d..c8bcd5a4c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -113,8 +113,8 @@ select = [
     "FURB",
     "FLY",
     "SLOT",
+    "NPY",
 ]
-ignore = ["SIM108", "SIM118", "SIM110", "SIM211"]

 [tool.ruff.lint.isort]
 # Imports that are imported using keyword "as" and are from the same source - are combined.
diff --git a/setup.py b/setup.py
index aa2f89442..d8ddeba8d 100644
--- a/setup.py
+++ b/setup.py
@@ -76,10 +76,7 @@ def get_tag(self):
 class MyBuildExt(build_ext):
     def build_extensions(self):
         if not if_win:
-            if "CXX" in os.environ:
-                cxx = os.environ["CXX"]
-            else:
-                cxx = self.compiler.compiler_cxx[0]
+            cxx = os.environ.get("CXX", self.compiler.compiler_cxx[0])
             # check setuptools has an update change in the version 72.2 about cxx compiler options
             # to be compatible with both version, we check if compiler_so_cxx exists
             if not hasattr(self.compiler, "compiler_so_cxx"):
@@ -93,10 +90,7 @@ def build_extensions(self):
             linker_so_cxx[0] = cxx
             self.compiler.compiler_cxx = [cxx]
             # add link time optimization
-            if "clang" in cxx:
-                lto_flag = "-flto=thin"
-            else:
-                lto_flag = "-flto"
+            lto_flag = "-flto=thin" if "clang" in cxx else "-flto"
             compiler_so_cxx += [lto_flag]
             linker_so_cxx += [lto_flag]
             # remove debug and optimization flags
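Review note: the pyproject.toml hunk above drives most of this patch. Deleting the ignore line enables ruff's SIM108 (if/else-to-ternary), SIM110 (loop-to-any/all), SIM118 (key-in-dict) and SIM211 (negated ternary) checks, and "NPY" adds the NumPy-specific rules. Below is a minimal standalone sketch of the SIM118 pattern that the _MetaEnum.__contains__ hunks rely on; the enum members are illustrative, not the full generated set. The same one-line change recurs in the generated dataset_definitions.py that follows.

    from enum import Enum, EnumMeta

    class _MetaEnum(EnumMeta):
        def __contains__(cls, member):
            # Mapping membership already tests keys, so ".keys()" is
            # redundant (SIM118); __members__ is a read-only mapping.
            return member in cls.__members__

    class DatasetType(str, Enum, metaclass=_MetaEnum):
        input = "input"
        sym_output = "sym_output"

    assert "input" in DatasetType
    assert "bogus" not in DatasetType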
""" - return member in cls.__members__.keys() + return member in cls.__members__ class DatasetType(str, Enum, metaclass=_MetaEnum): diff --git a/src/power_grid_model/_core/power_grid_core.py b/src/power_grid_model/_core/power_grid_core.py index c6e29ef3a..a2bd6c5e0 100644 --- a/src/power_grid_model/_core/power_grid_core.py +++ b/src/power_grid_model/_core/power_grid_core.py @@ -126,10 +126,7 @@ def _load_core() -> CDLL: """ # first try to find the DLL local - if platform.system() == "Windows": - dll_file = "_power_grid_core.dll" - else: - dll_file = "_power_grid_core.so" + dll_file = "_power_grid_core.dll" if platform.system() == "Windows" else "_power_grid_core.so" dll_path = Path(__file__).parent / dll_file # if local DLL is not found, try to find the DLL from conda environment @@ -192,10 +189,7 @@ def make_c_binding(func: Callable): # binding function def cbind_func(self, *args, **kwargs): - if "destroy" in name: - c_inputs = [] - else: - c_inputs = [self._handle] + c_inputs = [] if "destroy" in name else [self._handle] args = chain(args, (kwargs[key] for key in py_argnames[len(args) :])) for arg in args: if isinstance(arg, str): diff --git a/src/power_grid_model/_core/power_grid_model.py b/src/power_grid_model/_core/power_grid_model.py index 48671b9ea..04f6043ec 100644 --- a/src/power_grid_model/_core/power_grid_model.py +++ b/src/power_grid_model/_core/power_grid_model.py @@ -189,10 +189,7 @@ def _get_output_component_count(self, calculation_type: CalculationType): }.get(calculation_type, []) def include_type(component_type: ComponentType): - for exclude_type in exclude_types: - if exclude_type.value in component_type.value: - return False - return True + return all(exclude_type.value not in component_type.value for exclude_type in exclude_types) return {ComponentType[k]: v for k, v in self.all_component_count.items() if include_type(k)} diff --git a/src/power_grid_model/_core/utils.py b/src/power_grid_model/_core/utils.py index 7bca7a806..68e0d9c89 100644 --- a/src/power_grid_model/_core/utils.py +++ b/src/power_grid_model/_core/utils.py @@ -651,10 +651,12 @@ def _extract_columnar_data( """ not_columnar_data_message = "Expected columnar data" - if is_batch is not None: - allowed_dims = [2, 3] if is_batch else [1, 2] - else: + if is_batch is None: allowed_dims = [1, 2, 3] + elif is_batch: + allowed_dims = [2, 3] + else: + allowed_dims = [1, 2] sub_data = data["data"] if is_sparse(data) else data @@ -683,10 +685,12 @@ def _extract_row_based_data( Returns: SingleArray | DenseBatchArray: the contents of row based data """ - if is_batch is not None: - allowed_dims = [2] if is_batch else [1] - else: + if is_batch is None: allowed_dims = [1, 2] + elif is_batch: + allowed_dims = [2] + else: + allowed_dims = [1] sub_data = data["data"] if is_sparse(data) else data diff --git a/src/power_grid_model/validation/_rules.py b/src/power_grid_model/validation/_rules.py index 55a6e4998..ba84c9e75 100644 --- a/src/power_grid_model/validation/_rules.py +++ b/src/power_grid_model/validation/_rules.py @@ -892,10 +892,7 @@ def none_missing(data: SingleDataset, component: ComponentType, fields: str | li fields = [fields] for field in fields: nan = _nan_type(component, field) - if np.isnan(nan): - invalid = np.isnan(data[component][field]) - else: - invalid = np.equal(data[component][field], nan) + invalid = np.isnan(data[component][field]) if np.isnan(nan) else np.equal(data[component][field], nan) if invalid.any(): # handle both symmetric and asymmetric values diff --git 
diff --git a/src/power_grid_model/validation/utils.py b/src/power_grid_model/validation/utils.py
index 6cb9a43bd..dc89ea27a 100644
--- a/src/power_grid_model/validation/utils.py
+++ b/src/power_grid_model/validation/utils.py
@@ -101,7 +101,7 @@ def _update_input_data(input_data: SingleDataset, update_data: SingleDataset):
     """
     merged_data = {component: array.copy() for component, array in input_data.items()}
-    for component in update_data.keys():
+    for component in update_data:
         _update_component_data(component, merged_data[component], update_data[component])
     return merged_data

@@ -140,10 +140,7 @@ def _update_component_array_data(
         if field == "id":
             continue
         nan = _nan_type(component, field, DatasetType.update)
-        if np.isnan(nan):
-            mask = ~np.isnan(update_data[field])
-        else:
-            mask = np.not_equal(update_data[field], nan)
+        mask = ~np.isnan(update_data[field]) if np.isnan(nan) else np.not_equal(update_data[field], nan)

         if mask.ndim == 2:
             for phase in range(mask.shape[1]):
diff --git a/tests/unit/test_0Z_model_validation.py b/tests/unit/test_0Z_model_validation.py
index 2f88d7b6f..bdd5813f4 100644
--- a/tests/unit/test_0Z_model_validation.py
+++ b/tests/unit/test_0Z_model_validation.py
@@ -122,7 +122,8 @@ def test_single_validation(
     # test get indexer
     for component_name, input_array in case_data["input"].items():
         ids_array = input_array["id"].copy()
-        np.random.shuffle(ids_array)
+        rng = np.random.default_rng(3)
+        rng.shuffle(ids_array)
         indexer_array = model.get_indexer(component_name, ids_array)
         # check
         assert np.all(input_array["id"][indexer_array] == ids_array)
diff --git a/tests/unit/test_data_handling.py b/tests/unit/test_data_handling.py
index 5b5d260f5..3e07e610a 100644
--- a/tests/unit/test_data_handling.py
+++ b/tests/unit/test_data_handling.py
@@ -123,7 +123,7 @@ def test_create_output_data(output_component_types, expected_fns, batch_size):
         output_component_types=output_component_types,
         output_type=DT.sym_output,
         all_component_count=all_component_count,
-        is_batch=False if batch_size == 1 else True,
+        is_batch=batch_size != 1,
         batch_size=batch_size,
     )
diff --git a/tests/unit/test_internal_utils.py b/tests/unit/test_internal_utils.py
index b3e80fe8b..de1196f52 100644
--- a/tests/unit/test_internal_utils.py
+++ b/tests/unit/test_internal_utils.py
@@ -855,7 +855,7 @@ def row_data(request):

 def compare_row_data(actual_row_data, desired_row_data):
     assert actual_row_data.keys() == desired_row_data.keys()
-    for comp_name in actual_row_data.keys():
+    for comp_name in actual_row_data:
         actual_component = actual_row_data[comp_name]
         desired_component = desired_row_data[comp_name]
         if is_sparse(actual_component):
diff --git a/tests/unit/test_meta_data.py b/tests/unit/test_meta_data.py
index ea066e8dd..0239268d1 100644
--- a/tests/unit/test_meta_data.py
+++ b/tests/unit/test_meta_data.py
@@ -59,15 +59,9 @@ def test_sensor_meta_data():
         assert "id" in attr_names
         # check specific attributes
         if "voltage" in sensor:
-            if "output" in meta_type:
-                expected_attrs = output_voltage
-            else:
-                expected_attrs = input_voltage
+            expected_attrs = output_voltage if "output" in meta_type else input_voltage
         else:
-            if "output" in meta_type:
-                expected_attrs = output_power
-            else:
-                expected_attrs = input_power
+            expected_attrs = output_power if "output" in meta_type else input_power
         for name in expected_attrs:
             assert name in attr_names
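Review note: the shuffle change in test_0Z_model_validation.py above is the one NPY fix in the patch (NPY002): the legacy np.random.shuffle mutates hidden global state, whereas a locally seeded Generator keeps the test order reproducible. A sketch with a toy stand-in for model.get_indexer, which in the real test requires a built model:

    import numpy as np

    ids_array = np.array([2, 3, 5, 7, 11])
    shuffled = ids_array.copy()

    rng = np.random.default_rng(3)  # NPY002: local, explicitly seeded generator
    rng.shuffle(shuffled)           # same seed, same order, every run

    # Toy stand-in for get_indexer: original position of each shuffled id.
    position = {int(id_): i for i, id_ in enumerate(ids_array)}
    indexer_array = np.array([position[int(id_)] for id_ in shuffled])

    assert np.all(ids_array[indexer_array] == shuffled)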
diff --git a/tests/unit/test_serialization.py b/tests/unit/test_serialization.py
index 192c12fbf..dcc7d91b3 100644
--- a/tests/unit/test_serialization.py
+++ b/tests/unit/test_serialization.py
@@ -512,7 +512,7 @@ def assert_individual_data_entry(serialized_dataset, data_filter, component, ser
             if is_attribute_filtered_out(data_filter, component, attr):
                 assert attr not in deserialized_output
                 continue
-            assert attr in deserialized_output.keys()
+            assert attr in deserialized_output
             assert_almost_equal(
                 deserialized_output[attr][comp_idx],
                 serialized_input[comp_idx][attr],
@@ -530,7 +530,7 @@ def assert_individual_data_entry(serialized_dataset, data_filter, component, ser
             if is_attribute_filtered_out(data_filter, component, attr):
                 assert attr not in deserialized_output
                 continue
-            assert attr in deserialized_output.keys()
+            assert attr in deserialized_output
             assert_almost_equal(
                 deserialized_output[attr][comp_idx],
                 serialized_input[comp_idx][attr_idx],
diff --git a/tests/unit/utils.py b/tests/unit/utils.py
index afab0e4f0..f26801230 100644
--- a/tests/unit/utils.py
+++ b/tests/unit/utils.py
@@ -180,10 +180,7 @@ def _add_cases(case_dir: Path, calculation_type: str, **kwargs):

 def pytest_cases(get_batch_cases: bool = False, data_dir: str | None = None, test_cases: list[str] | None = None):
-    if data_dir is not None:
-        relevant_calculations = [data_dir]
-    else:
-        relevant_calculations = ["power_flow", "state_estimation", "short_circuit"]
+    relevant_calculations = [data_dir] if data_dir is not None else ["power_flow", "state_estimation", "short_circuit"]
     for calculation_type in relevant_calculations:
         test_case_paths = get_test_case_paths(calculation_type=calculation_type, test_cases=test_cases)
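Review note: the remaining hunks are the same two mechanical rewrites seen elsewhere in the patch: SIM118 drops redundant .keys() from membership tests, and SIM108/SIM211 collapse if/else assignments into ternaries. Each pair below is behavior-identical; the data values are illustrative only:

    deserialized_output = {"id": [1, 2], "u_pu": [1.01, 0.99]}

    # SIM118: dicts test key membership directly; .keys() adds nothing.
    assert ("id" in deserialized_output) == ("id" in deserialized_output.keys())

    # SIM211: "False if cond else True" is just the negated condition.
    batch_size = 1
    assert (batch_size != 1) == (False if batch_size == 1 else True)

    # SIM108: an if/else assignment becomes a conditional expression.
    data_dir = None
    calcs = [data_dir] if data_dir is not None else ["power_flow", "state_estimation", "short_circuit"]
    assert calcs == ["power_flow", "state_estimation", "short_circuit"]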