Skip to content

chore: remove pylint #359

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Jul 25, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 0 additions & 27 deletions .github/workflows/lint.yml

This file was deleted.

5 changes: 5 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,11 @@ Then to run on all files in the repository:
$ pre-commit run -a
```

Pre-commit can be configured to automatically run on every `git commit` with:
```
$ pre-commit install
```

## Documentation

Documentation is hosted here: https://ai-sdc.github.io/SACRO-ML/
Expand Down
4 changes: 3 additions & 1 deletion examples/user_stories/generate_disclosure_risk_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,9 @@
user_story = config["user_story"]
if user_story == "UNDEFINED":
print(
        "User story not selected, please select a user story by referring to user_stories_flow_chart.png and adding the relevant number to the first line of 'default_config.yaml'"
"User story not selected, please select a user story by "
"referring to user_stories_flow_chart.png and adding the "
        "relevant number to the first line of 'default_config.yaml'"
)
elif user_story == 1:
user_story_1_tre.run_user_story(config)
Expand Down
3 changes: 2 additions & 1 deletion examples/user_stories/user_story_1/user_story_1_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,8 @@ def run_user_story(release_config: dict):
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Generate a risk report after request_release() has been called by researcher"
"Generate a risk report after request_release() "
"has been called by researcher"
)
)

Expand Down
3 changes: 2 additions & 1 deletion examples/user_stories/user_story_2/user_story_2_tre.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,8 @@ def generate_report(
print()
print("Acting as TRE...")
print(
"(when instructions on how to recreate the dataset have been provided by the researcher)"
"(when instructions on how to recreate the dataset have "
"been provided by the researcher)"
)
print(directory)
print()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def run_user_story():

X_train, X_test, y_train, y_test = train_test_split(
feature_dataframe.values,
target_dataframe.values.flatten(),
target_dataframe.to_numpy().flatten(),
test_size=0.7,
random_state=42,
)
Expand Down
14 changes: 8 additions & 6 deletions examples/user_stories/user_story_8/data_processing_researcher.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
"""SUPPORTING FILE FOR USER STORY 2.

This file is an example of a function created by a researcher that will pre-process a dataset
This file is an example of a function created by a researcher that will
pre-process a dataset

To use: write a function that will process your input data, and output the processed version
To use: write a function that will process your input data, and output the
processed version

NOTE: in order to work, this function needs to:

Expand Down Expand Up @@ -33,17 +35,17 @@ def process_dataset(data):

row_indices = np.arange(np.shape(X_transformed)[0])

# This step is not necessary, however it's the simplest way of getting training indices from
# the data
# Any method of generating indices of samples to be used for training will work here
# This step is not necessary, however it's the simplest way of getting
# training indices from the data. Any method of generating indices of
# samples to be used for training will work here.
(
X_train,
X_test,
y_train,
y_test,
train_indices,
test_indices,
) = train_test_split( # pylint: disable=unused-variable
) = train_test_split(
X_transformed,
y_transformed,
row_indices,
Expand Down
47 changes: 8 additions & 39 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -84,40 +84,6 @@ packages = {find = {exclude = ["docs*", "examples*", "tests*", "user_stories*"]}
[tool.setuptools.package-data]
"sacroml.safemodel" = ["rules.json"]

[tool.pylint]
master.py-version = "3.9"
reports.output-format = "colorized"
disable = [
"invalid-name",
"no-member",
"duplicate-code",
"consider-using-min-builtin",
]

[tool.pylint.messages_control]
enable = [
"useless-suppression",
]

disable = [
"R0917", # too-many-positional-arguments
]

[tool.pylint.design]
max-args = 5 # Maximum number of arguments for function / method (see R0913).
max-attributes = 7 # Maximum number of attributes for a class (see R0902).
max-branches = 12 # Maximum number of branch for function / method body (see R0912).
max-locals = 15 # Maximum number of local variables for function / method body (see R0914).
max-parents = 7 # Maximum number of parents for a class (see R0901).
max-public-methods = 20 # Maximum number of public methods for a class (see R0904).
max-returns = 6 # Max number of return / yield for function / method body (see R0911).
max-statements = 50 # Max number of statements in function / method body (see R0915).
min-public-methods = 2 # Minimum number of public methods for a class (see R0903).

[tool.pylint.format]
max-line-length = 100 # Maximum number of characters on a single line.
max-module-lines = 1000 # Maximum number of lines in a module.

[tool.ruff]
indent-width = 4
line-length = 88
Expand All @@ -129,17 +95,17 @@ lint.select = [
"ARG", # flake8-unused-arguments
"B", # flake8-bugbear
"C4", # flake8-comprehensions
# "C90", # mccabe
"C90", # mccabe
"D", # pydocstyle
# "DTZ", # flake8-datetimez
# "E", # pycodestyle
"E", # pycodestyle
"EM", # flake8-errmsg
"ERA", # eradicate
"F", # Pyflakes
"I", # isort
"ICN", # flake8-import-conventions
"N", # pep8-naming
# "PD", # pandas-vet
"PD", # pandas-vet
"PGH", # pygrep-hooks
"PIE", # flake8-pie
# "PL", # Pylint
Expand All @@ -151,7 +117,7 @@ lint.select = [
"Q", # flake8-quotes
"RET", # flake8-return
"RUF100", # Ruff-specific
# "S", # flake8-bandit
"S", # flake8-bandit
"SIM", # flake8-simplify
# "T20", # flake8-print
"TID", # flake8-tidy-imports
Expand All @@ -173,8 +139,10 @@ exclude = [
lint.ignore = [
"EM101", # raw-string-in-exception
"EM102", # f-string-in-exception
"N818", # error-suffix-on-exception-name
"PLC0206", # dict-index-missing-items
"S101", # allow asserts
"S301", # allow pickle
"PD901", # allow df for dataframe names
]

[tool.ruff.lint.pep8-naming]
Expand All @@ -196,6 +164,7 @@ docstring-code-line-length = 80
"user_stories/**/*" = ["ANN"]
"tests/**/*" = ["S101", "PLR2004", "ANN"]
"sacroml/attacks/structural_attack.py" = ["PLR2004"]
"sacroml/safemodel/classifiers/new_model_template.py" = ["C901"]

[tool.codespell]
ignore-words-list = [
Expand Down
2 changes: 1 addition & 1 deletion sacroml/attacks/attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def _make_report(self, target: Target) -> dict:
logger.info("Generating report")
self._construct_metadata()

if not target.model is None:
if target.model is not None:
self.metadata["target_model"] = target.model.model_name
self.metadata["target_model_params"] = target.model.model_params
self.metadata["target_train_params"] = target.model.train_params
Expand Down
4 changes: 2 additions & 2 deletions sacroml/attacks/attack_report_formatter.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ def __str__(self) -> str:
raise NotImplementedError()


class FinalRecommendationModule(AnalysisModule): # pylint: disable=too-many-instance-attributes
class FinalRecommendationModule(AnalysisModule):
"""Generate the first layer of a recommendation report."""

def __init__(self, report: dict) -> None:
Expand Down Expand Up @@ -565,7 +565,7 @@ def process_attack_target_json(

self.text_out.append(bucket_text)

def export_to_file( # pylint: disable=too-many-arguments
def export_to_file(
self,
output_filename: str = "summary.txt",
move_files: bool = False,
Expand Down
14 changes: 5 additions & 9 deletions sacroml/attacks/attribute_attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,7 @@ def _get_unique_features(
return np.unique(combined_feature)


def _get_inference_data( # pylint: disable=too-many-locals
def _get_inference_data(
target: Target, feature_id: int, memberset: bool
) -> tuple[np.ndarray, np.ndarray, float]:
"""Return a dataset of each sample with the attributes to test."""
Expand Down Expand Up @@ -212,7 +212,7 @@ def _get_inference_data( # pylint: disable=too-many-locals
return x_values, y_values, baseline


def _infer( # pylint: disable=too-many-locals
def _infer(
target: Target,
feature_id: int,
threshold: float,
Expand Down Expand Up @@ -329,9 +329,7 @@ def plot_quantitative_risk(res: dict, path: str = "") -> None:
logger.debug("Saved quantitative risk plot: %s", filename)


def plot_categorical_risk( # pylint: disable=too-many-locals
res: dict, path: str = ""
) -> None:
def plot_categorical_risk(res: dict, path: str = "") -> None:
"""Generate a bar chart showing categorical risk scores.

Parameters
Expand Down Expand Up @@ -376,9 +374,7 @@ def plot_categorical_risk( # pylint: disable=too-many-locals
logger.debug("Saved categorical risk plot: %s", filename)


def plot_categorical_fraction( # pylint: disable=too-many-locals
res: dict, path: str = ""
) -> None:
def plot_categorical_fraction(res: dict, path: str = "") -> None:
"""Generate a bar chart showing fraction of dataset inferred.

Parameters
Expand Down Expand Up @@ -472,7 +468,7 @@ def _attack_brute_force(
]


def _get_bounds_risk_for_sample( # pylint: disable=too-many-locals,too-many-arguments
def _get_bounds_risk_for_sample(
target_model: Model,
feat_id: int,
feat_min: float,
Expand Down
2 changes: 1 addition & 1 deletion sacroml/attacks/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from torch.utils.data import DataLoader, Dataset


class BaseDataHandler(ABC): # pylint: disable=too-few-public-methods
class BaseDataHandler(ABC):
"""Base data handling interface."""

@abstractmethod
Expand Down
4 changes: 2 additions & 2 deletions sacroml/attacks/likelihood_attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,9 +269,9 @@ def _compute_scores( # pylint: disable=too-many-locals
global_out_std: float = self._get_global_std(out_conf)

# score each record in the member and non-member sets
for i, l in enumerate(combined_y_train):
for i, y in enumerate(combined_y_train):
# get the target model behaviour on the record (handle one-hot or label)
label = np.argmax(l) if combined_y_train.ndim > 1 else l
label = np.argmax(y) if combined_y_train.ndim > 1 else y
target_logit: float = _logit(combined_target_preds[i, label])
# get behaviour observed with the record as a non-member
out_mean, out_std = self._describe_conf(out_conf[i], global_out_std)
Expand Down
6 changes: 3 additions & 3 deletions sacroml/attacks/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,10 @@
import numpy as np


class Model(ABC): # pylint: disable=too-many-instance-attributes
class Model(ABC):
"""Interface to a target model."""

def __init__( # pylint: disable=too-many-arguments
def __init__(
self,
model: Any,
model_path: str = "",
Expand Down Expand Up @@ -203,7 +203,7 @@ def save(self, path: str) -> None:

@classmethod
@abstractmethod
def load( # pylint: disable=too-many-arguments
def load(
cls,
model_path: str,
model_module_path: str,
Expand Down
4 changes: 2 additions & 2 deletions sacroml/attacks/model_pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
class PytorchModel(Model):
"""Interface to Pytorch models."""

def __init__( # pylint: disable=too-many-arguments
def __init__(
self,
model: torch.nn.Module,
model_path: str = "",
Expand Down Expand Up @@ -281,7 +281,7 @@ def save(self, path: str) -> None:
torch.save(self.model.state_dict(), path)

@classmethod
def load( # pylint: disable=too-many-arguments
def load(
cls,
model_path: str,
model_module_path: str,
Expand Down
4 changes: 2 additions & 2 deletions sacroml/attacks/model_sklearn.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
class SklearnModel(Model):
"""Interface to sklearn.base.BaseEstimator models."""

def __init__( # pylint: disable=too-many-arguments
def __init__(
self,
model: sklearn.base.BaseEstimator,
model_path: str = "",
Expand Down Expand Up @@ -217,7 +217,7 @@ def save(self, path: str) -> None:
raise ValueError(f"Unsupported file format for saving a model: {ext}")

@classmethod
def load( # pylint: disable=too-many-arguments
def load(
cls,
model_path: str,
model_module_path: str,
Expand Down
4 changes: 2 additions & 2 deletions sacroml/attacks/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ def title(
pdf.ln(h=5)


def subtitle( # pylint: disable = too-many-arguments
def subtitle(
pdf: FPDF,
text: str,
indent: int = 10,
Expand All @@ -186,7 +186,7 @@ def subtitle( # pylint: disable = too-many-arguments
pdf.cell(75, 10, text, border, 1)


def line( # pylint: disable = too-many-arguments
def line(
pdf: FPDF,
text: str,
indent: int = 0,
Expand Down
4 changes: 0 additions & 4 deletions sacroml/attacks/structural_attack.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,6 @@
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# pylint: disable=chained-comparison


def get_unnecessary_risk(model: BaseEstimator) -> bool:
"""Check whether model hyperparameters are in the top 20% most risky.
Expand Down Expand Up @@ -226,8 +224,6 @@ def _get_model_param_count_mlp(model: MLPClassifier) -> int:
class StructuralAttack(Attack):
"""Structural attacks based on the static structure of a model."""

# pylint: disable=too-many-instance-attributes

def __init__(
self,
output_dir: str = "outputs",
Expand Down
Loading
Loading