diff --git a/.github/workflows/format-typing-check.yml b/.github/workflows/format-typing-check.yml index a4128a15..c5a97d4b 100644 --- a/.github/workflows/format-typing-check.yml +++ b/.github/workflows/format-typing-check.yml @@ -37,8 +37,8 @@ jobs: - name: Install ruff and mypy run: | pip install ruff mypy typing_extensions \ - types-Deprecated types-beautifulsoup4 types-jsonschema \ - types-networkx types-tabulate types-PyYAML pandas-stubs + types-Deprecated types-beautifulsoup4 types-jsonschema types-requests \ + types-networkx types-tabulate types-PyYAML pandas-stubs - name: Get all changed python files id: changed-python-files uses: tj-actions/changed-files@v44 diff --git a/pyproject.toml b/pyproject.toml index 879895f0..8fd371b7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,6 +58,7 @@ dev = [ "mypy", "typing_extensions", # stub packages. Update the `format-typing-check.yml` too if you add more. + "types-requests", "types-beautifulsoup4", "types-jsonschema", "types-networkx", diff --git a/src/nplinker/genomics/antismash/__init__.py b/src/nplinker/genomics/antismash/__init__.py index e126f548..7c9179a0 100644 --- a/src/nplinker/genomics/antismash/__init__.py +++ b/src/nplinker/genomics/antismash/__init__.py @@ -1,16 +1,26 @@ -from .antismash_downloader import download_and_extract_antismash_data +from .antismash_api_client import antismash_job_is_done +from .antismash_api_client import submit_antismash_job +from .antismash_downloader import download_and_extract_from_antismash_api +from .antismash_downloader import download_and_extract_from_antismash_db +from .antismash_downloader import extract_antismash_data from .antismash_loader import AntismashBGCLoader from .antismash_loader import parse_bgc_genbank +from .ncbi_downloader import download_and_extract_ncbi_genome from .podp_antismash_downloader import GenomeStatus from .podp_antismash_downloader import get_best_available_genome_id from .podp_antismash_downloader import podp_download_and_extract_antismash_data __all__ = [ - "download_and_extract_antismash_data", + "extract_antismash_data", + "download_and_extract_from_antismash_api", + "download_and_extract_from_antismash_db", "AntismashBGCLoader", "parse_bgc_genbank", "GenomeStatus", "get_best_available_genome_id", "podp_download_and_extract_antismash_data", + "download_and_extract_ncbi_genome", + "submit_antismash_job", + "antismash_job_is_done", ] diff --git a/src/nplinker/genomics/antismash/antismash_api_client.py b/src/nplinker/genomics/antismash/antismash_api_client.py new file mode 100644 index 00000000..124aedcb --- /dev/null +++ b/src/nplinker/genomics/antismash/antismash_api_client.py @@ -0,0 +1,81 @@ +from __future__ import annotations +import logging +from os import PathLike +from pathlib import Path +import requests + + +logger = logging.getLogger(__name__) + + +def submit_antismash_job(genbank_filepath: str | PathLike) -> str: + """Submits an antiSMASH job using the provided GenBank file. + + This function sends a GenBank file to the antiSMASH API + and retrieves the job ID if the submission is successful. + + Args: + genbank_filepath (str | PathLike): The path to the GenBank file to be submitted. + + Returns: + str: The job ID of the submitted antiSMASH job. + + Raises: + requests.exceptions.RequestException: If there is an issue with the HTTP request. + RuntimeError: If the API response does not contain a job ID. 
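+
+    Examples:
+        An illustrative submit-and-poll workflow (the GenBank filename is a placeholder):
+
+        >>> job_id = submit_antismash_job("GCF_004339725.1.gbff")
+        >>> while not antismash_job_is_done(job_id):
+        ...     time.sleep(15)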
+ """ + url = "https://antismash.secondarymetabolites.org/api/v1.0/submit" + genbank_filepath = Path(genbank_filepath) + + with open(genbank_filepath, "rb") as file: + files = {"seq": file} + response = requests.post(url, files=files) + response.raise_for_status() # Raise an exception for HTTP errors + + data = response.json() + if "id" not in data: + raise RuntimeError("No antiSMASH job ID returned") + return str(data["id"]) + + +def antismash_job_is_done(job_id: str) -> bool: + """Determines if the antiSMASH job has completed by checking its status. + + This function queries the antiSMASH API to retrieve the current state + of the job and determines whether it has finished successfully, is still + in progress, or has encountered an error. + + Args: + job_id (str): The unique identifier of the antiSMASH job. + + Returns: + bool: True if the job is completed successfully, False if it is still + running or queued. + + Raises: + RuntimeError: If the job has failed or if the API response indicates an error. + ValueError: If the job state is missing or an unexpected state is encountered + in the API response. + requests.exceptions.HTTPError: If an HTTP error occurs during the API request. + """ + url = f"https://antismash.secondarymetabolites.org/api/v1.0/status/{job_id}" + + response = requests.get(url, timeout=10) + response.raise_for_status() # Raise exception for HTTP errors + respose_data = response.json() + + if "state" not in respose_data: + raise ValueError(f"Job state missing in response for job_id: {job_id}") + + job_state = respose_data["state"] + if job_state in ("running", "queued"): + return False + if job_state == "done": + return True + if job_state == "failed": + job_status = respose_data.get("status", "No error message provided") + raise RuntimeError(f"AntiSMASH job {job_id} failed with an error: {job_status}") + else: + raise ValueError( + f"Unexpected job state for antismash job ID {job_id}. Job state: {job_state}" + ) diff --git a/src/nplinker/genomics/antismash/antismash_downloader.py b/src/nplinker/genomics/antismash/antismash_downloader.py index a02728f4..2c27938e 100644 --- a/src/nplinker/genomics/antismash/antismash_downloader.py +++ b/src/nplinker/genomics/antismash/antismash_downloader.py @@ -4,7 +4,9 @@ import shutil from os import PathLike from pathlib import Path +import requests from nplinker.utils import download_and_extract_archive +from nplinker.utils import extract_archive from nplinker.utils import list_dirs from nplinker.utils import list_files @@ -15,10 +17,75 @@ ANTISMASH_DB_DOWNLOAD_URL = "https://antismash-db.secondarymetabolites.org/output/{}/{}" # The antiSMASH DBV2 is for the availability of the old version, better to keep it. ANTISMASH_DBV2_DOWNLOAD_URL = "https://antismash-dbv2.secondarymetabolites.org/output/{}/{}" +# antismash api to download results from submitted jobs +ANTISMASH_API_DOWNLOAD_URL = "https://antismash.secondarymetabolites.org/upload/{}/{}" def download_and_extract_antismash_data( - antismash_id: str, download_root: str | PathLike, extract_root: str | PathLike + url: str, antismash_id: str, download_root: str | PathLike, extract_root: str | PathLike +) -> None: + """Download and extract antiSMASH BGC archive for a specified genome. + + This function downloads a BGC archive from the specified URL, extracts its contents, + and organizes the extracted files into a structured directory under the given `extract_root`. + + Args: + url (str): The URL to download the BGC archive from. 
+        antismash_id (str): The identifier for the antiSMASH genome, used to name the extraction directory.
+        download_root (str | PathLike): Path to the directory where the downloaded archive will be stored.
+        extract_root (str | PathLike): Path to the directory where the data files will be extracted.
+            Note that an `antismash` directory will be created in the specified `extract_root` if
+            it doesn't exist. The files will be extracted to `<extract_root>/antismash/<antismash_id>` directory.
+
+    Raises:
+        ValueError: if `<extract_root>/antismash/<antismash_id>` dir is not empty.
+        Exception: If any error occurs during the download or extraction process, the partially extracted
+            directory will be cleaned up, and the exception will be re-raised.
+
+    Examples:
+        >>> download_and_extract_antismash_data(
+                "https://antismash-db.secondarymetabolites.org/output/GCF_001.1/GCF_001.1.zip",
+                "GCF_001.1",
+                "/data/download",
+                "/data/extracted"
+            )
+    """
+    extract_path = Path(extract_root) / "antismash" / antismash_id
+
+    _prepare_extract_path(extract_path)
+    try:
+        download_and_extract_archive(url, download_root, extract_path, f"{antismash_id}.zip")
+        _cleanup_extracted_files(extract_path)
+    except Exception as e:
+        shutil.rmtree(extract_path)
+        raise e
+
+
+def download_and_extract_from_antismash_api(
+    job_id: str, antismash_id: str, download_root: str | PathLike, extract_root: str | PathLike
+) -> None:
+    """Downloads and extracts results from an antiSMASH API job.
+
+    This function constructs the download URL from the provided job ID, then
+    downloads the results as a ZIP file and extracts its contents to the specified directories.
+
+    Args:
+        job_id (str): The job ID of the antiSMASH API job.
+        antismash_id (str): The unique identifier for the antiSMASH dataset.
+        download_root (str or PathLike): The root directory where the ZIP file will be downloaded.
+        extract_root (str or PathLike): The root directory where the contents of the ZIP file will be extracted.
+
+    Raises:
+        requests.exceptions.RequestException: If there is an issue with the HTTP request.
+        zipfile.BadZipFile: If the downloaded file is not a valid ZIP file.
+        OSError: If there is an issue with file operations such as writing or extracting.
+    """
+    url = ANTISMASH_API_DOWNLOAD_URL.format(job_id, antismash_id + ".zip")
+    download_and_extract_antismash_data(url, antismash_id, download_root, extract_root)
+
+
+def download_and_extract_from_antismash_db(
+    refseq_acc: str, download_root: str | PathLike, extract_root: str | PathLike
 ) -> None:
     """Download and extract antiSMASH BGC archive for a specified genome.
 
@@ -27,7 +94,7 @@ def download_and_extract_antismash_data(
     of a genome as the id of the archive.
 
     Args:
-        antismash_id: The id used to download BGC archive from antiSMASH database.
+        refseq_acc: The id used to download BGC archive from antiSMASH database.
             If the id is versioned (e.g., "GCF_004339725.1") please be sure to
             specify the version as well.
        download_root: Path to the directory to place downloaded archive in.
@@ -36,41 +103,53 @@ def download_and_extract_antismash_data(
             it doesn't exist. The files will be extracted to `<extract_root>/antismash/<antismash_id>` directory.
 
     Raises:
-        ValueError: if `<extract_root>/antismash/<antismash_id>` dir is not empty.
+        ValueError: if `<extract_root>/antismash/<refseq_acc>` dir is not empty.
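+        RuntimeError: if neither the antiSMASH database nor its v2 instance has
+            results for the given `refseq_acc`.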
 
     Examples:
-        >>> download_and_extract_antismash_metadata("GCF_004339725.1", "/data/download", "/data/extracted")
+        >>> download_and_extract_from_antismash_db("GCF_004339725.1", "/data/download", "/data/extracted")
     """
-    download_root = Path(download_root)
-    extract_root = Path(extract_root)
-    extract_path = extract_root / "antismash" / antismash_id
+    for base_url in [ANTISMASH_DB_DOWNLOAD_URL, ANTISMASH_DBV2_DOWNLOAD_URL]:
+        url = base_url.format(refseq_acc, f"{refseq_acc}.zip")
+        if requests.head(url).status_code == 404:  # not found
+            continue
+        download_and_extract_antismash_data(url, refseq_acc, download_root, extract_root)
+        return  # Exit the loop once a valid URL is processed
 
-    try:
-        if extract_path.exists():
-            _check_extract_path(extract_path)
-        else:
-            extract_path.mkdir(parents=True, exist_ok=True)
+    # if both urls give 404 not found
+    raise RuntimeError(f"No results in antiSMASH DB for {refseq_acc}")
 
-        for base_url in [ANTISMASH_DB_DOWNLOAD_URL, ANTISMASH_DBV2_DOWNLOAD_URL]:
-            url = base_url.format(antismash_id, antismash_id + ".zip")
-            download_and_extract_archive(url, download_root, extract_path, antismash_id + ".zip")
-            break
-        # delete subdirs
-        for subdir_path in list_dirs(extract_path):
-            shutil.rmtree(subdir_path)
+
+def extract_antismash_data(
+    archive: str | PathLike, extract_root: str | PathLike, antismash_id: str
+) -> None:
+    """Extracts antiSMASH results from a given archive into a specified directory.
 
-        # delete unnecessary files
-        files_to_keep = list_files(extract_path, suffix=(".json", ".gbk"))
-        for file in list_files(extract_path):
-            if file not in files_to_keep:
-                os.remove(file)
+    This function handles the extraction of antiSMASH results by preparing the
+    extraction path, extracting the archive, and performing cleanup of
+    unnecessary files. If an error occurs during the process, the partially
+    extracted files are removed, and the exception is re-raised.
 
-        logger.info("antiSMASH BGC data of %s is downloaded and extracted.", antismash_id)
+    Args:
+        archive (str | PathLike): The path to the archive file containing antiSMASH results.
+        extract_root (str | PathLike): The root directory where the data should
+            be extracted.
+        antismash_id (str): A unique identifier for the antiSMASH data, used to
+            create a subdirectory for the extracted files.
+
+    Raises:
+        Exception: If any error occurs during the extraction process, the
+            exception is re-raised after cleaning up the extraction directory.
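+
+    Examples:
+        An illustrative call (paths and ID are placeholders):
+
+        >>> extract_antismash_data(
+                "/data/download/GCF_004339725.1.zip", "/data/extracted", "GCF_004339725.1"
+            )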
+ """ + extract_path = Path(extract_root) / "antismash" / antimash_id + + _prepare_extract_path(extract_path) + + try: + extract_archive(archive, extract_path, remove_finished=False) + _cleanup_extracted_files(extract_path) except Exception as e: shutil.rmtree(extract_path) - logger.warning(e) raise e @@ -78,3 +157,23 @@ def _check_extract_path(extract_path: Path): # check if extract_path is empty if any(extract_path.iterdir()): raise ValueError(f'Nonempty directory: "{extract_path}"') + + +def _cleanup_extracted_files(extract_path: str | PathLike) -> None: + # delete subdirs + for subdir_path in list_dirs(extract_path): + shutil.rmtree(subdir_path) + + # delete unnecessary files + files_to_keep = list_files(extract_path, suffix=(".json", ".gbk")) + for file in list_files(extract_path): + if file not in files_to_keep: + os.remove(file) + + +def _prepare_extract_path(extract_path: str | PathLike) -> None: + extract_path = Path(extract_path) + if extract_path.exists(): + _check_extract_path(extract_path) + else: + extract_path.mkdir(parents=True, exist_ok=True) diff --git a/src/nplinker/genomics/antismash/ncbi_downloader.py b/src/nplinker/genomics/antismash/ncbi_downloader.py new file mode 100644 index 00000000..143102c8 --- /dev/null +++ b/src/nplinker/genomics/antismash/ncbi_downloader.py @@ -0,0 +1,133 @@ +from __future__ import annotations +import logging +import os +import shutil +import time +from os import PathLike +from pathlib import Path +import httpx +import requests +from nplinker.utils import check_md5 +from nplinker.utils import download_url +from nplinker.utils import extract_archive + + +logger = logging.getLogger(__name__) + + +def download_and_extract_ncbi_genome( + genome_assembly_acc: str, + download_root: str | PathLike, + extract_root: str | PathLike, + max_attempts: int = 10, +) -> Path: + """Downloads and extracts an NCBI dataset for a given genome RefSeq ID. + + This function retrieves a dataset from the NCBI database using the provided + RefSeq ID. It retries the download process up to a specified maximum number + of attempts in case of errors. The function verifies the integrity of the + downloaded files using MD5 checksums, extracts the dataset, and renames the + GenBank file for easier access. Unnecessary files are removed after successful + processing. + + Args: + genome_assembly_acc (str): The NCBI accession of the genome assembly to be downloaded. + download_root (str | PathLike): The directory where the dataset will be downloaded. + extract_root (str | PathLike): The directory where the dataset will be extracted. + max_attempts (int): The maximum number of download attempts. Defaults to 10. + + Returns: + Path: The path to the extracted GenBank file. + + Raises: + RuntimeError: If the maximum number of retries is reached and the dataset + could not be successfully downloaded and extracted. 
+ """ + extract_path = Path(extract_root) / "ncbi_genomes" + extract_path.mkdir(parents=True, exist_ok=True) + + _check_genome_accession_validity(genome_assembly_acc) + archive = _download_genome(genome_assembly_acc, download_root, max_attempts) + extract_archive(archive, extract_path) + _verify_ncbi_dataset_md5_sums(extract_path) + + # Move and rename GenBank file + genbank_path = extract_path / "ncbi_dataset" / "data" / genome_assembly_acc / "genomic.gbff" + new_genbank_path = extract_path / f"{genome_assembly_acc}.gbff" + genbank_path.rename(new_genbank_path) + + # Delete unnecessary files + shutil.rmtree(extract_path / "ncbi_dataset") + os.remove(extract_path / "md5sum.txt") + os.remove(extract_path / "README.md") + + return new_genbank_path + + +def _check_genome_accession_validity(genome_assembly_acc, max_attempts=10): + """Check the validity of genome accessio.""" + url = f"https://api.ncbi.nlm.nih.gov/datasets/v2/genome/accession/{genome_assembly_acc}/check" + + # Retry multiple times because NCBI has currently issues (500 Internal Server Error) + for attempt in range(1, max_attempts + 1): + try: + response = requests.get(url) + response.raise_for_status() + break + except Exception: + if attempt < max_attempts: + time.sleep(1) + + # Raise if no attempt was successful + response.raise_for_status() + # Raise if genome assembly is not successful + if "valid_assemblies" not in response.json(): + raise ValueError(f"Not a valid genome assembly accession: {genome_assembly_acc}") + + +def _download_genome(genome_assembly_acc, download_root, max_attempts): + url = ( + "https://api.ncbi.nlm.nih.gov/datasets/v2/genome/accession/" + f"{genome_assembly_acc}/download?include_annotation_type=GENOME_GB" + ) + download_root = Path(download_root) + filename = f"ncbi_{genome_assembly_acc}.zip" + + # Retry multiple times because NCBI has issues currently + for attempt in range(1, max_attempts + 1): + try: + download_url(url, download_root, filename) + return download_root / filename + except httpx.ReadTimeout as e: + logger.warning(f"Attempt {attempt}/{max_attempts} failed to download {url}. Error: {e}") + if attempt < max_attempts: + time.sleep(1) + else: + raise httpx.ReadTimeout( + f"Failed to download the genome {genome_assembly_acc} from NCBI. " + f"Maximum download retries ({max_attempts}) reached for {url}." + ) + + +def _verify_ncbi_dataset_md5_sums(extract_path: str | PathLike) -> None: + """Verify the integrity of files in a specified directory using MD5 checksums. + + This function reads an "md5sum.txt" file located in the given extraction path, + which contains MD5 checksums and corresponding file names. It then computes + the MD5 checksum for each file and compares it with the expected value. If any + file's checksum does not match, a `ValueError` is raised. + + Args: + extract_path (PathLike): Path to the directory containing the files and + the "md5sum.txt" file. + + Raises: + ValueError: If the MD5 checksum of any file does not match the expected value. 
+ """ + extract_path = Path(extract_path) + with open(extract_path / "md5sum.txt", "r") as f: + for line in f: + md5sum, file_name = line.strip().split() + file_path = extract_path / file_name + if not check_md5(file_path, md5sum): + raise ValueError(f"MD5 checksum mismatch for {file_path}") diff --git a/src/nplinker/genomics/antismash/podp_antismash_downloader.py b/src/nplinker/genomics/antismash/podp_antismash_downloader.py index 84e3cee4..5af4169a 100644 --- a/src/nplinker/genomics/antismash/podp_antismash_downloader.py +++ b/src/nplinker/genomics/antismash/podp_antismash_downloader.py @@ -2,6 +2,7 @@ import json import logging import re +import time import warnings from collections.abc import Mapping from collections.abc import Sequence @@ -11,7 +12,12 @@ from bs4 import BeautifulSoup from jsonschema import validate from nplinker.defaults import GENOME_STATUS_FILENAME -from nplinker.genomics.antismash import download_and_extract_antismash_data +from nplinker.genomics.antismash import antismash_job_is_done +from nplinker.genomics.antismash import download_and_extract_from_antismash_api +from nplinker.genomics.antismash import download_and_extract_from_antismash_db +from nplinker.genomics.antismash import download_and_extract_ncbi_genome +from nplinker.genomics.antismash import extract_antismash_data +from nplinker.genomics.antismash import submit_antismash_job from nplinker.schemas import GENOME_STATUS_SCHEMA @@ -153,59 +159,85 @@ def podp_download_and_extract_antismash_data( gs_dict = GenomeStatus.read_json(gs_file) for i, genome_record in enumerate(genome_records): - # get the best available ID from the dict - genome_id_data = genome_record["genome_ID"] - raw_genome_id = get_best_available_genome_id(genome_id_data) - if raw_genome_id is None or len(raw_genome_id) == 0: - logger.warning(f'Invalid input genome record "{genome_record}"') - continue - - # check if genome ID exist in the genome status file - if raw_genome_id not in gs_dict: - gs_dict[raw_genome_id] = GenomeStatus(raw_genome_id) - - gs_obj = gs_dict[raw_genome_id] - logger.info( - f"Checking for antismash data {i + 1}/{len(genome_records)}, " - f"current genome ID={raw_genome_id}" + f"Getting antismash BGC data for genome record {i + 1} of {len(genome_records)}." ) - # first, check if BGC data is downloaded - if gs_obj.bgc_path and Path(gs_obj.bgc_path).exists(): - logger.info(f"Genome ID {raw_genome_id} already downloaded to {gs_obj.bgc_path}") + + # get the best available genome ID from the dict + original_genome_id = get_best_available_genome_id(genome_record["genome_ID"]) + if not original_genome_id: + logger.warning(f"Skipping invalid genome record: {genome_record}") continue - # second, check if lookup attempted previously - if gs_obj.resolve_attempted: - logger.info(f"Genome ID {raw_genome_id} skipped due to previous failed attempt") + # Retrieve or initialize the GenomeStatus object for the genome ID + gs = gs_dict.setdefault(original_genome_id, GenomeStatus(original_genome_id)) + + # Check if genomes already have antiSMASH BGC data + if gs.bgc_path and Path(gs.bgc_path).exists(): + logger.info( + f"antiSMASH BGC data for genome ID {original_genome_id} already downloaded to " + f"{gs.bgc_path}" + ) + try: + process_existing_antismash_data(gs, project_extract_root) + continue + except Exception as e: + logger.warning( + "Failed to process existing antiSMASH BGC data for genome ID " + f"{original_genome_id}. 
Error: {e}" + ) + gs.bgc_path = "" # Reset bgc path + + # Check if a previous attempt to get bgc data has failed + if gs.resolve_attempted: + logger.info(f"Genome ID {original_genome_id} skipped due to previous failed attempt") continue - # if not downloaded or lookup attempted, then try to resolve the ID - # and download - logger.info(f"Start lookup process for genome ID {raw_genome_id}") - gs_obj.resolved_refseq_id = _resolve_refseq_id(genome_id_data) - gs_obj.resolve_attempted = True - - if gs_obj.resolved_refseq_id == "": - # give up on this one - logger.warning(f"Failed lookup for genome ID {raw_genome_id}") + # resolve genome ID + try: + get_genome_assembly_accession(gs, genome_record["genome_ID"]) + except Exception as e: + logger.warning(f"Failed to resolve genome ID {gs.original_id}. Error: {e}") continue - # if resolved id is valid, try to download and extract antismash data + # retrieve antismash BGC data from antiSMASH-DB try: - download_and_extract_antismash_data( - gs_obj.resolved_refseq_id, project_download_root, project_extract_root + retrieve_antismash_db_data(gs, project_download_root, project_extract_root) + logger.info( + f"antiSMASH BGC data for genome ID {gs.original_id} is downloaded and extracted" ) - - gs_obj.bgc_path = str( - Path(project_download_root, gs_obj.resolved_refseq_id + ".zip").absolute() + continue + except Exception as e: + logger.info( + f"Unable to retrieve BGC data from antiSMASH-DB for genome ID {gs.original_id}. " + f"Error: {e}" ) - output_path = Path(project_extract_root, "antismash", gs_obj.resolved_refseq_id) - if output_path.exists(): - Path.touch(output_path / "completed", exist_ok=True) + # retrieve antismash BGC by submitting antismash job via API + try: + logger.info( + "Downloading genome assembly from NCBI and submitting antiSMASH job for " + f"genome ID {gs.original_id}." + ) + genome_path = download_and_extract_ncbi_genome( + gs.resolved_refseq_id, project_download_root, project_extract_root + ) + job_id = submit_antismash_job(genome_path) + logger.info(f"Waiting for antiSMASH job {job_id} to complete.") + while antismash_job_is_done(job_id) is False: + time.sleep(15) + retrieve_antismash_job_data(job_id, gs, project_download_root, project_extract_root) + logger.info( + f"antiSMASH BGC data for genome ID {gs.original_id} is downloaded and extracted" + ) + continue + except Exception as e: + logger.info( + f"Unable to retrieve BGC data via antiSMASH API for genome ID {gs.original_id}. " + f"Error: {e}" + ) - except Exception: - gs_obj.bgc_path = "" + if gs.bgc_path == "": + logger.warning(f"Failed to retrieve BGC data for genome ID {gs.original_id}.") # raise and log warning for failed downloads failed_ids = [gs.original_id for gs in gs_dict.values() if not gs.bgc_path] @@ -247,6 +279,127 @@ def get_best_available_genome_id(genome_id_data: Mapping[str, str]) -> str | Non return best_id +def get_genome_assembly_accession( + genome_status: GenomeStatus, genome_id_data: Mapping[str, str] +) -> None: + """Resolve and update the genome assembly accession for a given genome status. + + This function attempts to resolve the RefSeq ID for the provided genome record + and updates the `genome_status` object with the resolved ID. It also sets the + `resolve_attempted` flag to `True` to indicate that an attempt to resolve the + RefSeq ID has been made. If the resolution fails, raises a RuntimeError and leaves + the `resolved_refseq_id` empty. 
+
+    Args:
+        genome_status (GenomeStatus): An object representing the status of the genome,
+            which will be updated with the resolved RefSeq ID.
+        genome_id_data (Mapping[str, str]): A dictionary containing genome
+            information, where keys like "RefSeq_accession", "GenBank_accession",
+            or "JGI_Genome_ID" are used to resolve the RefSeq ID.
+
+    Raises:
+        RuntimeError: If the RefSeq ID cannot be resolved.
+    """
+    genome_status.resolved_refseq_id = _resolve_refseq_id(genome_id_data)
+    genome_status.resolve_attempted = True
+
+    if genome_status.resolved_refseq_id == "":
+        raise RuntimeError("Failed to get genome assembly accession")
+
+
+def process_existing_antismash_data(gs_obj: GenomeStatus, extract_root: str | PathLike) -> None:
+    """Processes an already downloaded antiSMASH BGC data archive.
+
+    This function ensures that the antiSMASH data archive associated with a given
+    genome status object is properly extracted into a specified directory. If the
+    data has already been extracted, the function skips the extraction process.
+
+    Args:
+        gs_obj: An object representing the genome's status, which contains the path
+            to the antiSMASH BGC data (accessible via `gs_obj.bgc_path`) and
+            an original identifier (`gs_obj.original_id`).
+        extract_root: The root directory where the antiSMASH data should be extracted.
+
+    Raises:
+        Exception: Any exception raised by the `extract_antismash_data` function if
+            the extraction fails.
+    """
+    antismash_id = Path(gs_obj.bgc_path).stem
+    extract_path = Path(extract_root, "antismash", antismash_id)
+    completed_marker = extract_path / "completed"
+
+    # Check if archive is already successfully extracted
+    if completed_marker.exists():
+        logger.info(
+            f"antiSMASH BGC data for {gs_obj.original_id} already extracted at {extract_path}."
+        )
+        return
+
+    extract_antismash_data(gs_obj.bgc_path, extract_root, antismash_id)
+    completed_marker.touch(exist_ok=True)
+
+
+def retrieve_antismash_db_data(
+    genome_status: GenomeStatus, download_root: str | PathLike, extract_root: str | PathLike
+) -> None:
+    """Retrieve antiSMASH database data for a given genome and update its status.
+
+    This function downloads and extracts antiSMASH data for a genome identified
+    by its resolved RefSeq ID, and updates the `genome_status` object with the
+    path to the downloaded archive.
+
+    Args:
+        genome_status (GenomeStatus): An object representing the genome's status,
+            including its resolved RefSeq ID and BGC path.
+        download_root (str | PathLike): The root directory where the antiSMASH
+            data will be downloaded.
+        extract_root (str | PathLike): The root directory where the antiSMASH
+            data will be extracted.
+
+    Raises:
+        Exception: If an error occurs during the download or extraction process.
+    """
+    antismash_id = genome_status.resolved_refseq_id
+    extract_path = Path(extract_root, "antismash", antismash_id)
+    download_path = Path(download_root, f"{antismash_id}.zip").absolute()
+
+    download_and_extract_from_antismash_db(antismash_id, download_root, extract_root)
+    Path.touch(extract_path / "completed", exist_ok=True)
+    genome_status.bgc_path = str(download_path)
+
+
+def retrieve_antismash_job_data(
+    job_id: str,
+    genome_status: GenomeStatus,
+    download_root: str | PathLike,
+    extract_root: str | PathLike,
+) -> None:
+    """Retrieve antiSMASH API data for a given genome and update its status.
+
+    This function downloads and extracts the results of a finished antiSMASH job
+    for a genome identified by its resolved RefSeq ID, and updates the
+    `genome_status` object with the path to the downloaded archive.
+
+    Args:
+        job_id (str): The job ID for the antiSMASH API job.
+        genome_status (GenomeStatus): An object representing the genome's status,
+            including its resolved RefSeq ID and BGC path.
+        download_root (str | PathLike): The root directory where the antiSMASH
+            data will be downloaded.
+        extract_root (str | PathLike): The root directory where the antiSMASH
+            data will be extracted.
+
+    Raises:
+        Exception: If an error occurs during the download or extraction process.
+    """
+    antismash_id = genome_status.resolved_refseq_id
+    extract_path = Path(extract_root, "antismash", antismash_id)
+    download_path = Path(download_root, f"{antismash_id}.zip").absolute()
+
+    download_and_extract_from_antismash_api(job_id, antismash_id, download_root, extract_root)
+    Path.touch(extract_path / "completed", exist_ok=True)
+    genome_status.bgc_path = str(download_path)
+
+
 def _resolve_genbank_accession(genbank_id: str) -> str:
     """Try to get RefSeq assembly id through given GenBank assembly id.
diff --git a/tests/unit/genomics/test_antismash_downloader.py b/tests/unit/genomics/test_antismash_downloader.py
index 1dfeb4cf..553699ba 100644
--- a/tests/unit/genomics/test_antismash_downloader.py
+++ b/tests/unit/genomics/test_antismash_downloader.py
@@ -1,10 +1,10 @@
 import pytest
-from nplinker.genomics.antismash import download_and_extract_antismash_data
+from nplinker.genomics.antismash import download_and_extract_from_antismash_db
 from nplinker.utils import extract_archive
 from nplinker.utils import list_files
 
 
-class TestDownloadAndExtractAntismashData:
+class TestDownloadAndExtractFromAntismashDb:
     antismash_id = "GCF_004339725.1"
 
     def test_default(self, tmp_path):
@@ -14,7 +14,7 @@ def test_default(self, tmp_path):
         extract_root.mkdir()
         original_extract_root = tmp_path / "original"
         original_extract_root.mkdir()
-        download_and_extract_antismash_data(self.antismash_id, download_root, extract_root)
+        download_and_extract_from_antismash_db(self.antismash_id, download_root, extract_root)
         archive = download_root / "GCF_004339725.1.zip"
         extracted_folder = extract_root / "antismash" / "GCF_004339725.1"
         extracted_files = list_files(extracted_folder, keep_parent=False)
@@ -32,7 +32,9 @@ def test_error_nonempty_path(self, tmp_path):
         nonempty_path = tmp_path / "extracted" / "antismash" / f"{self.antismash_id}" / "subdir"
         nonempty_path.mkdir(parents=True)
         with pytest.raises(ValueError, match="Nonempty directory"):
-            download_and_extract_antismash_data(self.antismash_id, tmp_path, tmp_path / "extracted")
+            download_and_extract_from_antismash_db(
+                self.antismash_id, tmp_path, tmp_path / "extracted"
+            )
 
     # test a non-existent ID, which can be either a fake ID, non-existent in NCBI
     # or a valid NCBI genome ID but it does not have BGC data in antismash database
@@ -44,6 +46,6 @@ def test_nonexisting_id(self, tmp_path):
         extract_root.mkdir()
         for test_id in nonexisting_ids:
             with pytest.raises(RuntimeError):
-                download_and_extract_antismash_data(test_id, download_root, extract_root)
+                download_and_extract_from_antismash_db(test_id, download_root, extract_root)
             extracted_folder = extract_root / "antismash" / test_id
             assert not extracted_folder.exists()
diff --git a/tests/unit/genomics/test_ncbi_downloader.py b/tests/unit/genomics/test_ncbi_downloader.py
new file mode 100644
index 00000000..dadbe9ea
--- /dev/null
+++ b/tests/unit/genomics/test_ncbi_downloader.py
@@ -0,0 +1,43 @@
+from unittest.mock import
patch +import httpx +import pytest +from nplinker.genomics.antismash.ncbi_downloader import download_and_extract_ncbi_genome + + +@pytest.fixture +def download_root(tmp_path): + return tmp_path / "download" + + +@pytest.fixture +def extract_root(tmp_path): + return tmp_path / "extracted" + + +def test_download_and_extract_ncbi_genome_success(download_root, extract_root): + refseq_id = "GCF_000514775.1" + + genome_path = download_and_extract_ncbi_genome(refseq_id, download_root, extract_root) + + assert genome_path == extract_root / "ncbi_genomes" / f"{refseq_id}.gbff" + assert not (extract_root / "ncbi_genomes" / "md5sum.txt").exists() + assert not (extract_root / "ncbi_genomes" / "README.md").exists() + assert not (extract_root / "ncbi_genomes" / "ncbi_dataset").exists() + + +def test_download_and_extract_ncbi_genome_max_retries(download_root, extract_root): + refseq_id = "GCF_000514775.1" + + with patch( + "nplinker.genomics.antismash.ncbi_downloader.download_url", + side_effect=httpx.ReadTimeout("Download failed"), + ): + with pytest.raises(httpx.ReadTimeout, match="Maximum download retries"): + download_and_extract_ncbi_genome(refseq_id, download_root, extract_root, max_attempts=1) + + +def test_download_and_extract_ncbi_genome_invalid_refseq_id(download_root, extract_root): + refseq_id = "invalid_ref_seq_id" + + with pytest.raises(ValueError, match="Not a valid genome assembly accession"): + download_and_extract_ncbi_genome(refseq_id, download_root, extract_root) diff --git a/tests/unit/genomics/test_podp_antismash_downloader.py b/tests/unit/genomics/test_podp_antismash_downloader.py index 7fb9ec0c..302d354d 100644 --- a/tests/unit/genomics/test_podp_antismash_downloader.py +++ b/tests/unit/genomics/test_podp_antismash_downloader.py @@ -218,7 +218,7 @@ def test_caching(download_root, extract_root, genome_status_file, caplog): assert genome_obj.resolve_attempted podp_download_and_extract_antismash_data(genome_records, download_root, extract_root) assert ( - f"Genome ID {genome_obj.original_id} already downloaded to {genome_obj.bgc_path}" + f"antiSMASH BGC data for genome ID {genome_obj.original_id} already downloaded to {genome_obj.bgc_path}" in caplog.text ) assert (